Commit d2551a6

jlawryno authored and gregkh committed
accel/ivpu: Use dma_resv_lock() instead of a custom mutex
commit 98d3f77 upstream.

This fixes potential race conditions in:
  - ivpu_bo_unbind_locked(), where we modified the shmem->sgt without
    holding the dma_resv_lock().
  - ivpu_bo_print_info(), where we read the shmem->pages without
    holding the dma_resv_lock().

Using dma_resv_lock() also protects against future synchronisation
issues that may arise when accessing drm_gem_shmem_object or
drm_gem_object members.

Fixes: 4232800 ("accel/ivpu: Refactor BO creation functions")
Cc: stable@vger.kernel.org # v6.9+
Reviewed-by: Lizhi Hou <lizhi.hou@amd.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Link: https://lore.kernel.org/r/20250528154325.500684-1-jacek.lawrynowicz@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
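For orientation before the diff: the patch replaces the driver-private
bo->lock mutex with two thin wrappers around the reservation object that
every drm_gem_object already carries. Below is an annotated copy of the
helpers added to ivpu_gem.c (the comments are editorial, not part of the
patch); bo->base is the drm_gem_shmem_object and bo->base.base its
embedded drm_gem_object:

	#include <linux/dma-resv.h>

	static inline int ivpu_bo_lock(struct ivpu_bo *bo)
	{
		/* NULL ww_acquire_ctx: a plain sleeping lock; no
		 * wait/wound deadlock-avoidance chain is needed here. */
		return dma_resv_lock(bo->base.base.resv, NULL);
	}

	static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
	{
		dma_resv_unlock(bo->base.base.resv);
	}

Because the shmem helpers synchronise on this same reservation lock,
taking it here closes the windows where shmem->sgt was modified and
shmem->pages was read without it.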
1 parent 954b190 commit d2551a6

File tree

2 files changed: +34 -30 lines changed

drivers/accel/ivpu/ivpu_gem.c
drivers/accel/ivpu/ivpu_gem.h

drivers/accel/ivpu/ivpu_gem.c

Lines changed: 34 additions & 29 deletions

@@ -31,6 +31,16 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
 		 (bool)bo->base.base.import_attach);
 }
 
+static inline int ivpu_bo_lock(struct ivpu_bo *bo)
+{
+	return dma_resv_lock(bo->base.base.resv, NULL);
+}
+
+static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
+{
+	dma_resv_unlock(bo->base.base.resv);
+}
+
 /*
  * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
  *
@@ -41,22 +51,22 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
 int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
 {
 	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+	struct sg_table *sgt;
 	int ret = 0;
 
-	mutex_lock(&bo->lock);
-
 	ivpu_dbg_bo(vdev, bo, "pin");
-	drm_WARN_ON(&vdev->drm, !bo->ctx);
 
-	if (!bo->mmu_mapped) {
-		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+	sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+	if (IS_ERR(sgt)) {
+		ret = PTR_ERR(sgt);
+		ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
+		return ret;
+	}
 
-		if (IS_ERR(sgt)) {
-			ret = PTR_ERR(sgt);
-			ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
-			goto unlock;
-		}
+	ivpu_bo_lock(bo);
 
+	if (!bo->mmu_mapped) {
+		drm_WARN_ON(&vdev->drm, !bo->ctx);
 		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
 					       ivpu_bo_is_snooped(bo));
 		if (ret) {
@@ -67,7 +77,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
 	}
 
 unlock:
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 
 	return ret;
 }
@@ -82,7 +92,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
 	if (!drm_dev_enter(&vdev->drm, &idx))
 		return -ENODEV;
 
-	mutex_lock(&bo->lock);
+	ivpu_bo_lock(bo);
 
 	ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
 	if (!ret) {
@@ -92,7 +102,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
 		ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
 	}
 
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 
 	drm_dev_exit(idx);
 
@@ -103,7 +113,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
 {
 	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
 
-	lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
+	lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount));
 
 	if (bo->mmu_mapped) {
 		drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -121,14 +131,12 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
 	if (bo->base.base.import_attach)
 		return;
 
-	dma_resv_lock(bo->base.base.resv, NULL);
 	if (bo->base.sgt) {
 		dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
 		sg_free_table(bo->base.sgt);
 		kfree(bo->base.sgt);
 		bo->base.sgt = NULL;
 	}
-	dma_resv_unlock(bo->base.base.resv);
 }
 
 void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
@@ -140,12 +148,12 @@ void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
 
 	mutex_lock(&vdev->bo_list_lock);
 	list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
-		mutex_lock(&bo->lock);
+		ivpu_bo_lock(bo);
 		if (bo->ctx == ctx) {
 			ivpu_dbg_bo(vdev, bo, "unbind");
 			ivpu_bo_unbind_locked(bo);
 		}
-		mutex_unlock(&bo->lock);
+		ivpu_bo_unlock(bo);
 	}
 	mutex_unlock(&vdev->bo_list_lock);
 }
@@ -165,7 +173,6 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
 	bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */
 
 	INIT_LIST_HEAD(&bo->bo_list_node);
-	mutex_init(&bo->lock);
 
 	return &bo->base.base;
 }
@@ -243,8 +250,6 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
 	drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
 	drm_WARN_ON(&vdev->drm, bo->ctx);
 
-	mutex_destroy(&bo->lock);
-
 	drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
 	drm_gem_shmem_free(&bo->base);
 }
@@ -327,9 +332,9 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 		goto err_put;
 
 	if (flags & DRM_IVPU_BO_MAPPABLE) {
-		dma_resv_lock(bo->base.base.resv, NULL);
+		ivpu_bo_lock(bo);
 		ret = drm_gem_shmem_vmap(&bo->base, &map);
-		dma_resv_unlock(bo->base.base.resv);
+		ivpu_bo_unlock(bo);
 
 		if (ret)
 			goto err_put;
@@ -352,9 +357,9 @@ void ivpu_bo_free(struct ivpu_bo *bo)
 	struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
 
 	if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
-		dma_resv_lock(bo->base.base.resv, NULL);
+		ivpu_bo_lock(bo);
 		drm_gem_shmem_vunmap(&bo->base, &map);
-		dma_resv_unlock(bo->base.base.resv);
+		ivpu_bo_unlock(bo);
 	}
 
 	drm_gem_object_put(&bo->base.base);
@@ -373,12 +378,12 @@ int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
 	bo = to_ivpu_bo(obj);
 
-	mutex_lock(&bo->lock);
+	ivpu_bo_lock(bo);
 	args->flags = bo->flags;
 	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
 	args->vpu_addr = bo->vpu_addr;
 	args->size = obj->size;
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 
 	drm_gem_object_put(obj);
 	return ret;
@@ -412,7 +417,7 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
 static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
 {
-	mutex_lock(&bo->lock);
+	ivpu_bo_lock(bo);
 
 	drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
 		   bo, bo->ctx_id, bo->vpu_addr, bo->base.base.size,
@@ -429,7 +434,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
 
 	drm_printf(p, "\n");
 
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 }
 
 void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
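Two details of the ivpu_gem.c hunks above are worth calling out. First,
ivpu_bo_unbind_locked() can now state its locking contract directly via
dma_resv_held() rather than lockdep_is_held() on the old private mutex.
Second, ivpu_bo_pin() is reordered so the sg_table is obtained before the
BO lock is taken: drm_gem_shmem_get_pages_sgt() acquires the reservation
lock internally, so calling it under ivpu_bo_lock() would deadlock on the
same dma_resv. A condensed, illustrative view of the new pin path (error
logging and mmu_mapped bookkeeping elided):

	sgt = drm_gem_shmem_get_pages_sgt(&bo->base); /* locks/unlocks resv itself */
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	ivpu_bo_lock(bo); /* resv is free again, safe to take */
	if (!bo->mmu_mapped)
		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
					       ivpu_bo_is_snooped(bo));
	ivpu_bo_unlock(bo);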

drivers/accel/ivpu/ivpu_gem.h

Lines changed: 0 additions & 1 deletion

@@ -17,7 +17,6 @@ struct ivpu_bo {
 	struct list_head bo_list_node;
 	struct drm_mm_node mm_node;
 
-	struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */
 	u64 vpu_addr;
 	u32 flags;
 	u32 job_status; /* Valid only for command buffer */
