Commit fc896cf

drm/msm: Take lru lock once per submit_pin_objects()
Split out pin_count incrementing and lru updating into a separate loop
so we can take the lru lock only once for all objs.  Since we are still
holding the obj lock, it is safe to split this up.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/551025/
1 parent 6ba5daa commit fc896cf
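
As a rough illustration of the pattern the commit message describes (this is not the driver code; the struct, lock, and function names below are made up), hoisting the shared lock out of the per-object loop means one lock/unlock per batch instead of one per object:

#include <pthread.h>

struct obj { int pin_count; };

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Before: the shared lock is taken and dropped once per object. */
static void pin_all_per_object(struct obj **objs, int n)
{
        for (int i = 0; i < n; i++) {
                /* per-object setup that must not run under lru_lock */
                pthread_mutex_lock(&lru_lock);
                objs[i]->pin_count++;
                pthread_mutex_unlock(&lru_lock);
        }
}

/* After: setup loop first, then one lock/unlock for the whole batch. */
static void pin_all_batched(struct obj **objs, int n)
{
        for (int i = 0; i < n; i++) {
                /* per-object setup that must not run under lru_lock */
        }

        pthread_mutex_lock(&lru_lock);
        for (int i = 0; i < n; i++)
                objs[i]->pin_count++;
        pthread_mutex_unlock(&lru_lock);
}

In the patch below, the first loop is the existing per-bo msm_gem_pin_vma_locked() pass and the second, locked loop does the pin_count/LRU bookkeeping through the new msm_gem_pin_obj_locked() helper; the split is safe because each object's own lock is still held across both steps.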

File tree

3 files changed: +46 -17 lines

drivers/gpu/drm/msm/msm_gem.c

Lines changed: 29 additions & 16 deletions
@@ -222,9 +222,7 @@ static void put_pages(struct drm_gem_object *obj)
 static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
                 unsigned madv)
 {
-        struct msm_drm_private *priv = obj->dev->dev_private;
         struct msm_gem_object *msm_obj = to_msm_bo(obj);
-        struct page **p;
 
         msm_gem_assert_locked(obj);
 
@@ -234,16 +232,29 @@ static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
                 return ERR_PTR(-EBUSY);
         }
 
-        p = get_pages(obj);
-        if (IS_ERR(p))
-                return p;
+        return get_pages(obj);
+}
+
+/*
+ * Update the pin count of the object, call under lru.lock
+ */
+void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
+{
+        struct msm_drm_private *priv = obj->dev->dev_private;
+
+        msm_gem_assert_locked(obj);
+
+        to_msm_bo(obj)->pin_count++;
+        drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
+}
+
+static void pin_obj_locked(struct drm_gem_object *obj)
+{
+        struct msm_drm_private *priv = obj->dev->dev_private;
 
         mutex_lock(&priv->lru.lock);
-        msm_obj->pin_count++;
-        update_lru_locked(obj);
+        msm_gem_pin_obj_locked(obj);
         mutex_unlock(&priv->lru.lock);
-
-        return p;
 }
 
 struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
@@ -252,6 +263,8 @@ struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
 
         msm_gem_lock(obj);
         p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
+        if (!IS_ERR(p))
+                pin_obj_locked(obj);
         msm_gem_unlock(obj);
 
         return p;
@@ -463,7 +476,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
 {
         struct msm_gem_object *msm_obj = to_msm_bo(obj);
         struct page **pages;
-        int ret, prot = IOMMU_READ;
+        int prot = IOMMU_READ;
 
         if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
                 prot |= IOMMU_WRITE;
@@ -480,11 +493,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
         if (IS_ERR(pages))
                 return PTR_ERR(pages);
 
-        ret = msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
-        if (ret)
-                msm_gem_unpin_locked(obj);
-
-        return ret;
+        return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
 }
 
 void msm_gem_unpin_locked(struct drm_gem_object *obj)
@@ -536,8 +545,10 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
                 return PTR_ERR(vma);
 
         ret = msm_gem_pin_vma_locked(obj, vma);
-        if (!ret)
+        if (!ret) {
                 *iova = vma->iova;
+                pin_obj_locked(obj);
+        }
 
         return ret;
 }
@@ -700,6 +711,8 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
         if (IS_ERR(pages))
                 return ERR_CAST(pages);
 
+        pin_obj_locked(obj);
+
         /* increment vmap_count *before* vmap() call, so shrinker can
          * check vmap_count (is_vunmapable()) outside of msm_obj lock.
          * This guarantees that we won't try to msm_gem_vunmap() this

drivers/gpu/drm/msm/msm_gem.h

Lines changed: 1 addition & 0 deletions
@@ -142,6 +142,7 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
                 struct msm_gem_address_space *aspace, uint64_t *iova);
 void msm_gem_unpin_iova(struct drm_gem_object *obj,
                 struct msm_gem_address_space *aspace);
+void msm_gem_pin_obj_locked(struct drm_gem_object *obj);
 struct page **msm_gem_pin_pages(struct drm_gem_object *obj);
 void msm_gem_unpin_pages(struct drm_gem_object *obj);
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,

drivers/gpu/drm/msm/msm_gem_submit.c

Lines changed: 16 additions & 1 deletion
@@ -384,6 +384,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 
 static int submit_pin_objects(struct msm_gem_submit *submit)
 {
+        struct msm_drm_private *priv = submit->dev->dev_private;
         int i, ret = 0;
 
         submit->valid = true;
@@ -403,7 +404,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
                 if (ret)
                         break;
 
-                submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED;
+                submit->bos[i].flags |= BO_VMA_PINNED;
                 submit->bos[i].vma = vma;
 
                 if (vma->iova == submit->bos[i].iova) {
@@ -416,6 +417,20 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
                 }
         }
 
+        /*
+         * A second loop while holding the LRU lock (a) avoids acquiring/dropping
+         * the LRU lock for each individual bo, while (b) avoiding holding the
+         * LRU lock while calling msm_gem_pin_vma_locked() (which could trigger
+         * get_pages() which could trigger reclaim.. and if we held the LRU lock
+         * could trigger deadlock with the shrinker).
+         */
+        mutex_lock(&priv->lru.lock);
+        for (i = 0; i < submit->nr_bos; i++) {
+                msm_gem_pin_obj_locked(submit->bos[i].obj);
+                submit->bos[i].flags |= BO_OBJ_PINNED;
+        }
+        mutex_unlock(&priv->lru.lock);
+
         return ret;
 }
 
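For context on the comment added in submit_pin_objects() above: the LRU lock is also taken from the shrinker side, which can be entered from memory reclaim, so any work that may allocate (such as get_pages()) has to stay outside of it. A minimal sketch of that ordering constraint, with made-up names and a pthread mutex standing in for the kernel mutex:

#include <pthread.h>

struct obj { int pin_count; };

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Runs from memory reclaim; needs lru_lock to walk the LRU lists. */
static void shrinker_scan(void)
{
        pthread_mutex_lock(&lru_lock);
        /* ... evict idle buffers ... */
        pthread_mutex_unlock(&lru_lock);
}

/* Stand-in for get_pages(): an allocation here can enter direct
 * reclaim, which calls shrinker_scan() on this same thread. */
static void allocate_backing_pages(struct obj *o)
{
        (void)o;
        shrinker_scan();        /* models reclaim re-entering the shrinker */
}

/* Broken ordering: if the allocation triggers reclaim, shrinker_scan()
 * blocks on lru_lock already held by this thread -> deadlock. */
static void pin_object_broken(struct obj *o)
{
        pthread_mutex_lock(&lru_lock);
        allocate_backing_pages(o);
        o->pin_count++;
        pthread_mutex_unlock(&lru_lock);
}

The patch avoids this by keeping the allocation-prone pinning in the first, unlocked loop and taking priv->lru.lock only around the cheap pin_count/LRU updates in the second loop.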