@@ -222,9 +222,7 @@ static void put_pages(struct drm_gem_object *obj)
 static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
					       unsigned madv)
 {
-	struct msm_drm_private *priv = obj->dev->dev_private;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct page **p;
 
 	msm_gem_assert_locked(obj);
 
@@ -234,16 +232,29 @@ static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
 		return ERR_PTR(-EBUSY);
 	}
 
-	p = get_pages(obj);
-	if (IS_ERR(p))
-		return p;
+	return get_pages(obj);
+}
+
+/*
+ * Update the pin count of the object, call under lru.lock
+ */
+void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
+{
+	struct msm_drm_private *priv = obj->dev->dev_private;
+
+	msm_gem_assert_locked(obj);
+
+	to_msm_bo(obj)->pin_count++;
+	drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
+}
+
+static void pin_obj_locked(struct drm_gem_object *obj)
+{
+	struct msm_drm_private *priv = obj->dev->dev_private;
 
 	mutex_lock(&priv->lru.lock);
-	msm_obj->pin_count++;
-	update_lru_locked(obj);
+	msm_gem_pin_obj_locked(obj);
 	mutex_unlock(&priv->lru.lock);
-
-	return p;
 }
 
 struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
@@ -252,6 +263,8 @@ struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
 
 	msm_gem_lock(obj);
 	p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
+	if (!IS_ERR(p))
+		pin_obj_locked(obj);
 	msm_gem_unlock(obj);
 
 	return p;
@@ -463,7 +476,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct page **pages;
-	int ret, prot = IOMMU_READ;
+	int prot = IOMMU_READ;
 
 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
 		prot |= IOMMU_WRITE;
@@ -480,11 +493,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
-	ret = msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
-	if (ret)
-		msm_gem_unpin_locked(obj);
-
-	return ret;
+	return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
 }
 
 void msm_gem_unpin_locked(struct drm_gem_object *obj)
@@ -536,8 +545,10 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
 		return PTR_ERR(vma);
 
 	ret = msm_gem_pin_vma_locked(obj, vma);
-	if (!ret)
+	if (!ret) {
 		*iova = vma->iova;
+		pin_obj_locked(obj);
+	}
 
 	return ret;
 }
@@ -700,6 +711,8 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 	if (IS_ERR(pages))
 		return ERR_CAST(pages);
 
+	pin_obj_locked(obj);
+
 	/* increment vmap_count *before* vmap() call, so shrinker can
 	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
 	 * This guarantees that we won't try to msm_gem_vunmap() this
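For orientation (a sketch, not part of the patch): after this split, msm_gem_pin_pages_locked() only resolves the backing pages, while the pin_count/LRU bookkeeping moves into pin_obj_locked(), which takes lru.lock around msm_gem_pin_obj_locked(). A minimal, hypothetical in-file caller following that pattern could look like the code below; example_with_pinned_pages() and its error handling are assumptions for illustration, and only the msm_gem_* helpers and pin_obj_locked() come from this diff.

/* Hypothetical example, not from the patch: pin the backing pages,
 * do the pin accounting, use the pages, then balance the pin again.
 * This would have to live in msm_gem.c, since pin_obj_locked() is static.
 */
static int example_with_pinned_pages(struct drm_gem_object *obj)
{
	struct page **p;
	int ret = 0;

	msm_gem_lock(obj);

	/* Only resolves the pages now; no pin accounting happens here. */
	p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
	if (IS_ERR(p)) {
		ret = PTR_ERR(p);
		goto out_unlock;
	}

	/* Takes lru.lock and bumps pin_count via msm_gem_pin_obj_locked(). */
	pin_obj_locked(obj);

	/* ... access p[] while the object stays pinned ... */

	/* Drop the pin taken above. */
	msm_gem_unpin_locked(obj);

out_unlock:
	msm_gem_unlock(obj);
	return ret;
}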