@@ -31,6 +31,16 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
 		 (bool)bo->base.base.import_attach);
 }
 
+static inline int ivpu_bo_lock(struct ivpu_bo *bo)
+{
+	return dma_resv_lock(bo->base.base.resv, NULL);
+}
+
+static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
+{
+	dma_resv_unlock(bo->base.base.resv);
+}
+
 /*
  * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
  *
@@ -41,22 +51,22 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
 int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
 {
 	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+	struct sg_table *sgt;
 	int ret = 0;
 
-	mutex_lock(&bo->lock);
-
 	ivpu_dbg_bo(vdev, bo, "pin");
-	drm_WARN_ON(&vdev->drm, !bo->ctx);
 
-	if (!bo->mmu_mapped) {
-		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+	sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+	if (IS_ERR(sgt)) {
+		ret = PTR_ERR(sgt);
+		ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
+		return ret;
+	}
 
-		if (IS_ERR(sgt)) {
-			ret = PTR_ERR(sgt);
-			ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
-			goto unlock;
-		}
+	ivpu_bo_lock(bo);
 
+	if (!bo->mmu_mapped) {
+		drm_WARN_ON(&vdev->drm, !bo->ctx);
 		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
 					       ivpu_bo_is_snooped(bo));
 		if (ret) {
@@ -67,7 +77,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
 	}
 
 unlock:
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 
 	return ret;
 }
@@ -82,7 +92,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
 	if (!drm_dev_enter(&vdev->drm, &idx))
 		return -ENODEV;
 
-	mutex_lock(&bo->lock);
+	ivpu_bo_lock(bo);
 
 	ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
 	if (!ret) {
@@ -92,7 +102,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
 		ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
 	}
 
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 
 	drm_dev_exit(idx);
 
@@ -103,7 +113,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
 {
 	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
 
-	lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
+	lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount));
 
 	if (bo->mmu_mapped) {
 		drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -121,14 +131,12 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
 	if (bo->base.base.import_attach)
 		return;
 
-	dma_resv_lock(bo->base.base.resv, NULL);
 	if (bo->base.sgt) {
 		dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
 		sg_free_table(bo->base.sgt);
 		kfree(bo->base.sgt);
 		bo->base.sgt = NULL;
 	}
-	dma_resv_unlock(bo->base.base.resv);
 }
 
 void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
@@ -140,12 +148,12 @@ void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
 
 	mutex_lock(&vdev->bo_list_lock);
 	list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
-		mutex_lock(&bo->lock);
+		ivpu_bo_lock(bo);
 		if (bo->ctx == ctx) {
 			ivpu_dbg_bo(vdev, bo, "unbind");
 			ivpu_bo_unbind_locked(bo);
 		}
-		mutex_unlock(&bo->lock);
+		ivpu_bo_unlock(bo);
 	}
 	mutex_unlock(&vdev->bo_list_lock);
 }
@@ -165,7 +173,6 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
 	bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */
 
 	INIT_LIST_HEAD(&bo->bo_list_node);
-	mutex_init(&bo->lock);
 
 	return &bo->base.base;
 }
@@ -243,8 +250,6 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
 	drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
 	drm_WARN_ON(&vdev->drm, bo->ctx);
 
-	mutex_destroy(&bo->lock);
-
 	drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
 	drm_gem_shmem_free(&bo->base);
 }
@@ -327,9 +332,9 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 		goto err_put;
 
 	if (flags & DRM_IVPU_BO_MAPPABLE) {
-		dma_resv_lock(bo->base.base.resv, NULL);
+		ivpu_bo_lock(bo);
 		ret = drm_gem_shmem_vmap(&bo->base, &map);
-		dma_resv_unlock(bo->base.base.resv);
+		ivpu_bo_unlock(bo);
 
 		if (ret)
 			goto err_put;
@@ -352,9 +357,9 @@ void ivpu_bo_free(struct ivpu_bo *bo)
 	struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
 
 	if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
-		dma_resv_lock(bo->base.base.resv, NULL);
+		ivpu_bo_lock(bo);
 		drm_gem_shmem_vunmap(&bo->base, &map);
-		dma_resv_unlock(bo->base.base.resv);
+		ivpu_bo_unlock(bo);
 	}
 
 	drm_gem_object_put(&bo->base.base);
@@ -373,12 +378,12 @@ int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
 	bo = to_ivpu_bo(obj);
 
-	mutex_lock(&bo->lock);
+	ivpu_bo_lock(bo);
 	args->flags = bo->flags;
 	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
 	args->vpu_addr = bo->vpu_addr;
 	args->size = obj->size;
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 
 	drm_gem_object_put(obj);
 	return ret;
@@ -412,7 +417,7 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
 static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
 {
-	mutex_lock(&bo->lock);
+	ivpu_bo_lock(bo);
 
 	drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
 		   bo, bo->ctx_id, bo->vpu_addr, bo->base.base.size,
@@ -429,7 +434,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
 
 	drm_printf(p, "\n");
 
-	mutex_unlock(&bo->lock);
+	ivpu_bo_unlock(bo);
 }
 
 void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
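
For reference, a minimal sketch of the locking pattern this patch converges on, assuming the ivpu_bo_lock()/ivpu_bo_unlock() helpers added above; the caller names example_bo_update() and example_bo_touch_locked() are hypothetical and not part of the patch. With a NULL ww_acquire_ctx, dma_resv_lock() behaves like a plain mutex_lock() and always returns 0 (it can only fail with -EDEADLK when a ww context is passed), which is why the converted call sites ignore the helper's return value.

#include <linux/dma-resv.h>
#include <linux/lockdep.h>
#include "ivpu_gem.h"

/* Hypothetical caller, not from the patch: serialize access to fields
 * formerly guarded by the private bo->lock mutex. */
static void example_bo_update(struct ivpu_bo *bo, u64 new_vpu_addr)
{
	/* NULL ww ctx: cannot return -EDEADLK, safe to ignore the result. */
	ivpu_bo_lock(bo);
	bo->vpu_addr = new_vpu_addr;	/* now protected by the resv lock */
	ivpu_bo_unlock(bo);
}

/* Functions that expect the lock to already be held can assert it,
 * mirroring the updated lockdep_assert() in ivpu_bo_unbind_locked(). */
static void example_bo_touch_locked(struct ivpu_bo *bo)
{
	lockdep_assert(dma_resv_held(bo->base.base.resv));
	/* ... mutate bo state here ... */
}

Reusing the GEM object's reservation lock also removes the nesting of the dma_resv lock inside bo->lock that the old ivpu_bo_unbind_locked() required, leaving a single lock order to reason about.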