  * Author: Lars-Peter Clausen <lars@metafoo.de>
  */
 
+#include <linux/atomic.h>
+#include <linux/cleanup.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -15,6 +17,8 @@
 #include <linux/iio/buffer_impl.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/buffer-dma.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-fence.h>
 #include <linux/dma-mapping.h>
 #include <linux/sizes.h>
 
@@ -100,17 +104,22 @@ static void iio_buffer_block_release(struct kref *kref)
 {
 	struct iio_dma_buffer_block *block = container_of(kref,
 		struct iio_dma_buffer_block, kref);
+	struct iio_dma_buffer_queue *queue = block->queue;
 
-	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);
+	WARN_ON(block->fileio && block->state != IIO_BLOCK_STATE_DEAD);
 
 #ifdef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
 	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->block.size),
 			  block->vaddr, block->phys_addr);
 #else
-	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
-			  block->vaddr, block->phys_addr);
+	if (block->fileio) {
+		dma_free_coherent(queue->dev, PAGE_ALIGN(block->size),
+				  block->vaddr, block->phys_addr);
+	} else {
+		atomic_dec(&queue->num_dmabufs);
+	}
 #endif
-	iio_buffer_put(&block->queue->buffer);
+	iio_buffer_put(&queue->buffer);
 	kfree(block);
 }
 
@@ -173,25 +182,28 @@ static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
 }
 
 static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
-	struct iio_dma_buffer_queue *queue, size_t size)
+	struct iio_dma_buffer_queue *queue, size_t size, bool fileio)
 {
 	struct iio_dma_buffer_block *block;
 
 	block = kzalloc(sizeof(*block), GFP_KERNEL);
 	if (!block)
 		return NULL;
 
-	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
-		&block->phys_addr, GFP_KERNEL);
-	if (!block->vaddr) {
-		kfree(block);
-		return NULL;
+	if (fileio) {
+		block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
+						  &block->phys_addr, GFP_KERNEL);
+		if (!block->vaddr) {
+			kfree(block);
+			return NULL;
+		}
 	}
 
 #ifdef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
 	block->block.size = size;
 	block->state = IIO_BLOCK_STATE_DEQUEUED;
 #else
+	block->fileio = fileio;
 	block->size = size;
 	block->state = IIO_BLOCK_STATE_DONE;
 #endif
@@ -201,6 +213,9 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
 
 	iio_buffer_get(&queue->buffer);
 
+	if (!fileio)
+		atomic_inc(&queue->num_dmabufs);
+
 	return block;
 }
 
@@ -239,13 +254,23 @@ void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
 {
 	struct iio_dma_buffer_queue *queue = block->queue;
 	unsigned long flags;
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+	bool cookie;
 
+	cookie = dma_fence_begin_signalling();
+#endif
 	spin_lock_irqsave(&queue->list_lock, flags);
 	_iio_dma_buffer_block_done(block);
 	spin_unlock_irqrestore(&queue->list_lock, flags);
-
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+	if (!block->fileio)
+		iio_buffer_signal_dmabuf_done(block->fence, 0);
+#endif
 	iio_buffer_block_put_atomic(block);
 	iio_dma_buffer_queue_wake(queue);
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+	dma_fence_end_signalling(cookie);
+#endif
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
 
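For context, a DMA controller driver's completion handler keeps calling iio_dma_buffer_block_done() exactly as before; the DMABUF fence signalling for non-fileio blocks now happens inside the core. A minimal sketch of such a callback (hypothetical driver code, not part of this patch, assuming the non-legacy block layout with its plain ->size field):

static void example_dma_transfer_done(void *data)
{
	struct iio_dma_buffer_block *block = data;

	/* Report how much data the hardware actually produced. */
	block->bytes_used = block->size;

	/*
	 * Marks the block done and wakes readers; for non-fileio blocks this
	 * also signals the attached DMABUF fence via
	 * iio_buffer_signal_dmabuf_done().
	 */
	iio_dma_buffer_block_done(block);
}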
@@ -264,7 +289,11 @@ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
 {
 	struct iio_dma_buffer_block *block, *_block;
 	unsigned long flags;
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+	bool cookie;
 
+	cookie = dma_fence_begin_signalling();
+#endif
 	spin_lock_irqsave(&queue->list_lock, flags);
 	list_for_each_entry_safe(block, _block, list, head) {
 		list_del(&block->head);
@@ -274,11 +303,21 @@ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
 		block->bytes_used = 0;
 #endif
 		_iio_dma_buffer_block_done(block);
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+		if (!block->fileio)
+			iio_buffer_signal_dmabuf_done(block->fence, -EINTR);
+#endif
 		iio_buffer_block_put_atomic(block);
 	}
 	spin_unlock_irqrestore(&queue->list_lock, flags);
 
+	if (queue->fileio.enabled)
+		queue->fileio.enabled = false;
+
 	iio_dma_buffer_queue_wake(queue);
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+	dma_fence_end_signalling(cookie);
+#endif
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
 
@@ -298,6 +337,18 @@ static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
 	}
 }
 
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+static bool iio_dma_buffer_can_use_fileio(struct iio_dma_buffer_queue *queue)
+{
+	/*
+	 * Note that queue->num_dmabufs cannot increase while the queue is
+	 * locked, it can only decrease, so it does not race against
+	 * iio_dma_buffer_alloc_block().
+	 */
+	return queue->fileio.enabled || !atomic_read(&queue->num_dmabufs);
+}
+#endif
+
 /**
  * iio_dma_buffer_request_update() - DMA buffer request_update callback
  * @buffer: The buffer which to request an update
@@ -327,8 +378,13 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
 #ifdef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
 	if (queue->num_blocks)
 		goto out_unlock;
-#endif
+#else
+	queue->fileio.enabled = iio_dma_buffer_can_use_fileio(queue);
 
+	/* If DMABUFs were created, disable fileio interface */
+	if (!queue->fileio.enabled)
+		goto out_unlock;
+#endif
 	/* Allocations are page aligned */
 	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
 		try_reuse = true;
@@ -369,7 +425,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
 	}
 
 	if (!block) {
-		block = iio_dma_buffer_alloc_block(queue, size);
+		block = iio_dma_buffer_alloc_block(queue, size, true);
 		if (!block) {
 			ret = -ENOMEM;
 			goto out_unlock;
@@ -442,8 +498,14 @@ static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
 
 	block->state = IIO_BLOCK_STATE_ACTIVE;
 	iio_buffer_block_get(block);
+
 	ret = queue->ops->submit(queue, block);
 	if (ret) {
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+		if (!block->fileio)
+			iio_buffer_signal_dmabuf_done(block->fence, ret);
+#endif
+
 		/*
 		 * This is a bit of a problem and there is not much we can do
 		 * other then wait for the buffer to be disabled and re-enabled
@@ -459,13 +521,40 @@ static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
 }
 
 #ifdef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+static struct iio_dma_buffer_block
+*iio_dma_buffer_mmap_alloc_block(struct iio_dma_buffer_queue *queue, size_t size)
+{
+	struct iio_dma_buffer_block *block;
+
+	block = kzalloc(sizeof(*block), GFP_KERNEL);
+	if (!block)
+		return NULL;
+
+	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
+					  &block->phys_addr, GFP_KERNEL);
+	if (!block->vaddr) {
+		kfree(block);
+		return NULL;
+	}
+
+	block->block.size = size;
+	block->state = IIO_BLOCK_STATE_DEQUEUED;
+	block->queue = queue;
+	INIT_LIST_HEAD(&block->head);
+	kref_init(&block->kref);
+
+	iio_buffer_get(&queue->buffer);
+
+	return block;
+}
+
 static int iio_dma_buffer_fileio_alloc(struct iio_dma_buffer_queue *queue,
 				       struct iio_dev *indio_dev)
 {
 	size_t size = queue->buffer.bytes_per_datum * queue->buffer.length;
 	struct iio_dma_buffer_block *block;
 
-	block = iio_dma_buffer_alloc_block(queue, size);
+	block = iio_dma_buffer_mmap_alloc_block(queue, size);
 	if (!block)
 		return -ENOMEM;
 
@@ -761,7 +850,7 @@ int iio_dma_buffer_alloc_blocks(struct iio_buffer *buffer,
 	}
 
 	for (i = queue->num_blocks; i < num_blocks; i++) {
-		blocks[i] = iio_dma_buffer_alloc_block(queue, req->size);
+		blocks[i] = iio_dma_buffer_mmap_alloc_block(queue, req->size);
 		if (!blocks[i])
 			break;
 		blocks[i]->block.id = i;
@@ -809,7 +898,6 @@ int iio_dma_buffer_free_blocks(struct iio_buffer *buffer)
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_free_blocks);
 
-
 int iio_dma_buffer_query_block(struct iio_buffer *buffer,
 			       struct iio_buffer_block *block)
 {
@@ -912,7 +1000,6 @@ int iio_dma_buffer_dequeue_block(struct iio_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_dequeue_block);
 
-
 static void iio_dma_buffer_mmap_open(struct vm_area_struct *area)
 {
 	struct iio_dma_buffer_block *block = area->vm_private_data;
@@ -965,6 +1052,110 @@ int iio_dma_buffer_mmap(struct iio_buffer *buffer,
 			       block->phys_addr, vma->vm_end - vma->vm_start);
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_mmap);
+#else
+struct iio_dma_buffer_block *
+iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
+			     struct dma_buf_attachment *attach)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+	struct iio_dma_buffer_block *block;
+
+	guard(mutex)(&queue->lock);
+
+	/*
+	 * If the buffer is enabled and in fileio mode new blocks can't be
+	 * allocated.
+	 */
+	if (queue->fileio.enabled)
+		return ERR_PTR(-EBUSY);
+
+	block = iio_dma_buffer_alloc_block(queue, attach->dmabuf->size, false);
+	if (!block)
+		return ERR_PTR(-ENOMEM);
+
+	/* Free memory that might be in use for fileio mode */
+	iio_dma_buffer_fileio_free(queue);
+
+	return block;
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_attach_dmabuf);
+
+void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
+				  struct iio_dma_buffer_block *block)
+{
+	block->state = IIO_BLOCK_STATE_DEAD;
+	iio_buffer_block_put_atomic(block);
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_detach_dmabuf);
+
+static int iio_dma_can_enqueue_block(struct iio_dma_buffer_block *block)
+{
+	struct iio_dma_buffer_queue *queue = block->queue;
+
+	/* If in fileio mode buffers can't be enqueued. */
+	if (queue->fileio.enabled)
+		return -EBUSY;
+
+	switch (block->state) {
+	case IIO_BLOCK_STATE_QUEUED:
+		return -EPERM;
+	case IIO_BLOCK_STATE_ACTIVE:
+	case IIO_BLOCK_STATE_DEAD:
+		return -EBUSY;
+	case IIO_BLOCK_STATE_DONE:
+		break;
+	}
+
+	return 0;
+}
+
+int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
+				  struct iio_dma_buffer_block *block,
+				  struct dma_fence *fence,
+				  struct sg_table *sgt,
+				  size_t size, bool cyclic)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+	bool cookie;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&queue->lock));
+
+	cookie = dma_fence_begin_signalling();
+
+	ret = iio_dma_can_enqueue_block(block);
+	if (ret < 0)
+		goto out_end_signalling;
+
+	block->bytes_used = size;
+	block->cyclic = cyclic;
+	block->sg_table = sgt;
+	block->fence = fence;
+
+	iio_dma_buffer_enqueue(queue, block);
+
+out_end_signalling:
+	dma_fence_end_signalling(cookie);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_enqueue_dmabuf);
+
+void iio_dma_buffer_lock_queue(struct iio_buffer *buffer)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+
+	mutex_lock(&queue->lock);
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_lock_queue);
+
+void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+
+	mutex_unlock(&queue->lock);
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_unlock_queue);
 #endif
 
 /**
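The new exports are meant to be wired into a DMA buffer implementation's iio_buffer_access_funcs. A sketch of that hook-up, using the callback names of the mainline IIO DMABUF support (the existing fileio and enable/disable callbacks are elided here, and the exact ops structure in this tree may differ):

static const struct iio_buffer_access_funcs example_dma_buffer_ops = {
	/* ... existing read/write/enable/disable callbacks go here ... */
	.attach_dmabuf = iio_dma_buffer_attach_dmabuf,
	.detach_dmabuf = iio_dma_buffer_detach_dmabuf,
	.enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
	.lock_queue = iio_dma_buffer_lock_queue,
	.unlock_queue = iio_dma_buffer_unlock_queue,
};

In the mainline implementation the core-side DMABUF ioctls call these hooks; the buffer implementation itself only supplies DMA submission through its existing submit() callback.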