
Commit 1df339e

pcercuei authored and nunojsa committed
iio: buffer-dma: Enable support for DMABUFs
Implement iio_dma_buffer_attach_dmabuf(), iio_dma_buffer_detach_dmabuf()
and iio_dma_buffer_transfer_dmabuf(), which can then be used by the IIO
DMA buffer implementations.

Signed-off-by: Paul Cercueil <paul@crapouillou.net>
Co-developed-by: Nuno Sa <nuno.sa@analog.com>
Signed-off-by: Nuno Sa <nuno.sa@analog.com>
Link: https://patch.msgid.link/20240620122726.41232-5-paul@crapouillou.net
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
1 parent bfd65c7 commit 1df339e
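Note: the helpers exported by this patch are meant to be plugged into a DMA buffer implementation's iio_buffer_access_funcs. The sketch below is illustrative only and not part of this commit; the callback field names (attach_dmabuf, detach_dmabuf, enqueue_dmabuf, lock_queue, unlock_queue) are assumed from the companion buffer-core patches in this series, and the fileio callbacks are elided.

/*
 * Illustrative sketch only -- not part of this commit. The DMABUF callback
 * field names are assumed from the companion IIO buffer core patches.
 */
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>

static const struct iio_buffer_access_funcs example_dma_buffer_ops = {
	/* ... usual fileio callbacks (read, request_update, enable, ...) ... */

	/* DMABUF entry points implemented by this commit: */
	.attach_dmabuf	= iio_dma_buffer_attach_dmabuf,
	.detach_dmabuf	= iio_dma_buffer_detach_dmabuf,
	.enqueue_dmabuf	= iio_dma_buffer_enqueue_dmabuf,
	.lock_queue	= iio_dma_buffer_lock_queue,
	.unlock_queue	= iio_dma_buffer_unlock_queue,

	.modes = INDIO_BUFFER_HARDWARE,
};

In such a scheme the IIO core would hold the queue lock via lock_queue()/unlock_queue() around enqueue_dmabuf(), which is why iio_dma_buffer_enqueue_dmabuf() below only WARNs that the mutex is already held rather than taking it itself.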

File tree

2 files changed (+240, -17 lines)


drivers/iio/buffer/industrialio-buffer-dma.c

Lines changed: 208 additions & 17 deletions
@@ -4,6 +4,8 @@
  * Author: Lars-Peter Clausen <lars@metafoo.de>
  */
 
+#include <linux/atomic.h>
+#include <linux/cleanup.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -15,6 +17,8 @@
 #include <linux/iio/buffer_impl.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/buffer-dma.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-fence.h>
 #include <linux/dma-mapping.h>
 #include <linux/sizes.h>
 
@@ -100,17 +104,22 @@ static void iio_buffer_block_release(struct kref *kref)
 {
 	struct iio_dma_buffer_block *block = container_of(kref,
 		struct iio_dma_buffer_block, kref);
+	struct iio_dma_buffer_queue *queue = block->queue;
 
-	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);
+	WARN_ON(block->fileio && block->state != IIO_BLOCK_STATE_DEAD);
 
 #ifdef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
 	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->block.size),
 			  block->vaddr, block->phys_addr);
 #else
-	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
-			  block->vaddr, block->phys_addr);
+	if (block->fileio) {
+		dma_free_coherent(queue->dev, PAGE_ALIGN(block->size),
+				  block->vaddr, block->phys_addr);
+	} else {
+		atomic_dec(&queue->num_dmabufs);
+	}
 #endif
-	iio_buffer_put(&block->queue->buffer);
+	iio_buffer_put(&queue->buffer);
 	kfree(block);
 }
 
@@ -173,25 +182,28 @@ static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
 }
 
 static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
-	struct iio_dma_buffer_queue *queue, size_t size)
+	struct iio_dma_buffer_queue *queue, size_t size, bool fileio)
 {
 	struct iio_dma_buffer_block *block;
 
 	block = kzalloc(sizeof(*block), GFP_KERNEL);
 	if (!block)
 		return NULL;
 
-	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
-		&block->phys_addr, GFP_KERNEL);
-	if (!block->vaddr) {
-		kfree(block);
-		return NULL;
+	if (fileio) {
+		block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
+			&block->phys_addr, GFP_KERNEL);
+		if (!block->vaddr) {
+			kfree(block);
+			return NULL;
+		}
 	}
 
 #ifdef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
 	block->block.size = size;
 	block->state = IIO_BLOCK_STATE_DEQUEUED;
 #else
+	block->fileio = fileio;
 	block->size = size;
 	block->state = IIO_BLOCK_STATE_DONE;
 #endif
@@ -201,6 +213,9 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
 
 	iio_buffer_get(&queue->buffer);
 
+	if (!fileio)
+		atomic_inc(&queue->num_dmabufs);
+
 	return block;
 }
 
@@ -239,13 +254,23 @@ void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
 {
 	struct iio_dma_buffer_queue *queue = block->queue;
 	unsigned long flags;
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+	bool cookie;
 
+	cookie = dma_fence_begin_signalling();
+#endif
 	spin_lock_irqsave(&queue->list_lock, flags);
 	_iio_dma_buffer_block_done(block);
 	spin_unlock_irqrestore(&queue->list_lock, flags);
-
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+	if (!block->fileio)
+		iio_buffer_signal_dmabuf_done(block->fence, 0);
+#endif
 	iio_buffer_block_put_atomic(block);
 	iio_dma_buffer_queue_wake(queue);
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+	dma_fence_end_signalling(cookie);
+#endif
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
 
@@ -264,7 +289,11 @@ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
 {
 	struct iio_dma_buffer_block *block, *_block;
 	unsigned long flags;
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+	bool cookie;
 
+	cookie = dma_fence_begin_signalling();
+#endif
 	spin_lock_irqsave(&queue->list_lock, flags);
 	list_for_each_entry_safe(block, _block, list, head) {
 		list_del(&block->head);
@@ -274,11 +303,21 @@ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
 		block->bytes_used = 0;
 #endif
 		_iio_dma_buffer_block_done(block);
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+		if (!block->fileio)
+			iio_buffer_signal_dmabuf_done(block->fence, -EINTR);
+#endif
 		iio_buffer_block_put_atomic(block);
 	}
 	spin_unlock_irqrestore(&queue->list_lock, flags);
 
+	if (queue->fileio.enabled)
+		queue->fileio.enabled = false;
+
 	iio_dma_buffer_queue_wake(queue);
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+	dma_fence_end_signalling(cookie);
+#endif
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
 
@@ -298,6 +337,18 @@ static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
 	}
 }
 
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+static bool iio_dma_buffer_can_use_fileio(struct iio_dma_buffer_queue *queue)
+{
+	/*
+	 * Note that queue->num_dmabufs cannot increase while the queue is
+	 * locked, it can only decrease, so it does not race against
+	 * iio_dma_buffer_alloc_block().
+	 */
+	return queue->fileio.enabled || !atomic_read(&queue->num_dmabufs);
+}
+#endif
+
 /**
  * iio_dma_buffer_request_update() - DMA buffer request_update callback
  * @buffer: The buffer which to request an update
@@ -327,8 +378,13 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
 #ifdef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
 	if (queue->num_blocks)
 		goto out_unlock;
-#endif
+#else
+	queue->fileio.enabled = iio_dma_buffer_can_use_fileio(queue);
 
+	/* If DMABUFs were created, disable fileio interface */
+	if (!queue->fileio.enabled)
+		goto out_unlock;
+#endif
 	/* Allocations are page aligned */
 	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
 		try_reuse = true;
@@ -369,7 +425,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
 		}
 
 		if (!block) {
-			block = iio_dma_buffer_alloc_block(queue, size);
+			block = iio_dma_buffer_alloc_block(queue, size, true);
 			if (!block) {
 				ret = -ENOMEM;
 				goto out_unlock;
@@ -442,8 +498,14 @@ static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
 
 	block->state = IIO_BLOCK_STATE_ACTIVE;
 	iio_buffer_block_get(block);
+
 	ret = queue->ops->submit(queue, block);
 	if (ret) {
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+		if (!block->fileio)
+			iio_buffer_signal_dmabuf_done(block->fence, ret);
+#endif
+
 		/*
 		 * This is a bit of a problem and there is not much we can do
 		 * other then wait for the buffer to be disabled and re-enabled
@@ -459,13 +521,40 @@ static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
 }
 
 #ifdef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+static struct iio_dma_buffer_block
+*iio_dma_buffer_mmap_alloc_block(struct iio_dma_buffer_queue *queue, size_t size)
+{
+	struct iio_dma_buffer_block *block;
+
+	block = kzalloc(sizeof(*block), GFP_KERNEL);
+	if (!block)
+		return NULL;
+
+	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
+		&block->phys_addr, GFP_KERNEL);
+	if (!block->vaddr) {
+		kfree(block);
+		return NULL;
+	}
+
+	block->block.size = size;
+	block->state = IIO_BLOCK_STATE_DEQUEUED;
+	block->queue = queue;
+	INIT_LIST_HEAD(&block->head);
+	kref_init(&block->kref);
+
+	iio_buffer_get(&queue->buffer);
+
+	return block;
+}
+
 static int iio_dma_buffer_fileio_alloc(struct iio_dma_buffer_queue *queue,
 				       struct iio_dev *indio_dev)
 {
 	size_t size = queue->buffer.bytes_per_datum * queue->buffer.length;
 	struct iio_dma_buffer_block *block;
 
-	block = iio_dma_buffer_alloc_block(queue, size);
+	block = iio_dma_buffer_mmap_alloc_block(queue, size);
 	if (!block)
 		return -ENOMEM;
 
@@ -761,7 +850,7 @@ int iio_dma_buffer_alloc_blocks(struct iio_buffer *buffer,
 	}
 
 	for (i = queue->num_blocks; i < num_blocks; i++) {
-		blocks[i] = iio_dma_buffer_alloc_block(queue, req->size);
+		blocks[i] = iio_dma_buffer_mmap_alloc_block(queue, req->size);
 		if (!blocks[i])
 			break;
 		blocks[i]->block.id = i;
@@ -809,7 +898,6 @@ int iio_dma_buffer_free_blocks(struct iio_buffer *buffer)
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_free_blocks);
 
-
 int iio_dma_buffer_query_block(struct iio_buffer *buffer,
 			       struct iio_buffer_block *block)
 {
@@ -912,7 +1000,6 @@ int iio_dma_buffer_dequeue_block(struct iio_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_dequeue_block);
 
-
 static void iio_dma_buffer_mmap_open(struct vm_area_struct *area)
 {
 	struct iio_dma_buffer_block *block = area->vm_private_data;
@@ -965,6 +1052,110 @@ int iio_dma_buffer_mmap(struct iio_buffer *buffer,
 		block->phys_addr, vma->vm_end - vma->vm_start);
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_mmap);
+#else
+struct iio_dma_buffer_block *
+iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
+			     struct dma_buf_attachment *attach)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+	struct iio_dma_buffer_block *block;
+
+	guard(mutex)(&queue->lock);
+
+	/*
+	 * If the buffer is enabled and in fileio mode new blocks can't be
+	 * allocated.
+	 */
+	if (queue->fileio.enabled)
+		return ERR_PTR(-EBUSY);
+
+	block = iio_dma_buffer_alloc_block(queue, attach->dmabuf->size, false);
+	if (!block)
+		return ERR_PTR(-ENOMEM);
+
+	/* Free memory that might be in use for fileio mode */
+	iio_dma_buffer_fileio_free(queue);
+
+	return block;
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_attach_dmabuf);
+
+void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
+				  struct iio_dma_buffer_block *block)
+{
+	block->state = IIO_BLOCK_STATE_DEAD;
+	iio_buffer_block_put_atomic(block);
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_detach_dmabuf);
+
+static int iio_dma_can_enqueue_block(struct iio_dma_buffer_block *block)
+{
+	struct iio_dma_buffer_queue *queue = block->queue;
+
+	/* If in fileio mode buffers can't be enqueued. */
+	if (queue->fileio.enabled)
+		return -EBUSY;
+
+	switch (block->state) {
+	case IIO_BLOCK_STATE_QUEUED:
+		return -EPERM;
+	case IIO_BLOCK_STATE_ACTIVE:
+	case IIO_BLOCK_STATE_DEAD:
+		return -EBUSY;
+	case IIO_BLOCK_STATE_DONE:
+		break;
+	}
+
+	return 0;
+}
+
+int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
+				  struct iio_dma_buffer_block *block,
+				  struct dma_fence *fence,
+				  struct sg_table *sgt,
+				  size_t size, bool cyclic)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+	bool cookie;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&queue->lock));
+
+	cookie = dma_fence_begin_signalling();
+
+	ret = iio_dma_can_enqueue_block(block);
+	if (ret < 0)
+		goto out_end_signalling;
+
+	block->bytes_used = size;
+	block->cyclic = cyclic;
+	block->sg_table = sgt;
+	block->fence = fence;
+
+	iio_dma_buffer_enqueue(queue, block);
+
+out_end_signalling:
+	dma_fence_end_signalling(cookie);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_enqueue_dmabuf);
+
+void iio_dma_buffer_lock_queue(struct iio_buffer *buffer)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+
+	mutex_lock(&queue->lock);
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_lock_queue);
+
+void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+
+	mutex_unlock(&queue->lock);
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_unlock_queue);
 #endif
 
 /**
