Skip to content

Commit 2d63a48

Browse files
pcercuei authored and nunojsa committed
iio: buffer-dmaengine: Support new DMABUF based userspace API
Use the functions provided by the buffer-dma core to implement the DMABUF userspace API in the buffer-dmaengine IIO buffer implementation. Since we want to be able to transfer an arbitrary number of bytes and not necesarily the full DMABUF, the associated scatterlist is converted to an array of DMA addresses + lengths, which is then passed to dmaengine_prep_slave_dma_array(). Signed-off-by: Paul Cercueil <paul@crapouillou.net> Co-developed-by: Nuno Sa <nuno.sa@analog.com> Signed-off-by: Nuno Sa <nuno.sa@analog.com> Link: https://patch.msgid.link/20240620122726.41232-6-paul@crapouillou.net Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
1 parent 1df339e commit 2d63a48

File tree

1 file changed

+32
-20
lines changed

1 file changed

+32
-20
lines changed

drivers/iio/buffer/industrialio-buffer-dmaengine.c

Lines changed: 32 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -65,13 +65,21 @@ static void iio_dmaengine_buffer_block_done(void *data,
6565
int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
6666
struct iio_dma_buffer_block *block)
6767
{
68-
struct dmaengine_buffer *dmaengine_buffer;
68+
struct dmaengine_buffer *dmaengine_buffer =
69+
iio_buffer_to_dmaengine_buffer(&block->queue->buffer);
6970
struct dma_async_tx_descriptor *desc;
7071
enum dma_transfer_direction dma_dir;
72+
#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
73+
struct scatterlist *sgl;
74+
struct dma_vec *vecs;
75+
#endif
7176
size_t max_size;
7277
dma_cookie_t cookie;
73-
74-
dmaengine_buffer = iio_buffer_to_dmaengine_buffer(&block->queue->buffer);
78+
#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
79+
size_t len_total;
80+
unsigned int i;
81+
int nents;
82+
#endif
7583

7684
#ifdef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
7785
if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
@@ -101,20 +109,15 @@ int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
101109
DMA_PREP_INTERRUPT);
102110
if (!desc)
103111
return -ENOMEM;
104-
105-
desc->callback_result = iio_dmaengine_buffer_block_done;
106-
desc->callback_param = block;
107112
}
108113
#else
109114
max_size = min(block->size, dmaengine_buffer->max_size);
110115
max_size = round_down(max_size, dmaengine_buffer->align);
111116

112-
if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
113-
block->bytes_used = max_size;
117+
if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
114118
dma_dir = DMA_DEV_TO_MEM;
115-
} else {
119+
else
116120
dma_dir = DMA_MEM_TO_DEV;
117-
}
118121

119122
if (block->sg_table) {
120123
sgl = block->sg_table->sgl;
@@ -145,23 +148,25 @@ int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
145148
vecs, nents, dma_dir,
146149
DMA_PREP_INTERRUPT);
147150
kfree(vecs);
148-
>>>>>>> 7232b3a0f5ea (s)
149151
} else {
150-
dma_dir = DMA_MEM_TO_DEV;
151-
}
152+
if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
153+
block->bytes_used = max_size;
152154

153-
if (!block->bytes_used || block->bytes_used > max_size)
154-
return -EINVAL;
155+
if (!block->bytes_used || block->bytes_used > max_size)
156+
return -EINVAL;
155157

156-
desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
157-
block->phys_addr, block->bytes_used, dma_dir,
158-
DMA_PREP_INTERRUPT);
158+
desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
159+
block->phys_addr,
160+
block->bytes_used,
161+
dma_dir,
162+
DMA_PREP_INTERRUPT);
163+
}
159164
if (!desc)
160165
return -ENOMEM;
161-
166+
#endif
162167
desc->callback_result = iio_dmaengine_buffer_block_done;
163168
desc->callback_param = block;
164-
#endif
169+
165170
cookie = dmaengine_submit(desc);
166171
if (dma_submit_error(cookie))
167172
return dma_submit_error(cookie);
@@ -213,6 +218,13 @@ static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
213218
.enqueue_block = iio_dma_buffer_enqueue_block,
214219
.dequeue_block = iio_dma_buffer_dequeue_block,
215220
.mmap = iio_dma_buffer_mmap,
221+
#else
222+
.enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
223+
.attach_dmabuf = iio_dma_buffer_attach_dmabuf,
224+
.detach_dmabuf = iio_dma_buffer_detach_dmabuf,
225+
226+
.lock_queue = iio_dma_buffer_lock_queue,
227+
.unlock_queue = iio_dma_buffer_unlock_queue,
216228
#endif
217229
.modes = INDIO_BUFFER_HARDWARE,
218230
.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,

0 commit comments

Comments
 (0)