
Commit 60d1a5d

dlech authored and nunojsa committed
iio: buffer-dmaengine: split requesting DMA channel from allocating buffer
Refactor the IIO dmaengine buffer code to split requesting the DMA channel from allocating the buffer. We want to be able to add a new function where the IIO device driver manages the DMA channel, so these two actions need to be separate.

To do this, calling dma_request_chan() is moved from iio_dmaengine_buffer_alloc() to iio_dmaengine_buffer_setup_ext(). A new __iio_dmaengine_buffer_setup_ext() helper function is added to simplify error unwinding and will also be used by a new function in a later patch.

iio_dmaengine_buffer_free() now only frees the buffer and does not release the DMA channel. A new iio_dmaengine_buffer_teardown() function is added to unwind everything done in iio_dmaengine_buffer_setup_ext(). This keeps things more symmetrical, with the obvious pairs alloc/free and setup/teardown.

Calling dma_get_slave_caps() in iio_dmaengine_buffer_alloc() is moved so that we can avoid any gotos for error unwinding.

[ADI tree: had to adjust for devm_iio_dmaengine_buffer_setup_with_ops]

Reviewed-by: Nuno Sa <nuno.sa@analog.com>
Signed-off-by: David Lechner <dlechner@baylibre.com>
Link: https://patch.msgid.link/20250207-dlech-mainline-spi-engine-offload-2-v8-8-e48a489be48c@baylibre.com
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
1 parent e6ce25b · commit 60d1a5d
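For reference, here is how the resulting setup/teardown pairing looks from a driver's perspective. This is a minimal sketch, not code from this commit: the example_* names and the choice of the "rx" channel are illustrative assumptions, while the iio_dmaengine_buffer_setup_ext() and iio_dmaengine_buffer_teardown() signatures are taken from the diff below.

#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/iio.h>

/* Illustrative setup: iio_dmaengine_buffer_setup_ext() now requests the
 * DMA channel itself (the request was moved out of the allocator), then
 * allocates the buffer and attaches it to the IIO device. */
static struct iio_buffer *example_setup(struct device *dev,
                                        struct iio_dev *indio_dev)
{
        return iio_dmaengine_buffer_setup_ext(dev, indio_dev, "rx",
                                              IIO_BUFFER_DIRECTION_IN);
}

/* Illustrative teardown: unwinds everything setup did, including
 * dma_release_channel(), which iio_dmaengine_buffer_free() no longer
 * calls on its own. */
static void example_teardown(struct iio_buffer *buffer)
{
        iio_dmaengine_buffer_teardown(buffer);
}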

File tree

3 files changed: +76 −58 lines


drivers/iio/adc/adi-axi-adc.c

Lines changed: 1 addition & 1 deletion
@@ -135,7 +135,7 @@ static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
 static void axi_adc_free_buffer(struct iio_backend *back,
                                 struct iio_buffer *buffer)
 {
-        iio_dmaengine_buffer_free(buffer);
+        iio_dmaengine_buffer_teardown(buffer);
 }
 
 static const struct regmap_config axi_adc_regmap_config = {

drivers/iio/buffer/industrialio-buffer-dmaengine.c

Lines changed: 74 additions & 56 deletions
@@ -255,39 +255,31 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
 
 /**
  * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
- * @dev: DMA channel consumer device
- * @channel: DMA channel name, typically "rx".
+ * @chan: DMA channel.
  *
  * This allocates a new IIO buffer which internally uses the DMAengine framework
- * to perform its transfers. The parent device will be used to request the DMA
- * channel.
+ * to perform its transfers.
  *
  * Once done using the buffer iio_dmaengine_buffer_free() should be used to
  * release it.
  */
-static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
-        const char *channel, const struct iio_dma_buffer_ops *ops, void *data)
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct dma_chan *chan,
+        const struct iio_dma_buffer_ops *ops,
+        void *data)
 {
         struct dmaengine_buffer *dmaengine_buffer;
         unsigned int width, src_width, dest_width;
         struct dma_slave_caps caps;
-        struct dma_chan *chan;
         int ret;
 
+        ret = dma_get_slave_caps(chan, &caps);
+        if (ret < 0)
+                return ERR_PTR(ret);
+
         dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
         if (!dmaengine_buffer)
                 return ERR_PTR(-ENOMEM);
 
-        chan = dma_request_chan(dev, channel);
-        if (IS_ERR(chan)) {
-                ret = PTR_ERR(chan);
-                goto err_free;
-        }
-
-        ret = dma_get_slave_caps(chan, &caps);
-        if (ret < 0)
-                goto err_free;
-
         /* Needs to be aligned to the maximum of the minimums */
         if (caps.src_addr_widths)
                 src_width = __ffs(caps.src_addr_widths);
@@ -320,10 +312,6 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
         dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;
 
         return &dmaengine_buffer->queue.buffer;
-
-err_free:
-        kfree(dmaengine_buffer);
-        return ERR_PTR(ret);
 }
 
 /**
@@ -332,17 +320,59 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
  *
  * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
  */
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
+static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
 {
         struct dmaengine_buffer *dmaengine_buffer =
                 iio_buffer_to_dmaengine_buffer(buffer);
 
         iio_dma_buffer_exit(&dmaengine_buffer->queue);
-        dma_release_channel(dmaengine_buffer->chan);
-
         iio_buffer_put(buffer);
 }
-EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
+
+/**
+ * iio_dmaengine_buffer_teardown() - Releases DMA channel and frees buffer
+ * @buffer: Buffer to free
+ *
+ * Releases the DMA channel and frees the buffer previously setup with
+ * iio_dmaengine_buffer_setup_ext().
+ */
+void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer)
+{
+        struct dmaengine_buffer *dmaengine_buffer =
+                iio_buffer_to_dmaengine_buffer(buffer);
+        struct dma_chan *chan = dmaengine_buffer->chan;
+
+        iio_dmaengine_buffer_free(buffer);
+        dma_release_channel(chan);
+}
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_teardown, IIO_DMAENGINE_BUFFER);
+
+static struct iio_buffer
+*__iio_dmaengine_buffer_setup_ext(struct iio_dev *indio_dev,
+                                  struct dma_chan *chan,
+                                  enum iio_buffer_direction dir,
+                                  const struct iio_dma_buffer_ops *ops,
+                                  void *data)
+{
+        struct iio_buffer *buffer;
+        int ret;
+
+        buffer = iio_dmaengine_buffer_alloc(chan, ops, data);
+        if (IS_ERR(buffer))
+                return ERR_CAST(buffer);
+
+        indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+
+        buffer->direction = dir;
+
+        ret = iio_device_attach_buffer(indio_dev, buffer);
+        if (ret) {
+                iio_dmaengine_buffer_free(buffer);
+                return ERR_PTR(ret);
+        }
+
+        return buffer;
+}
 
 /**
  * iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
@@ -356,30 +386,24 @@ EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
  * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
  * IIO device.
  *
- * Once done using the buffer iio_dmaengine_buffer_free() should be used to
+ * Once done using the buffer iio_dmaengine_buffer_teardown() should be used to
  * release it.
  */
 struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
                                                   struct iio_dev *indio_dev,
                                                   const char *channel,
                                                   enum iio_buffer_direction dir)
 {
+        struct dma_chan *chan;
         struct iio_buffer *buffer;
-        int ret;
-
-        buffer = iio_dmaengine_buffer_alloc(dev, channel, NULL, NULL);
-        if (IS_ERR(buffer))
-                return ERR_CAST(buffer);
-
-        indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 
-        buffer->direction = dir;
+        chan = dma_request_chan(dev, channel);
+        if (IS_ERR(chan))
+                return ERR_CAST(chan);
 
-        ret = iio_device_attach_buffer(indio_dev, buffer);
-        if (ret) {
-                iio_dmaengine_buffer_free(buffer);
-                return ERR_PTR(ret);
-        }
+        buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir, NULL, NULL);
+        if (IS_ERR(buffer))
+                dma_release_channel(chan);
 
         return buffer;
 }
@@ -397,29 +421,23 @@ static struct iio_buffer
                                                  const struct iio_dma_buffer_ops *ops,
                                                  void *data)
 {
+        struct dma_chan *chan;
         struct iio_buffer *buffer;
-        int ret;
-
-        buffer = iio_dmaengine_buffer_alloc(dev, channel, ops, data);
-        if (IS_ERR(buffer))
-                return ERR_CAST(buffer);
-
-        indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 
-        buffer->direction = dir;
+        chan = dma_request_chan(dev, channel);
+        if (IS_ERR(chan))
+                return ERR_CAST(chan);
 
-        ret = iio_device_attach_buffer(indio_dev, buffer);
-        if (ret) {
-                iio_dmaengine_buffer_free(buffer);
-                return ERR_PTR(ret);
-        }
+        buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir, ops, data);
+        if (IS_ERR(buffer))
+                dma_release_channel(chan);
 
         return buffer;
 }
 
-static void __devm_iio_dmaengine_buffer_free(void *buffer)
+static void devm_iio_dmaengine_buffer_teardown(void *buffer)
 {
-        iio_dmaengine_buffer_free(buffer);
+        iio_dmaengine_buffer_teardown(buffer);
 }
 
 /**
@@ -445,7 +463,7 @@ int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
         if (IS_ERR(buffer))
                 return PTR_ERR(buffer);
 
-        return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
+        return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_teardown,
                                         buffer);
 }
 EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
@@ -468,7 +486,7 @@ int devm_iio_dmaengine_buffer_setup_with_ops(struct device *dev,
         if (IS_ERR(buffer))
                 return PTR_ERR(buffer);
 
-        return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
+        return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_teardown,
                                         buffer);
 }
 EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_with_ops, IIO_DMAENGINE_BUFFER);
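With the devm_ variants above, a typical caller never invokes the teardown directly: the action registered via devm_add_action_or_reset() runs it on driver detach. A hedged sketch of such a probe, assuming a platform driver and an "rx" channel (the example_probe name and channel choice are illustrative, not from this commit):

#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/iio.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct iio_dev *indio_dev;
        int ret;

        indio_dev = devm_iio_device_alloc(&pdev->dev, 0);
        if (!indio_dev)
                return -ENOMEM;

        /* On success this queues devm_iio_dmaengine_buffer_teardown() to
         * run automatically on detach, releasing the DMA channel and the
         * buffer in one step. */
        ret = devm_iio_dmaengine_buffer_setup_ext(&pdev->dev, indio_dev, "rx",
                                                  IIO_BUFFER_DIRECTION_IN);
        if (ret)
                return ret;

        return devm_iio_device_register(&pdev->dev, indio_dev);
}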

include/linux/iio/buffer-dmaengine.h

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@ int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
                                       struct iio_dma_buffer_block *block);
 void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue);
 
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
+void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer);
 struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
                                                   struct iio_dev *indio_dev,
                                                   const char *channel,
