@@ -255,39 +255,31 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
 
 /**
  * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
- * @dev: DMA channel consumer device
- * @channel: DMA channel name, typically "rx".
+ * @chan: DMA channel.
  *
  * This allocates a new IIO buffer which internally uses the DMAengine framework
- * to perform its transfers. The parent device will be used to request the DMA
- * channel.
+ * to perform its transfers.
  *
  * Once done using the buffer iio_dmaengine_buffer_free() should be used to
  * release it.
  */
-static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
-        const char *channel, const struct iio_dma_buffer_ops *ops, void *data)
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct dma_chan *chan,
+                                                     const struct iio_dma_buffer_ops *ops,
+                                                     void *data)
 {
         struct dmaengine_buffer *dmaengine_buffer;
         unsigned int width, src_width, dest_width;
         struct dma_slave_caps caps;
-        struct dma_chan *chan;
         int ret;
 
+        ret = dma_get_slave_caps(chan, &caps);
+        if (ret < 0)
+                return ERR_PTR(ret);
+
         dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
         if (!dmaengine_buffer)
                 return ERR_PTR(-ENOMEM);
 
-        chan = dma_request_chan(dev, channel);
-        if (IS_ERR(chan)) {
-                ret = PTR_ERR(chan);
-                goto err_free;
-        }
-
-        ret = dma_get_slave_caps(chan, &caps);
-        if (ret < 0)
-                goto err_free;
-
         /* Needs to be aligned to the maximum of the minimums */
         if (caps.src_addr_widths)
                 src_width = __ffs(caps.src_addr_widths);
@@ -320,10 +312,6 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
         dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;
 
         return &dmaengine_buffer->queue.buffer;
-
-err_free:
-        kfree(dmaengine_buffer);
-        return ERR_PTR(ret);
 }
 
 /**
@@ -332,17 +320,59 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
  *
  * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
  */
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
+static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
 {
         struct dmaengine_buffer *dmaengine_buffer =
                 iio_buffer_to_dmaengine_buffer(buffer);
 
         iio_dma_buffer_exit(&dmaengine_buffer->queue);
-        dma_release_channel(dmaengine_buffer->chan);
-
         iio_buffer_put(buffer);
 }
-EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
+
+/**
+ * iio_dmaengine_buffer_teardown() - Releases DMA channel and frees buffer
+ * @buffer: Buffer to free
+ *
+ * Releases the DMA channel and frees the buffer previously setup with
+ * iio_dmaengine_buffer_setup_ext().
+ */
+void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer)
+{
+        struct dmaengine_buffer *dmaengine_buffer =
+                iio_buffer_to_dmaengine_buffer(buffer);
+        struct dma_chan *chan = dmaengine_buffer->chan;
+
+        iio_dmaengine_buffer_free(buffer);
+        dma_release_channel(chan);
+}
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_teardown, IIO_DMAENGINE_BUFFER);
+
+static struct iio_buffer
+*__iio_dmaengine_buffer_setup_ext(struct iio_dev *indio_dev,
+                                  struct dma_chan *chan,
+                                  enum iio_buffer_direction dir,
+                                  const struct iio_dma_buffer_ops *ops,
+                                  void *data)
+{
+        struct iio_buffer *buffer;
+        int ret;
+
+        buffer = iio_dmaengine_buffer_alloc(chan, ops, data);
+        if (IS_ERR(buffer))
+                return ERR_CAST(buffer);
+
+        indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+
+        buffer->direction = dir;
+
+        ret = iio_device_attach_buffer(indio_dev, buffer);
+        if (ret) {
+                iio_dmaengine_buffer_free(buffer);
+                return ERR_PTR(ret);
+        }
+
+        return buffer;
+}
 
 /**
  * iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
@@ -356,30 +386,24 @@ EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
  * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
  * IIO device.
  *
- * Once done using the buffer iio_dmaengine_buffer_free() should be used to
+ * Once done using the buffer iio_dmaengine_buffer_teardown() should be used to
  * release it.
  */
 struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
                                                   struct iio_dev *indio_dev,
                                                   const char *channel,
                                                   enum iio_buffer_direction dir)
 {
+        struct dma_chan *chan;
         struct iio_buffer *buffer;
-        int ret;
-
-        buffer = iio_dmaengine_buffer_alloc(dev, channel, NULL, NULL);
-        if (IS_ERR(buffer))
-                return ERR_CAST(buffer);
-
-        indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 
-        buffer->direction = dir;
+        chan = dma_request_chan(dev, channel);
+        if (IS_ERR(chan))
+                return ERR_CAST(chan);
 
-        ret = iio_device_attach_buffer(indio_dev, buffer);
-        if (ret) {
-                iio_dmaengine_buffer_free(buffer);
-                return ERR_PTR(ret);
-        }
+        buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir, NULL, NULL);
+        if (IS_ERR(buffer))
+                dma_release_channel(chan);
 
         return buffer;
 }
@@ -397,29 +421,23 @@ static struct iio_buffer
                                         const struct iio_dma_buffer_ops *ops,
                                         void *data)
 {
+        struct dma_chan *chan;
         struct iio_buffer *buffer;
-        int ret;
-
-        buffer = iio_dmaengine_buffer_alloc(dev, channel, ops, data);
-        if (IS_ERR(buffer))
-                return ERR_CAST(buffer);
-
-        indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 
-        buffer->direction = dir;
+        chan = dma_request_chan(dev, channel);
+        if (IS_ERR(chan))
+                return ERR_CAST(chan);
 
-        ret = iio_device_attach_buffer(indio_dev, buffer);
-        if (ret) {
-                iio_dmaengine_buffer_free(buffer);
-                return ERR_PTR(ret);
-        }
+        buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir, ops, data);
+        if (IS_ERR(buffer))
+                dma_release_channel(chan);
 
         return buffer;
 }
 
-static void __devm_iio_dmaengine_buffer_free(void *buffer)
+static void devm_iio_dmaengine_buffer_teardown(void *buffer)
 {
-        iio_dmaengine_buffer_free(buffer);
+        iio_dmaengine_buffer_teardown(buffer);
 }
 
 /**
@@ -445,7 +463,7 @@ int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
         if (IS_ERR(buffer))
                 return PTR_ERR(buffer);
 
-        return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
+        return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_teardown,
                                         buffer);
 }
 EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
@@ -468,7 +486,7 @@ int devm_iio_dmaengine_buffer_setup_with_ops(struct device *dev,
         if (IS_ERR(buffer))
                 return PTR_ERR(buffer);
 
-        return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
+        return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_teardown,
                                         buffer);
 }
 EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_with_ops, IIO_DMAENGINE_BUFFER);
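
For reference, a minimal usage sketch of the reworked API (not part of this commit): a hypothetical consumer driver pairing iio_dmaengine_buffer_setup_ext() with iio_dmaengine_buffer_teardown(). The foo_adc names and the "rx" channel string are placeholders; only the two iio_dmaengine_* calls come from the code above.

/*
 * Hypothetical example: driver-side setup/teardown after this change.
 * The module would also need MODULE_IMPORT_NS(IIO_DMAENGINE_BUFFER)
 * since the symbols are exported with EXPORT_SYMBOL_NS_GPL().
 */
#include <linux/err.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer-dmaengine.h>

struct foo_adc_state {
        struct iio_buffer *buffer;
};

static int foo_adc_setup_buffer(struct device *dev, struct iio_dev *indio_dev)
{
        struct foo_adc_state *st = iio_priv(indio_dev);
        struct iio_buffer *buffer;

        /* Requests the "rx" DMA channel, allocates the buffer and attaches it. */
        buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, "rx",
                                                IIO_BUFFER_DIRECTION_IN);
        if (IS_ERR(buffer))
                return PTR_ERR(buffer);

        st->buffer = buffer;
        return 0;
}

static void foo_adc_free_buffer(struct foo_adc_state *st)
{
        /* Frees the buffer, then releases the DMA channel it held. */
        iio_dmaengine_buffer_teardown(st->buffer);
}

Drivers that go through devm_iio_dmaengine_buffer_setup_ext() need no explicit call: as the last two hunks show, the devm action now invokes iio_dmaengine_buffer_teardown(), so the channel is released automatically on unbind.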