@@ -86,9 +86,7 @@ struct spi_mcux_data {
	struct spi_dma_stream dma_rx;
	struct spi_dma_stream dma_tx;
	/* dummy value used for transferring NOP when tx buf is null */
-	uint32_t dummy_tx_buffer;
-	/* dummy value used to read RX data into when rx buf is null */
-	uint32_t dummy_rx_buffer;
+	uint32_t dummy_buffer;
#endif
};

@@ -287,231 +285,244 @@ static void spi_mcux_dma_callback(const struct device *dev, void *arg, uint32_t
	spi_context_complete(&data->ctx, spi_dev, 0);
}

-static int spi_mcux_dma_tx_load(const struct device *dev, const uint8_t *buf, size_t len)
+static struct dma_block_config *spi_mcux_dma_common_load(struct spi_dma_stream *stream,
+							  const struct device *dev,
+							  const uint8_t *buf, size_t len)
{
	struct spi_mcux_data *data = dev->data;
-	struct dma_block_config *blk_cfg;
-	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
-
-	/* remember active TX DMA channel (used in callback) */
-	struct spi_dma_stream *stream = &data->dma_tx;
-
-	blk_cfg = &stream->dma_blk_cfg;
+	struct dma_block_config *blk_cfg = &stream->dma_blk_cfg;

	/* prepare the block for this TX DMA channel */
	memset(blk_cfg, 0, sizeof(struct dma_block_config));

+	blk_cfg->block_size = len;
+
	if (buf == NULL) {
-		/* Treat the transfer as a peripheral to peripheral one, so that DMA
-		 * reads from this address each time
-		 */
-		blk_cfg->source_address = (uint32_t)&data->dummy_tx_buffer;
+		blk_cfg->source_address = (uint32_t)&data->dummy_buffer;
+		blk_cfg->dest_address = (uint32_t)&data->dummy_buffer;
+		/* pretend it is peripheral xfer so DMA just xfer to dummy buf */
		stream->dma_cfg.channel_direction = PERIPHERAL_TO_PERIPHERAL;
	} else {
-		/* tx direction has memory as source and periph as dest. */
		blk_cfg->source_address = (uint32_t)buf;
+		blk_cfg->dest_address = (uint32_t)buf;
+	}
+
+	/* Transfer 1 byte each DMA loop */
+	stream->dma_cfg.source_burst_length = 1;
+	stream->dma_cfg.user_data = (void *)dev;
+	stream->dma_cfg.head_block = blk_cfg;
+
+	return blk_cfg;
+}
+
+static int spi_mcux_dma_tx_load(const struct device *dev, const uint8_t *buf, size_t len)
+{
+	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
+	struct spi_mcux_data *data = dev->data;
+	/* remember active TX DMA channel (used in callback) */
+	struct spi_dma_stream *stream = &data->dma_tx;
+	struct dma_block_config *blk_cfg = spi_mcux_dma_common_load(stream, dev, buf, len);
+
+	if (buf != NULL) {
+		/* tx direction has memory as source and periph as dest. */
		stream->dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
	}
-	/* Enable scatter/gather */
-	blk_cfg->source_gather_en = 1;
+
	/* Dest is LPSPI tx fifo */
	blk_cfg->dest_address = LPSPI_GetTxRegisterAddress(base);
-	blk_cfg->block_size = len;
-	/* Transfer 1 byte each DMA loop */
-	stream->dma_cfg.source_burst_length = 1;

-	stream->dma_cfg.head_block = &stream->dma_blk_cfg;
	/* give the client dev as arg, as the callback comes from the dma */
-	stream->dma_cfg.user_data = (struct device *)dev;
	/* pass our client origin to the dma: data->dma_tx.dma_channel */
-	return dma_config(data->dma_tx.dma_dev, data->dma_tx.channel, &stream->dma_cfg);
+	return dma_config(stream->dma_dev, stream->channel, &stream->dma_cfg);
}

static int spi_mcux_dma_rx_load(const struct device *dev, uint8_t *buf, size_t len)
{
-	struct spi_mcux_data *data = dev->data;
-	struct dma_block_config *blk_cfg;
	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
-
+	struct spi_mcux_data *data = dev->data;
	/* retrieve active RX DMA channel (used in callback) */
	struct spi_dma_stream *stream = &data->dma_rx;
+	struct dma_block_config *blk_cfg = spi_mcux_dma_common_load(stream, dev, buf, len);

-	blk_cfg = &stream->dma_blk_cfg;
-
-	/* prepare the block for this RX DMA channel */
-	memset(blk_cfg, 0, sizeof(struct dma_block_config));
-
-	if (buf == NULL) {
-		/* Treat the transfer as a peripheral to peripheral one, so that DMA
-		 * reads from this address each time
-		 */
-		blk_cfg->dest_address = (uint32_t)&data->dummy_rx_buffer;
-		stream->dma_cfg.channel_direction = PERIPHERAL_TO_PERIPHERAL;
-	} else {
+	if (buf != NULL) {
		/* rx direction has periph as source and mem as dest. */
-		blk_cfg->dest_address = (uint32_t)buf;
		stream->dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
	}
-	blk_cfg->block_size = len;
-	/* Enable scatter/gather */
-	blk_cfg->dest_scatter_en = 1;
+
	/* Source is LPSPI rx fifo */
	blk_cfg->source_address = LPSPI_GetRxRegisterAddress(base);
-	stream->dma_cfg.source_burst_length = 1;
-
-	stream->dma_cfg.head_block = blk_cfg;
-	stream->dma_cfg.user_data = (struct device *)dev;

	/* pass our client origin to the dma: data->dma_rx.channel */
-	return dma_config(data->dma_rx.dma_dev, data->dma_rx.channel, &stream->dma_cfg);
+	return dma_config(stream->dma_dev, stream->channel, &stream->dma_cfg);
}

static int wait_dma_rx_tx_done(const struct device *dev)
{
	struct spi_mcux_data *data = dev->data;
-	int ret = -1;
+	int ret;

-	while (1) {
+	do {
		ret = spi_context_wait_for_completion(&data->ctx);
		if (ret) {
			LOG_DBG("Timed out waiting for SPI context to complete");
			return ret;
-		}
-		if (data->status_flags & LPSPI_DMA_ERROR_FLAG) {
+		} else if (data->status_flags & LPSPI_DMA_ERROR_FLAG) {
			return -EIO;
		}
+	} while (!((data->status_flags & LPSPI_DMA_DONE_FLAG) == LPSPI_DMA_DONE_FLAG));

-		if ((data->status_flags & LPSPI_DMA_DONE_FLAG) == LPSPI_DMA_DONE_FLAG) {
-			LOG_DBG("DMA block completed");
-			return 0;
-		}
-	}
+	LOG_DBG("DMA block completed");
+	return 0;
}

static inline int spi_mcux_dma_rxtx_load(const struct device *dev, size_t *dma_size)
{
-	struct spi_mcux_data *lpspi_data = dev->data;
+	struct spi_mcux_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
	int ret = 0;

	/* Clear status flags */
-	lpspi_data->status_flags = 0U;
+	data->status_flags = 0U;
+
	/* Load dma blocks of equal length */
-	*dma_size = MIN(lpspi_data->ctx.tx_len, lpspi_data->ctx.rx_len);
-	if (*dma_size == 0) {
-		*dma_size = MAX(lpspi_data->ctx.tx_len, lpspi_data->ctx.rx_len);
-	}
+	*dma_size = spi_context_max_continuous_chunk(ctx);

-	ret = spi_mcux_dma_tx_load(dev, lpspi_data->ctx.tx_buf, *dma_size);
+	ret = spi_mcux_dma_tx_load(dev, ctx->tx_buf, *dma_size);
	if (ret != 0) {
		return ret;
	}

-	ret = spi_mcux_dma_rx_load(dev, lpspi_data->ctx.rx_buf, *dma_size);
+	ret = spi_mcux_dma_rx_load(dev, ctx->rx_buf, *dma_size);
	if (ret != 0) {
		return ret;
	}

	/* Start DMA */
-	ret = dma_start(lpspi_data->dma_tx.dma_dev, lpspi_data->dma_tx.channel);
+	ret = dma_start(data->dma_tx.dma_dev, data->dma_tx.channel);
	if (ret != 0) {
		return ret;
	}

-	ret = dma_start(lpspi_data->dma_rx.dma_dev, lpspi_data->dma_rx.channel);
+	ret = dma_start(data->dma_rx.dma_dev, data->dma_rx.channel);
	return ret;
}

-static int transceive_dma(const struct device *dev, const struct spi_config *spi_cfg,
-			  const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
-			  bool asynchronous, spi_callback_t cb, void *userdata)
+#ifdef CONFIG_SPI_ASYNC
+static int transceive_dma_async(const struct device *dev, spi_callback_t cb, void *userdata)
{
	struct spi_mcux_data *data = dev->data;
-	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
-	int ret;
+	struct spi_context *ctx = &data->ctx;
	size_t dma_size;
+	int ret;

-	if (!asynchronous) {
-		spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);
-	}
+	ctx->asynchronous = true;
+	ctx->callback = cb;
+	ctx->callback_data = userdata;

-	ret = spi_mcux_configure(dev, spi_cfg);
+	ret = spi_mcux_dma_rxtx_load(dev, &dma_size);
	if (ret) {
-		if (!asynchronous) {
-			spi_context_release(&data->ctx, ret);
-		}
		return ret;
	}

-#ifdef CONFIG_SOC_SERIES_MCXN
-	base->TCR |= LPSPI_TCR_CONT_MASK;
-#endif
+	/* Enable DMA Requests */
+	LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);

-	/* DMA is fast enough watermarks are not required */
-	LPSPI_SetFifoWatermarks(base, 0U, 0U);
+	return 0;
+}
+#else
+#define transceive_dma_async(...) 0
+#endif /* CONFIG_SPI_ASYNC */

-	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
+static int transceive_dma_sync(const struct device *dev)
+{
+	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
+	struct spi_mcux_data *data = dev->data;
+	struct spi_context *ctx = &data->ctx;
+	size_t dma_size;
+	int ret;

-	if (!asynchronous) {
-		spi_context_cs_control(&data->ctx, true);
+	spi_context_cs_control(ctx, true);

-		/* Send each spi buf via DMA, updating context as DMA completes */
-		while (data->ctx.rx_len > 0 || data->ctx.tx_len > 0) {
-			/* Load dma block */
-			ret = spi_mcux_dma_rxtx_load(dev, &dma_size);
-			if (ret != 0) {
-				goto out;
-			}
+	/* Send each spi buf via DMA, updating context as DMA completes */
+	while (ctx->rx_len > 0 || ctx->tx_len > 0) {
+		/* Load dma block */
+		ret = spi_mcux_dma_rxtx_load(dev, &dma_size);
+		if (ret) {
+			return ret;
+		}

#ifdef CONFIG_SOC_SERIES_MCXN
-			while (!(LPSPI_GetStatusFlags(base) & kLPSPI_TxDataRequestFlag)) {
-				/* wait until previous tx finished */
-			}
+		while (!(LPSPI_GetStatusFlags(base) & kLPSPI_TxDataRequestFlag)) {
+			/* wait until previous tx finished */
+		}
#endif

-			/* Enable DMA Requests */
-			LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);
+		/* Enable DMA Requests */
+		LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);

-			/* Wait for DMA to finish */
-			ret = wait_dma_rx_tx_done(dev);
-			if (ret != 0) {
-				goto out;
-			}
+		/* Wait for DMA to finish */
+		ret = wait_dma_rx_tx_done(dev);
+		if (ret) {
+			return ret;
+		}

#ifndef CONFIG_SOC_SERIES_MCXN
-			while ((LPSPI_GetStatusFlags(base) & kLPSPI_ModuleBusyFlag)) {
-				/* wait until module is idle */
-			}
+		while ((LPSPI_GetStatusFlags(base) & kLPSPI_ModuleBusyFlag)) {
+			/* wait until module is idle */
+		}
#endif

-			/* Disable DMA */
-			LPSPI_DisableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);
-
-			/* Update SPI contexts with amount of data we just sent */
-			spi_context_update_tx(&data->ctx, 1, dma_size);
-			spi_context_update_rx(&data->ctx, 1, dma_size);
-		}
-		spi_context_cs_control(&data->ctx, false);
-		base->TCR = 0;
+		/* Disable DMA */
+		LPSPI_DisableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);

-out:
-	spi_context_release(&data->ctx, ret);
+		/* Update SPI contexts with amount of data we just sent */
+		spi_context_update_tx(ctx, 1, dma_size);
+		spi_context_update_rx(ctx, 1, dma_size);
	}
-#if CONFIG_SPI_ASYNC
-	else {
-		data->ctx.asynchronous = asynchronous;
-		data->ctx.callback = cb;
-		data->ctx.callback_data = userdata;

-		ret = spi_mcux_dma_rxtx_load(dev, &dma_size);
-		if (ret != 0) {
-			goto out;
-		}
+	spi_context_cs_control(ctx, false);

-		/* Enable DMA Requests */
-		LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);
+	base->TCR = 0;
+
+	return 0;
+}
+
+static int transceive_dma(const struct device *dev, const struct spi_config *spi_cfg,
+			  const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
+			  bool asynchronous, spi_callback_t cb, void *userdata)
+{
+	struct spi_mcux_data *data = dev->data;
+	LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base);
+	int ret;
+
+	if (!asynchronous) {
+		spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);
+	}
+
+	ret = spi_mcux_configure(dev, spi_cfg);
+	if (ret && !asynchronous) {
+		goto out;
+	} else if (ret) {
+		return ret;
	}
+
+#ifdef CONFIG_SOC_SERIES_MCXN
+	base->TCR |= LPSPI_TCR_CONT_MASK;
#endif

+	/* DMA is fast enough watermarks are not required */
+	LPSPI_SetFifoWatermarks(base, 0U, 0U);
+
+	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
+
+	if (asynchronous) {
+		ret = transceive_dma_async(dev, cb, userdata);
+	} else {
+		ret = transceive_dma_sync(dev);
+	}
+
+out:
+	spi_context_release(&data->ctx, ret);
	return ret;
}
#else