@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2024 Analog Devices, Inc.
+ * Copyright (c) 2024-2025 Analog Devices, Inc.
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -27,6 +27,9 @@
 LOG_MODULE_REGISTER(spi_max32, CONFIG_SPI_LOG_LEVEL);
 #include "spi_context.h"

+#define SPI_MAX32_MIN_WORD_BITS 2
+#define SPI_MAX32_MAX_WORD_BITS 16
+
 #ifdef CONFIG_SPI_MAX32_DMA
 struct max32_spi_dma_config {
 	const struct device *dev;
@@ -87,9 +90,11 @@ static int spi_configure(const struct device *dev, const struct spi_config *conf
 	mxc_spi_regs_t *regs = cfg->regs;
 	struct max32_spi_data *data = dev->data;

+#ifndef CONFIG_SPI_RTIO
 	if (spi_context_configured(&data->ctx, config)) {
 		return 0;
 	}
+#endif

 	if (SPI_OP_MODE_GET(config->operation) & SPI_OP_MODE_SLAVE) {
 		return -ENOTSUP;
@@ -163,18 +168,21 @@ static inline int spi_max32_get_dfs_shift(const struct spi_context *ctx)
 	return 1;
 }

-static void spi_max32_setup(mxc_spi_regs_t *spi, mxc_spi_req_t *req)
+static void spi_max32_setup(mxc_spi_regs_t *spi, mxc_spi_req_t *req, uint8_t dfs_shift)
 {
 	req->rxCnt = 0;
 	req->txCnt = 0;

+	spi->ctrl0 &= ~ADI_MAX32_SPI_CTRL_EN;
+
 	if (spi->ctrl0 & ADI_MAX32_SPI_CTRL_MASTER_MODE) {
 		MXC_SPI_SetSlave(spi, req->ssIdx);
 	}

+	/* SPI_CTRL1 holds the number of words so apply dfs_shift first */
 	if (req->rxData && req->rxLen) {
 		MXC_SETFIELD(spi->ctrl1, MXC_F_SPI_CTRL1_RX_NUM_CHAR,
-			     req->rxLen << MXC_F_SPI_CTRL1_RX_NUM_CHAR_POS);
+			     (req->rxLen >> dfs_shift) << MXC_F_SPI_CTRL1_RX_NUM_CHAR_POS);
 		spi->dma |= MXC_F_SPI_DMA_RX_FIFO_EN;
 	} else {
 		spi->ctrl1 &= ~MXC_F_SPI_CTRL1_RX_NUM_CHAR;
@@ -183,7 +191,7 @@ static void spi_max32_setup(mxc_spi_regs_t *spi, mxc_spi_req_t *req)

 	if (req->txLen) {
 		MXC_SETFIELD(spi->ctrl1, MXC_F_SPI_CTRL1_TX_NUM_CHAR,
-			     req->txLen << MXC_F_SPI_CTRL1_TX_NUM_CHAR_POS);
+			     (req->txLen >> dfs_shift) << MXC_F_SPI_CTRL1_TX_NUM_CHAR_POS);
 		spi->dma |= MXC_F_SPI_DMA_TX_FIFO_EN;
 	} else {
 		spi->ctrl1 &= ~MXC_F_SPI_CTRL1_TX_NUM_CHAR;
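The two hunks above change `req->txLen`/`req->rxLen` from word counts to byte counts, so `spi_max32_setup()` now shifts them back down before programming the `CTRL1` `*_NUM_CHAR` word-count fields. A standalone sketch of that arithmetic, with illustrative names that are not the driver's:

```c
#include <stdint.h>

/* Illustrative only: word sizes above 8 bits occupy two bytes per frame,
 * which is what the driver's spi_max32_get_dfs_shift() appears to encode
 * as a shift of 1 (and 0 for 8-bit-or-smaller words). */
static inline uint8_t example_dfs_shift(uint8_t word_bits)
{
	return (word_bits > 8) ? 1 : 0;
}

/* Byte length -> number of SPI words for the CTRL1 *_NUM_CHAR fields. */
static inline uint32_t example_num_char(uint32_t byte_len, uint8_t word_bits)
{
	return byte_len >> example_dfs_shift(word_bits);
}
```

For example, a 6-byte buffer transferred as 16-bit words programs a word count of 3.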
@@ -206,8 +214,8 @@ static int spi_max32_transceive_sync(mxc_spi_regs_t *spi, struct max32_spi_data
 	MXC_SPI_ClearTXFIFO(spi);
 	MXC_SPI_ClearRXFIFO(spi);

-	tx_len = req->txLen << dfs_shift;
-	rx_len = req->rxLen << dfs_shift;
+	tx_len = req->txLen;
+	rx_len = req->rxLen;
 	do {
 		remain = tx_len - req->txCnt;
 		if (remain > 0) {
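With the request lengths now kept in bytes, the synchronous polling loop no longer shifts them up. A hypothetical byte-based drain loop mirroring that bookkeeping, assuming the FIFO write helper (`MXC_SPI_WriteTXFIFO` in the driver) reports its progress in bytes:

```c
#include <stdint.h>

/* Illustrative only: lengths, counters and "remain" are all byte counts. */
static void example_drain_tx(const uint8_t *buf, uint32_t tx_len,
			     uint32_t (*fifo_write)(const uint8_t *data, uint32_t len))
{
	uint32_t tx_cnt = 0;

	while (tx_cnt < tx_len) {
		uint32_t remain = tx_len - tx_cnt; /* bytes still to queue */

		tx_cnt += fifo_write(&buf[tx_cnt], remain);
	}
}
```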
@@ -251,8 +259,6 @@ static int spi_max32_transceive(const struct device *dev)
 	uint32_t len;
 	uint8_t dfs_shift;

-	MXC_SPI_ClearTXFIFO(cfg->regs);
-
 	dfs_shift = spi_max32_get_dfs_shift(ctx);

 	len = spi_context_max_continuous_chunk(ctx);
@@ -263,64 +269,82 @@ static int spi_max32_transceive(const struct device *dev)
 		len = sqe->rx.buf_len;
 		data->req.rxData = sqe->rx.buf;
 		data->req.rxLen = sqe->rx.buf_len;
+		if (data->req.rxData == NULL) {
+			data->req.rxData = data->dummy;
+			data->req.rxLen = 0;
+		}
 		data->req.txData = NULL;
-		data->req.txLen = len >> dfs_shift;
+		data->req.txLen = len;
 		break;
 	case RTIO_OP_TX:
 		len = sqe->tx.buf_len;
 		data->req.rxLen = 0;
 		data->req.rxData = data->dummy;
 		data->req.txData = (uint8_t *)sqe->tx.buf;
-		data->req.txLen = len >> dfs_shift;
+		data->req.txLen = len;
 		break;
 	case RTIO_OP_TINY_TX:
 		len = sqe->tiny_tx.buf_len;
 		data->req.txData = (uint8_t *)sqe->tiny_tx.buf;
 		data->req.rxData = data->dummy;
-		data->req.txLen = len >> dfs_shift;
+		data->req.txLen = len;
 		data->req.rxLen = 0;
 		break;
 	case RTIO_OP_TXRX:
 		len = sqe->txrx.buf_len;
 		data->req.txData = (uint8_t *)sqe->txrx.tx_buf;
 		data->req.rxData = sqe->txrx.rx_buf;
-		data->req.txLen = len >> dfs_shift;
-		data->req.rxLen = len >> dfs_shift;
+		data->req.txLen = len;
+		data->req.rxLen = len;
+		if (data->req.rxData == NULL) {
+			data->req.rxData = data->dummy;
+			data->req.rxLen = 0;
+		}
 		break;
 	default:
 		break;
 	}
 #else
-	data->req.txLen = len >> dfs_shift;
+	data->req.txLen = len;
 	data->req.txData = (uint8_t *)ctx->tx_buf;
-	data->req.rxLen = len >> dfs_shift;
-	data->req.rxData = ctx->rx_buf;
-
+	data->req.rxLen = len;
 	data->req.rxData = ctx->rx_buf;

-	data->req.rxLen = len >> dfs_shift;
 	if (!data->req.rxData) {
 		/* Pass a dummy buffer to HAL if receive buffer is NULL, otherwise
 		 * corrupt data is read during subsequent transactions.
 		 */
 		data->req.rxData = data->dummy;
 		data->req.rxLen = 0;
+
+		if (!data->req.txData && !data->req.txLen) {
+			/* Both RX and TX are NULL, nothing to do */
+			spi_context_update_tx(&data->ctx, dfs_shift ? 2 : 1, len);
+			spi_context_update_rx(&data->ctx, dfs_shift ? 2 : 1, len);
+			if (!spi_context_tx_on(ctx) && !spi_context_rx_on(ctx)) {
+				spi_context_complete(ctx, dev, 0);
+			}
+
+			return 0;
+		}
 	}
 #endif
 	data->req.spi = cfg->regs;
 	data->req.ssIdx = ctx->config->slave;
 	data->req.ssDeassert = 0;
 	data->req.txCnt = 0;
 	data->req.rxCnt = 0;
-	spi_max32_setup(cfg->regs, &data->req);
+	spi_max32_setup(cfg->regs, &data->req, dfs_shift);
 #ifdef CONFIG_SPI_MAX32_INTERRUPT
-	MXC_SPI_SetTXThreshold(cfg->regs, 1);
+	MXC_SPI_SetTXThreshold(cfg->regs, 1 << dfs_shift);
 	if (data->req.rxLen) {
-		MXC_SPI_SetRXThreshold(cfg->regs, 2);
+		MXC_SPI_SetRXThreshold(cfg->regs, 2 << dfs_shift);
 		MXC_SPI_EnableInt(cfg->regs, ADI_MAX32_SPI_INT_EN_RX_THD);
 	}
 	MXC_SPI_EnableInt(cfg->regs, ADI_MAX32_SPI_INT_EN_TX_THD | ADI_MAX32_SPI_INT_EN_MST_DONE);

+	MXC_SPI_ClearTXFIFO(cfg->regs);
+	MXC_SPI_ClearRXFIFO(cfg->regs);
 	if (!data->req.txData) {
 		data->req.txCnt =
 			MXC_SPI_WriteTXFIFO(cfg->regs, data->dummy, MIN(len, sizeof(data->dummy)));
@@ -334,8 +358,8 @@ static int spi_max32_transceive(const struct device *dev)
 	if (ret) {
 		ret = -EIO;
 	} else {
-		spi_context_update_tx(ctx, 1, len);
-		spi_context_update_rx(ctx, 1, len);
+		spi_context_update_tx(ctx, dfs_shift ? 2 : 1, len);
+		spi_context_update_rx(ctx, dfs_shift ? 2 : 1, len);
 	}
 #endif

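The hunk above replaces the hard-coded data-frame size of 1 passed to the `spi_context` helpers with a value derived from `dfs_shift`. A minimal sketch of that conversion, assuming the `dfs` argument in `spi_context.h` means bytes per frame:

```c
#include <stdint.h>

/* Illustrative only: a non-zero shift means 9..16-bit words, i.e. two
 * bytes per frame; everything else is one byte per frame. */
static inline uint8_t example_bytes_per_frame(uint8_t dfs_shift)
{
	return dfs_shift ? 2 : 1;
}
```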
@@ -418,9 +442,18 @@ static int transceive(const struct device *dev, const struct spi_config *config,
 		}
 	}
 #else
-	struct spi_rtio *rtio_ctx = data->rtio_ctx;
+	/* Guard against unsupported word lengths here, as spi_configure is
+	 * called at a later stage */
+	if ((SPI_WORD_SIZE_GET(config->operation) < SPI_MAX32_MIN_WORD_BITS) ||
+	    (SPI_WORD_SIZE_GET(config->operation) > SPI_MAX32_MAX_WORD_BITS)) {
+		ret = -ENOTSUP;
+	} else {
+		if (tx_bufs || rx_bufs) {
+			struct spi_rtio *rtio_ctx = data->rtio_ctx;
+			ret = spi_rtio_transceive(rtio_ctx, config, tx_bufs, rx_bufs);
+		}
+	}

-	ret = spi_rtio_transceive(rtio_ctx, config, tx_bufs, rx_bufs);
 #endif
 	spi_context_release(ctx, ret);
 	return ret;
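In the RTIO path, `spi_configure()` only runs when the submission is later processed, so the word-size range is validated up front. An illustrative version of that check; the macro names mirror the ones added at the top of the file but are stand-ins here:

```c
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_MIN_WORD_BITS 2  /* mirrors SPI_MAX32_MIN_WORD_BITS */
#define EXAMPLE_MAX_WORD_BITS 16 /* mirrors SPI_MAX32_MAX_WORD_BITS */

/* Illustrative only: reject word sizes the controller cannot handle
 * before anything is queued. */
static bool example_word_size_ok(uint8_t word_bits)
{
	return word_bits >= EXAMPLE_MIN_WORD_BITS && word_bits <= EXAMPLE_MAX_WORD_BITS;
}
```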
@@ -434,9 +467,10 @@ static void spi_max32_dma_callback(const struct device *dev, void *arg, uint32_t
 	const struct device *spi_dev = data->dev;
 	const struct max32_spi_config *config = spi_dev->config;
 	uint32_t len;
+	uint8_t dfs = spi_max32_get_dfs_shift(&data->ctx) ? 2 : 1;

 	if (status < 0) {
-		LOG_ERR("DMA callback error with channel %d.", channel);
+		LOG_ERR("DMA callback error for channel %u: %d", channel, status);
 	} else {
 		/* identify the origin of this callback */
 		if (channel == config->tx_dma.channel) {
@@ -447,14 +481,14 @@ static void spi_max32_dma_callback(const struct device *dev, void *arg, uint32_t
 	}
 	if ((data->dma_stat & SPI_MAX32_DMA_DONE_FLAG) == SPI_MAX32_DMA_DONE_FLAG) {
 		len = spi_context_max_continuous_chunk(&data->ctx);
-		spi_context_update_tx(&data->ctx, 1, len);
-		spi_context_update_rx(&data->ctx, 1, len);
+		spi_context_update_tx(&data->ctx, dfs, len);
+		spi_context_update_rx(&data->ctx, dfs, len);
 		spi_context_complete(&data->ctx, spi_dev, status == 0 ? 0 : -EIO);
 	}
 }

 static int spi_max32_tx_dma_load(const struct device *dev, const uint8_t *buf, uint32_t len,
-				 uint8_t word_shift)
+				 uint8_t dfs_shift)
 {
 	int ret;
 	const struct max32_spi_config *config = dev->config;
@@ -467,9 +501,9 @@ static int spi_max32_tx_dma_load(const struct device *dev, const uint8_t *buf, u
 	dma_cfg.user_data = (void *)data;
 	dma_cfg.dma_slot = config->tx_dma.slot;
 	dma_cfg.block_count = 1;
-	dma_cfg.source_data_size = 1U << word_shift;
-	dma_cfg.source_burst_length = 1U;
-	dma_cfg.dest_data_size = 1U << word_shift;
+	dma_cfg.source_data_size = 1U << dfs_shift;
+	dma_cfg.source_burst_length = 1U << dfs_shift;
+	dma_cfg.dest_data_size = 1U << dfs_shift;
 	dma_cfg.head_block = &dma_blk;
 	dma_blk.block_size = len;
 	if (buf) {
@@ -489,7 +523,7 @@ static int spi_max32_tx_dma_load(const struct device *dev, const uint8_t *buf, u
 }

 static int spi_max32_rx_dma_load(const struct device *dev, const uint8_t *buf, uint32_t len,
-				 uint8_t word_shift)
+				 uint8_t dfs_shift)
 {
 	int ret;
 	const struct max32_spi_config *config = dev->config;
@@ -502,9 +536,9 @@ static int spi_max32_rx_dma_load(const struct device *dev, const uint8_t *buf, u
 	dma_cfg.user_data = (void *)data;
 	dma_cfg.dma_slot = config->rx_dma.slot;
 	dma_cfg.block_count = 1;
-	dma_cfg.source_data_size = 1U << word_shift;
-	dma_cfg.source_burst_length = 1U;
-	dma_cfg.dest_data_size = 1U << word_shift;
+	dma_cfg.source_data_size = 1U << dfs_shift;
+	dma_cfg.source_burst_length = 1U << dfs_shift;
+	dma_cfg.dest_data_size = 1U << dfs_shift;
 	dma_cfg.head_block = &dma_blk;
 	dma_blk.block_size = len;
 	if (buf) {
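Both DMA load helpers now derive the transfer unit and the burst length from the frame size instead of hard-coding a one-byte burst. A standalone sketch of that sizing rule; the struct is illustrative, not Zephyr's `struct dma_config`:

```c
#include <stdint.h>

struct example_dma_sizing {
	uint32_t data_size;    /* bytes per transfer unit */
	uint32_t burst_length; /* bytes per burst */
};

/* Illustrative only: 16-bit SPI words (dfs_shift == 1) move as 2-byte
 * units, 8-bit-or-smaller words as single bytes. */
static struct example_dma_sizing example_dma_sizing_for(uint8_t dfs_shift)
{
	uint32_t unit = 1U << dfs_shift;

	return (struct example_dma_sizing){
		.data_size = unit,
		.burst_length = unit,
	};
}
```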
@@ -540,6 +574,7 @@ static int transceive_dma(const struct device *dev, const struct spi_config *con
 	spi_context_lock(ctx, async, cb, userdata, config);

 	MXC_SPI_ClearTXFIFO(spi);
+	MXC_SPI_ClearRXFIFO(spi);

 	ret = dma_get_status(cfg->tx_dma.dev, cfg->tx_dma.channel, &status);
 	if (ret < 0 || status.busy) {
@@ -553,6 +588,12 @@ static int transceive_dma(const struct device *dev, const struct spi_config *con
 		goto unlock;
 	}

+	/* Word sizes less than 8-bits are not supported in DMA mode */
+	if (SPI_WORD_SIZE_GET(config->operation) < 8) {
+		ret = -ENOTSUP;
+		goto unlock;
+	}
+
 	ret = spi_configure(dev, config);
 	if (ret != 0) {
 		ret = -EIO;
@@ -581,12 +622,17 @@ static int transceive_dma(const struct device *dev, const struct spi_config *con
 		dfs_shift = spi_max32_get_dfs_shift(ctx);
 		word_count = len >> dfs_shift;

+		if (word_count == 0) {
+			/* Nothing to do, continue */
+			continue;
+		}
+
 		MXC_SETFIELD(spi->ctrl1, MXC_F_SPI_CTRL1_RX_NUM_CHAR,
 			     word_count << MXC_F_SPI_CTRL1_RX_NUM_CHAR_POS);
 		spi->dma |= ADI_MAX32_SPI_DMA_RX_FIFO_CLEAR;
 		spi->dma |= MXC_F_SPI_DMA_RX_FIFO_EN;
 		spi->dma |= ADI_MAX32_SPI_DMA_RX_DMA_EN;
-		MXC_SPI_SetRXThreshold(spi, 0);
+		MXC_SPI_SetRXThreshold(spi, dfs_shift ? 1 : 0);

 		ret = spi_max32_rx_dma_load(dev, ctx->rx_buf, len, dfs_shift);
 		if (ret < 0) {
@@ -598,7 +644,7 @@ static int transceive_dma(const struct device *dev, const struct spi_config *con
 		spi->dma |= ADI_MAX32_SPI_DMA_TX_FIFO_CLEAR;
 		spi->dma |= MXC_F_SPI_DMA_TX_FIFO_EN;
 		spi->dma |= ADI_MAX32_SPI_DMA_TX_DMA_EN;
-		MXC_SPI_SetTXThreshold(spi, 1);
+		MXC_SPI_SetTXThreshold(spi, 2);

 		ret = spi_max32_tx_dma_load(dev, ctx->tx_buf, len, dfs_shift);
 		if (ret < 0) {
@@ -754,6 +800,7 @@ static void spi_max32_callback(mxc_spi_req_t *req, int error)
 	struct spi_context *ctx = &data->ctx;
 	const struct device *dev = data->dev;
 	uint32_t len;
+	uint8_t dfs;

 #ifdef CONFIG_SPI_RTIO
 	struct spi_rtio *rtio_ctx = data->rtio_ctx;
@@ -762,9 +809,10 @@ static void spi_max32_callback(mxc_spi_req_t *req, int error)
 		spi_max32_iodev_complete(data->dev, 0);
 	}
 #endif
+	dfs = spi_max32_get_dfs_shift(ctx) ? 2 : 1;
 	len = spi_context_max_continuous_chunk(ctx);
-	spi_context_update_tx(ctx, 1, len);
-	spi_context_update_rx(ctx, 1, len);
+	spi_context_update_tx(ctx, dfs, len);
+	spi_context_update_rx(ctx, dfs, len);
 #ifdef CONFIG_SPI_ASYNC
 	if (ctx->asynchronous && ((spi_context_tx_on(ctx) || spi_context_rx_on(ctx)))) {
 		k_work_submit(&data->async_work);
@@ -804,12 +852,11 @@ static void spi_max32_isr(const struct device *dev)
 	mxc_spi_req_t *req = &data->req;
 	mxc_spi_regs_t *spi = cfg->regs;
 	uint32_t flags, remain;
-	uint8_t dfs_shift = spi_max32_get_dfs_shift(&data->ctx);

 	flags = MXC_SPI_GetFlags(spi);
 	MXC_SPI_ClearFlags(spi);

-	remain = (req->txLen << dfs_shift) - req->txCnt;
+	remain = req->txLen - req->txCnt;
 	if (flags & ADI_MAX32_SPI_INT_FL_TX_THD) {
 		if (remain) {
 			if (!data->req.txData) {
@@ -824,10 +871,10 @@ static void spi_max32_isr(const struct device *dev)
 		}
 	}

-	remain = (req->rxLen << dfs_shift) - req->rxCnt;
+	remain = req->rxLen - req->rxCnt;
 	if (remain) {
 		req->rxCnt += MXC_SPI_ReadRXFIFO(spi, &req->rxData[req->rxCnt], remain);
-		remain = (req->rxLen << dfs_shift) - req->rxCnt;
+		remain = req->rxLen - req->rxCnt;
 		if (remain >= MXC_SPI_FIFO_DEPTH) {
 			MXC_SPI_SetRXThreshold(spi, 2);
 		} else {
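Taken together, the patch settles on consistent units across the driver. A worked example for a transfer of three 16-bit words, with illustrative values only:

```c
#include <stdint.h>

enum {
	EXAMPLE_BYTE_LEN = 6,  /* three 16-bit words */
	EXAMPLE_DFS_SHIFT = 1, /* two bytes per word */
};

/* req->txLen/rxLen, the FIFO counters and the ISR "remain" stay in bytes (6);
 * the CTRL1 *_NUM_CHAR fields and the DMA word_count are words (6 >> 1 == 3);
 * spi_context_update_tx/rx receive the frame size in bytes (2). */
static const uint32_t example_word_count = EXAMPLE_BYTE_LEN >> EXAMPLE_DFS_SHIFT;
static const uint8_t example_dfs_bytes = EXAMPLE_DFS_SHIFT ? 2 : 1;
```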