@@ -23,79 +23,49 @@
 #include <zephyr/irq.h>
 LOG_MODULE_REGISTER(i2s_ll_stm32);
 
-#define MODULO_INC(val, max) { val = (++val < max) ? val : 0; }
-
 static unsigned int div_round_closest(uint32_t dividend, uint32_t divisor)
 {
 	return (dividend + (divisor / 2U)) / divisor;
 }
 
-static bool queue_is_empty(struct ring_buffer *rb)
+static bool queue_is_empty(struct k_msgq *q)
 {
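+	/* k_msgq serializes its own accesses, so the manual irq_lock()
+	 * critical section of the ring-buffer version is no longer needed.
+	 */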
-	unsigned int key;
-
-	key = irq_lock();
-
-	if (rb->tail != rb->head) {
-		/* Ring buffer is not empty */
-		irq_unlock(key);
-		return false;
-	}
-
-	irq_unlock(key);
-
-	return true;
+	return k_msgq_num_used_get(q) == 0;
 }
 
 /*
  * Get data from the queue
  */
-static int queue_get(struct ring_buffer *rb, void **mem_block, size_t *size)
+static int queue_get(struct k_msgq *q, void **mem_block, size_t *size, int32_t timeout)
 {
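+	/* SYS_TIMEOUT_MS() converts the millisecond timeout to a k_timeout_t;
+	 * a timeout of 0 keeps the call non-blocking for ISR-side callers.
+	 */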
-	unsigned int key;
+	struct queue_item item;
+	int result = k_msgq_get(q, &item, SYS_TIMEOUT_MS(timeout));
 
-	key = irq_lock();
-
-	if (queue_is_empty(rb) == true) {
-		irq_unlock(key);
-		return -ENOMEM;
+	if (result == 0) {
+		*mem_block = item.mem_block;
+		*size = item.size;
 	}
-
-	*mem_block = rb->buf[rb->tail].mem_block;
-	*size = rb->buf[rb->tail].size;
-	MODULO_INC(rb->tail, rb->len);
-
-	irq_unlock(key);
-
-	return 0;
+	return result;
 }
 
 /*
  * Put data in the queue
  */
-static int queue_put(struct ring_buffer *rb, void *mem_block, size_t size)
+static int queue_put(struct k_msgq *q, void *mem_block, size_t size, int32_t timeout)
 {
-	uint16_t head_next;
-	unsigned int key;
+	struct queue_item item = {.mem_block = mem_block, .size = size};
 
-	key = irq_lock();
+	return k_msgq_put(q, &item, SYS_TIMEOUT_MS(timeout));
+}
 
-	head_next = rb->head;
-	MODULO_INC(head_next, rb->len);
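+/* One drop helper for both directions: with k_msgq there is no per-stream
+ * semaphore to rebalance, so the separate rx/tx variants go away.
+ */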
+static void stream_queue_drop(struct stream *s)
+{
+	size_t size;
+	void *mem_block;
 
-	if (head_next == rb->tail) {
-		/* Ring buffer is full */
-		irq_unlock(key);
-		return -ENOMEM;
+	while (queue_get(s->msgq, &mem_block, &size, 0) == 0) {
+		k_mem_slab_free(s->cfg.mem_slab, mem_block);
 	}
-
-	rb->buf[rb->head].mem_block = mem_block;
-	rb->buf[rb->head].size = size;
-	rb->head = head_next;
-
-	irq_unlock(key);
-
-	return 0;
 }
 
 static int i2s_stm32_enable_clock(const struct device *dev)
@@ -224,7 +194,7 @@ static int i2s_stm32_configure(const struct device *dev, enum i2s_dir dir,
 	}
 
 	if (i2s_cfg->frame_clk_freq == 0U) {
-		stream->queue_drop(stream);
+		stream_queue_drop(stream);
 		memset(&stream->cfg, 0, sizeof(struct i2s_config));
 		stream->state = I2S_STATE_NOT_READY;
 		return 0;
@@ -385,7 +355,7 @@ static int i2s_stm32_trigger(const struct device *dev, enum i2s_dir dir,
 	}
 
 	if (dir == I2S_DIR_TX) {
-		if ((queue_is_empty(&stream->mem_block_queue) == false) ||
+		if (!queue_is_empty(stream->msgq) ||
 		    (ll_func_i2s_dma_busy(cfg->i2s))) {
 			stream->state = I2S_STATE_STOPPING;
 			/*
@@ -412,7 +382,7 @@ static int i2s_stm32_trigger(const struct device *dev, enum i2s_dir dir,
 			return -EIO;
 		}
 		stream->stream_disable(stream, dev);
-		stream->queue_drop(stream);
+		stream_queue_drop(stream);
 		stream->state = I2S_STATE_READY;
 		break;
 
@@ -422,7 +392,7 @@ static int i2s_stm32_trigger(const struct device *dev, enum i2s_dir dir,
 			return -EIO;
 		}
 		stream->state = I2S_STATE_READY;
-		stream->queue_drop(stream);
+		stream_queue_drop(stream);
 		break;
 
 	default:
@@ -444,16 +414,8 @@ static int i2s_stm32_read(const struct device *dev, void **mem_block,
 		return -EIO;
 	}
 
-	if (dev_data->rx.state != I2S_STATE_ERROR) {
-		ret = k_sem_take(&dev_data->rx.sem,
-				 SYS_TIMEOUT_MS(dev_data->rx.cfg.timeout));
-		if (ret < 0) {
-			return ret;
-		}
-	}
-
 	/* Get data from the beginning of RX queue */
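+	/* The queue's own timeout now provides the blocking wait that the
+	 * removed k_sem_take() used to implement.
+	 */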
-	ret = queue_get(&dev_data->rx.mem_block_queue, mem_block, size);
+	ret = queue_get(dev_data->rx.msgq, mem_block, size, dev_data->rx.cfg.timeout);
 	if (ret < 0) {
 		return -EIO;
 	}
@@ -465,22 +427,15 @@ static int i2s_stm32_write(const struct device *dev, void *mem_block,
 			   size_t size)
 {
 	struct i2s_stm32_data *const dev_data = dev->data;
-	int ret;
 
 	if (dev_data->tx.state != I2S_STATE_RUNNING &&
 	    dev_data->tx.state != I2S_STATE_READY) {
 		LOG_DBG("invalid state");
 		return -EIO;
 	}
 
-	ret = k_sem_take(&dev_data->tx.sem,
-			 SYS_TIMEOUT_MS(dev_data->tx.cfg.timeout));
-	if (ret < 0) {
-		return ret;
-	}
-
 	/* Add data to the end of the TX queue */
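+	/* Back-pressure now comes from k_msgq_put() itself: it blocks for up
+	 * to the configured timeout while the TX queue is full.
+	 */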
-	return queue_put(&dev_data->tx.mem_block_queue, mem_block, size);
+	return queue_put(dev_data->tx.msgq, mem_block, size, dev_data->tx.cfg.timeout);
 }
 
 static DEVICE_API(i2s, i2s_stm32_driver_api) = {
@@ -604,13 +559,12 @@ static void dma_rx_callback(const struct device *dma_dev, void *arg,
 	sys_cache_data_invd_range(mblk_tmp, stream->cfg.block_size);
 
 	/* All block data received */
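+	/* DMA ISR context: zero timeout so the put cannot block; if the
+	 * application has not drained the queue in time, the stream goes to
+	 * the ERROR state below.
+	 */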
-	ret = queue_put(&stream->mem_block_queue, mblk_tmp,
-			stream->cfg.block_size);
+	ret = queue_put(stream->msgq, mblk_tmp,
+			stream->cfg.block_size, 0);
 	if (ret < 0) {
 		stream->state = I2S_STATE_ERROR;
 		goto rx_disable;
 	}
-	k_sem_give(&stream->sem);
 
 	/* Stop reception if we were requested */
 	if (stream->state == I2S_STATE_STOPPING) {
@@ -659,8 +613,8 @@ static void dma_tx_callback(const struct device *dma_dev, void *arg,
 	 * as stated in zephyr i2s specification, in case of DRAIN command
 	 * send all data in the transmit queue and stop the transmission.
 	 */
-	if (queue_is_empty(&stream->mem_block_queue) == true) {
-		stream->queue_drop(stream);
+	if (queue_is_empty(stream->msgq)) {
+		stream_queue_drop(stream);
 		stream->state = I2S_STATE_READY;
 		goto tx_disable;
 	} else if (stream->tx_stop_for_drain == false) {
@@ -681,8 +635,8 @@ static void dma_tx_callback(const struct device *dma_dev, void *arg,
 	}
 
 	/* Prepare to send the next data block */
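+	/* ISR context again: zero timeout, an empty queue is reported through
+	 * the error path instead of waiting.
+	 */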
-	ret = queue_get(&stream->mem_block_queue, &stream->mem_block,
-			&mem_block_size);
+	ret = queue_get(stream->msgq, &stream->mem_block,
+			&mem_block_size, 0);
 	if (ret < 0) {
 		if (stream->state == I2S_STATE_STOPPING) {
 			stream->state = I2S_STATE_READY;
@@ -691,7 +645,6 @@ static void dma_tx_callback(const struct device *dma_dev, void *arg,
 		}
 		goto tx_disable;
 	}
-	k_sem_give(&stream->sem);
 
 	/* Assure cache coherency before DMA read operation */
 	sys_cache_data_flush_range(stream->mem_block, mem_block_size);
@@ -765,10 +718,6 @@ static int i2s_stm32_initialize(const struct device *dev)
 
 	cfg->irq_config(dev);
 
-	k_sem_init(&dev_data->rx.sem, 0, CONFIG_I2S_STM32_RX_BLOCK_COUNT);
-	k_sem_init(&dev_data->tx.sem, CONFIG_I2S_STM32_TX_BLOCK_COUNT,
-		   CONFIG_I2S_STM32_TX_BLOCK_COUNT);
-
 	for (i = 0; i < STM32_DMA_NUM_CHANNELS; i++) {
 		active_dma_rx_channel[i] = NULL;
 		active_dma_tx_channel[i] = NULL;
@@ -847,12 +796,11 @@ static int tx_stream_start(struct stream *stream, const struct device *dev)
 	size_t mem_block_size;
 	int ret;
 
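+	/* Zero timeout: a start trigger with an empty TX queue should fail
+	 * immediately rather than block here.
+	 */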
-	ret = queue_get(&stream->mem_block_queue, &stream->mem_block,
-			&mem_block_size);
+	ret = queue_get(stream->msgq, &stream->mem_block,
+			&mem_block_size, 0);
 	if (ret < 0) {
 		return ret;
 	}
-	k_sem_give(&stream->sem);
 
 	/* Assure cache coherency before DMA read operation */
 	sys_cache_data_flush_range(stream->mem_block, mem_block_size);
@@ -948,34 +896,6 @@ static void tx_stream_disable(struct stream *stream, const struct device *dev)
 	active_dma_tx_channel[stream->dma_channel] = NULL;
 }
 
-static void rx_queue_drop(struct stream *stream)
-{
-	size_t size;
-	void *mem_block;
-
-	while (queue_get(&stream->mem_block_queue, &mem_block, &size) == 0) {
-		k_mem_slab_free(stream->cfg.mem_slab, mem_block);
-	}
-
-	k_sem_reset(&stream->sem);
-}
-
-static void tx_queue_drop(struct stream *stream)
-{
-	size_t size;
-	void *mem_block;
-	unsigned int n = 0U;
-
-	while (queue_get(&stream->mem_block_queue, &mem_block, &size) == 0) {
-		k_mem_slab_free(stream->cfg.mem_slab, mem_block);
-		n++;
-	}
-
-	for (; n > 0; n--) {
-		k_sem_give(&stream->sem);
-	}
-}
-
 static const struct device *get_dev_from_rx_dma_channel(uint32_t dma_channel)
 {
 	return active_dma_rx_channel[dma_channel];
@@ -1011,9 +931,7 @@ static const struct device *get_dev_from_tx_dma_channel(uint32_t dma_channel)
 			 STM32_DMA_FEATURES(index, dir)),		\
 	.stream_start = dir##_stream_start,				\
 	.stream_disable = dir##_stream_disable,				\
-	.queue_drop = dir##_queue_drop,					\
-	.mem_block_queue.buf = dir##_##index##_ring_buf,		\
-	.mem_block_queue.len = ARRAY_SIZE(dir##_##index##_ring_buf)	\
+	.msgq = &dir##_##index##_queue,					\
 }
 
 #define I2S_STM32_INIT(index)						\
@@ -1034,8 +952,8 @@ static const struct i2s_stm32_cfg i2s_stm32_config_##index = {	\
 	.master_clk_sel = DT_INST_PROP(index, mck_enabled)		\
 };									\
 									\
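+/* K_MSGQ_DEFINE() statically allocates one 4-byte-aligned slot per	\
+ * audio block; the ring buffer's "+1" slack slot is no longer needed.	\
+ */									\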
-struct queue_item rx_##index##_ring_buf[CONFIG_I2S_STM32_RX_BLOCK_COUNT + 1];\
-struct queue_item tx_##index##_ring_buf[CONFIG_I2S_STM32_TX_BLOCK_COUNT + 1];\
+K_MSGQ_DEFINE(rx_##index##_queue, sizeof(struct queue_item), CONFIG_I2S_STM32_RX_BLOCK_COUNT, 4);\
+K_MSGQ_DEFINE(tx_##index##_queue, sizeof(struct queue_item), CONFIG_I2S_STM32_TX_BLOCK_COUNT, 4);\
 									\
 static struct i2s_stm32_data i2s_stm32_data_##index = {		\
 	UTIL_AND(DT_INST_DMAS_HAS_NAME(index, rx),			\