Skip to content

Commit 18d30e3

Browse files
cdesjardins authored and kartben committed
drivers: i2s: Make the stm32 i2s driver use a msgq
Currently uses a home-grown ring buffer and a semaphore with IRQ locks. Signed-off-by: Chris Desjardins <chris@arch-embedded.com>
1 parent 9ae7c1e commit 18d30e3

File tree

2 files changed

+37
-129
lines changed

2 files changed

+37
-129
lines changed

drivers/i2s/i2s_ll_stm32.c

Lines changed: 36 additions & 118 deletions
Original file line numberDiff line numberDiff line change
@@ -23,79 +23,49 @@
2323
#include <zephyr/irq.h>
2424
LOG_MODULE_REGISTER(i2s_ll_stm32);
2525

26-
#define MODULO_INC(val, max) { val = (++val < max) ? val : 0; }
27-
2826
static unsigned int div_round_closest(uint32_t dividend, uint32_t divisor)
2927
{
3028
return (dividend + (divisor / 2U)) / divisor;
3129
}
3230

33-
static bool queue_is_empty(struct ring_buffer *rb)
31+
static bool queue_is_empty(struct k_msgq *q)
3432
{
35-
unsigned int key;
36-
37-
key = irq_lock();
38-
39-
if (rb->tail != rb->head) {
40-
/* Ring buffer is not empty */
41-
irq_unlock(key);
42-
return false;
43-
}
44-
45-
irq_unlock(key);
46-
47-
return true;
33+
return (k_msgq_num_used_get(q) == 0) ? true : false;
4834
}
4935

5036
/*
5137
* Get data from the queue
5238
*/
53-
static int queue_get(struct ring_buffer *rb, void **mem_block, size_t *size)
39+
static int queue_get(struct k_msgq *q, void **mem_block, size_t *size, int32_t timeout)
5440
{
55-
unsigned int key;
41+
struct queue_item item;
42+
int result = k_msgq_get(q, &item, SYS_TIMEOUT_MS(timeout));
5643

57-
key = irq_lock();
58-
59-
if (queue_is_empty(rb) == true) {
60-
irq_unlock(key);
61-
return -ENOMEM;
44+
if (result == 0) {
45+
*mem_block = item.mem_block;
46+
*size = item.size;
6247
}
63-
64-
*mem_block = rb->buf[rb->tail].mem_block;
65-
*size = rb->buf[rb->tail].size;
66-
MODULO_INC(rb->tail, rb->len);
67-
68-
irq_unlock(key);
69-
70-
return 0;
48+
return result;
7149
}
7250

7351
/*
7452
* Put data in the queue
7553
*/
76-
static int queue_put(struct ring_buffer *rb, void *mem_block, size_t size)
54+
static int queue_put(struct k_msgq *q, void *mem_block, size_t size, int32_t timeout)
7755
{
78-
uint16_t head_next;
79-
unsigned int key;
56+
struct queue_item item = {.mem_block = mem_block, .size = size};
8057

81-
key = irq_lock();
58+
return k_msgq_put(q, &item, SYS_TIMEOUT_MS(timeout));
59+
}
8260

83-
head_next = rb->head;
84-
MODULO_INC(head_next, rb->len);
61+
static void stream_queue_drop(struct stream *s)
62+
{
63+
size_t size;
64+
void *mem_block;
8565

86-
if (head_next == rb->tail) {
87-
/* Ring buffer is full */
88-
irq_unlock(key);
89-
return -ENOMEM;
66+
while (queue_get(s->msgq, &mem_block, &size, 0) == 0) {
67+
k_mem_slab_free(s->cfg.mem_slab, mem_block);
9068
}
91-
92-
rb->buf[rb->head].mem_block = mem_block;
93-
rb->buf[rb->head].size = size;
94-
rb->head = head_next;
95-
96-
irq_unlock(key);
97-
98-
return 0;
9969
}
10070

10171
static int i2s_stm32_enable_clock(const struct device *dev)
@@ -224,7 +194,7 @@ static int i2s_stm32_configure(const struct device *dev, enum i2s_dir dir,
224194
}
225195

226196
if (i2s_cfg->frame_clk_freq == 0U) {
227-
stream->queue_drop(stream);
197+
stream_queue_drop(stream);
228198
memset(&stream->cfg, 0, sizeof(struct i2s_config));
229199
stream->state = I2S_STATE_NOT_READY;
230200
return 0;
@@ -385,7 +355,7 @@ static int i2s_stm32_trigger(const struct device *dev, enum i2s_dir dir,
385355
}
386356

387357
if (dir == I2S_DIR_TX) {
388-
if ((queue_is_empty(&stream->mem_block_queue) == false) ||
358+
if ((queue_is_empty(stream->msgq) == false) ||
389359
(ll_func_i2s_dma_busy(cfg->i2s))) {
390360
stream->state = I2S_STATE_STOPPING;
391361
/*
@@ -412,7 +382,7 @@ static int i2s_stm32_trigger(const struct device *dev, enum i2s_dir dir,
412382
return -EIO;
413383
}
414384
stream->stream_disable(stream, dev);
415-
stream->queue_drop(stream);
385+
stream_queue_drop(stream);
416386
stream->state = I2S_STATE_READY;
417387
break;
418388

@@ -422,7 +392,7 @@ static int i2s_stm32_trigger(const struct device *dev, enum i2s_dir dir,
422392
return -EIO;
423393
}
424394
stream->state = I2S_STATE_READY;
425-
stream->queue_drop(stream);
395+
stream_queue_drop(stream);
426396
break;
427397

428398
default:
@@ -444,16 +414,8 @@ static int i2s_stm32_read(const struct device *dev, void **mem_block,
444414
return -EIO;
445415
}
446416

447-
if (dev_data->rx.state != I2S_STATE_ERROR) {
448-
ret = k_sem_take(&dev_data->rx.sem,
449-
SYS_TIMEOUT_MS(dev_data->rx.cfg.timeout));
450-
if (ret < 0) {
451-
return ret;
452-
}
453-
}
454-
455417
/* Get data from the beginning of RX queue */
456-
ret = queue_get(&dev_data->rx.mem_block_queue, mem_block, size);
418+
ret = queue_get(dev_data->rx.msgq, mem_block, size, dev_data->rx.cfg.timeout);
457419
if (ret < 0) {
458420
return -EIO;
459421
}
@@ -465,22 +427,15 @@ static int i2s_stm32_write(const struct device *dev, void *mem_block,
465427
size_t size)
466428
{
467429
struct i2s_stm32_data *const dev_data = dev->data;
468-
int ret;
469430

470431
if (dev_data->tx.state != I2S_STATE_RUNNING &&
471432
dev_data->tx.state != I2S_STATE_READY) {
472433
LOG_DBG("invalid state");
473434
return -EIO;
474435
}
475436

476-
ret = k_sem_take(&dev_data->tx.sem,
477-
SYS_TIMEOUT_MS(dev_data->tx.cfg.timeout));
478-
if (ret < 0) {
479-
return ret;
480-
}
481-
482437
/* Add data to the end of the TX queue */
483-
return queue_put(&dev_data->tx.mem_block_queue, mem_block, size);
438+
return queue_put(dev_data->tx.msgq, mem_block, size, dev_data->tx.cfg.timeout);
484439
}
485440

486441
static DEVICE_API(i2s, i2s_stm32_driver_api) = {
@@ -604,13 +559,12 @@ static void dma_rx_callback(const struct device *dma_dev, void *arg,
604559
sys_cache_data_invd_range(mblk_tmp, stream->cfg.block_size);
605560

606561
/* All block data received */
607-
ret = queue_put(&stream->mem_block_queue, mblk_tmp,
608-
stream->cfg.block_size);
562+
ret = queue_put(stream->msgq, mblk_tmp,
563+
stream->cfg.block_size, 0);
609564
if (ret < 0) {
610565
stream->state = I2S_STATE_ERROR;
611566
goto rx_disable;
612567
}
613-
k_sem_give(&stream->sem);
614568

615569
/* Stop reception if we were requested */
616570
if (stream->state == I2S_STATE_STOPPING) {
@@ -659,8 +613,8 @@ static void dma_tx_callback(const struct device *dma_dev, void *arg,
659613
* as stated in zephyr i2s specification, in case of DRAIN command
660614
* send all data in the transmit queue and stop the transmission.
661615
*/
662-
if (queue_is_empty(&stream->mem_block_queue) == true) {
663-
stream->queue_drop(stream);
616+
if (queue_is_empty(stream->msgq) == true) {
617+
stream_queue_drop(stream);
664618
stream->state = I2S_STATE_READY;
665619
goto tx_disable;
666620
} else if (stream->tx_stop_for_drain == false) {
@@ -681,8 +635,8 @@ static void dma_tx_callback(const struct device *dma_dev, void *arg,
681635
}
682636

683637
/* Prepare to send the next data block */
684-
ret = queue_get(&stream->mem_block_queue, &stream->mem_block,
685-
&mem_block_size);
638+
ret = queue_get(stream->msgq, &stream->mem_block,
639+
&mem_block_size, 0);
686640
if (ret < 0) {
687641
if (stream->state == I2S_STATE_STOPPING) {
688642
stream->state = I2S_STATE_READY;
@@ -691,7 +645,6 @@ static void dma_tx_callback(const struct device *dma_dev, void *arg,
691645
}
692646
goto tx_disable;
693647
}
694-
k_sem_give(&stream->sem);
695648

696649
/* Assure cache coherency before DMA read operation */
697650
sys_cache_data_flush_range(stream->mem_block, mem_block_size);
@@ -765,10 +718,6 @@ static int i2s_stm32_initialize(const struct device *dev)
765718

766719
cfg->irq_config(dev);
767720

768-
k_sem_init(&dev_data->rx.sem, 0, CONFIG_I2S_STM32_RX_BLOCK_COUNT);
769-
k_sem_init(&dev_data->tx.sem, CONFIG_I2S_STM32_TX_BLOCK_COUNT,
770-
CONFIG_I2S_STM32_TX_BLOCK_COUNT);
771-
772721
for (i = 0; i < STM32_DMA_NUM_CHANNELS; i++) {
773722
active_dma_rx_channel[i] = NULL;
774723
active_dma_tx_channel[i] = NULL;
@@ -847,12 +796,11 @@ static int tx_stream_start(struct stream *stream, const struct device *dev)
847796
size_t mem_block_size;
848797
int ret;
849798

850-
ret = queue_get(&stream->mem_block_queue, &stream->mem_block,
851-
&mem_block_size);
799+
ret = queue_get(stream->msgq, &stream->mem_block,
800+
&mem_block_size, 0);
852801
if (ret < 0) {
853802
return ret;
854803
}
855-
k_sem_give(&stream->sem);
856804

857805
/* Assure cache coherency before DMA read operation */
858806
sys_cache_data_flush_range(stream->mem_block, mem_block_size);
@@ -948,34 +896,6 @@ static void tx_stream_disable(struct stream *stream, const struct device *dev)
948896
active_dma_tx_channel[stream->dma_channel] = NULL;
949897
}
950898

951-
static void rx_queue_drop(struct stream *stream)
952-
{
953-
size_t size;
954-
void *mem_block;
955-
956-
while (queue_get(&stream->mem_block_queue, &mem_block, &size) == 0) {
957-
k_mem_slab_free(stream->cfg.mem_slab, mem_block);
958-
}
959-
960-
k_sem_reset(&stream->sem);
961-
}
962-
963-
static void tx_queue_drop(struct stream *stream)
964-
{
965-
size_t size;
966-
void *mem_block;
967-
unsigned int n = 0U;
968-
969-
while (queue_get(&stream->mem_block_queue, &mem_block, &size) == 0) {
970-
k_mem_slab_free(stream->cfg.mem_slab, mem_block);
971-
n++;
972-
}
973-
974-
for (; n > 0; n--) {
975-
k_sem_give(&stream->sem);
976-
}
977-
}
978-
979899
static const struct device *get_dev_from_rx_dma_channel(uint32_t dma_channel)
980900
{
981901
return active_dma_rx_channel[dma_channel];
@@ -1011,9 +931,7 @@ static const struct device *get_dev_from_tx_dma_channel(uint32_t dma_channel)
1011931
STM32_DMA_FEATURES(index, dir)), \
1012932
.stream_start = dir##_stream_start, \
1013933
.stream_disable = dir##_stream_disable, \
1014-
.queue_drop = dir##_queue_drop, \
1015-
.mem_block_queue.buf = dir##_##index##_ring_buf, \
1016-
.mem_block_queue.len = ARRAY_SIZE(dir##_##index##_ring_buf) \
934+
.msgq = &dir##_##index##_queue, \
1017935
}
1018936

1019937
#define I2S_STM32_INIT(index) \
@@ -1034,8 +952,8 @@ static const struct i2s_stm32_cfg i2s_stm32_config_##index = { \
1034952
.master_clk_sel = DT_INST_PROP(index, mck_enabled) \
1035953
}; \
1036954
\
1037-
struct queue_item rx_##index##_ring_buf[CONFIG_I2S_STM32_RX_BLOCK_COUNT + 1];\
1038-
struct queue_item tx_##index##_ring_buf[CONFIG_I2S_STM32_TX_BLOCK_COUNT + 1];\
955+
K_MSGQ_DEFINE(rx_##index##_queue, sizeof(struct queue_item), CONFIG_I2S_STM32_RX_BLOCK_COUNT, 4);\
956+
K_MSGQ_DEFINE(tx_##index##_queue, sizeof(struct queue_item), CONFIG_I2S_STM32_TX_BLOCK_COUNT, 4);\
1039957
\
1040958
static struct i2s_stm32_data i2s_stm32_data_##index = { \
1041959
UTIL_AND(DT_INST_DMAS_HAS_NAME(index, rx), \

drivers/i2s/i2s_ll_stm32.h

Lines changed: 1 addition & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -12,14 +12,6 @@ struct queue_item {
1212
size_t size;
1313
};
1414

15-
/* Minimal ring buffer implementation */
16-
struct ring_buffer {
17-
struct queue_item *buf;
18-
uint16_t len;
19-
uint16_t head;
20-
uint16_t tail;
21-
};
22-
2315
/* Device constant configuration parameters */
2416
struct i2s_stm32_cfg {
2517
SPI_TypeDef *i2s;
@@ -32,7 +24,7 @@ struct i2s_stm32_cfg {
3224

3325
struct stream {
3426
int32_t state;
35-
struct k_sem sem;
27+
struct k_msgq *msgq;
3628

3729
const struct device *dev_dma;
3830
uint32_t dma_channel;
@@ -44,13 +36,11 @@ struct stream {
4436
bool tx_stop_for_drain;
4537

4638
struct i2s_config cfg;
47-
struct ring_buffer mem_block_queue;
4839
void *mem_block;
4940
bool last_block;
5041
bool master;
5142
int (*stream_start)(struct stream *, const struct device *dev);
5243
void (*stream_disable)(struct stream *, const struct device *dev);
53-
void (*queue_drop)(struct stream *);
5444
};
5545

5646
/* Device run time data */

0 commit comments

Comments
 (0)