@@ -4,6 +4,7 @@
  * SPDX-License-Identifier: Apache-2.0
  */
 
+#include <zephyr/kernel.h>
 #include <zephyr/drivers/uart.h>
 #include <zephyr/sys/ring_buffer.h>
 #include <zephyr/sys/atomic.h>
@@ -14,6 +15,9 @@
 #include <zephyr/logging/log.h>
 
 LOG_MODULE_REGISTER(uart_nus, CONFIG_UART_LOG_LEVEL);
 
+K_THREAD_STACK_DEFINE(nus_work_queue_stack, 2048);
+static struct k_work_q nus_work_queue;
+
 struct uart_bt_data {
 	struct {
 		struct bt_nus_inst *inst;
@@ -47,7 +51,7 @@ static void bt_notif_enabled(bool enabled, void *ctx)
 	LOG_DBG("%s() - %s", __func__, enabled ? "enabled" : "disabled");
 
 	if (!ring_buf_is_empty(dev_data->uart.tx_ringbuf)) {
-		k_work_reschedule(&dev_data->uart.tx_work, K_NO_WAIT);
+		k_work_reschedule_for_queue(&nus_work_queue, &dev_data->uart.tx_work, K_NO_WAIT);
 	}
 }
 
@@ -71,7 +75,7 @@ static void bt_received(struct bt_conn *conn, const void *data, uint16_t len, vo
 		LOG_ERR("RX Ring buffer full. received: %d, added to queue: %d", len, put_len);
 	}
 
-	k_work_submit(&dev_data->uart.cb_work);
+	k_work_submit_to_queue(&nus_work_queue, &dev_data->uart.cb_work);
 }
 
 static void cb_work_handler(struct k_work *work)
@@ -113,7 +117,7 @@ static void tx_work_handler(struct k_work *work)
 	} while (len > 0 && !err);
 
 	if ((ring_buf_space_get(dev_data->uart.tx_ringbuf) > 0) && dev_data->uart.tx_irq_ena) {
-		k_work_submit(&dev_data->uart.cb_work);
+		k_work_submit_to_queue(&nus_work_queue, &dev_data->uart.cb_work);
 	}
 }
 
@@ -128,7 +132,7 @@ static int uart_bt_fifo_fill(const struct device *dev, const uint8_t *tx_data, i
 	}
 
 	if (atomic_get(&dev_data->bt.enabled)) {
-		k_work_reschedule(&dev_data->uart.tx_work, K_NO_WAIT);
+		k_work_reschedule_for_queue(&nus_work_queue, &dev_data->uart.tx_work, K_NO_WAIT);
 	}
 
 	return wrote;
@@ -169,7 +173,7 @@ static void uart_bt_poll_out(const struct device *dev, unsigned char c)
 		 * data, so more than one byte is transmitted (e.g: when poll_out is
 		 * called inside a for-loop).
 		 */
-		k_work_schedule(&dev_data->uart.tx_work, K_MSEC(1));
+		k_work_schedule_for_queue(&nus_work_queue, &dev_data->uart.tx_work, K_MSEC(1));
 	}
 }
 
@@ -191,7 +195,7 @@ static void uart_bt_irq_tx_enable(const struct device *dev)
 	dev_data->uart.tx_irq_ena = true;
 
 	if (uart_bt_irq_tx_ready(dev)) {
-		k_work_submit(&dev_data->uart.cb_work);
+		k_work_submit_to_queue(&nus_work_queue, &dev_data->uart.cb_work);
 	}
 }
 
@@ -219,7 +223,7 @@ static void uart_bt_irq_rx_enable(const struct device *dev)
 
 	dev_data->uart.rx_irq_ena = true;
 
-	k_work_submit(&dev_data->uart.cb_work);
+	k_work_submit_to_queue(&nus_work_queue, &dev_data->uart.cb_work);
 }
 
 static void uart_bt_irq_rx_disable(const struct device *dev)
@@ -267,6 +271,19 @@ static const struct uart_driver_api uart_bt_driver_api = {
 	.irq_callback_set = uart_bt_irq_callback_set,
 };
 
+static int uart_bt_workqueue_init(void)
+{
+	k_work_queue_init(&nus_work_queue);
+	k_work_queue_start(&nus_work_queue, nus_work_queue_stack,
+			   K_THREAD_STACK_SIZEOF(nus_work_queue_stack),
+			   K_LOWEST_APPLICATION_THREAD_PRIO, NULL);
+
+	return 0;
+}
+
+/** The work queue is shared across all instances, hence we initialize it separately. */
+SYS_INIT(uart_bt_workqueue_init, POST_KERNEL, CONFIG_SERIAL_INIT_PRIORITY);
+
 static int uart_bt_init(const struct device *dev)
 {
 	int err;
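
Note: the hunks above move every work submission off the system work queue (`k_work_submit()`, `k_work_reschedule()`, `k_work_schedule()`) and onto the new dedicated queue via the `*_to_queue`/`*_for_queue` variants; `k_work_reschedule_for_queue()` and `k_work_schedule_for_queue()` are the counterparts for `struct k_work_delayable` items such as `tx_work`. For reference, a minimal self-contained sketch of the same pattern; all names here (`my_wq`, `my_work`, the 1024-byte stack, the SYS_INIT priority of 99) are illustrative and not taken from the driver:

#include <zephyr/kernel.h>
#include <zephyr/init.h>

/* Illustrative example, not part of the driver above. */
K_THREAD_STACK_DEFINE(my_wq_stack, 1024);
static struct k_work_q my_wq;
static struct k_work my_work;

static void my_handler(struct k_work *work)
{
	/* Runs on my_wq's own thread, so blocking here cannot
	 * stall items queued on the system work queue.
	 */
}

static int my_wq_init(void)
{
	k_work_queue_init(&my_wq);
	k_work_queue_start(&my_wq, my_wq_stack,
			   K_THREAD_STACK_SIZEOF(my_wq_stack),
			   K_LOWEST_APPLICATION_THREAD_PRIO, NULL);

	k_work_init(&my_work, my_handler);
	k_work_submit_to_queue(&my_wq, &my_work); /* not k_work_submit() */
	return 0;
}

SYS_INIT(my_wq_init, POST_KERNEL, 99);

The likely motivation for the change is isolation: NUS TX/RX handling can block while Bluetooth buffers or notifications are in flight, and running it on the shared system work queue would delay every other system work item for that duration.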