@@ -1,43 +1,22 @@
 /*
  * Copyright (c) 2024 Croxel Inc.
+ * Copyright (c) 2025 Croxel Inc.
  *
  * SPDX-License-Identifier: Apache-2.0
  */

 #include <zephyr/rtio/work.h>
 #include <zephyr/kernel.h>

-#define RTIO_WORKQ_PRIO_MED	CONFIG_RTIO_WORKQ_PRIO_MED
-#define RTIO_WORKQ_PRIO_HIGH	RTIO_WORKQ_PRIO_MED - 1
-#define RTIO_WORKQ_PRIO_LOW	RTIO_WORKQ_PRIO_MED + 1
-
 K_MEM_SLAB_DEFINE_STATIC(rtio_work_items_slab,
			  sizeof(struct rtio_work_req),
			  CONFIG_RTIO_WORKQ_POOL_ITEMS,
			  4);
-
-static void rtio_work_req_done_handler(struct k_p4wq_work *work)
-{
-	struct rtio_work_req *req = CONTAINER_OF(work,
-						 struct rtio_work_req,
-						 work);
-	k_mem_slab_free(&rtio_work_items_slab, req);
-}
-
-K_P4WQ_DEFINE_WITH_DONE_HANDLER(rtio_workq,
-				CONFIG_RTIO_WORKQ_THREADS_POOL,
-				CONFIG_RTIO_WORKQ_STACK_SIZE,
-				rtio_work_req_done_handler);
-
-static void rtio_work_handler(struct k_p4wq_work *work)
-{
-	struct rtio_work_req *req = CONTAINER_OF(work,
-						 struct rtio_work_req,
-						 work);
-	struct rtio_iodev_sqe *iodev_sqe = req->iodev_sqe;
-
-	req->handler(iodev_sqe);
-}
+static K_THREAD_STACK_ARRAY_DEFINE(rtio_workq_threads_stack,
+				   CONFIG_RTIO_WORKQ_THREADS_POOL,
+				   CONFIG_RTIO_WORKQ_THREADS_POOL_STACK_SIZE);
+static struct k_thread rtio_work_threads[CONFIG_RTIO_WORKQ_THREADS_POOL];
+static K_QUEUE_DEFINE(rtio_workq);

 struct rtio_work_req *rtio_work_req_alloc(void)
 {
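Note on sizing: the new pool is driven entirely by the Kconfig symbols referenced above. CONFIG_RTIO_WORKQ_POOL_ITEMS bounds how many requests can be outstanding at once (the slab simply runs dry after that), while the THREADS_POOL symbols size the worker threads created in static_init() further down. A minimal prj.conf sketch; the symbol names come from this commit, but the values here are illustrative assumptions, not defaults:

# Values are illustrative, not defaults from this commit
CONFIG_RTIO_WORKQ_POOL_ITEMS=4
CONFIG_RTIO_WORKQ_THREADS_POOL=2
CONFIG_RTIO_WORKQ_THREADS_POOL_STACK_SIZE=1024
CONFIG_RTIO_WORKQ_THREADS_POOL_PRIO=0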
@@ -49,12 +28,6 @@ struct rtio_work_req *rtio_work_req_alloc(void)
 		return NULL;
 	}

-	/** Initialize work item before using it as it comes
-	 * from a Memory slab (no-init region).
-	 */
-	req->work.thread = NULL;
-	(void)k_sem_init(&req->work.done_sem, 1, 1);
-
 	return req;
 }

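Note on allocation failure: with the P4WQ re-initialization gone, rtio_work_req_alloc() now just hands out a slab item, and it still returns NULL once CONFIG_RTIO_WORKQ_POOL_ITEMS requests are in flight, so callers must handle exhaustion. A sketch of how a caller might cope; my_iodev_submit and my_dev_work_handler are hypothetical names, while rtio_iodev_sqe_ok()/rtio_iodev_sqe_err() are the usual RTIO completion helpers:

/* Hypothetical iodev submit path built on this API (sketch only) */
static void my_dev_work_handler(struct rtio_iodev_sqe *iodev_sqe)
{
	/* ... perform the blocking work here ... */

	rtio_iodev_sqe_ok(iodev_sqe, 0);	/* complete the SQE */
}

static void my_iodev_submit(struct rtio_iodev_sqe *iodev_sqe)
{
	struct rtio_work_req *req = rtio_work_req_alloc();

	if (req == NULL) {
		/* Pool exhausted: fail the SQE so the producer can retry */
		rtio_iodev_sqe_err(iodev_sqe, -ENOMEM);
		return;
	}

	rtio_work_req_submit(req, iodev_sqe, my_dev_work_handler);
}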
@@ -71,31 +44,52 @@ void rtio_work_req_submit(struct rtio_work_req *req,
 		return;
 	}

-	struct k_p4wq_work *work = &req->work;
-	struct rtio_sqe *sqe = &iodev_sqe->sqe;
-
-	/** Link the relevant info so that we can get it on the k_p4wq_work work item.
-	 */
 	req->iodev_sqe = iodev_sqe;
 	req->handler = handler;

-	/** Set the required information to handle the action */
-	work->handler = rtio_work_handler;
-	work->deadline = 0;
-
-	if (sqe->prio == RTIO_PRIO_LOW) {
-		work->priority = RTIO_WORKQ_PRIO_LOW;
-	} else if (sqe->prio == RTIO_PRIO_HIGH) {
-		work->priority = RTIO_WORKQ_PRIO_HIGH;
-	} else {
-		work->priority = RTIO_WORKQ_PRIO_MED;
-	}
-
-	/** Decoupling action: Let the P4WQ execute the action. */
-	k_p4wq_submit(&rtio_workq, work);
+	/** For now we're simply treating this as a FIFO queue. It may be
+	 * desirable to expand this to handle queue ordering based on RTIO
+	 * SQE priority.
+	 */
+	k_queue_append(&rtio_workq, req);
 }

 uint32_t rtio_work_req_used_count_get(void)
 {
 	return k_mem_slab_num_used_get(&rtio_work_items_slab);
 }
+
+static void rtio_workq_thread_fn(void *arg1, void *arg2, void *arg3)
+{
+	ARG_UNUSED(arg1);
+	ARG_UNUSED(arg2);
+	ARG_UNUSED(arg3);
+
+	while (true) {
+		struct rtio_work_req *req = k_queue_get(&rtio_workq, K_FOREVER);
+
+		if (req != NULL) {
+			req->handler(req->iodev_sqe);
+
+			k_mem_slab_free(&rtio_work_items_slab, req);
+		}
+	}
+}
+
+static int static_init(void)
+{
+	for (size_t i = 0; i < ARRAY_SIZE(rtio_work_threads); i++) {
+		k_thread_create(&rtio_work_threads[i],
+				rtio_workq_threads_stack[i],
+				CONFIG_RTIO_WORKQ_THREADS_POOL_STACK_SIZE,
+				rtio_workq_thread_fn,
+				NULL, NULL, NULL,
+				CONFIG_RTIO_WORKQ_THREADS_POOL_PRIO,
+				0,
+				K_NO_WAIT);
+	}
+
+	return 0;
+}
+
+SYS_INIT(static_init, POST_KERNEL, 1);
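The comment in rtio_work_req_submit() flags SQE-priority ordering as possible future work. A hedged sketch of the cheapest variant on top of this same single k_queue (not part of this commit): treat RTIO_PRIO_HIGH submissions as a queue-jump by prepending instead of appending, reusing the sqe->prio field the removed P4WQ path consulted:

	struct rtio_sqe *sqe = &iodev_sqe->sqe;

	if (sqe->prio == RTIO_PRIO_HIGH) {
		/* Jump ahead of already-queued medium/low priority work */
		k_queue_prepend(&rtio_workq, req);
	} else {
		k_queue_append(&rtio_workq, req);
	}

This stays O(1) per submit but collapses the three RTIO priorities into two service levels; restoring full ordering would mean insertion scans with k_queue_insert() or one queue per priority level, closer to what the removed P4WQ setup provided.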