 */

#include <zephyr/ztest.h>
- #include <zephyr/arch/exception.h>
+ #include <zephyr/irq_offload.h>
+
+ #include <zephyr/kernel/thread_stack.h>

#define IV_CTRL_PROTECTION_EXCEPTION 21

+ #define CTRL_PROTECTION_ERRORCODE_NEAR_RET 1
#define CTRL_PROTECTION_ERRORCODE_ENDBRANCH 3

- extern int should_work(int a);
- extern int should_not_work(int a);
+ #define STACKSIZE 1024
+ #define THREAD_PRIORITY 5
+
+ K_SEM_DEFINE(error_handler_sem, 0, 1);

- static bool expect_fault;
- static int expect_code;
+ volatile bool expect_fault;
+ volatile int expect_code;
+ volatile int expect_reason;

void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{
	if (expect_fault) {
#ifdef CONFIG_X86_64
- 		zassert_equal(pEsf->vector, IV_CTRL_PROTECTION_EXCEPTION,
- 			      "unexpected exception");
+ 		zassert_equal(pEsf->vector, expect_reason, "unexpected exception");
		zassert_equal(pEsf->code, expect_code, "unexpected error code");
#else
- 		zassert_equal(z_x86_exception_vector, IV_CTRL_PROTECTION_EXCEPTION,
- 			      "unexpected exception");
+ 		zassert_equal(z_x86_exception_vector, expect_reason, "unexpected exception");
		zassert_equal(pEsf->errorCode, expect_code, "unexpected error code");
#endif
		printk("fatal error expected as part of test case\n");
		expect_fault = false;
+
+ 		k_sem_give(&error_handler_sem);
	} else {
		printk("fatal error was unexpected, aborting\n");
		TC_END_REPORT(TC_FAIL);
		k_fatal_halt(reason);
	}
}

+ #ifdef CONFIG_HW_SHADOW_STACK
+ void thread_a_entry(void *p1, void *p2, void *p3);
+ K_SEM_DEFINE(thread_a_sem, 0, 1);
+ K_THREAD_DEFINE(thread_a, STACKSIZE, thread_a_entry, NULL, NULL, NULL,
+ 		THREAD_PRIORITY, 0, -1);
+
+ void thread_b_entry(void *p1, void *p2, void *p3);
+ K_SEM_DEFINE(thread_b_sem, 0, 1);
+ K_SEM_DEFINE(thread_b_irq_sem, 0, 1);
+ K_THREAD_DEFINE(thread_b, STACKSIZE, thread_b_entry, NULL, NULL, NULL,
+ 		THREAD_PRIORITY, 0, -1);
+
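+ /* Report whether the hardware shadow stack is currently active by checking
+  * the SHSTK_EN bit in the supervisor CET MSR.
+  */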
+ static bool is_shstk_enabled(void)
+ {
+ 	long cur;
+
+ 	cur = z_x86_msr_read(X86_S_CET_MSR);
+ 	return (cur & X86_S_CET_MSR_SHSTK_EN) == X86_S_CET_MSR_SHSTK_EN;
+ }
+
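+ /* thread_c starts with no delay, so it verifies that a statically defined,
+  * auto-started thread already runs with the shadow stack enabled.
+  */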
+ void thread_c_entry(void *p1, void *p2, void *p3)
+ {
+ 	zassert_true(is_shstk_enabled(), "shadow stack not enabled on static thread");
+ }
+
+ K_THREAD_DEFINE(thread_c, STACKSIZE, thread_c_entry, NULL, NULL, NULL,
+ 		THREAD_PRIORITY, 0, 0);
+
+ void __attribute__((optimize("O0"))) foo(void)
+ {
+ 	printk("foo called\n");
+ }
+
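+ /* Deliberately write past the end of a local array; the intent is to clobber
+  * the saved return address with &foo so that the return no longer matches the
+  * shadow stack and a control-protection fault is raised.
+  */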
+ void __attribute__((optimize("O0"))) fail(void)
+ {
+ 	long a[] = {0};
+
+ 	printk("should fail after this\n");
+
+ 	*(a + 2) = (long)&foo;
+ }
+
+ struct k_work work;
+
+ void work_handler(struct k_work *wrk)
+ {
+ 	printk("work handler\n");
+
+ 	zassert_true(is_shstk_enabled(), "shadow stack not enabled");
+ }
+
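+ /* Verify the system work queue thread also runs with the shadow stack enabled. */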
+ ZTEST(cet, test_shstk_work_q)
+ {
+ 	k_work_init(&work, work_handler);
+ 	k_work_submit(&work);
+ }
+
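+ /* Offloaded IRQ handler: when handed a semaphore it offloads itself once more
+  * (with NULL) to exercise one nested level, then signals completion.
+  */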
+ void intr_handler(const void *p)
+ {
+ 	printk("interrupt handler\n");
+
+ 	if (p != NULL) {
+ 		/* Test one nested level. It should just work. */
+ 		printk("trying interrupt handler\n");
+ 		irq_offload(intr_handler, NULL);
+
+ 		k_sem_give((struct k_sem *)p);
+ 	} else {
+ 		printk("interrupt handler nested\n");
+ 	}
+ }
+
+ void thread_b_entry(void *p1, void *p2, void *p3)
+ {
+ 	k_sem_take(&thread_b_sem, K_FOREVER);
+
+ 	irq_offload(intr_handler, &thread_b_irq_sem);
+
+ 	k_sem_take(&thread_b_irq_sem, K_FOREVER);
+ }
+
+ ZTEST(cet, test_shstk_irq)
+ {
+ 	k_thread_start(thread_b);
+
+ 	k_sem_give(&thread_b_sem);
+
+ 	k_thread_join(thread_b, K_FOREVER);
+ }
+
+ void thread_a_entry(void *p1, void *p2, void *p3)
+ {
+ 	k_sem_take(&thread_a_sem, K_FOREVER);
+
+ 	fail();
+
+ 	zassert_unreachable("should not reach here");
+ }
+
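+ /* Arm the fatal error handler for a control-protection fault with the
+  * near-return error code, let thread_a corrupt its return address in fail(),
+  * then wait for the handler before aborting the thread.
+  */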
+ ZTEST(cet, test_shstk)
+ {
+ 	k_thread_start(thread_a);
+
+ 	expect_fault = true;
+ 	expect_code = CTRL_PROTECTION_ERRORCODE_NEAR_RET;
+ 	expect_reason = IV_CTRL_PROTECTION_EXCEPTION;
+ 	k_sem_give(&thread_a_sem);
+
+ 	k_sem_take(&error_handler_sem, K_FOREVER);
+ 	k_thread_abort(thread_a);
+ }
+ #endif /* CONFIG_HW_SHADOW_STACK */
+
+ #ifdef CONFIG_X86_CET_IBT
+ extern int should_work(int a);
+ extern int should_not_work(int a);
+
/* Round trip to trick optimisations and ensure the calls are indirect */
int do_call(int (*func)(int), int a)
{
@@ -50,8 +173,10 @@ ZTEST(cet, test_ibt)

	expect_fault = true;
	expect_code = CTRL_PROTECTION_ERRORCODE_ENDBRANCH;
+ 	expect_reason = IV_CTRL_PROTECTION_EXCEPTION;
	do_call(should_not_work, 1);
	zassert_unreachable("should_not_work did not fault");
}
+ #endif /* CONFIG_X86_CET_IBT */

ZTEST_SUITE(cet, NULL, NULL, NULL, NULL, NULL);