|
25 | 25 | #include <stdio.h>
|
26 | 26 | #include <string.h>
|
27 | 27 |
|
28 |
| -#include "cet_unwind.h" |
29 | 28 | #include "config.h"
|
30 | 29 | #include "libunwind.h"
|
31 | 30 | #include "libunwind_ext.h"
|
| 31 | +#include "shadow_stack_unwind.h" |
32 | 32 | #include "unwind.h"
|
33 | 33 |
|
34 | 34 | #if !defined(_LIBUNWIND_ARM_EHABI) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
|
35 | 35 | !defined(__wasm__)
|
36 | 36 |
|
37 | 37 | #ifndef _LIBUNWIND_SUPPORT_SEH_UNWIND
|
38 | 38 |
|
39 |
| -// When CET is enabled, each "call" instruction will push return address to |
40 |
| -// CET shadow stack, each "ret" instruction will pop current CET shadow stack |
41 |
| -// top and compare it with target address which program will return. |
42 |
| -// In exception handing, some stack frames will be skipped before jumping to |
43 |
| -// landing pad and we must adjust CET shadow stack accordingly. |
44 |
| -// _LIBUNWIND_POP_CET_SSP is used to adjust CET shadow stack pointer and we |
45 |
| -// directly jump to __libunwind_Registers_x86/x86_64_jumpto instead of using |
46 |
| -// a regular function call to avoid pushing to CET shadow stack again. |
| 39 | +// When shadow stack is enabled, a separate stack containing only return |
| 40 | +// addresses is maintained. On function return, the return address is |
| 41 | +// compared to the popped address from shadow stack to ensure the return |
| 42 | +// target is not tampered with. When unwinding, we're skipping the normal return |
| 43 | +// procedure for multiple frames and thus need to pop the return addresses of |
| 44 | +// the skipped frames from shadow stack to avoid triggering an exception (using |
| 45 | +// `_LIBUNWIND_POP_SHSTK_SSP()`). Also, some architectures, like the x86-family |
| 46 | +// CET, push the return addresses onto shadow stack with common call |
| 47 | +// instructions, so for these architectures, normal function calls should be |
| 48 | +// avoided when invoking the `jumpto()` function. To do this, we use inline |
| 49 | +// assembly to "goto" `jumpto()` for these architectures. |
47 | 50 | #if !defined(_LIBUNWIND_USE_CET) && !defined(_LIBUNWIND_USE_GCS)
|
48 | 51 | #define __unw_phase2_resume(cursor, fn) \
|
49 | 52 | do { \
|
50 | 53 | (void)fn; \
|
51 | 54 | __unw_resume((cursor)); \
|
52 | 55 | } while (0)
|
53 | 56 | #elif defined(_LIBUNWIND_TARGET_I386)
|
54 |
| -#define __cet_ss_step_size 4 |
| 57 | +#define __shstk_step_size (4) |
55 | 58 | #define __unw_phase2_resume(cursor, fn) \
|
56 | 59 | do { \
|
57 |
| - _LIBUNWIND_POP_CET_SSP((fn)); \ |
58 |
| - void *cetRegContext = __libunwind_cet_get_registers((cursor)); \ |
59 |
| - void *cetJumpAddress = __libunwind_cet_get_jump_target(); \ |
| 60 | + _LIBUNWIND_POP_SHSTK_SSP((fn)); \ |
| 61 | + void *shstkRegContext = __libunwind_shstk_get_registers((cursor)); \ |
| 62 | + void *shstkJumpAddress = __libunwind_shstk_get_jump_target(); \ |
60 | 63 | __asm__ volatile("push %%edi\n\t" \
|
61 | 64 | "sub $4, %%esp\n\t" \
|
62 |
| - "jmp *%%edx\n\t" :: "D"(cetRegContext), \ |
63 |
| - "d"(cetJumpAddress)); \ |
| 65 | + "jmp *%%edx\n\t" ::"D"(shstkRegContext), \ |
| 66 | + "d"(shstkJumpAddress)); \ |
64 | 67 | } while (0)
|
65 | 68 | #elif defined(_LIBUNWIND_TARGET_X86_64)
|
66 |
| -#define __cet_ss_step_size 8 |
| 69 | +#define __shstk_step_size (8) |
67 | 70 | #define __unw_phase2_resume(cursor, fn) \
|
68 | 71 | do { \
|
69 |
| - _LIBUNWIND_POP_CET_SSP((fn)); \ |
70 |
| - void *cetRegContext = __libunwind_cet_get_registers((cursor)); \ |
71 |
| - void *cetJumpAddress = __libunwind_cet_get_jump_target(); \ |
72 |
| - __asm__ volatile("jmpq *%%rdx\n\t" :: "D"(cetRegContext), \ |
73 |
| - "d"(cetJumpAddress)); \ |
| 72 | + _LIBUNWIND_POP_SHSTK_SSP((fn)); \ |
| 73 | + void *shstkRegContext = __libunwind_shstk_get_registers((cursor)); \ |
| 74 | + void *shstkJumpAddress = __libunwind_shstk_get_jump_target(); \ |
| 75 | + __asm__ volatile("jmpq *%%rdx\n\t" ::"D"(shstkRegContext), \ |
| 76 | + "d"(shstkJumpAddress)); \ |
74 | 77 | } while (0)
|
75 | 78 | #elif defined(_LIBUNWIND_TARGET_AARCH64)
|
76 |
| -#define __cet_ss_step_size 8 |
| 79 | +#define __shstk_step_size (8) |
77 | 80 | #define __unw_phase2_resume(cursor, fn) \
|
78 | 81 | do { \
|
79 |
| - _LIBUNWIND_POP_CET_SSP((fn)); \ |
80 |
| - void *cetRegContext = __libunwind_cet_get_registers((cursor)); \ |
81 |
| - void *cetJumpAddress = __libunwind_cet_get_jump_target(); \ |
| 82 | + _LIBUNWIND_POP_SHSTK_SSP((fn)); \ |
| 83 | + void *shstkRegContext = __libunwind_shstk_get_registers((cursor)); \ |
| 84 | + void *shstkJumpAddress = __libunwind_shstk_get_jump_target(); \ |
82 | 85 | __asm__ volatile("mov x0, %0\n\t" \
|
83 | 86 | "br %1\n\t" \
|
84 | 87 | : \
|
85 |
| - : "r"(cetRegContext), "r"(cetJumpAddress) \ |
| 88 | + : "r"(shstkRegContext), "r"(shstkJumpAddress) \ |
86 | 89 | : "x0"); \
|
87 | 90 | } while (0)
|
88 | 91 | #endif
|
@@ -255,16 +258,16 @@ unwind_phase2(unw_context_t *uc, unw_cursor_t *cursor, _Unwind_Exception *except
|
255 | 258 | }
|
256 | 259 | #endif
|
257 | 260 |
|
258 |
| -// In CET enabled environment, we check return address stored in normal stack |
259 |
| -// against return address stored in CET shadow stack, if the 2 addresses don't |
| 261 | +// In shadow stack enabled environment, we check return address stored in normal |
| 262 | +// stack against return address stored in shadow stack, if the 2 addresses don't |
260 | 263 | // match, it means return address in normal stack has been corrupted, we return
|
261 | 264 | // _URC_FATAL_PHASE2_ERROR.
|
262 | 265 | #if defined(_LIBUNWIND_USE_CET) || defined(_LIBUNWIND_USE_GCS)
|
263 | 266 | if (shadowStackTop != 0) {
|
264 | 267 | unw_word_t retInNormalStack;
|
265 | 268 | __unw_get_reg(cursor, UNW_REG_IP, &retInNormalStack);
|
266 |
| - unsigned long retInShadowStack = *( |
267 |
| - unsigned long *)(shadowStackTop + __cet_ss_step_size * framesWalked); |
| 269 | + unsigned long retInShadowStack = |
| 270 | + *(unsigned long *)(shadowStackTop + __shstk_step_size * framesWalked); |
268 | 271 | if (retInNormalStack != retInShadowStack)
|
269 | 272 | return _URC_FATAL_PHASE2_ERROR;
|
270 | 273 | }
|
|
0 commit comments