Skip to content

Commit b386c76

Browse files
committed
Add new system.[ch] file
Separate system-related implementations into a new file for improved modularity. This requires changing 'dispatch_table' and 'TRAP_HANDLER_IMPL' to non-static, since system.c depends on them.
1 parent 172e59d commit b386c76

File tree

4 files changed

+366
-19
lines changed

4 files changed

+366
-19
lines changed

Makefile

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,10 @@ CFLAGS += $(CFLAGS_NO_CET)
4545

4646
OBJS_EXT :=
4747

48+
ifeq ($(call has, SYSTEM), 1)
49+
OBJS_EXT += system.o
50+
endif
51+
4852
# Integer Multiplication and Division instructions
4953
ENABLE_EXT_M ?= 1
5054
$(call set-feature, EXT_M)

src/emulate.c

Lines changed: 6 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,10 @@
1414
#include <emscripten.h>
1515
#endif
1616

17+
#if RV32_HAS(SYSTEM)
18+
#include "system.h"
19+
#endif /* RV32_HAS(SYSTEM) */
20+
1721
#if RV32_HAS(EXT_F)
1822
#include <math.h>
1923
#include "softfloat.h"
@@ -71,19 +75,6 @@ static void rv_trap_default_handler(riscv_t *rv)
7175
rv->PC = rv->csr_mepc; /* mret */
7276
}
7377

74-
/*
75-
* Trap might occurs during block emulation. For instance, page fault.
76-
* In order to handle trap, we have to escape from block and execute
77-
* registered trap handler. This trap_handler function helps to execute
78-
* the registered trap handler, PC by PC. Once the trap is handled,
79-
* resume the previous execution flow where cause the trap.
80-
*
81-
* Now, rv32emu supports misaligned access and page fault handling.
82-
*/
83-
#if RV32_HAS(SYSTEM)
84-
static void trap_handler(riscv_t *rv);
85-
#endif
86-
8778
/* When a trap occurs in M-mode/S-mode, m/stval is either initialized to zero or
8879
* populated with exception-specific details to assist software in managing
8980
* the trap. Otherwise, the implementation never modifies m/stval, although
@@ -104,7 +95,7 @@ static void trap_handler(riscv_t *rv);
10495
* identifier called tval, as both are handled by TRAP_HANDLER_IMPL.
10596
*/
10697
#define TRAP_HANDLER_IMPL(type, code) \
107-
static void rv_trap_##type(riscv_t *rv, uint32_t tval) \
98+
void rv_trap_##type(riscv_t *rv, uint32_t tval) \
10899
{ \
109100
/* m/stvec (Machine/Supervisor Trap-Vector Base Address Register) \
110101
* m/stvec[MXLEN-1:2]: vector base address \
@@ -590,7 +581,7 @@ static bool do_fuse5(riscv_t *rv,
590581
}
591582

592583
/* clang-format off */
593-
static const void *dispatch_table[] = {
584+
const void *dispatch_table[] = {
594585
/* RV32 instructions */
595586
#define _(inst, can_branch, insn_len, translatable, reg_mask) [rv_insn_##inst] = do_##inst,
596587
RV_INSN_LIST
@@ -1122,10 +1113,6 @@ void rv_step(void *arg)
11221113
#endif
11231114
}
11241115

1125-
#if RV32_HAS(SYSTEM)
1126-
#include "system.c"
1127-
#endif /* SYSTEM */
1128-
11291116
void ebreak_handler(riscv_t *rv)
11301117
{
11311118
assert(rv);

src/system.c

Lines changed: 309 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,309 @@
1+
/*
2+
* rv32emu is freely redistributable under the MIT License. See the file
3+
* "LICENSE" for information on usage and redistribution of this file.
4+
*/
5+
6+
#include <assert.h>
7+
8+
#include "mpool.h"
9+
#include "system.h"
10+
11+
/* FIXME:
12+
* reuse RV_TRAP_LIST with X macro
13+
* RV_TRAP_LIST should be in some header file first
14+
*/
15+
#define TRAP_HANDLER_DECL(type) \
16+
extern void rv_trap_##type(riscv_t *rv, uint32_t tval)
17+
18+
TRAP_HANDLER_DECL(pagefault_insn);
19+
TRAP_HANDLER_DECL(pagefault_load);
20+
TRAP_HANDLER_DECL(pagefault_store);
21+
22+
extern void *dispatch_table[];
23+
24+
void trap_handler(riscv_t *rv)
25+
{
26+
rv_insn_t *ir = mpool_alloc(rv->block_ir_mp);
27+
assert(ir);
28+
29+
/* set to false by sret implementation */
30+
while (rv->is_trapped && !rv_has_halted(rv)) {
31+
uint32_t insn = rv->io.mem_ifetch(rv, rv->PC);
32+
assert(insn);
33+
34+
rv_decode(ir, insn);
35+
ir->impl = dispatch_table[ir->opcode];
36+
rv->compressed = is_compressed(insn);
37+
ir->impl(rv, ir, rv->csr_cycle, rv->PC);
38+
}
39+
}
40+
41+
static bool ppn_is_valid(riscv_t *rv, uint32_t ppn)
42+
{
43+
vm_attr_t *attr = PRIV(rv);
44+
const uint32_t nr_pg_max = attr->mem_size / RV_PG_SIZE;
45+
return ppn < nr_pg_max;
46+
}
47+
48+
#define PAGE_TABLE(ppn) \
49+
ppn_is_valid(rv, ppn) \
50+
? (uint32_t *) (attr->mem->mem_base + (ppn << (RV_PG_SHIFT))) \
51+
: NULL
52+
53+
/* Walk through page tables and get the corresponding PTE by virtual address if
54+
* exists
55+
* @rv: RISC-V emulator
56+
* @addr: virtual address
57+
* @level: the level of which the PTE is located
58+
* @return: NULL if a not found or fault else the corresponding PTE
59+
*/
60+
static uint32_t *mmu_walk(riscv_t *rv, const uint32_t addr, uint32_t *level)
61+
{
62+
vm_attr_t *attr = PRIV(rv);
63+
uint32_t ppn = rv->csr_satp & MASK(22);
64+
if (ppn == 0) /* Bare mode */
65+
return NULL;
66+
67+
/* root page table */
68+
uint32_t *page_table = PAGE_TABLE(ppn);
69+
if (!page_table)
70+
return NULL;
71+
72+
for (int i = 1; i >= 0; i--) {
73+
*level = 2 - i;
74+
uint32_t vpn =
75+
(addr >> RV_PG_SHIFT >> (i * (RV_PG_SHIFT - 2))) & MASK(10);
76+
uint32_t *pte = page_table + vpn;
77+
78+
uint8_t XWRV_bit = (*pte & MASK(4));
79+
switch (XWRV_bit) {
80+
case NEXT_PG_TBL: /* next level of the page table */
81+
ppn = (*pte >> (RV_PG_SHIFT - 2));
82+
page_table = PAGE_TABLE(ppn);
83+
if (!page_table)
84+
return NULL;
85+
break;
86+
case RO_PAGE:
87+
case RW_PAGE:
88+
case EO_PAGE:
89+
case RX_PAGE:
90+
case RWX_PAGE:
91+
ppn = (*pte >> (RV_PG_SHIFT - 2));
92+
if (*level == 1 &&
93+
unlikely(ppn & MASK(10))) /* misaligned superpage */
94+
return NULL;
95+
return pte; /* leaf PTE */
96+
case RESRV_PAGE1:
97+
case RESRV_PAGE2:
98+
default:
99+
return NULL;
100+
}
101+
}
102+
103+
return NULL;
104+
}
105+
106+
/* Verify the PTE and generate corresponding faults if needed
107+
* @op: the operation
108+
* @rv: RISC-V emulator
109+
* @pte: to be verified pte
110+
* @addr: the corresponding virtual address to cause fault
111+
* @return: false if a any fault is generated which caused by violating the
112+
* access permission else true
113+
*/
114+
/* FIXME: handle access fault, addr out of range check */
115+
#define MMU_FAULT_CHECK(op, rv, pte, addr, access_bits) \
116+
mmu_##op##_fault_check(rv, pte, addr, access_bits)
117+
#define MMU_FAULT_CHECK_IMPL(op, pgfault) \
118+
static bool mmu_##op##_fault_check(riscv_t *rv, uint32_t *pte, \
119+
uint32_t addr, uint32_t access_bits) \
120+
{ \
121+
if (pte && (!(*pte & PTE_V))) { \
122+
rv->is_trapped = true; \
123+
rv_trap_##pgfault(rv, addr); \
124+
return false; \
125+
} \
126+
if (!(pte && (*pte & access_bits))) { \
127+
rv->is_trapped = true; \
128+
rv_trap_##pgfault(rv, addr); \
129+
return false; \
130+
} \
131+
/* \
132+
* (1) When MXR=0, only loads from pages marked readable (R=1) will \
133+
* succeed. \
134+
* \
135+
* (2) When MXR=1, loads from pages marked either readable or \
136+
* executable (R=1 or X=1) will succeed. \
137+
*/ \
138+
if (pte && ((!(SSTATUS_MXR & rv->csr_sstatus) && !(*pte & PTE_R) && \
139+
(access_bits == PTE_R)) || \
140+
((SSTATUS_MXR & rv->csr_sstatus) && \
141+
!((*pte & PTE_R) | (*pte & PTE_X)) && \
142+
(access_bits == PTE_R)))) { \
143+
rv->is_trapped = true; \
144+
rv_trap_##pgfault(rv, addr); \
145+
return false; \
146+
} \
147+
/* \
148+
* When SUM=0, S-mode memory accesses to pages that are accessible by \
149+
* U-mode will fault. \
150+
*/ \
151+
if (pte && rv->priv_mode == RV_PRIV_S_MODE && \
152+
!(SSTATUS_SUM & rv->csr_sstatus) && (*pte & PTE_U)) { \
153+
rv->is_trapped = true; \
154+
rv_trap_##pgfault(rv, addr); \
155+
return false; \
156+
} \
157+
/* PTE not found, map it in handler */ \
158+
if (!pte) { \
159+
rv->is_trapped = true; \
160+
rv_trap_##pgfault(rv, addr); \
161+
return false; \
162+
} \
163+
/* valid PTE */ \
164+
return true; \
165+
}
166+
167+
MMU_FAULT_CHECK_IMPL(ifetch, pagefault_insn)
168+
MMU_FAULT_CHECK_IMPL(read, pagefault_load)
169+
MMU_FAULT_CHECK_IMPL(write, pagefault_store)
170+
171+
#define get_ppn_and_offset(ppn, offset) \
172+
uint32_t ppn; \
173+
uint32_t offset; \
174+
do { \
175+
ppn = *pte >> (RV_PG_SHIFT - 2) << RV_PG_SHIFT; \
176+
offset = level == 1 ? addr & MASK((RV_PG_SHIFT + 10)) \
177+
: addr & MASK(RV_PG_SHIFT); \
178+
} while (0)
179+
180+
uint32_t mmu_ifetch(riscv_t *rv, const uint32_t addr)
181+
{
182+
if (!rv->csr_satp)
183+
return memory_ifetch(addr);
184+
185+
uint32_t level;
186+
uint32_t *pte = mmu_walk(rv, addr, &level);
187+
bool ok = MMU_FAULT_CHECK(ifetch, rv, pte, addr, PTE_X);
188+
if (unlikely(!ok)) {
189+
pte = mmu_walk(rv, addr, &level);
190+
}
191+
192+
get_ppn_and_offset(ppn, offset);
193+
return memory_ifetch(ppn | offset);
194+
}
195+
196+
uint32_t mmu_read_w(riscv_t *rv, const uint32_t addr)
197+
{
198+
if (!rv->csr_satp)
199+
return memory_read_w(addr);
200+
201+
uint32_t level;
202+
uint32_t *pte = mmu_walk(rv, addr, &level);
203+
bool ok = MMU_FAULT_CHECK(read, rv, pte, addr, PTE_R);
204+
if (unlikely(!ok)) {
205+
pte = mmu_walk(rv, addr, &level);
206+
}
207+
208+
get_ppn_and_offset(ppn, offset);
209+
return memory_read_w(ppn | offset);
210+
}
211+
212+
uint16_t mmu_read_s(riscv_t *rv, const uint32_t addr)
213+
{
214+
if (!rv->csr_satp)
215+
return memory_read_s(addr);
216+
217+
uint32_t level;
218+
uint32_t *pte = mmu_walk(rv, addr, &level);
219+
bool ok = MMU_FAULT_CHECK(read, rv, pte, addr, PTE_R);
220+
if (unlikely(!ok)) {
221+
pte = mmu_walk(rv, addr, &level);
222+
}
223+
224+
get_ppn_and_offset(ppn, offset);
225+
return memory_read_s(ppn | offset);
226+
}
227+
228+
uint8_t mmu_read_b(riscv_t *rv, const uint32_t addr)
229+
{
230+
if (!rv->csr_satp)
231+
return memory_read_b(addr);
232+
233+
uint32_t level;
234+
uint32_t *pte = mmu_walk(rv, addr, &level);
235+
bool ok = MMU_FAULT_CHECK(read, rv, pte, addr, PTE_R);
236+
if (unlikely(!ok)) {
237+
pte = mmu_walk(rv, addr, &level);
238+
}
239+
240+
get_ppn_and_offset(ppn, offset);
241+
return memory_read_b(ppn | offset);
242+
}
243+
244+
void mmu_write_w(riscv_t *rv, const uint32_t addr, const uint32_t val)
245+
{
246+
if (!rv->csr_satp)
247+
return memory_write_w(addr, (uint8_t *) &val);
248+
249+
uint32_t level;
250+
uint32_t *pte = mmu_walk(rv, addr, &level);
251+
bool ok = MMU_FAULT_CHECK(write, rv, pte, addr, PTE_W);
252+
if (unlikely(!ok)) {
253+
pte = mmu_walk(rv, addr, &level);
254+
}
255+
256+
get_ppn_and_offset(ppn, offset);
257+
memory_write_w(ppn | offset, (uint8_t *) &val);
258+
}
259+
260+
void mmu_write_s(riscv_t *rv, const uint32_t addr, const uint16_t val)
261+
{
262+
if (!rv->csr_satp)
263+
return memory_write_s(addr, (uint8_t *) &val);
264+
265+
uint32_t level;
266+
uint32_t *pte = mmu_walk(rv, addr, &level);
267+
bool ok = MMU_FAULT_CHECK(write, rv, pte, addr, PTE_W);
268+
if (unlikely(!ok)) {
269+
pte = mmu_walk(rv, addr, &level);
270+
}
271+
272+
get_ppn_and_offset(ppn, offset);
273+
memory_write_s(ppn | offset, (uint8_t *) &val);
274+
}
275+
276+
void mmu_write_b(riscv_t *rv, const uint32_t addr, const uint8_t val)
277+
{
278+
if (!rv->csr_satp)
279+
return memory_write_b(addr, (uint8_t *) &val);
280+
281+
uint32_t level;
282+
uint32_t *pte = mmu_walk(rv, addr, &level);
283+
bool ok = MMU_FAULT_CHECK(write, rv, pte, addr, PTE_W);
284+
if (unlikely(!ok)) {
285+
pte = mmu_walk(rv, addr, &level);
286+
}
287+
288+
get_ppn_and_offset(ppn, offset);
289+
memory_write_b(ppn | offset, (uint8_t *) &val);
290+
}
291+
292+
riscv_io_t mmu_io = {
293+
/* memory read interface */
294+
.mem_ifetch = mmu_ifetch,
295+
.mem_read_w = mmu_read_w,
296+
.mem_read_s = mmu_read_s,
297+
.mem_read_b = mmu_read_b,
298+
299+
/* memory write interface */
300+
.mem_write_w = mmu_write_w,
301+
.mem_write_s = mmu_write_s,
302+
.mem_write_b = mmu_write_b,
303+
304+
/* system services or essential routines */
305+
.on_ecall = ecall_handler,
306+
.on_ebreak = ebreak_handler,
307+
.on_memcpy = memcpy_handler,
308+
.on_memset = memset_handler,
309+
};

0 commit comments

Comments
 (0)