/*
 * rv32emu is freely redistributable under the MIT License. See the file
 * "LICENSE" for information on usage and redistribution of this file.
 */

#include <assert.h>

#include "mpool.h"
#include "system.h"

/* FIXME: Reuse RV_TRAP_LIST via the X macro technique; RV_TRAP_LIST must
 * first be moved into a header file.
 */
#define TRAP_HANDLER_DECL(type) \
    extern void rv_trap_##type(riscv_t *rv, uint32_t tval)

TRAP_HANDLER_DECL(pagefault_insn);
TRAP_HANDLER_DECL(pagefault_load);
TRAP_HANDLER_DECL(pagefault_store);

extern void *dispatch_table[];

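/* Interpret instructions one at a time while a trap is being serviced.
 * The loop below runs until the guest's trap handler returns via sret
 * (which clears rv->is_trapped) or the emulator halts.
 */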
void trap_handler(riscv_t *rv)
{
    rv_insn_t *ir = mpool_alloc(rv->block_ir_mp);
    assert(ir);

    /* rv->is_trapped is cleared by the sret implementation */
    while (rv->is_trapped && !rv_has_halted(rv)) {
        uint32_t insn = rv->io.mem_ifetch(rv, rv->PC);
        assert(insn);

        rv_decode(ir, insn);
        ir->impl = dispatch_table[ir->opcode];
        rv->compressed = is_compressed(insn);
        ir->impl(rv, ir, rv->csr_cycle, rv->PC);
    }
}

static bool ppn_is_valid(riscv_t *rv, uint32_t ppn)
{
    vm_attr_t *attr = PRIV(rv);
    const uint32_t nr_pg_max = attr->mem_size / RV_PG_SIZE;
    return ppn < nr_pg_max;
}

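/* Map a guest physical page number to a host pointer into the emulated
 * memory, or NULL when the PPN lies beyond the configured memory size.
 * "rv" and "attr" must be in scope at the expansion site.
 */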
#define PAGE_TABLE(ppn) \
    ppn_is_valid(rv, ppn) \
        ? (uint32_t *) (attr->mem->mem_base + (ppn << (RV_PG_SHIFT))) \
        : NULL

/* Walk the page tables and return the PTE that corresponds to a virtual
 * address, if one exists
 * @rv: RISC-V emulator
 * @addr: virtual address
 * @level: output; the level at which the PTE was found
 * @return: the corresponding PTE, or NULL if no PTE was found or the walk
 *          faulted
 */
static uint32_t *mmu_walk(riscv_t *rv, const uint32_t addr, uint32_t *level)
{
    vm_attr_t *attr = PRIV(rv);
    uint32_t ppn = rv->csr_satp & MASK(22);
    if (ppn == 0) /* Bare mode */
        return NULL;

    /* root page table */
    uint32_t *page_table = PAGE_TABLE(ppn);
    if (!page_table)
        return NULL;

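    /* Sv32: a 32-bit VA splits into VPN[1] (10 bits), VPN[0] (10 bits)
     * and a 12-bit page offset; each VPN field indexes a 1024-entry
     * table. For example, VA 0x40001234 yields VPN[1] = 0x100,
     * VPN[0] = 0x001 and offset = 0x234.
     */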
    for (int i = 1; i >= 0; i--) {
        *level = 2 - i;
        uint32_t vpn =
            (addr >> RV_PG_SHIFT >> (i * (RV_PG_SHIFT - 2))) & MASK(10);
        uint32_t *pte = page_table + vpn;

        uint8_t XWRV_bit = (*pte & MASK(4));
        switch (XWRV_bit) {
        case NEXT_PG_TBL: /* next level of the page table */
            ppn = (*pte >> (RV_PG_SHIFT - 2));
            page_table = PAGE_TABLE(ppn);
            if (!page_table)
                return NULL;
            break;
        case RO_PAGE:
        case RW_PAGE:
        case EO_PAGE:
        case RX_PAGE:
        case RWX_PAGE:
            ppn = (*pte >> (RV_PG_SHIFT - 2));
            if (*level == 1 &&
                unlikely(ppn & MASK(10))) /* misaligned superpage */
                return NULL;
            return pte; /* leaf PTE */
        case RESRV_PAGE1:
        case RESRV_PAGE2:
        default:
            return NULL;
        }
    }

    return NULL;
}

/* Verify a PTE and raise the corresponding fault if needed
 * @op: the operation
 * @rv: RISC-V emulator
 * @pte: the PTE to verify
 * @addr: the virtual address to report when raising a fault
 * @return: false if any fault was raised because the access permission was
 *          violated, otherwise true
 */
/* FIXME: handle access faults and check for out-of-range addresses */
#define MMU_FAULT_CHECK(op, rv, pte, addr, access_bits) \
    mmu_##op##_fault_check(rv, pte, addr, access_bits)
#define MMU_FAULT_CHECK_IMPL(op, pgfault)                                   \
    static bool mmu_##op##_fault_check(riscv_t *rv, uint32_t *pte,          \
                                       uint32_t addr, uint32_t access_bits) \
    {                                                                       \
        if (pte && (!(*pte & PTE_V))) {                                     \
            rv->is_trapped = true;                                          \
            rv_trap_##pgfault(rv, addr);                                    \
            return false;                                                   \
        }                                                                   \
        if (!(pte && (*pte & access_bits))) {                               \
            rv->is_trapped = true;                                          \
            rv_trap_##pgfault(rv, addr);                                    \
            return false;                                                   \
        }                                                                   \
        /*                                                                  \
         * (1) When MXR=0, only loads from pages marked readable (R=1)     \
         * will succeed.                                                    \
         *                                                                  \
         * (2) When MXR=1, loads from pages marked either readable or      \
         * executable (R=1 or X=1) will succeed.                            \
         */                                                                 \
        if (pte && ((!(SSTATUS_MXR & rv->csr_sstatus) && !(*pte & PTE_R) && \
                     (access_bits == PTE_R)) ||                             \
                    ((SSTATUS_MXR & rv->csr_sstatus) &&                     \
                     !((*pte & PTE_R) | (*pte & PTE_X)) &&                  \
                     (access_bits == PTE_R)))) {                            \
            rv->is_trapped = true;                                          \
            rv_trap_##pgfault(rv, addr);                                    \
            return false;                                                   \
        }                                                                   \
        /*                                                                  \
         * When SUM=0, S-mode memory accesses to pages that are accessible \
         * by U-mode will fault.                                            \
         */                                                                 \
        if (pte && rv->priv_mode == RV_PRIV_S_MODE &&                       \
            !(SSTATUS_SUM & rv->csr_sstatus) && (*pte & PTE_U)) {           \
            rv->is_trapped = true;                                          \
            rv_trap_##pgfault(rv, addr);                                    \
            return false;                                                   \
        }                                                                   \
        /* PTE not found, map it in handler */                              \
        if (!pte) {                                                         \
            rv->is_trapped = true;                                          \
            rv_trap_##pgfault(rv, addr);                                    \
            return false;                                                   \
        }                                                                   \
        /* valid PTE */                                                     \
        return true;                                                        \
    }

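/* These expand to mmu_ifetch_fault_check(), mmu_read_fault_check() and
 * mmu_write_fault_check(); each raises the matching page-fault trap on
 * failure, passing the faulting virtual address as tval.
 */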
MMU_FAULT_CHECK_IMPL(ifetch, pagefault_insn)
MMU_FAULT_CHECK_IMPL(read, pagefault_load)
MMU_FAULT_CHECK_IMPL(write, pagefault_store)

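/* Recover the physical page base and page offset from a leaf PTE. A leaf
 * found at level 1 maps a 4 MiB megapage, so the low 22 bits of the VA form
 * the offset; a level-2 leaf maps a 4 KiB page with a 12-bit offset.
 */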
#define get_ppn_and_offset(ppn, offset)                        \
    uint32_t ppn;                                              \
    uint32_t offset;                                           \
    do {                                                       \
        ppn = *pte >> (RV_PG_SHIFT - 2) << RV_PG_SHIFT;        \
        offset = level == 1 ? addr & MASK((RV_PG_SHIFT + 10))  \
                            : addr & MASK(RV_PG_SHIFT);        \
    } while (0)

uint32_t mmu_ifetch(riscv_t *rv, const uint32_t addr)
{
    if (!rv->csr_satp)
        return memory_ifetch(addr);

    uint32_t level;
    uint32_t *pte = mmu_walk(rv, addr, &level);
    bool ok = MMU_FAULT_CHECK(ifetch, rv, pte, addr, PTE_X);
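    /* When the check fails, a page-fault trap has been raised; the guest's
     * fault handler (run by trap_handler above) is expected to have mapped
     * the page by the time the walk is retried.
     */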
    if (unlikely(!ok)) {
        pte = mmu_walk(rv, addr, &level);
    }

    get_ppn_and_offset(ppn, offset);
    return memory_ifetch(ppn | offset);
}

uint32_t mmu_read_w(riscv_t *rv, const uint32_t addr)
{
    if (!rv->csr_satp)
        return memory_read_w(addr);

    uint32_t level;
    uint32_t *pte = mmu_walk(rv, addr, &level);
    bool ok = MMU_FAULT_CHECK(read, rv, pte, addr, PTE_R);
    if (unlikely(!ok)) {
        pte = mmu_walk(rv, addr, &level);
    }

    get_ppn_and_offset(ppn, offset);
    return memory_read_w(ppn | offset);
}

uint16_t mmu_read_s(riscv_t *rv, const uint32_t addr)
{
    if (!rv->csr_satp)
        return memory_read_s(addr);

    uint32_t level;
    uint32_t *pte = mmu_walk(rv, addr, &level);
    bool ok = MMU_FAULT_CHECK(read, rv, pte, addr, PTE_R);
    if (unlikely(!ok)) {
        pte = mmu_walk(rv, addr, &level);
    }

    get_ppn_and_offset(ppn, offset);
    return memory_read_s(ppn | offset);
}

uint8_t mmu_read_b(riscv_t *rv, const uint32_t addr)
{
    if (!rv->csr_satp)
        return memory_read_b(addr);

    uint32_t level;
    uint32_t *pte = mmu_walk(rv, addr, &level);
    bool ok = MMU_FAULT_CHECK(read, rv, pte, addr, PTE_R);
    if (unlikely(!ok)) {
        pte = mmu_walk(rv, addr, &level);
    }

    get_ppn_and_offset(ppn, offset);
    return memory_read_b(ppn | offset);
}

void mmu_write_w(riscv_t *rv, const uint32_t addr, const uint32_t val)
{
    if (!rv->csr_satp)
        return memory_write_w(addr, (uint8_t *) &val);

    uint32_t level;
    uint32_t *pte = mmu_walk(rv, addr, &level);
    bool ok = MMU_FAULT_CHECK(write, rv, pte, addr, PTE_W);
    if (unlikely(!ok)) {
        pte = mmu_walk(rv, addr, &level);
    }

    get_ppn_and_offset(ppn, offset);
    memory_write_w(ppn | offset, (uint8_t *) &val);
}

void mmu_write_s(riscv_t *rv, const uint32_t addr, const uint16_t val)
{
    if (!rv->csr_satp)
        return memory_write_s(addr, (uint8_t *) &val);

    uint32_t level;
    uint32_t *pte = mmu_walk(rv, addr, &level);
    bool ok = MMU_FAULT_CHECK(write, rv, pte, addr, PTE_W);
    if (unlikely(!ok)) {
        pte = mmu_walk(rv, addr, &level);
    }

    get_ppn_and_offset(ppn, offset);
    memory_write_s(ppn | offset, (uint8_t *) &val);
}

void mmu_write_b(riscv_t *rv, const uint32_t addr, const uint8_t val)
{
    if (!rv->csr_satp)
        return memory_write_b(addr, (uint8_t *) &val);

    uint32_t level;
    uint32_t *pte = mmu_walk(rv, addr, &level);
    bool ok = MMU_FAULT_CHECK(write, rv, pte, addr, PTE_W);
    if (unlikely(!ok)) {
        pte = mmu_walk(rv, addr, &level);
    }

    get_ppn_and_offset(ppn, offset);
    memory_write_b(ppn | offset, (uint8_t *) &val);
}

riscv_io_t mmu_io = {
    /* memory read interface */
    .mem_ifetch = mmu_ifetch,
    .mem_read_w = mmu_read_w,
    .mem_read_s = mmu_read_s,
    .mem_read_b = mmu_read_b,

    /* memory write interface */
    .mem_write_w = mmu_write_w,
    .mem_write_s = mmu_write_s,
    .mem_write_b = mmu_write_b,

    /* system services or essential routines */
    .on_ecall = ecall_handler,
    .on_ebreak = ebreak_handler,
    .on_memcpy = memcpy_handler,
    .on_memset = memset_handler,
};
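
/*
 * Usage sketch (an assumption, not wiring present in this file): route all
 * guest memory accesses through the MMU by installing this table on the
 * emulator instance, e.g.
 *
 *     rv->io = mmu_io;
 *
 * after which rv->io.mem_ifetch() and friends perform the Sv32 walk above
 * before touching guest memory.
 */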