@@ -24,6 +24,7 @@ extern struct target_ops gdbstub_ops;
 #endif

 #include "decode.h"
+#include "io.h"
 #include "mpool.h"
 #include "riscv.h"
 #include "riscv_private.h"
@@ -51,7 +52,10 @@ extern struct target_ops gdbstub_ops;
     _(breakpoint, 3) /* Breakpoint */                          \
     _(load_misaligned, 4) /* Load address misaligned */        \
     _(store_misaligned, 6) /* Store/AMO address misaligned */  \
-    _(ecall_M, 11) /* Environment call from M-mode */
+    _(ecall_M, 11) /* Environment call from M-mode */          \
+    _(insn_pgfault, 12) /* Instruction page fault */           \
+    _(load_pgfault, 13) /* Load page fault */                  \
+    _(store_pgfault, 15) /* Store page fault */
 /* clang-format on */

 enum {
@@ -196,6 +200,8 @@ static uint32_t *csr_get_ptr(riscv_t *rv, uint32_t csr)
     case CSR_FCSR:
         return (uint32_t *) (&rv->csr_fcsr);
 #endif
+    case CSR_SATP:
+        return (uint32_t *) (&rv->csr_satp);
     default:
         return NULL;
     }
@@ -220,7 +226,16 @@ static uint32_t csr_csrrw(riscv_t *rv, uint32_t csr, uint32_t val)
         out &= FFLAG_MASK;
 #endif

-    *c = val;
+    if (c == &rv->csr_satp) {
+        const uint8_t mode_sv32 = val >> 31;
+        if (mode_sv32)
+            *c = val & MASK(22); /* store the root page table's PPN */
+        else /* bare mode */
+            *c = 0; /* virtual addresses map directly onto the same
+                     * physical addresses */
+    } else {
+        *c = val;
+    }

     return out;
 }
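
In Sv32, satp packs MODE into bit 31, an ASID into bits 30:22, and the root page table's PPN into bits 21:0, which is why the handler above keeps only the low 22 bits on a write. A minimal sketch of how a guest might compose the value this handler consumes (SATP_MODE_SV32 and satp_make_sv32 are illustrative names, not part of this codebase):

    #include <stdint.h>

    #define SATP_MODE_SV32 (1u << 31) /* satp bit 31: 0 = Bare, 1 = Sv32 */

    /* Compose a satp value for a 4 KiB-aligned root page table: the PPN is
     * simply the physical address shifted down by the page shift (12). */
    static inline uint32_t satp_make_sv32(uint32_t root_pa)
    {
        return SATP_MODE_SV32 | (root_pa >> 12);
    }

Because MODE and ASID are stripped on the write, csr_satp ends up holding the bare root PPN, which mmu_walk further below can use directly as a page number.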
@@ -456,7 +471,7 @@ static bool do_fuse3(riscv_t *rv, rv_insn_t *ir, uint64_t cycle, uint32_t PC)
     for (int i = 0; i < ir->imm2; i++) {
         uint32_t addr = rv->X[fuse[i].rs1] + fuse[i].imm;
         RV_EXC_MISALIGN_HANDLER(3, store, false, 1);
-        rv->io.mem_write_w(addr, rv->X[fuse[i].rs2]);
+        rv->io.mem_write_w(rv, addr, rv->X[fuse[i].rs2]);
     }
     PC += ir->imm2 * 4;
     if (unlikely(RVOP_NO_NEXT(ir))) {
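
This hunk, and the analogous ones that follow, thread the riscv_t handle through every memory callback so that an MMU-aware implementation can reach csr_satp and the page tables. Under that reading, the riscv_io_t function pointer types gain a leading parameter, roughly as sketched below (the typedef names are assumptions inferred from the call sites, not taken from riscv.h):

    typedef uint32_t (*riscv_mem_ifetch)(riscv_t *rv, uint32_t addr);
    typedef uint32_t (*riscv_mem_read_w)(riscv_t *rv, uint32_t addr);
    typedef void (*riscv_mem_write_w)(riscv_t *rv, uint32_t addr, uint32_t val);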
@@ -480,7 +495,7 @@ static bool do_fuse4(riscv_t *rv, rv_insn_t *ir, uint64_t cycle, uint32_t PC)
     for (int i = 0; i < ir->imm2; i++) {
         uint32_t addr = rv->X[fuse[i].rs1] + fuse[i].imm;
         RV_EXC_MISALIGN_HANDLER(3, load, false, 1);
-        rv->X[fuse[i].rd] = rv->io.mem_read_w(addr);
+        rv->X[fuse[i].rd] = rv->io.mem_read_w(rv, addr);
     }
     PC += ir->imm2 * 4;
     if (unlikely(RVOP_NO_NEXT(ir))) {
@@ -613,7 +628,8 @@ static void block_translate(riscv_t *rv, block_t *block)
         prev_ir->next = ir;

         /* fetch the next instruction */
-        const uint32_t insn = rv->io.mem_ifetch(block->pc_end);
+        const uint32_t insn = rv->io.mem_ifetch(rv, block->pc_end);
+        printf("insn: %d\n", insn);

         /* decode the instruction */
         if (!rv_decode(ir, insn)) {
@@ -691,7 +707,7 @@ static bool detect_memset(riscv_t *rv, size_t type)

     uint32_t tmp_pc = rv->PC;
     for (uint32_t i = 0; i < memset_len; i++) {
-        const uint32_t insn = rv->io.mem_ifetch(tmp_pc);
+        const uint32_t insn = rv->io.mem_ifetch(rv, tmp_pc);
         if (unlikely(insn != memset_insn[i]))
             return false;
         tmp_pc += 4;
@@ -712,7 +728,7 @@ static bool detect_memcpy(riscv_t *rv, size_t type)

     uint32_t tmp_pc = rv->PC;
     for (uint32_t i = 0; i < memcpy_len; i++) {
-        const uint32_t insn = rv->io.mem_ifetch(tmp_pc);
+        const uint32_t insn = rv->io.mem_ifetch(rv, tmp_pc);
         if (unlikely(insn != memcpy_insn[i]))
             return false;
         tmp_pc += 4;
@@ -1178,6 +1194,230 @@ void rv_step(void *arg)
 #endif
 }

+static bool ppn_is_valid(riscv_t *rv, uint32_t ppn)
+{
+    vm_attr_t *attr = PRIV(rv);
+    const uint32_t nr_pg_max = attr->mem_size / RV_PG_SIZE;
+    return ppn < nr_pg_max;
+}
+
+/* PAGE_TABLE() expects `rv` and `attr` (a vm_attr_t *) to be in the caller's
+ * scope; it yields the page table that starts at physical page `ppn`, or
+ * NULL if that page lies outside the configured memory */
+#define PAGE_TABLE(ppn)                                                       \
+    ppn_is_valid(rv, ppn) ? (uint32_t *) &attr->mem[ppn << (RV_PG_SHIFT - 2)] \
+                          : NULL
+
+/* Walk through the page tables and return the PTE corresponding to a virtual
+ * address, if one exists
+ * @rv: RISC-V emulator
+ * @addr: virtual address
+ * @return: NULL if no PTE is found or a fault is raised, otherwise the
+ *          corresponding PTE
+ */
+static uint32_t *mmu_walk(riscv_t *rv, const uint32_t addr)
+{
+    vm_attr_t *attr = PRIV(rv);
+    uint32_t ppn = rv->csr_satp;
+    if (ppn == 0) /* Bare mode */
+        return NULL;
+
+    /* start from the root page table */
+    uint32_t *page_table = PAGE_TABLE(ppn);
+    if (!page_table)
+        return NULL;
+
+    for (int level = 1; level >= 0; level--) {
+        uint32_t vpn =
+            (addr >> RV_PG_SHIFT >> (level * (RV_PG_SHIFT - 2))) & MASK(10);
+        uint32_t *pte = page_table + vpn;
+
+        /* PTE XWRV bits in order */
+        uint8_t XWRV_bit = (*pte & MASK(4));
+        switch (XWRV_bit) {
+        case 0b0001: /* pointer to the next level of the page table */
+            ppn = (*pte >> (RV_PG_SHIFT - 2));
+            page_table = PAGE_TABLE(ppn);
+            if (!page_table)
+                return NULL;
+            break;
+        case 0b0011: /* R */
+        case 0b0111: /* R + W */
+        case 0b1001: /* X */
+        case 0b1011: /* R + X */
+        case 0b1111: /* R + W + X */
+            ppn = (*pte >> (RV_PG_SHIFT - 2));
+            if (unlikely(level == 1 &&
+                         (ppn & MASK(10)))) /* misaligned superpage */
+                return NULL;
+            return pte; /* leaf PTE */
+        case 0b0101: /* W-only and W + X are reserved encodings */
+        case 0b1101:
+            return NULL;
+        }
+    }
+
+    return NULL;
+}
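
/* A quick check on the index arithmetic above (illustrative values): with
 * RV_PG_SHIFT = 12, level 1 selects addr[31:22] and level 0 selects
 * addr[21:12], and MASK(10) keeps each VPN within a 1024-entry table:
 *
 *     const uint32_t addr = 0xDEADBEEF;
 *     uint32_t vpn1 = (addr >> 22) & 0x3FF;  // 0x37A
 *     uint32_t vpn0 = (addr >> 12) & 0x3FF;  // 0x2DB
 *     uint32_t off  = addr & 0xFFF;          // 0xEEF
 */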
+
+/* Verify the PTE and raise the corresponding fault if needed
+ * @op: the operation
+ * @rv: RISC-V emulator
+ * @pte: the PTE to be verified
+ * @addr: the virtual address that triggers the fault, if any
+ * @return: false if the corresponding fault is raised, otherwise true
+ */
+/* FIXME: handle access faults */
+#define MMU_FAULT_CHECK(op, rv, pte, addr, access_bits) \
+    mmu_##op##_fault_check(rv, pte, addr, access_bits)
+#define MMU_FAULT_CHECK_IMPL(op, pgfault)                                    \
+    static bool mmu_##op##_fault_check(riscv_t *rv, uint32_t *pte,           \
+                                       uint32_t addr, uint32_t access_bits)  \
+    {                                                                        \
+        if (!pte && rv->csr_satp) { /* PTE not found */                      \
+            rv_except_##pgfault(rv, addr);                                   \
+            return false;                                                    \
+        } else if (pte && (!(*pte & PTE_V) ||                                \
+                           (!(*pte & PTE_R) && (*pte & PTE_W)))) {           \
+            /* invalid PTE, or the reserved W-without-R encoding */          \
+            rv_except_##pgfault(rv, addr);                                   \
+            return false;                                                    \
+        } else if (pte && (!(*pte & PTE_X) && (access_bits & PTE_X))) {      \
+            rv_except_##pgfault(rv, addr);                                   \
+            return false;                                                    \
+        } else if (pte && ((!(MSTATUS_MXR & rv->csr_mstatus) &&              \
+                            !(*pte & PTE_R) && (access_bits & PTE_R)) ||     \
+                           ((MSTATUS_MXR & rv->csr_mstatus) &&               \
+                            !((*pte & PTE_R) | (*pte & PTE_X)) &&            \
+                            (access_bits & PTE_R)))) {                       \
+            /* not readable: with MXR clear, R is required; with MXR set,    \
+             * either R or X suffices */                                     \
+            rv_except_##pgfault(rv, addr);                                   \
+            return false;                                                    \
+        } else if (pte && ((MSTATUS_MPRV & rv->csr_mstatus) &&               \
+                           !(MSTATUS_MPPH &                                  \
+                             rv->csr_mstatus) && /* MPP=01 means S-mode */   \
+                           (MSTATUS_MPPL & rv->csr_mstatus))) {              \
+            if (!(MSTATUS_SUM & rv->csr_mstatus) && (*pte & PTE_U)) {        \
+                rv_except_##pgfault(rv, addr);                               \
+                return false;                                                \
+            }                                                                \
+        }                                                                    \
+        return true;                                                         \
+    }
+
+MMU_FAULT_CHECK_IMPL(ifetch, insn_pgfault)
+MMU_FAULT_CHECK_IMPL(read, load_pgfault)
+MMU_FAULT_CHECK_IMPL(write, store_pgfault)
+
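
/* Each MMU_FAULT_CHECK_IMPL() line above expands to a complete static
 * function, so the accessors below can write, for example:
 *
 *     bool ok = MMU_FAULT_CHECK(ifetch, rv, pte, addr, PTE_X);
 *     // ... which is mmu_ifetch_fault_check(rv, pte, addr, PTE_X)
 */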
+/* get_ppn_and_offset() expects `pte` and `addr` to be in the caller's scope;
+ * the PPN field (PTE bits 31:10) is shifted up to a physical page address so
+ * that the flag bits never leak into it */
+#define get_ppn_and_offset(ppn, offset)                       \
+    do {                                                      \
+        ppn = (*pte >> (RV_PG_SHIFT - 2)) << RV_PG_SHIFT;     \
+        offset = addr & MASK(RV_PG_SHIFT);                    \
+    } while (0)
+
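
/* Worked example of the macro's arithmetic (illustrative values): a PTE of
 * 0x000800CF carries PPN = 0x000800CF >> 10 = 0x200 and flags D A X W R V,
 * so for a virtual address 0x00005ABC:
 *
 *     ppn    = (0x000800CF >> 10) << 12;  // 0x00200000
 *     offset = 0x00005ABC & MASK(12);     // 0xABC
 *     // physical address: ppn | offset == 0x00200ABC
 */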
+uint32_t mmu_ifetch(riscv_t *rv, const uint32_t addr)
+{
+    uint32_t *pte = mmu_walk(rv, addr);
+    bool ok = MMU_FAULT_CHECK(ifetch, rv, pte, addr, PTE_X);
+    if (unlikely(!ok))
+        return 0;
+
+    if (rv->csr_satp) {
+        uint32_t ppn;
+        uint32_t offset;
+        get_ppn_and_offset(ppn, offset);
+        return memory_ifetch(ppn | offset);
+    }
+    return memory_ifetch(addr);
+}
+
+uint32_t mmu_read_w(riscv_t *rv, const uint32_t addr)
+{
+    uint32_t *pte = mmu_walk(rv, addr);
+    bool ok = MMU_FAULT_CHECK(read, rv, pte, addr, PTE_R);
+    if (unlikely(!ok))
+        return 0;
+
+    if (rv->csr_satp) {
+        uint32_t ppn;
+        uint32_t offset;
+        get_ppn_and_offset(ppn, offset);
+        return memory_read_w(ppn | offset);
+    }
+    return memory_read_w(addr);
+}
+
+uint16_t mmu_read_s(riscv_t *rv, const uint32_t addr)
+{
+    uint32_t *pte = mmu_walk(rv, addr);
+    bool ok = MMU_FAULT_CHECK(read, rv, pte, addr, PTE_R);
+    if (unlikely(!ok))
+        return 0;
+
+    if (rv->csr_satp) {
+        uint32_t ppn;
+        uint32_t offset;
+        get_ppn_and_offset(ppn, offset);
+        return memory_read_s(ppn | offset);
+    }
+    return memory_read_s(addr);
+}
+
+uint8_t mmu_read_b(riscv_t *rv, const uint32_t addr)
+{
+    uint32_t *pte = mmu_walk(rv, addr);
+    bool ok = MMU_FAULT_CHECK(read, rv, pte, addr, PTE_R);
+    if (unlikely(!ok))
+        return 0;
+
+    if (rv->csr_satp) {
+        uint32_t ppn;
+        uint32_t offset;
+        get_ppn_and_offset(ppn, offset);
+        return memory_read_b(ppn | offset);
+    }
+    return memory_read_b(addr);
+}
+
+void mmu_write_w(riscv_t *rv, const uint32_t addr, const uint32_t val)
+{
+    uint32_t *pte = mmu_walk(rv, addr);
+    bool ok = MMU_FAULT_CHECK(write, rv, pte, addr, PTE_W);
+    if (unlikely(!ok))
+        return;
+
+    if (rv->csr_satp) {
+        uint32_t ppn;
+        uint32_t offset;
+        get_ppn_and_offset(ppn, offset);
+        memory_write_w(ppn | offset, (uint8_t *) &val);
+        return;
+    }
+    memory_write_w(addr, (uint8_t *) &val);
+}
+
+void mmu_write_s(riscv_t *rv, const uint32_t addr, const uint16_t val)
+{
+    uint32_t *pte = mmu_walk(rv, addr);
+    bool ok = MMU_FAULT_CHECK(write, rv, pte, addr, PTE_W);
+    if (unlikely(!ok))
+        return;
+
+    if (rv->csr_satp) {
+        uint32_t ppn;
+        uint32_t offset;
+        get_ppn_and_offset(ppn, offset);
+        memory_write_s(ppn | offset, (uint8_t *) &val);
+        return;
+    }
+    memory_write_s(addr, (uint8_t *) &val);
+}
+
+void mmu_write_b(riscv_t *rv, const uint32_t addr, const uint8_t val)
+{
+    uint32_t *pte = mmu_walk(rv, addr);
+    bool ok = MMU_FAULT_CHECK(write, rv, pte, addr, PTE_W);
+    if (unlikely(!ok))
+        return;
+
+    if (rv->csr_satp) {
+        uint32_t ppn;
+        uint32_t offset;
+        get_ppn_and_offset(ppn, offset);
+        memory_write_b(ppn | offset, (uint8_t *) &val);
+        return;
+    }
+    memory_write_b(addr, (uint8_t *) &val);
+}
+
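
/* A hypothetical round-trip through the MMU-aware accessors (a sketch that
 * assumes an initialized riscv_t still in bare mode, i.e. csr_satp == 0, so
 * addresses pass through untranslated):
 *
 *     mmu_write_w(rv, 0x1000, 0xCAFEBABE);  // no page walk in bare mode
 *     uint32_t v = mmu_read_w(rv, 0x1000);  // v == 0xCAFEBABE
 */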
 void ebreak_handler(riscv_t *rv)
 {
     assert(rv);
@@ -1225,3 +1465,22 @@ void dump_registers(riscv_t *rv, char *out_file_path)
     if (out_file_path[0] != '-')
         fclose(f);
 }
+
+riscv_io_t mmu_io = {
+    /* memory read interface */
+    .mem_ifetch = mmu_ifetch,
+    .mem_read_w = mmu_read_w,
+    .mem_read_s = mmu_read_s,
+    .mem_read_b = mmu_read_b,
+
+    /* memory write interface */
+    .mem_write_w = mmu_write_w,
+    .mem_write_s = mmu_write_s,
+    .mem_write_b = mmu_write_b,
+
+    /* system services or essential routines */
+    .on_ecall = ecall_handler,
+    .on_ebreak = ebreak_handler,
+    .on_memcpy = memcpy_handler,
+    .on_memset = memset_handler,
+};
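
The struct above gives callers a drop-in replacement for the default direct-mapped riscv_io_t. One plausible way to install it on an existing core, assuming rv->io may be overwritten after initialization (this wiring is an assumption, not part of the diff):

    #include <string.h>

    /* Switch a core from direct physical access to the MMU-backed handlers */
    static void rv_enable_mmu(riscv_t *rv)
    {
        memcpy(&rv->io, &mmu_io, sizeof(riscv_io_t));
    }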