@@ -24,6 +24,7 @@ extern struct target_ops gdbstub_ops;
24
24
#endif
25
25
26
26
#include "decode.h"
27
+ #include "io.h"
27
28
#include "mpool.h"
28
29
#include "riscv.h"
29
30
#include "riscv_private.h"
@@ -51,7 +52,10 @@ extern struct target_ops gdbstub_ops;
51
52
_(breakpoint, 3) /* Breakpoint */ \
52
53
_ (load_misaligned , 4 ) /* Load address misaligned */ \
53
54
_ (store_misaligned , 6 ) /* Store/AMO address misaligned */ \
54
- _ (ecall_M , 11 ) /* Environment call from M-mode */
55
+ _ (ecall_M , 11 ) /* Environment call from M-mode */ \
56
+ _ (insn_pgfault , 12 ) /* Instruction page fault */ \
57
+ _ (load_pgfault , 13 ) /* Load page fault */ \
58
+ _ (store_pgfault , 15 ) /* Store page fault */
55
59
/* clang-format on */
56
60
57
61
enum {
@@ -196,6 +200,8 @@ static uint32_t *csr_get_ptr(riscv_t *rv, uint32_t csr)
196
200
case CSR_FCSR :
197
201
return (uint32_t * ) (& rv -> csr_fcsr );
198
202
#endif
203
+ case CSR_SATP :
204
+ return (uint32_t * ) (& rv -> csr_satp );
199
205
default :
200
206
return NULL ;
201
207
}
@@ -220,7 +226,16 @@ static uint32_t csr_csrrw(riscv_t *rv, uint32_t csr, uint32_t val)
220
226
out &= FFLAG_MASK ;
221
227
#endif
222
228
223
- * c = val ;
229
+ if (c == & rv -> csr_satp ) {
230
+ const uint8_t mode_sv32 = val >> 31 ;
231
+ if (mode_sv32 )
232
+ * c = val & MASK (22 ); /* store ppn */
233
+ else /* bare mode */
234
+ * c = 0 ; /* virtual mem addr maps to same physical mem addr directly
235
+ */
236
+ } else {
237
+ * c = val ;
238
+ }
224
239
225
240
return out ;
226
241
}
@@ -456,7 +471,7 @@ static bool do_fuse3(riscv_t *rv, rv_insn_t *ir, uint64_t cycle, uint32_t PC)
456
471
for (int i = 0 ; i < ir -> imm2 ; i ++ ) {
457
472
uint32_t addr = rv -> X [fuse [i ].rs1 ] + fuse [i ].imm ;
458
473
RV_EXC_MISALIGN_HANDLER (3 , store , false, 1 );
459
- rv -> io .mem_write_w (addr , rv -> X [fuse [i ].rs2 ]);
474
+ rv -> io .mem_write_w (rv , addr , rv -> X [fuse [i ].rs2 ]);
460
475
}
461
476
PC += ir -> imm2 * 4 ;
462
477
if (unlikely (RVOP_NO_NEXT (ir ))) {
@@ -480,7 +495,7 @@ static bool do_fuse4(riscv_t *rv, rv_insn_t *ir, uint64_t cycle, uint32_t PC)
480
495
for (int i = 0 ; i < ir -> imm2 ; i ++ ) {
481
496
uint32_t addr = rv -> X [fuse [i ].rs1 ] + fuse [i ].imm ;
482
497
RV_EXC_MISALIGN_HANDLER (3 , load , false, 1 );
483
- rv -> X [fuse [i ].rd ] = rv -> io .mem_read_w (addr );
498
+ rv -> X [fuse [i ].rd ] = rv -> io .mem_read_w (rv , addr );
484
499
}
485
500
PC += ir -> imm2 * 4 ;
486
501
if (unlikely (RVOP_NO_NEXT (ir ))) {
@@ -604,16 +619,18 @@ static void block_translate(riscv_t *rv, block_t *block)
604
619
block -> pc_start = block -> pc_end = rv -> PC ;
605
620
606
621
rv_insn_t * prev_ir = NULL ;
607
- rv_insn_t * ir = mpool_calloc (rv -> block_ir_mp );
622
+ rv_insn_t * ir = mpool_alloc (rv -> block_ir_mp );
608
623
block -> ir_head = ir ;
609
624
610
625
/* translate the basic block */
611
626
while (true) {
627
+ memset (ir , 0 , sizeof (rv_insn_t ));
628
+
612
629
if (prev_ir )
613
630
prev_ir -> next = ir ;
614
631
615
632
/* fetch the next instruction */
616
- const uint32_t insn = rv -> io .mem_ifetch (block -> pc_end );
633
+ const uint32_t insn = rv -> io .mem_ifetch (rv , block -> pc_end );
617
634
618
635
/* decode the instruction */
619
636
if (!rv_decode (ir , insn )) {
@@ -644,7 +661,7 @@ static void block_translate(riscv_t *rv, block_t *block)
644
661
break ;
645
662
}
646
663
647
- ir = mpool_calloc (rv -> block_ir_mp );
664
+ ir = mpool_alloc (rv -> block_ir_mp );
648
665
}
649
666
650
667
assert (prev_ir );
@@ -691,7 +708,7 @@ static bool detect_memset(riscv_t *rv, size_t type)
691
708
692
709
uint32_t tmp_pc = rv -> PC ;
693
710
for (uint32_t i = 0 ; i < memset_len ; i ++ ) {
694
- const uint32_t insn = rv -> io .mem_ifetch (tmp_pc );
711
+ const uint32_t insn = rv -> io .mem_ifetch (rv , tmp_pc );
695
712
if (unlikely (insn != memset_insn [i ]))
696
713
return false;
697
714
tmp_pc += 4 ;
@@ -712,7 +729,7 @@ static bool detect_memcpy(riscv_t *rv, size_t type)
712
729
713
730
uint32_t tmp_pc = rv -> PC ;
714
731
for (uint32_t i = 0 ; i < memcpy_len ; i ++ ) {
715
- const uint32_t insn = rv -> io .mem_ifetch (tmp_pc );
732
+ const uint32_t insn = rv -> io .mem_ifetch (rv , tmp_pc );
716
733
if (unlikely (insn != memcpy_insn [i ]))
717
734
return false;
718
735
tmp_pc += 4 ;
@@ -1178,6 +1195,230 @@ void rv_step(void *arg)
1178
1195
#endif
1179
1196
}
1180
1197
1198
+ static bool ppn_is_valid (riscv_t * rv , uint32_t ppn )
1199
+ {
1200
+ vm_attr_t * attr = PRIV (rv );
1201
+ const uint32_t nr_pg_max = attr -> mem_size / RV_PG_SIZE ;
1202
+ return ppn < nr_pg_max ;
1203
+ }
1204
+
1205
/* Map a physical page number to a host pointer to that page, treated as a
 * page table (1024 x 32-bit PTEs), or NULL when the PPN lies outside the
 * emulated physical memory.  Requires `rv` and `attr` in the calling scope.
 * NOTE: expansion and argument are fully parenthesized so the macro stays
 * safe in larger expressions and with non-trivial arguments.
 */
#define PAGE_TABLE(ppn)                                                       \
    (ppn_is_valid(rv, (ppn))                                                  \
         ? (uint32_t *) &attr->mem[(ppn) << (RV_PG_SHIFT - 2)]                \
         : NULL)
1208
+
1209
+ /* Walk through page tables and get the corresponding PTE by virtual address if
1210
+ * exists
1211
+ * @rv: RISC-V emulator
1212
+ * @addr: virtual address
1213
+ * @return: NULL if a not found or fault else the corresponding PTE
1214
+ */
1215
+ static uint32_t * mmu_walk (riscv_t * rv , const uint32_t addr )
1216
+ {
1217
+ vm_attr_t * attr = PRIV (rv );
1218
+ uint32_t ppn = rv -> csr_satp ;
1219
+ if (ppn == 0 ) /* Bare mode */
1220
+ return NULL ;
1221
+
1222
+ /* start from root page table */
1223
+ uint32_t * page_table = PAGE_TABLE (ppn );
1224
+ if (!page_table )
1225
+ return NULL ;
1226
+
1227
+ for (int level = 1 ; level >= 0 ; level -- ) {
1228
+ uint32_t vpn = addr >> RV_PG_SHIFT >> (level * (RV_PG_SHIFT - 2 ));
1229
+ uint32_t * pte = page_table + vpn ;
1230
+
1231
+ /* PTE XWRV bit in order */
1232
+ uint8_t XWRV_bit = (* pte & MASK (4 ));
1233
+ switch (XWRV_bit ) {
1234
+ case 0b0001 : /* next level of the page table */
1235
+ page_table = PAGE_TABLE (ppn );
1236
+ if (!page_table )
1237
+ return NULL ;
1238
+ break ;
1239
+ case 0b0011 :
1240
+ case 0b0111 :
1241
+ case 0b1001 :
1242
+ case 0b1011 :
1243
+ case 0b1111 :
1244
+ ppn = (* pte >> (RV_PG_SHIFT - 2 ));
1245
+ if (unlikely (ppn ) & MASK (10 )) /* misaligned superpage */
1246
+ return NULL ;
1247
+ return pte ; /* leaf PTE */
1248
+ case 0b0101 :
1249
+ case 0b1101 :
1250
+ return NULL ;
1251
+ }
1252
+ }
1253
+
1254
+ return NULL ;
1255
+ }
1256
+
1257
+ /* Verify the PTE and generate corresponding faults if needed
1258
+ * @op: the operation
1259
+ * @rv: RISC-V emulator
1260
+ * @pte: to be verified pte
1261
+ * @addr: the corresponding virtual address to cause fault
1262
+ * @return: false if a corresponding fault is generated else true
1263
+ */
1264
+ /* FIXME: handle access fault */
1265
+ #define MMU_FAULT_CHECK (op , rv , pte , addr , access_bits ) \
1266
+ mmu_##op##_fault_check(rv, pte, addr, access_bits)
1267
+ #define MMU_FAULT_CHECK_IMPL (op , pgfault ) \
1268
+ static bool mmu_##op##_fault_check(riscv_t *rv, uint32_t *pte, \
1269
+ uint32_t addr, uint32_t access_bits) \
1270
+ { \
1271
+ if (!pte && rv->csr_satp) { /* not found */ \
1272
+ rv_except_ ##pgfault(rv, addr); \
1273
+ return false; \
1274
+ } else if (pte && \
1275
+ (!(*pte & PTE_V) || (!(*pte & PTE_R) && (*pte & PTE_W)))) { \
1276
+ rv_except_##pgfault(rv, addr); \
1277
+ return false; \
1278
+ } else if (pte && (!(*pte & PTE_X) && (access_bits & PTE_X))) { \
1279
+ rv_except_##pgfault(rv, addr); \
1280
+ return false; \
1281
+ } else if (pte && (!(!(MSTATUS_MXR & rv->csr_mstatus) && \
1282
+ !(*pte & PTE_R) && (access_bits & PTE_R)) && \
1283
+ !((MSTATUS_MXR & rv->csr_mstatus) && \
1284
+ !((*pte & PTE_R) | (*pte & PTE_X)) && \
1285
+ (access_bits & PTE_R)))) { \
1286
+ rv_except_##pgfault(rv, addr); \
1287
+ return false; \
1288
+ } else if (pte && ((MSTATUS_MPRV & rv->csr_mstatus) && \
1289
+ !(MSTATUS_MPPH & \
1290
+ rv->csr_mstatus) && /* MPP=01 means S-mode */ \
1291
+ (MSTATUS_MPPL & rv -> csr_mstatus ))) { \
1292
+ if (!(MSTATUS_SUM & rv -> csr_mstatus ) && (* pte & PTE_U )) { \
1293
+ rv_except_ ##pgfault (rv, addr); \
1294
+ return false; \
1295
+ } \
1296
+ } \
1297
+ return true; \
1298
+ }
1299
+
1300
+ MMU_FAULT_CHECK_IMPL (ifetch , insn_pgfault )
1301
+ MMU_FAULT_CHECK_IMPL (read , load_pgfault )
1302
+ MMU_FAULT_CHECK_IMPL (write , store_pgfault )
1303
+
1304
/* Extract the page-aligned physical base and in-page offset of `addr`.
 * @ppn: lvalue receiving the physical base (PPN shifted into address form)
 * @offset: lvalue receiving the offset within the 4 KiB page
 * Requires `pte` and `addr` in the calling scope.
 * Fix: the PPN lives in PTE bits 31:10, so the physical base is
 * (*pte >> 10) << 12 — shifting the raw PTE left by 12 kept the flag bits
 * in the address and discarded the PPN's upper bits.
 */
#define get_ppn_and_offset(ppn, offset)                       \
    do {                                                      \
        ppn = (*pte >> (RV_PG_SHIFT - 2)) << RV_PG_SHIFT;     \
        offset = addr & MASK(RV_PG_SHIFT);                    \
    } while (0)
1309
+
1310
+ uint32_t mmu_ifetch (riscv_t * rv , const uint32_t addr )
1311
+ {
1312
+ uint32_t * pte = mmu_walk (rv , addr );
1313
+ bool ok = MMU_FAULT_CHECK (ifetch , rv , pte , addr , PTE_X );
1314
+ if (unlikely (!ok ))
1315
+ return 0 ;
1316
+
1317
+ if (rv -> csr_satp ) {
1318
+ uint32_t ppn ;
1319
+ uint32_t offset ;
1320
+ get_ppn_and_offset (ppn , offset );
1321
+ return memory_ifetch (ppn | offset );
1322
+ }
1323
+ return memory_ifetch (addr );
1324
+ }
1325
+
1326
+ uint32_t mmu_read_w (riscv_t * rv , const uint32_t addr )
1327
+ {
1328
+ uint32_t * pte = mmu_walk (rv , addr );
1329
+ bool ok = MMU_FAULT_CHECK (read , rv , pte , addr , PTE_R );
1330
+ if (unlikely (!ok ))
1331
+ return 0 ;
1332
+
1333
+ if (rv -> csr_satp ) {
1334
+ uint32_t ppn ;
1335
+ uint32_t offset ;
1336
+ get_ppn_and_offset (ppn , offset );
1337
+ return memory_read_w (ppn | offset );
1338
+ }
1339
+ return memory_read_w (addr );
1340
+ }
1341
+
1342
+ uint16_t mmu_read_s (riscv_t * rv , const uint32_t addr )
1343
+ {
1344
+ uint32_t * pte = mmu_walk (rv , addr );
1345
+ bool ok = MMU_FAULT_CHECK (read , rv , pte , addr , PTE_R );
1346
+ if (unlikely (!ok ))
1347
+ return 0 ;
1348
+
1349
+ if (rv -> csr_satp ) {
1350
+ uint32_t ppn ;
1351
+ uint32_t offset ;
1352
+ get_ppn_and_offset (ppn , offset );
1353
+ return memory_read_s (ppn | offset );
1354
+ }
1355
+ return memory_read_s (addr );
1356
+ }
1357
+
1358
+ uint8_t mmu_read_b (riscv_t * rv , const uint32_t addr )
1359
+ {
1360
+ uint32_t * pte = mmu_walk (rv , addr );
1361
+ bool ok = MMU_FAULT_CHECK (read , rv , pte , addr , PTE_R );
1362
+ if (unlikely (!ok ))
1363
+ return 0 ;
1364
+
1365
+ if (rv -> csr_satp ) {
1366
+ uint32_t ppn ;
1367
+ uint32_t offset ;
1368
+ get_ppn_and_offset (ppn , offset );
1369
+ return memory_read_b (ppn | offset );
1370
+ }
1371
+ return memory_read_b (addr );
1372
+ }
1373
+
1374
+ void mmu_write_w (riscv_t * rv , const uint32_t addr , const uint32_t val )
1375
+ {
1376
+ uint32_t * pte = mmu_walk (rv , addr );
1377
+ bool ok = MMU_FAULT_CHECK (write , rv , pte , addr , PTE_W );
1378
+ if (unlikely (!ok ))
1379
+ return ;
1380
+
1381
+ if (rv -> csr_satp ) {
1382
+ uint32_t ppn ;
1383
+ uint32_t offset ;
1384
+ get_ppn_and_offset (ppn , offset );
1385
+ return memory_write_w (ppn | offset , (uint8_t * ) & val );
1386
+ }
1387
+ return memory_write_w (addr , (uint8_t * ) & val );
1388
+ }
1389
+
1390
+ void mmu_write_s (riscv_t * rv , const uint32_t addr , const uint16_t val )
1391
+ {
1392
+ uint32_t * pte = mmu_walk (rv , addr );
1393
+ bool ok = MMU_FAULT_CHECK (write , rv , pte , addr , PTE_W );
1394
+ if (unlikely (!ok ))
1395
+ return ;
1396
+
1397
+ if (rv -> csr_satp ) {
1398
+ uint32_t ppn ;
1399
+ uint32_t offset ;
1400
+ get_ppn_and_offset (ppn , offset );
1401
+ return memory_write_s (ppn | offset , (uint8_t * ) & val );
1402
+ }
1403
+ return memory_write_s (addr , (uint8_t * ) & val );
1404
+ }
1405
+
1406
+ void mmu_write_b (riscv_t * rv , const uint32_t addr , const uint8_t val )
1407
+ {
1408
+ uint32_t * pte = mmu_walk (rv , addr );
1409
+ bool ok = MMU_FAULT_CHECK (write , rv , pte , addr , PTE_W );
1410
+ if (unlikely (!ok ))
1411
+ return ;
1412
+
1413
+ if (rv -> csr_satp ) {
1414
+ uint32_t ppn ;
1415
+ uint32_t offset ;
1416
+ get_ppn_and_offset (ppn , offset );
1417
+ return memory_write_b (ppn | offset , (uint8_t * ) & val );
1418
+ }
1419
+ return memory_write_b (addr , (uint8_t * ) & val );
1420
+ }
1421
+
1181
1422
void ebreak_handler (riscv_t * rv )
1182
1423
{
1183
1424
assert (rv );
@@ -1225,3 +1466,22 @@ void dump_registers(riscv_t *rv, char *out_file_path)
1225
1466
if (out_file_path [0 ] != '-' )
1226
1467
fclose (f );
1227
1468
}
1469
+
1470
/* I/O handler table that routes every memory access through the Sv32 MMU
 * (mmu_* accessors above).  The system-service hooks are the same handlers
 * used by the direct-mapped I/O table.
 */
riscv_io_t mmu_io = {
    /* memory read interface */
    .mem_ifetch = mmu_ifetch,
    .mem_read_w = mmu_read_w,
    .mem_read_s = mmu_read_s,
    .mem_read_b = mmu_read_b,

    /* memory write interface */
    .mem_write_w = mmu_write_w,
    .mem_write_s = mmu_write_s,
    .mem_write_b = mmu_write_b,

    /* system services or essential routines */
    .on_ecall = ecall_handler,
    .on_ebreak = ebreak_handler,
    .on_memcpy = memcpy_handler,
    .on_memset = memset_handler,
};
0 commit comments