Commit 1a12fd6

Drop unused structure "chain_entry_t"
This structure was out of date and could become a source of bugs in the upcoming system simulation. Closes #513
1 parent f31bc84 · commit 1a12fd6

5 files changed, +152 -123 lines
src/cache.c

Lines changed: 36 additions & 11 deletions

@@ -196,33 +196,58 @@ void *cache_get(const cache_t *cache, uint32_t key, bool update)
 
 void *cache_put(cache_t *cache, uint32_t key, void *value)
 {
-    void *delete_value = NULL;
     assert(cache->list_size <= cache->capacity);
-    /* check the cache is full or not before adding a new entry */
-    if (cache->list_size == cache->capacity) {
+
+    lfu_entry_t *replaced_entry = NULL, *entry;
+    hlist_for_each_entry (entry, &cache->map->ht_list_head[cache_hash(key)],
+                          ht_list) {
+        if (entry->key != key)
+            continue;
+        /* update the existing cache */
+        if (entry->value != value) {
+            replaced_entry = entry;
+            break;
+        } else {
+            /* should not put an identical block to cache */
+            assert(NULL);
+            __UNREACHABLE;
+        }
+    }
+
+    /* get the entry to be replaced if cache is full */
+    if (!replaced_entry && cache->list_size == cache->capacity) {
         for (int i = 0; i < THRESHOLD; i++) {
             if (list_empty(cache->lists[i]))
                 continue;
-            lfu_entry_t *delete_target =
+            replaced_entry =
                 list_last_entry(cache->lists[i], lfu_entry_t, list);
-            list_del_init(&delete_target->list);
-            hlist_del_init(&delete_target->ht_list);
-            delete_value = delete_target->value;
-            cache->list_size--;
-            mpool_free(cache_mp, delete_target);
             break;
         }
+        assert(replaced_entry);
     }
+
+    void *replaced_value = NULL;
+    if (replaced_entry) {
+        replaced_value = replaced_entry->value;
+        list_del_init(&replaced_entry->list);
+        hlist_del_init(&replaced_entry->ht_list);
+        mpool_free(cache_mp, replaced_entry);
+        cache->list_size--;
+    }
+
     lfu_entry_t *new_entry = mpool_alloc(cache_mp);
+    INIT_LIST_HEAD(&new_entry->list);
+    INIT_HLIST_NODE(&new_entry->ht_list);
     new_entry->key = key;
     new_entry->value = value;
     new_entry->frequency = 0;
     list_add(&new_entry->list, cache->lists[new_entry->frequency++]);
-    cache->list_size++;
     hlist_add_head(&new_entry->ht_list,
                    &cache->map->ht_list_head[cache_hash(key)]);
+    cache->list_size++;
+
     assert(cache->list_size <= cache->capacity);
-    return delete_value;
+    return replaced_value;
 }
 
 void cache_free(cache_t *cache)

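Note the contract here: cache_put() returns the displaced value (replaced_value), now also covering the case where an existing entry with the same key is replaced, and the caller owns that pointer; src/emulate.c below frees the replaced block it gets back. A toy, self-contained illustration of this ownership convention only (single-slot stand-in, not the project's LFU cache):

#include <stdint.h>
#include <stdlib.h>

typedef struct {
    uint32_t key;
    void *value;
    int used;
} slot_cache_t; /* single-slot stand-in for cache_t */

static void *slot_cache_put(slot_cache_t *c, uint32_t key, void *value)
{
    void *replaced = c->used ? c->value : NULL;
    c->key = key;
    c->value = value;
    c->used = 1;
    return replaced; /* ownership of the displaced value moves to the caller */
}

int main(void)
{
    slot_cache_t cache = {0};
    void *replaced;

    replaced = slot_cache_put(&cache, 0x100, malloc(16)); /* displaces nothing */
    replaced = slot_cache_put(&cache, 0x200, malloc(16)); /* displaces 0x100 */
    if (replaced)
        free(replaced); /* caller frees what the put operation handed back */

    free(cache.value);
    return 0;
}
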
src/emulate.c

Lines changed: 103 additions & 100 deletions

@@ -533,8 +533,40 @@ FORCE_INLINE bool insn_is_unconditional_branch(uint8_t opcode)
     case rv_insn_cebreak:
 #endif
         return true;
+    default:
+        return false;
     }
-    return false;
+    __UNREACHABLE;
+}
+
+FORCE_INLINE bool insn_is_direct_branch(uint8_t opcode)
+{
+    switch (opcode) {
+    case rv_insn_jal:
+#if RV32_HAS(EXT_C)
+    case rv_insn_cjal:
+    case rv_insn_cj:
+#endif
+        return true;
+    default:
+        return false;
+    }
+    __UNREACHABLE;
+}
+
+FORCE_INLINE bool insn_is_indirect_branch(uint8_t opcode)
+{
+    switch (opcode) {
+    case rv_insn_jalr:
+#if RV32_HAS(EXT_C)
+    case rv_insn_cjalr:
+    case rv_insn_cjr:
+#endif
+        return true;
+    default:
+        return false;
+    }
+    __UNREACHABLE;
 }
 
 static void block_translate(riscv_t *rv, block_t *block)
@@ -571,11 +603,7 @@ static void block_translate(riscv_t *rv, block_t *block)
 #endif
         /* stop on branch */
         if (insn_is_branch(ir->opcode)) {
-            if (ir->opcode == rv_insn_jalr
-#if RV32_HAS(EXT_C)
-                || ir->opcode == rv_insn_cjalr || ir->opcode == rv_insn_cjr
-#endif
-            ) {
+            if (insn_is_indirect_branch(ir->opcode)) {
                 ir->branch_table = calloc(1, sizeof(branch_history_table_t));
                 assert(ir->branch_table);
                 memset(ir->branch_table->PC, -1,
@@ -768,95 +796,89 @@ static block_t *block_find_or_translate(riscv_t *rv)
 #if !RV32_HAS(JIT)
     block_map_t *map = &rv->block_map;
     /* lookup the next block in the block map */
-    block_t *next = block_find(map, rv->PC);
+    block_t *next_blk = block_find(map, rv->PC);
 #else
     /* lookup the next block in the block cache */
-    block_t *next = (block_t *) cache_get(rv->block_cache, rv->PC, true);
+    /*
+     * The function "cache_get()" gets the cached block by the given "key (PC)".
+     * In system simulation, the returned block might be dropped because it is
+     * not the one from the current process (by checking SATP CSR register).
+     */
+    block_t *next_blk = (block_t *) cache_get(rv->block_cache, rv->PC, true);
 #endif
 
-    if (!next) {
+    if (next_blk)
+        return next_blk;
+
 #if !RV32_HAS(JIT)
-        if (map->size * 1.25 > map->block_capacity) {
-            block_map_clear(rv);
-            prev = NULL;
-        }
+    /* clear block list if it is going to be filled */
+    if (map->size * 1.25 > map->block_capacity) {
+        block_map_clear(rv);
+        prev = NULL;
+    }
 #endif
-        /* allocate a new block */
-        next = block_alloc(rv);
-        block_translate(rv, next);
+    /* allocate a new block */
+    next_blk = block_alloc(rv);
+
+    block_translate(rv, next_blk);
 
-        optimize_constant(rv, next);
+    optimize_constant(rv, next_blk);
 #if RV32_HAS(GDBSTUB)
-        if (likely(!rv->debug_mode))
+    if (likely(!rv->debug_mode))
 #endif
-            /* macro operation fusion */
-            match_pattern(rv, next);
+        /* macro operation fusion */
+        match_pattern(rv, next_blk);
 
 #if !RV32_HAS(JIT)
-        /* insert the block into block map */
-        block_insert(&rv->block_map, next);
+    /* insert the block into block map */
+    block_insert(&rv->block_map, next_blk);
 #else
-        /* insert the block into block cache */
-        block_t *delete_target = cache_put(rv->block_cache, rv->PC, &(*next));
-        if (delete_target) {
-            if (prev == delete_target)
-                prev = NULL;
-            chain_entry_t *entry, *safe;
-            /* correctly remove deleted block from its chained block */
-            rv_insn_t *taken = delete_target->ir_tail->branch_taken,
-                      *untaken = delete_target->ir_tail->branch_untaken;
-            if (taken && taken->pc != delete_target->pc_start) {
-                block_t *target = cache_get(rv->block_cache, taken->pc, false);
-                bool flag = false;
-                list_for_each_entry_safe (entry, safe, &target->list, list) {
-                    if (entry->block == delete_target) {
-                        list_del_init(&entry->list);
-                        mpool_free(rv->chain_entry_mp, entry);
-                        flag = true;
-                    }
-                }
-                assert(flag);
-            }
-            if (untaken && untaken->pc != delete_target->pc_start) {
-                block_t *target =
-                    cache_get(rv->block_cache, untaken->pc, false);
-                assert(target);
-                bool flag = false;
-                list_for_each_entry_safe (entry, safe, &target->list, list) {
-                    if (entry->block == delete_target) {
-                        list_del_init(&entry->list);
-                        mpool_free(rv->chain_entry_mp, entry);
-                        flag = true;
-                    }
-                }
-                assert(flag);
-            }
-            /* correctly remove deleted block from the block chained to it */
-            list_for_each_entry_safe (entry, safe, &delete_target->list, list) {
-                if (entry->block == delete_target)
-                    continue;
-                rv_insn_t *target = entry->block->ir_tail;
-                if (target->branch_taken == delete_target->ir_head)
-                    target->branch_taken = NULL;
-                else if (target->branch_untaken == delete_target->ir_head)
-                    target->branch_untaken = NULL;
-                mpool_free(rv->chain_entry_mp, entry);
-            }
-            /* free deleted block */
-            uint32_t idx;
-            rv_insn_t *ir, *next;
-            for (idx = 0, ir = delete_target->ir_head;
-                 idx < delete_target->n_insn; idx++, ir = next) {
-                free(ir->fuse);
-                next = ir->next;
-                mpool_free(rv->block_ir_mp, ir);
-            }
-            mpool_free(rv->block_mp, delete_target);
+    list_add(&next_blk->list, &rv->block_list);
+
+    /* insert the block into block cache */
+    block_t *replaced_blk = cache_put(rv->block_cache, rv->PC, &(*next_blk));
+
+    if (!replaced_blk)
+        return next_blk;
+
+    list_del_init(&replaced_blk->list);
+
+    if (prev == replaced_blk)
+        prev = NULL;
+
+    /* remove the connection from parents */
+    rv_insn_t *replaced_blk_entry = replaced_blk->ir_head;
+
+    /* TODO: record parents of each block to avoid traversing all blocks */
+    block_t *entry;
+    list_for_each_entry (entry, &rv->block_list, list) {
+        rv_insn_t *taken = entry->ir_tail->branch_taken,
+                  *untaken = entry->ir_tail->branch_untaken;
+
+        if (taken == replaced_blk_entry) {
+            entry->ir_tail->branch_taken = NULL;
         }
-#endif
+        if (untaken == replaced_blk_entry) {
+            entry->ir_tail->branch_untaken = NULL;
+        }
+    }
+
+    /* free IRs in replaced block */
+    for (rv_insn_t *ir = replaced_blk->ir_head, *next_ir; ir != NULL;
+         ir = next_ir) {
+        next_ir = ir->next;
+
+        if (ir->fuse)
+            free(ir->fuse);
+
+        mpool_free(rv->block_ir_mp, ir);
     }
 
-    return next;
+    mpool_free(rv->block_mp, replaced_blk);
+#endif
+
+    assert(next_blk);
+    return next_blk;
 }
 
 #if RV32_HAS(JIT)
@@ -918,31 +940,12 @@ void rv_step(void *arg)
         if (!insn_is_unconditional_branch(last_ir->opcode)) {
             if (is_branch_taken && !last_ir->branch_taken) {
                 last_ir->branch_taken = block->ir_head;
-#if RV32_HAS(JIT)
-                chain_entry_t *new_entry = mpool_alloc(rv->chain_entry_mp);
-                new_entry->block = prev;
-                list_add(&new_entry->list, &block->list);
-#endif
             } else if (!is_branch_taken && !last_ir->branch_untaken) {
                 last_ir->branch_untaken = block->ir_head;
-#if RV32_HAS(JIT)
-                chain_entry_t *new_entry = mpool_alloc(rv->chain_entry_mp);
-                new_entry->block = prev;
-                list_add(&new_entry->list, &block->list);
-#endif
             }
-        } else if (IF_insn(last_ir, jal)
-#if RV32_HAS(EXT_C)
-                   || IF_insn(last_ir, cj) || IF_insn(last_ir, cjal)
-#endif
-        ) {
+        } else if (insn_is_direct_branch(last_ir->opcode)) {
             if (!last_ir->branch_taken) {
                 last_ir->branch_taken = block->ir_head;
-#if RV32_HAS(JIT)
-                chain_entry_t *new_entry = mpool_alloc(rv->chain_entry_mp);
-                new_entry->block = prev;
-                list_add(&new_entry->list, &block->list);
-#endif
             }
         }
     }

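One detail worth noting in the new teardown loop above is that next_ir is captured before the current IR node is freed, so the walk never touches freed memory. A minimal, self-contained sketch of that idiom (generic node type, not the project's rv_insn_t):

#include <stdlib.h>

typedef struct node {
    struct node *next;
} node_t;

/* Free a singly linked chain: read the successor before freeing the node,
 * mirroring how the replaced block's IR list is released above. */
static void free_chain(node_t *head)
{
    for (node_t *n = head, *next; n != NULL; n = next) {
        next = n->next; /* must be saved before free(n) invalidates it */
        free(n);
    }
}

int main(void)
{
    node_t *b = calloc(1, sizeof(node_t));
    node_t *a = calloc(1, sizeof(node_t));
    a->next = b;
    free_chain(a);
    return 0;
}
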
src/riscv.c

Lines changed: 1 addition & 4 deletions

@@ -315,9 +315,7 @@ riscv_t *rv_create(riscv_user_t rv_attr)
     /* initialize the block map */
     block_map_init(&rv->block_map, BLOCK_MAP_CAPACITY_BITS);
 #else
-    rv->chain_entry_mp =
-        mpool_create(sizeof(chain_entry_t) << BLOCK_IR_MAP_CAPACITY_BITS,
-                     sizeof(chain_entry_t));
+    INIT_LIST_HEAD(&rv->block_list);
     rv->jit_state = jit_state_init(CODE_CACHE_SIZE);
     rv->block_cache = cache_create(BLOCK_MAP_CAPACITY_BITS);
     assert(rv->block_cache);
@@ -426,7 +424,6 @@ void rv_delete(riscv_t *rv)
     pthread_mutex_destroy(&rv->wait_queue_lock);
     jit_cache_exit(rv->jit_cache);
 #endif
-    mpool_destroy(rv->chain_entry_mp);
     jit_state_exit(rv->jit_state);
     cache_free(rv->block_cache);
 #endif

src/riscv_private.h

Lines changed: 2 additions & 8 deletions

@@ -100,18 +100,12 @@ typedef struct block {
 #endif
 } block_t;
 
-#if RV32_HAS(JIT)
-typedef struct {
-    block_t *block;
-    struct list_head list;
-} chain_entry_t;
-#if RV32_HAS(T2C)
+#if RV32_HAS(JIT) && RV32_HAS(T2C)
 typedef struct {
     block_t *block;
     struct list_head list;
 } queue_entry_t;
 #endif
-#endif
 
 typedef struct {
     uint32_t block_capacity; /**< max number of entries in the block map */
@@ -178,7 +172,7 @@ struct riscv_internal {
     block_map_t block_map; /**< basic block map */
 #else
     struct cache *block_cache;
-    struct mpool *chain_entry_mp;
+    struct list_head block_list; /**< list of all translated blocks */
 #if RV32_HAS(T2C)
     struct list_head wait_queue;
     pthread_mutex_t wait_queue_lock;

src/utils.h

Lines changed: 10 additions & 0 deletions

@@ -106,12 +106,22 @@ static inline void list_del_init(struct list_head *node)
     list_entry((head)->prev, type, member)
 
 #ifdef __HAVE_TYPEOF
+#define list_for_each_entry(entry, head, member)                       \
+    for (entry = list_entry((head)->next, __typeof__(*entry), member); \
+         &entry->member != (head);                                     \
+         entry = list_entry(entry->member.next, __typeof__(*entry), member))
+
 #define list_for_each_entry_safe(entry, safe, head, member)                 \
     for (entry = list_entry((head)->next, __typeof__(*entry), member),      \
          safe = list_entry(entry->member.next, __typeof__(*entry), member); \
         &entry->member != (head); entry = safe,                             \
         safe = list_entry(safe->member.next, __typeof__(*entry), member))
 #else
+#define list_for_each_entry(entry, head, member, type)    \
+    for (entry = list_entry((head)->next, type, member);  \
+         &entry->member != (head);                        \
+         entry = list_entry(entry->member.next, type, member))
+
 #define list_for_each_entry_safe(entry, safe, head, member, type) \
     for (entry = list_entry((head)->next, type, member),          \
          safe = list_entry(entry->member.next, type, member);     \
         &entry->member != (head); entry = safe,                   \

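The newly added list_for_each_entry() mirrors the existing _safe variant: with __HAVE_TYPEOF the element type is inferred via __typeof__, otherwise it must be passed as an extra type argument. A minimal usage sketch under the __typeof__ path (GCC/Clang); the list_head, list_entry, and list_add helpers below are simplified stand-ins for the ones already in src/utils.h, and fake_block_t is a made-up element type:

#include <stddef.h>
#include <stdio.h>

struct list_head {
    struct list_head *prev, *next;
};

#define LIST_HEAD_INIT(name) {&(name), &(name)}

/* container_of-style lookup, standing in for utils.h's list_entry() */
#define list_entry(node, type, member) \
    ((type *) ((char *) (node) - offsetof(type, member)))

/* the macro added by this commit (__HAVE_TYPEOF variant) */
#define list_for_each_entry(entry, head, member)                       \
    for (entry = list_entry((head)->next, __typeof__(*entry), member); \
         &entry->member != (head);                                     \
         entry = list_entry(entry->member.next, __typeof__(*entry), member))

static void list_add(struct list_head *node, struct list_head *head)
{
    node->prev = head;
    node->next = head->next;
    head->next->prev = node;
    head->next = node;
}

typedef struct {
    unsigned pc;
    struct list_head list;
} fake_block_t; /* stand-in for block_t */

int main(void)
{
    struct list_head blocks = LIST_HEAD_INIT(blocks); /* like rv->block_list */
    fake_block_t a = {.pc = 0x100}, b = {.pc = 0x200};
    list_add(&a.list, &blocks);
    list_add(&b.list, &blocks);

    fake_block_t *entry;
    list_for_each_entry (entry, &blocks, list)
        printf("block at 0x%x\n", entry->pc);
    return 0;
}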