@@ -39,9 +39,9 @@ struct tlb_entry_t {
 };
 
 struct xlate_flags_t {
-  const bool forced_virt : 1;
-  const bool hlvx : 1;
-  const bool lr : 1;
+  const bool forced_virt : 1 {false};
+  const bool hlvx : 1 {false};
+  const bool lr : 1 {false};
 
   bool is_special_access() const {
     return forced_virt || hlvx || lr;
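The new `xlate_flags_t` leans on two C++20 features: default member initializers on bit-fields and designated initializers. A minimal standalone sketch (not part of this diff; hypothetical `flags_t` name, compile with `-std=c++20`) of the resulting semantics:

```cpp
#include <cassert>

// Same shape as the patched xlate_flags_t: C++20 allows default member
// initializers on bit-fields, and the struct remains an aggregate.
struct flags_t {
  const bool forced_virt : 1 {false};
  const bool hlvx : 1 {false};
  const bool lr : 1 {false};
};

int main() {
  flags_t none{};               // value-init: every flag takes its {false} default
  flags_t lr_only{.lr = true};  // designated init: unnamed members keep defaults
  assert(!none.forced_virt && !none.hlvx && !none.lr);
  assert(!lr_only.forced_virt && !lr_only.hlvx && lr_only.lr);
}
```

Because the struct has no user-declared constructors it stays an aggregate, so both `{}` and `{.lr = true}` fall back to the declared `{false}` defaults for any member not named.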
@@ -72,7 +72,7 @@ class mmu_t
 
   mem_access_info_t generate_access_info(reg_t addr, access_type type, xlate_flags_t xlate_flags) {
     if (!proc)
-      return {addr, 0, false, {false, false, false}, type};
+      return {addr, 0, false, {}, type};
     bool virt = proc->state.v;
     reg_t mode = proc->state.prv;
     if (type != FETCH) {
@@ -94,7 +94,7 @@ class mmu_t
   ~mmu_t();
 
   template<typename T>
-  T ALWAYS_INLINE load(reg_t addr, xlate_flags_t xlate_flags = {false, false, false}) {
+  T ALWAYS_INLINE load(reg_t addr, xlate_flags_t xlate_flags = {}) {
     target_endian<T> res;
     reg_t vpn = addr >> PGSHIFT;
     bool aligned = (addr & (sizeof(T) - 1)) == 0;
@@ -114,30 +114,21 @@ class mmu_t
 
   template<typename T>
   T load_reserved(reg_t addr) {
-    bool forced_virt = false;
-    bool hlvx = false;
-    bool lr = true;
-    return load<T>(addr, {forced_virt, hlvx, lr});
+    return load<T>(addr, {.lr = true});
   }
 
   template<typename T>
   T guest_load(reg_t addr) {
-    bool forced_virt = true;
-    bool hlvx = false;
-    bool lr = false;
-    return load<T>(addr, {forced_virt, hlvx, lr});
+    return load<T>(addr, {.forced_virt = true});
   }
 
   template<typename T>
   T guest_load_x(reg_t addr) {
-    bool forced_virt = true;
-    bool hlvx = true;
-    bool lr = false;
-    return load<T>(addr, {forced_virt, hlvx, lr});
+    return load<T>(addr, {.forced_virt = true, .hlvx = true});
   }
 
   template<typename T>
-  void ALWAYS_INLINE store(reg_t addr, T val, xlate_flags_t xlate_flags = {false, false, false}) {
+  void ALWAYS_INLINE store(reg_t addr, T val, xlate_flags_t xlate_flags = {}) {
     reg_t vpn = addr >> PGSHIFT;
     bool aligned = (addr & (sizeof(T) - 1)) == 0;
     bool tlb_hit = tlb_store_tag[vpn % TLB_ENTRIES] == vpn;
@@ -155,10 +146,7 @@ class mmu_t
 
   template<typename T>
   void guest_store(reg_t addr, T val) {
-    bool forced_virt = true;
-    bool hlvx = false;
-    bool lr = false;
-    store(addr, val, {forced_virt, hlvx, lr});
+    store(addr, val, {.forced_virt = true});
   }
 
   // AMO/Zicbom faults should be reported as store faults
@@ -180,7 +168,7 @@ class mmu_t
   template<typename T, typename op>
   T amo(reg_t addr, op f) {
     convert_load_traps_to_store_traps({
-      store_slow_path(addr, sizeof(T), nullptr, {false, false, false}, false, true);
+      store_slow_path(addr, sizeof(T), nullptr, {}, false, true);
       auto lhs = load<T>(addr);
       store<T>(addr, f(lhs));
       return lhs;
@@ -190,7 +178,7 @@ class mmu_t
   template<typename T>
   T amo_compare_and_swap(reg_t addr, T comp, T swap) {
     convert_load_traps_to_store_traps({
-      store_slow_path(addr, sizeof(T), nullptr, {false, false, false}, false, true);
+      store_slow_path(addr, sizeof(T), nullptr, {}, false, true);
       auto lhs = load<T>(addr);
       if (lhs == comp)
         store<T>(addr, swap);
@@ -230,7 +218,7 @@ class mmu_t
     for (size_t offset = 0; offset < blocksz; offset += 1)
       check_triggers(triggers::OPERATION_STORE, base + offset, false, addr, std::nullopt);
     convert_load_traps_to_store_traps({
-      const reg_t paddr = translate(generate_access_info(addr, LOAD, {false, false, false}), 1);
+      const reg_t paddr = translate(generate_access_info(addr, LOAD, {}), 1);
      if (sim->reservable(paddr)) {
        if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD))
          tracer.clean_invalidate(paddr, blocksz, clean, inval);
@@ -249,10 +237,10 @@ class mmu_t
   {
     if (vaddr & (size-1)) {
       // Raise either access fault or misaligned exception
-      store_slow_path(vaddr, size, nullptr, {false, false, false}, false, true);
+      store_slow_path(vaddr, size, nullptr, {}, false, true);
     }
 
-    reg_t paddr = translate(generate_access_info(vaddr, STORE, {false, false, false}), 1);
+    reg_t paddr = translate(generate_access_info(vaddr, STORE, {}), 1);
     if (sim->reservable(paddr))
       return load_reservation_address == paddr;
     else
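For the `= {}` default arguments on `load` and `store`, a hedged sketch (hypothetical `access` function standing in for the real `mmu_t` members) of why the change is behavior-preserving: the empty braced list applies the same member defaults as the old positional `{false, false, false}`, while special accesses name only the flag they need:

```cpp
#include <cstdint>
#include <iostream>

struct xlate_flags_t {
  const bool forced_virt : 1 {false};
  const bool hlvx : 1 {false};
  const bool lr : 1 {false};
};

// Hypothetical stand-in for mmu_t::load/store: `= {}` gives plain callers
// all-false flags, identical to the old `= {false, false, false}` default.
void access(uint64_t addr, xlate_flags_t flags = {}) {
  std::cout << std::hex << addr
            << " forced_virt=" << flags.forced_virt
            << " hlvx=" << flags.hlvx
            << " lr=" << flags.lr << '\n';
}

int main() {
  access(0x80000000);                                       // ordinary access
  access(0x80000000, {.lr = true});                         // as load_reserved
  access(0x80000000, {.forced_virt = true, .hlvx = true});  // as guest_load_x
}
```

A side benefit of the designated style: adding a new flag to `xlate_flags_t` no longer forces every positional `{false, false, false}` call site to be rewritten.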