@@ -16,6 +16,7 @@
 #include <linux/percpu.h>
 #include <linux/spinlock.h>
 #include <asm/cpufeature.h>
+#include <asm/kvm_nacl.h>
 
 struct aia_hgei_control {
 	raw_spinlock_t lock;
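
Note: the new <asm/kvm_nacl.h> include provides the SBI Nested Acceleration (NACL) helpers that the rest of this patch relies on. The ncsr_read()/ncsr_write() wrappers choose at run time between the NACL shared-memory scratch area and a direct CSR access, so the same call site works both on bare metal and when KVM itself runs as a guest hypervisor. A minimal sketch of that dispatch, built only from helper names visible in this diff (the in-tree macro may differ in detail):

/* Sketch only: fall back to a real CSR read when NACL is absent. */
#define ncsr_read(__csr)					\
({								\
	unsigned long __r;					\
	if (kvm_riscv_nacl_available())				\
		__r = nacl_csr_read(nacl_shmem(), __csr);	\
	else							\
		__r = csr_read(__csr);				\
	__r;							\
})
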
@@ -88,7 +89,7 @@ void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
 	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
 
 	if (kvm_riscv_aia_available())
-		csr->vsieh = csr_read(CSR_VSIEH);
+		csr->vsieh = ncsr_read(CSR_VSIEH);
 }
 #endif
 
@@ -115,7 +116,7 @@ bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
 
 	hgei = aia_find_hgei(vcpu);
 	if (hgei > 0)
-		return !!(csr_read(CSR_HGEIP) & BIT(hgei));
+		return !!(ncsr_read(CSR_HGEIP) & BIT(hgei));
 
 	return false;
 }
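
Note: kvm_riscv_vcpu_aia_sync_interrupts() and kvm_riscv_vcpu_aia_has_interrupts() sit on the vcpu run path, so replacing csr_read() with ncsr_read() here means the value can come from the NACL shared memory instead of a trapping CSR access when running nested. Conceptually, such a shared-memory read is just an array lookup; the sketch below uses hypothetical EXAMPLE_CSR_OFFSET and example_csr_index() names, since the real shared-memory layout is defined by the SBI NACL extension, not by this diff:

/* Conceptual sketch, hypothetical names: the SBI implementation keeps a
 * scratch array of CSR values in the shared memory page. */
static inline unsigned long example_nacl_csr_read(void *shmem, unsigned int csr_num)
{
	unsigned long *csrs = shmem + EXAMPLE_CSR_OFFSET;

	return csrs[example_csr_index(csr_num)];
}
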
@@ -128,45 +129,73 @@ void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
 		return;
 
 #ifdef CONFIG_32BIT
-	csr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
+	ncsr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
 #endif
-	csr_write(CSR_HVICTL, aia_hvictl_value(!!(csr->hvip & BIT(IRQ_VS_EXT))));
+	ncsr_write(CSR_HVICTL, aia_hvictl_value(!!(csr->hvip & BIT(IRQ_VS_EXT))));
 }
 
 void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
+	void *nsh;
 
 	if (!kvm_riscv_aia_available())
 		return;
 
-	csr_write(CSR_VSISELECT, csr->vsiselect);
-	csr_write(CSR_HVIPRIO1, csr->hviprio1);
-	csr_write(CSR_HVIPRIO2, csr->hviprio2);
+	if (kvm_riscv_nacl_sync_csr_available()) {
+		nsh = nacl_shmem();
+		nacl_csr_write(nsh, CSR_VSISELECT, csr->vsiselect);
+		nacl_csr_write(nsh, CSR_HVIPRIO1, csr->hviprio1);
+		nacl_csr_write(nsh, CSR_HVIPRIO2, csr->hviprio2);
+#ifdef CONFIG_32BIT
+		nacl_csr_write(nsh, CSR_VSIEH, csr->vsieh);
+		nacl_csr_write(nsh, CSR_HVIPH, csr->hviph);
+		nacl_csr_write(nsh, CSR_HVIPRIO1H, csr->hviprio1h);
+		nacl_csr_write(nsh, CSR_HVIPRIO2H, csr->hviprio2h);
+#endif
+	} else {
+		csr_write(CSR_VSISELECT, csr->vsiselect);
+		csr_write(CSR_HVIPRIO1, csr->hviprio1);
+		csr_write(CSR_HVIPRIO2, csr->hviprio2);
 #ifdef CONFIG_32BIT
-	csr_write(CSR_VSIEH, csr->vsieh);
-	csr_write(CSR_HVIPH, csr->hviph);
-	csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
-	csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
+		csr_write(CSR_VSIEH, csr->vsieh);
+		csr_write(CSR_HVIPH, csr->hviph);
+		csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
+		csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
 #endif
+	}
 }
 
 void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
+	void *nsh;
 
 	if (!kvm_riscv_aia_available())
 		return;
 
-	csr->vsiselect = csr_read(CSR_VSISELECT);
-	csr->hviprio1 = csr_read(CSR_HVIPRIO1);
-	csr->hviprio2 = csr_read(CSR_HVIPRIO2);
+	if (kvm_riscv_nacl_available()) {
+		nsh = nacl_shmem();
+		csr->vsiselect = nacl_csr_read(nsh, CSR_VSISELECT);
+		csr->hviprio1 = nacl_csr_read(nsh, CSR_HVIPRIO1);
+		csr->hviprio2 = nacl_csr_read(nsh, CSR_HVIPRIO2);
 #ifdef CONFIG_32BIT
-	csr->vsieh = csr_read(CSR_VSIEH);
-	csr->hviph = csr_read(CSR_HVIPH);
-	csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
-	csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
+		csr->vsieh = nacl_csr_read(nsh, CSR_VSIEH);
+		csr->hviph = nacl_csr_read(nsh, CSR_HVIPH);
+		csr->hviprio1h = nacl_csr_read(nsh, CSR_HVIPRIO1H);
+		csr->hviprio2h = nacl_csr_read(nsh, CSR_HVIPRIO2H);
 #endif
+	} else {
+		csr->vsiselect = csr_read(CSR_VSISELECT);
+		csr->hviprio1 = csr_read(CSR_HVIPRIO1);
+		csr->hviprio2 = csr_read(CSR_HVIPRIO2);
+#ifdef CONFIG_32BIT
+		csr->vsieh = csr_read(CSR_VSIEH);
+		csr->hviph = csr_read(CSR_HVIPH);
+		csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
+		csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
+#endif
+	}
 }
 
 int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
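
Note: the load/put rework above is the heart of the patch. On vcpu load, when kvm_riscv_nacl_sync_csr_available() reports that the SBI implementation can synchronize CSRs, all of the AIA guest CSR writes are staged in the shared memory through nacl_csr_write() and applied together by a later synchronize-CSR operation, turning many traps into one when running nested. On vcpu put, the weaker kvm_riscv_nacl_available() check suffices because the reads only need the values already mirrored in the shared memory. A conceptual sketch of what staging a write involves, again with hypothetical offset and index names (the real layout lives in the SBI NACL extension):

/* Conceptual sketch, hypothetical names: store the value in the scratch
 * slot and mark it dirty so the next synchronize-CSR request pushes it
 * to the real CSR. */
static inline void example_nacl_csr_write(void *shmem, unsigned int csr_num,
					  unsigned long val)
{
	unsigned long *csrs = shmem + EXAMPLE_CSR_OFFSET;
	unsigned long *dirty = shmem + EXAMPLE_DIRTY_OFFSET;
	unsigned int idx = example_csr_index(csr_num);

	csrs[idx] = val;
	dirty[idx / BITS_PER_LONG] |= BIT(idx % BITS_PER_LONG);
}
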
@@ -250,20 +279,20 @@ static u8 aia_get_iprio8(struct kvm_vcpu *vcpu, unsigned int irq)
 
 	switch (bitpos / BITS_PER_LONG) {
 	case 0:
-		hviprio = csr_read(CSR_HVIPRIO1);
+		hviprio = ncsr_read(CSR_HVIPRIO1);
 		break;
 	case 1:
 #ifndef CONFIG_32BIT
-		hviprio = csr_read(CSR_HVIPRIO2);
+		hviprio = ncsr_read(CSR_HVIPRIO2);
 		break;
 #else
-		hviprio = csr_read(CSR_HVIPRIO1H);
+		hviprio = ncsr_read(CSR_HVIPRIO1H);
 		break;
 	case 2:
-		hviprio = csr_read(CSR_HVIPRIO2);
+		hviprio = ncsr_read(CSR_HVIPRIO2);
 		break;
 	case 3:
-		hviprio = csr_read(CSR_HVIPRIO2H);
+		hviprio = ncsr_read(CSR_HVIPRIO2H);
 		break;
 #endif
 	default:
@@ -283,20 +312,20 @@ static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
 
 	switch (bitpos / BITS_PER_LONG) {
 	case 0:
-		hviprio = csr_read(CSR_HVIPRIO1);
+		hviprio = ncsr_read(CSR_HVIPRIO1);
 		break;
 	case 1:
 #ifndef CONFIG_32BIT
-		hviprio = csr_read(CSR_HVIPRIO2);
+		hviprio = ncsr_read(CSR_HVIPRIO2);
 		break;
 #else
-		hviprio = csr_read(CSR_HVIPRIO1H);
+		hviprio = ncsr_read(CSR_HVIPRIO1H);
 		break;
 	case 2:
-		hviprio = csr_read(CSR_HVIPRIO2);
+		hviprio = ncsr_read(CSR_HVIPRIO2);
 		break;
 	case 3:
-		hviprio = csr_read(CSR_HVIPRIO2H);
+		hviprio = ncsr_read(CSR_HVIPRIO2H);
 		break;
 #endif
 	default:
@@ -308,20 +337,20 @@ static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
 
 	switch (bitpos / BITS_PER_LONG) {
 	case 0:
-		csr_write(CSR_HVIPRIO1, hviprio);
+		ncsr_write(CSR_HVIPRIO1, hviprio);
 		break;
 	case 1:
 #ifndef CONFIG_32BIT
-		csr_write(CSR_HVIPRIO2, hviprio);
+		ncsr_write(CSR_HVIPRIO2, hviprio);
 		break;
 #else
-		csr_write(CSR_HVIPRIO1H, hviprio);
+		ncsr_write(CSR_HVIPRIO1H, hviprio);
 		break;
 	case 2:
-		csr_write(CSR_HVIPRIO2, hviprio);
+		ncsr_write(CSR_HVIPRIO2, hviprio);
 		break;
 	case 3:
-		csr_write(CSR_HVIPRIO2H, hviprio);
+		ncsr_write(CSR_HVIPRIO2H, hviprio);
 		break;
 #endif
 	default:
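
Note: the two switch statements above (in aia_get_iprio8() and aia_set_iprio8()) keep their structure; only the accessors move to ncsr_read()/ncsr_write(). The selection logic treats the hviprio CSRs as consecutive XLEN-bit words of packed 8-bit priorities: bitpos / BITS_PER_LONG picks the CSR (with the *H high halves filling in the odd words on 32-bit), and the low bits of bitpos locate the priority byte within it. A small sketch of that extraction, as a hypothetical standalone helper:

/* Sketch, hypothetical helper: pull one 8-bit priority out of an
 * XLEN-bit hviprio word once the right CSR has been read. */
static u8 example_extract_iprio8(unsigned long word, unsigned int bitpos)
{
	return (word >> (bitpos % BITS_PER_LONG)) & 0xff;
}
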
@@ -377,7 +406,7 @@ int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
 		return KVM_INSN_ILLEGAL_TRAP;
 
 	/* First try to emulate in kernel space */
-	isel = csr_read(CSR_VSISELECT) & ISELECT_MASK;
+	isel = ncsr_read(CSR_VSISELECT) & ISELECT_MASK;
 	if (isel >= ISELECT_IPRIO0 && isel <= ISELECT_IPRIO15)
 		return aia_rmw_iprio(vcpu, isel, val, new_val, wr_mask);
 	else if (isel >= IMSIC_FIRST && isel <= IMSIC_LAST &&