Commit dab5560

RISC-V: KVM: Use nacl_csr_xyz() for accessing AIA CSRs
When running under some other hypervisor, prefer nacl_csr_xyz() for
accessing AIA CSRs in the run-loop. This makes CSR access faster
whenever SBI nested acceleration is available.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Link: https://lore.kernel.org/r/20241020194734.58686-11-apatel@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>

1 parent e28e6b6 commit dab5560
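
Note (not part of the commit): the diff below relies on the ncsr_read()/ncsr_write() helpers pulled in via <asm/kvm_nacl.h>. As a rough orientation, the sketch below approximates the fallback pattern such a wrapper is expected to implement; it is an illustration under that assumption, not the kernel's actual definition, and the macro name ncsr_write_sketch is hypothetical. The helpers it calls (kvm_riscv_nacl_sync_csr_available(), nacl_shmem(), nacl_csr_write(), csr_write()) are the same ones used in the diff.

/*
 * Hedged sketch, not the kernel's actual definition: roughly what a
 * per-access wrapper such as ncsr_write() is expected to do.  The name
 * ncsr_write_sketch is hypothetical.
 */
#define ncsr_write_sketch(csr, val)					\
do {									\
	if (kvm_riscv_nacl_sync_csr_available())			\
		/* Stage the value in the SBI NACL shared memory so	\
		 * it can be synced later without trapping per CSR. */	\
		nacl_csr_write(nacl_shmem(), csr, val);			\
	else								\
		/* No nested acceleration: plain CSR write, which	\
		 * traps to the host when running virtualized. */	\
		csr_write(csr, val);					\
} while (0)

In the run-loop paths changed below, kvm_riscv_vcpu_aia_load() and kvm_riscv_vcpu_aia_put() instead check availability once, cache nacl_shmem() in a local nsh, and batch several nacl_csr_write()/nacl_csr_read() calls against it, amortizing the check and the shared-memory lookup across all AIA CSRs.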

File tree: 1 file changed (+63, -34 lines)

arch/riscv/kvm/aia.c

Lines changed: 63 additions & 34 deletions
--- a/arch/riscv/kvm/aia.c
+++ b/arch/riscv/kvm/aia.c
@@ -16,6 +16,7 @@
 #include <linux/percpu.h>
 #include <linux/spinlock.h>
 #include <asm/cpufeature.h>
+#include <asm/kvm_nacl.h>
 
 struct aia_hgei_control {
 	raw_spinlock_t lock;
@@ -88,7 +89,7 @@ void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
 	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
 
 	if (kvm_riscv_aia_available())
-		csr->vsieh = csr_read(CSR_VSIEH);
+		csr->vsieh = ncsr_read(CSR_VSIEH);
 }
 #endif
 
@@ -115,7 +116,7 @@ bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
 
 	hgei = aia_find_hgei(vcpu);
 	if (hgei > 0)
-		return !!(csr_read(CSR_HGEIP) & BIT(hgei));
+		return !!(ncsr_read(CSR_HGEIP) & BIT(hgei));
 
 	return false;
 }
@@ -128,45 +129,73 @@ void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
 		return;
 
 #ifdef CONFIG_32BIT
-	csr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
+	ncsr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
 #endif
-	csr_write(CSR_HVICTL, aia_hvictl_value(!!(csr->hvip & BIT(IRQ_VS_EXT))));
+	ncsr_write(CSR_HVICTL, aia_hvictl_value(!!(csr->hvip & BIT(IRQ_VS_EXT))));
 }
 
 void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
+	void *nsh;
 
 	if (!kvm_riscv_aia_available())
 		return;
 
-	csr_write(CSR_VSISELECT, csr->vsiselect);
-	csr_write(CSR_HVIPRIO1, csr->hviprio1);
-	csr_write(CSR_HVIPRIO2, csr->hviprio2);
+	if (kvm_riscv_nacl_sync_csr_available()) {
+		nsh = nacl_shmem();
+		nacl_csr_write(nsh, CSR_VSISELECT, csr->vsiselect);
+		nacl_csr_write(nsh, CSR_HVIPRIO1, csr->hviprio1);
+		nacl_csr_write(nsh, CSR_HVIPRIO2, csr->hviprio2);
+#ifdef CONFIG_32BIT
+		nacl_csr_write(nsh, CSR_VSIEH, csr->vsieh);
+		nacl_csr_write(nsh, CSR_HVIPH, csr->hviph);
+		nacl_csr_write(nsh, CSR_HVIPRIO1H, csr->hviprio1h);
+		nacl_csr_write(nsh, CSR_HVIPRIO2H, csr->hviprio2h);
+#endif
+	} else {
+		csr_write(CSR_VSISELECT, csr->vsiselect);
+		csr_write(CSR_HVIPRIO1, csr->hviprio1);
+		csr_write(CSR_HVIPRIO2, csr->hviprio2);
 #ifdef CONFIG_32BIT
-	csr_write(CSR_VSIEH, csr->vsieh);
-	csr_write(CSR_HVIPH, csr->hviph);
-	csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
-	csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
+		csr_write(CSR_VSIEH, csr->vsieh);
+		csr_write(CSR_HVIPH, csr->hviph);
+		csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
+		csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
 #endif
+	}
 }
 
 void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
+	void *nsh;
 
 	if (!kvm_riscv_aia_available())
 		return;
 
-	csr->vsiselect = csr_read(CSR_VSISELECT);
-	csr->hviprio1 = csr_read(CSR_HVIPRIO1);
-	csr->hviprio2 = csr_read(CSR_HVIPRIO2);
+	if (kvm_riscv_nacl_available()) {
+		nsh = nacl_shmem();
+		csr->vsiselect = nacl_csr_read(nsh, CSR_VSISELECT);
+		csr->hviprio1 = nacl_csr_read(nsh, CSR_HVIPRIO1);
+		csr->hviprio2 = nacl_csr_read(nsh, CSR_HVIPRIO2);
 #ifdef CONFIG_32BIT
-	csr->vsieh = csr_read(CSR_VSIEH);
-	csr->hviph = csr_read(CSR_HVIPH);
-	csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
-	csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
+		csr->vsieh = nacl_csr_read(nsh, CSR_VSIEH);
+		csr->hviph = nacl_csr_read(nsh, CSR_HVIPH);
+		csr->hviprio1h = nacl_csr_read(nsh, CSR_HVIPRIO1H);
+		csr->hviprio2h = nacl_csr_read(nsh, CSR_HVIPRIO2H);
 #endif
+	} else {
+		csr->vsiselect = csr_read(CSR_VSISELECT);
+		csr->hviprio1 = csr_read(CSR_HVIPRIO1);
+		csr->hviprio2 = csr_read(CSR_HVIPRIO2);
+#ifdef CONFIG_32BIT
+		csr->vsieh = csr_read(CSR_VSIEH);
+		csr->hviph = csr_read(CSR_HVIPH);
+		csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
+		csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
+#endif
+	}
 }
 
 int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
@@ -250,20 +279,20 @@ static u8 aia_get_iprio8(struct kvm_vcpu *vcpu, unsigned int irq)
 
 	switch (bitpos / BITS_PER_LONG) {
 	case 0:
-		hviprio = csr_read(CSR_HVIPRIO1);
+		hviprio = ncsr_read(CSR_HVIPRIO1);
 		break;
 	case 1:
 #ifndef CONFIG_32BIT
-		hviprio = csr_read(CSR_HVIPRIO2);
+		hviprio = ncsr_read(CSR_HVIPRIO2);
 		break;
 #else
-		hviprio = csr_read(CSR_HVIPRIO1H);
+		hviprio = ncsr_read(CSR_HVIPRIO1H);
 		break;
 	case 2:
-		hviprio = csr_read(CSR_HVIPRIO2);
+		hviprio = ncsr_read(CSR_HVIPRIO2);
 		break;
 	case 3:
-		hviprio = csr_read(CSR_HVIPRIO2H);
+		hviprio = ncsr_read(CSR_HVIPRIO2H);
 		break;
 #endif
 	default:
@@ -283,20 +312,20 @@ static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
 
 	switch (bitpos / BITS_PER_LONG) {
 	case 0:
-		hviprio = csr_read(CSR_HVIPRIO1);
+		hviprio = ncsr_read(CSR_HVIPRIO1);
 		break;
 	case 1:
 #ifndef CONFIG_32BIT
-		hviprio = csr_read(CSR_HVIPRIO2);
+		hviprio = ncsr_read(CSR_HVIPRIO2);
 		break;
 #else
-		hviprio = csr_read(CSR_HVIPRIO1H);
+		hviprio = ncsr_read(CSR_HVIPRIO1H);
 		break;
 	case 2:
-		hviprio = csr_read(CSR_HVIPRIO2);
+		hviprio = ncsr_read(CSR_HVIPRIO2);
 		break;
 	case 3:
-		hviprio = csr_read(CSR_HVIPRIO2H);
+		hviprio = ncsr_read(CSR_HVIPRIO2H);
 		break;
 #endif
 	default:
@@ -308,20 +337,20 @@ static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
 
 	switch (bitpos / BITS_PER_LONG) {
 	case 0:
-		csr_write(CSR_HVIPRIO1, hviprio);
+		ncsr_write(CSR_HVIPRIO1, hviprio);
 		break;
 	case 1:
 #ifndef CONFIG_32BIT
-		csr_write(CSR_HVIPRIO2, hviprio);
+		ncsr_write(CSR_HVIPRIO2, hviprio);
 		break;
 #else
-		csr_write(CSR_HVIPRIO1H, hviprio);
+		ncsr_write(CSR_HVIPRIO1H, hviprio);
 		break;
 	case 2:
-		csr_write(CSR_HVIPRIO2, hviprio);
+		ncsr_write(CSR_HVIPRIO2, hviprio);
 		break;
 	case 3:
-		csr_write(CSR_HVIPRIO2H, hviprio);
+		ncsr_write(CSR_HVIPRIO2H, hviprio);
 		break;
 #endif
 	default:
@@ -377,7 +406,7 @@ int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
 		return KVM_INSN_ILLEGAL_TRAP;
 
 	/* First try to emulate in kernel space */
-	isel = csr_read(CSR_VSISELECT) & ISELECT_MASK;
+	isel = ncsr_read(CSR_VSISELECT) & ISELECT_MASK;
 	if (isel >= ISELECT_IPRIO0 && isel <= ISELECT_IPRIO15)
 		return aia_rmw_iprio(vcpu, isel, val, new_val, wr_mask);
 	else if (isel >= IMSIC_FIRST && isel <= IMSIC_LAST &&
