Commit e28e6b6

RISC-V: KVM: Use nacl_csr_xyz() for accessing H-extension CSRs

When running under some other hypervisor, prefer nacl_csr_xyz() for
accessing H-extension CSRs in the run-loop. This makes CSR access
faster whenever SBI nested acceleration is available.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Link: https://lore.kernel.org/r/20241020194734.58686-10-apatel@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>
Parent: d466c19

3 files changed, 87 insertions(+), 48 deletions(-)
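
The ncsr_*() and nacl_csr_*() accessors used throughout the diffs below come
from <asm/kvm_nacl.h>, which is added earlier in this patch series rather than
by this commit. As a rough mental model only (a minimal sketch under that
assumption, not the actual header contents), a write accessor can stage the
value in the per-hart SBI NACL shared memory when nested acceleration is
available and fall back to a plain CSR instruction otherwise:

/*
 * Illustrative sketch only -- the real helpers are provided by
 * <asm/kvm_nacl.h> (earlier in this series) and may differ in detail.
 * kvm_riscv_nacl_sync_csr_available(), kvm_riscv_nacl_available(),
 * nacl_shmem(), nacl_csr_read() and nacl_csr_write() are the interfaces
 * actually used by the code in this commit.
 */
#define ncsr_write(__c, __v)                                     \
do {                                                             \
        if (kvm_riscv_nacl_sync_csr_available())                 \
                /* Stage in NACL shared memory; no trap here. */ \
                nacl_csr_write(nacl_shmem(), __c, __v);          \
        else                                                     \
                /* No nested acceleration: direct csrw. */       \
                csr_write(__c, __v);                             \
} while (0)

#define ncsr_read(__c)                                           \
({                                                               \
        unsigned long __r;                                       \
        if (kvm_riscv_nacl_available())                          \
                /* Value was synced into shared memory. */       \
                __r = nacl_csr_read(nacl_shmem(), __c);          \
        else                                                     \
                __r = csr_read(__c);                             \
        __r;                                                     \
})

The nsync_csr(-1UL) call added to kvm_riscv_vcpu_enter_exit() below appears to
synchronize all staged CSRs in one go before switching to the guest; batching
CSR state this way is what makes the run-loop cheaper than trapping to the
outer hypervisor on every csrr/csrw when running nested.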

arch/riscv/kvm/mmu.c (2 additions, 2 deletions)

@@ -15,7 +15,7 @@
 #include <linux/vmalloc.h>
 #include <linux/kvm_host.h>
 #include <linux/sched/signal.h>
-#include <asm/csr.h>
+#include <asm/kvm_nacl.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
@@ -732,7 +732,7 @@ void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu)
         hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) & HGATP_VMID;
         hgatp |= (k->pgd_phys >> PAGE_SHIFT) & HGATP_PPN;
 
-        csr_write(CSR_HGATP, hgatp);
+        ncsr_write(CSR_HGATP, hgatp);
 
         if (!kvm_riscv_gstage_vmid_bits())
                 kvm_riscv_local_hfence_gvma_all();
arch/riscv/kvm/vcpu.c (71 additions, 32 deletions)

@@ -17,8 +17,8 @@
 #include <linux/sched/signal.h>
 #include <linux/fs.h>
 #include <linux/kvm_host.h>
-#include <asm/csr.h>
 #include <asm/cacheflush.h>
+#include <asm/kvm_nacl.h>
 #include <asm/kvm_vcpu_vector.h>
 
 #define CREATE_TRACE_POINTS
@@ -368,10 +368,10 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
         struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
 
         /* Read current HVIP and VSIE CSRs */
-        csr->vsie = csr_read(CSR_VSIE);
+        csr->vsie = ncsr_read(CSR_VSIE);
 
         /* Sync-up HVIP.VSSIP bit changes does by Guest */
-        hvip = csr_read(CSR_HVIP);
+        hvip = ncsr_read(CSR_HVIP);
         if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
                 if (hvip & (1UL << IRQ_VS_SOFT)) {
                         if (!test_and_set_bit(IRQ_VS_SOFT,
@@ -568,26 +568,49 @@ static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+        void *nsh;
         struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
         struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
 
-        csr_write(CSR_VSSTATUS, csr->vsstatus);
-        csr_write(CSR_VSIE, csr->vsie);
-        csr_write(CSR_VSTVEC, csr->vstvec);
-        csr_write(CSR_VSSCRATCH, csr->vsscratch);
-        csr_write(CSR_VSEPC, csr->vsepc);
-        csr_write(CSR_VSCAUSE, csr->vscause);
-        csr_write(CSR_VSTVAL, csr->vstval);
-        csr_write(CSR_HEDELEG, cfg->hedeleg);
-        csr_write(CSR_HVIP, csr->hvip);
-        csr_write(CSR_VSATP, csr->vsatp);
-        csr_write(CSR_HENVCFG, cfg->henvcfg);
-        if (IS_ENABLED(CONFIG_32BIT))
-                csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
-        if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
-                csr_write(CSR_HSTATEEN0, cfg->hstateen0);
+        if (kvm_riscv_nacl_sync_csr_available()) {
+                nsh = nacl_shmem();
+                nacl_csr_write(nsh, CSR_VSSTATUS, csr->vsstatus);
+                nacl_csr_write(nsh, CSR_VSIE, csr->vsie);
+                nacl_csr_write(nsh, CSR_VSTVEC, csr->vstvec);
+                nacl_csr_write(nsh, CSR_VSSCRATCH, csr->vsscratch);
+                nacl_csr_write(nsh, CSR_VSEPC, csr->vsepc);
+                nacl_csr_write(nsh, CSR_VSCAUSE, csr->vscause);
+                nacl_csr_write(nsh, CSR_VSTVAL, csr->vstval);
+                nacl_csr_write(nsh, CSR_HEDELEG, cfg->hedeleg);
+                nacl_csr_write(nsh, CSR_HVIP, csr->hvip);
+                nacl_csr_write(nsh, CSR_VSATP, csr->vsatp);
+                nacl_csr_write(nsh, CSR_HENVCFG, cfg->henvcfg);
+                if (IS_ENABLED(CONFIG_32BIT))
+                        nacl_csr_write(nsh, CSR_HENVCFGH, cfg->henvcfg >> 32);
+                if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+                        nacl_csr_write(nsh, CSR_HSTATEEN0, cfg->hstateen0);
+                        if (IS_ENABLED(CONFIG_32BIT))
+                                nacl_csr_write(nsh, CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+                }
+        } else {
+                csr_write(CSR_VSSTATUS, csr->vsstatus);
+                csr_write(CSR_VSIE, csr->vsie);
+                csr_write(CSR_VSTVEC, csr->vstvec);
+                csr_write(CSR_VSSCRATCH, csr->vsscratch);
+                csr_write(CSR_VSEPC, csr->vsepc);
+                csr_write(CSR_VSCAUSE, csr->vscause);
+                csr_write(CSR_VSTVAL, csr->vstval);
+                csr_write(CSR_HEDELEG, cfg->hedeleg);
+                csr_write(CSR_HVIP, csr->hvip);
+                csr_write(CSR_VSATP, csr->vsatp);
+                csr_write(CSR_HENVCFG, cfg->henvcfg);
                 if (IS_ENABLED(CONFIG_32BIT))
-                        csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+                        csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
+                if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+                        csr_write(CSR_HSTATEEN0, cfg->hstateen0);
+                        if (IS_ENABLED(CONFIG_32BIT))
+                                csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+                }
         }
 
         kvm_riscv_gstage_update_hgatp(vcpu);
@@ -610,6 +633,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+        void *nsh;
         struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
 
         vcpu->cpu = -1;
@@ -625,15 +649,28 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
                                              vcpu->arch.isa);
         kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context);
 
-        csr->vsstatus = csr_read(CSR_VSSTATUS);
-        csr->vsie = csr_read(CSR_VSIE);
-        csr->vstvec = csr_read(CSR_VSTVEC);
-        csr->vsscratch = csr_read(CSR_VSSCRATCH);
-        csr->vsepc = csr_read(CSR_VSEPC);
-        csr->vscause = csr_read(CSR_VSCAUSE);
-        csr->vstval = csr_read(CSR_VSTVAL);
-        csr->hvip = csr_read(CSR_HVIP);
-        csr->vsatp = csr_read(CSR_VSATP);
+        if (kvm_riscv_nacl_available()) {
+                nsh = nacl_shmem();
+                csr->vsstatus = nacl_csr_read(nsh, CSR_VSSTATUS);
+                csr->vsie = nacl_csr_read(nsh, CSR_VSIE);
+                csr->vstvec = nacl_csr_read(nsh, CSR_VSTVEC);
+                csr->vsscratch = nacl_csr_read(nsh, CSR_VSSCRATCH);
+                csr->vsepc = nacl_csr_read(nsh, CSR_VSEPC);
+                csr->vscause = nacl_csr_read(nsh, CSR_VSCAUSE);
+                csr->vstval = nacl_csr_read(nsh, CSR_VSTVAL);
+                csr->hvip = nacl_csr_read(nsh, CSR_HVIP);
+                csr->vsatp = nacl_csr_read(nsh, CSR_VSATP);
+        } else {
+                csr->vsstatus = csr_read(CSR_VSSTATUS);
+                csr->vsie = csr_read(CSR_VSIE);
+                csr->vstvec = csr_read(CSR_VSTVEC);
+                csr->vsscratch = csr_read(CSR_VSSCRATCH);
+                csr->vsepc = csr_read(CSR_VSEPC);
+                csr->vscause = csr_read(CSR_VSCAUSE);
+                csr->vstval = csr_read(CSR_VSTVAL);
+                csr->hvip = csr_read(CSR_HVIP);
+                csr->vsatp = csr_read(CSR_VSATP);
+        }
 }
 
 static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
@@ -688,7 +725,7 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
 {
         struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
 
-        csr_write(CSR_HVIP, csr->hvip);
+        ncsr_write(CSR_HVIP, csr->hvip);
         kvm_riscv_vcpu_aia_update_hvip(vcpu);
 }
 
@@ -735,7 +772,9 @@ static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
         kvm_riscv_vcpu_swap_in_guest_state(vcpu);
         guest_state_enter_irqoff();
 
-        hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);
+        hcntx->hstatus = ncsr_swap(CSR_HSTATUS, gcntx->hstatus);
+
+        nsync_csr(-1UL);
 
         __kvm_riscv_switch_to(&vcpu->arch);
 
@@ -870,8 +909,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 trap.sepc = vcpu->arch.guest_context.sepc;
                 trap.scause = csr_read(CSR_SCAUSE);
                 trap.stval = csr_read(CSR_STVAL);
-                trap.htval = csr_read(CSR_HTVAL);
-                trap.htinst = csr_read(CSR_HTINST);
+                trap.htval = ncsr_read(CSR_HTVAL);
+                trap.htinst = ncsr_read(CSR_HTINST);
 
                 /* Syncup interrupts state with HW */
                 kvm_riscv_vcpu_sync_interrupts(vcpu);
arch/riscv/kvm/vcpu_timer.c (14 additions, 14 deletions)

@@ -11,8 +11,8 @@
 #include <linux/kvm_host.h>
 #include <linux/uaccess.h>
 #include <clocksource/timer-riscv.h>
-#include <asm/csr.h>
 #include <asm/delay.h>
+#include <asm/kvm_nacl.h>
 #include <asm/kvm_vcpu_timer.h>
 
 static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
@@ -72,12 +72,12 @@ static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
 static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
 {
 #if defined(CONFIG_32BIT)
-                csr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
-                csr_write(CSR_VSTIMECMPH, ncycles >> 32);
+        ncsr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
+        ncsr_write(CSR_VSTIMECMPH, ncycles >> 32);
 #else
-                csr_write(CSR_VSTIMECMP, ncycles);
+        ncsr_write(CSR_VSTIMECMP, ncycles);
 #endif
-                return 0;
+        return 0;
 }
 
 static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
@@ -289,10 +289,10 @@ static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
         struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
 
 #if defined(CONFIG_32BIT)
-        csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
-        csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
+        ncsr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
+        ncsr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
 #else
-        csr_write(CSR_HTIMEDELTA, gt->time_delta);
+        ncsr_write(CSR_HTIMEDELTA, gt->time_delta);
 #endif
 }
 
@@ -306,10 +306,10 @@ void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
                 return;
 
 #if defined(CONFIG_32BIT)
-        csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
-        csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
+        ncsr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
+        ncsr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
 #else
-        csr_write(CSR_VSTIMECMP, t->next_cycles);
+        ncsr_write(CSR_VSTIMECMP, t->next_cycles);
 #endif
 
         /* timer should be enabled for the remaining operations */
@@ -327,10 +327,10 @@ void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
                 return;
 
 #if defined(CONFIG_32BIT)
-        t->next_cycles = csr_read(CSR_VSTIMECMP);
-        t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
+        t->next_cycles = ncsr_read(CSR_VSTIMECMP);
+        t->next_cycles |= (u64)ncsr_read(CSR_VSTIMECMPH) << 32;
 #else
-        t->next_cycles = csr_read(CSR_VSTIMECMP);
+        t->next_cycles = ncsr_read(CSR_VSTIMECMP);
 #endif
 }
