@@ -17,8 +17,8 @@
 #include <linux/sched/signal.h>
 #include <linux/fs.h>
 #include <linux/kvm_host.h>
-#include <asm/csr.h>
 #include <asm/cacheflush.h>
+#include <asm/kvm_nacl.h>
 #include <asm/kvm_vcpu_vector.h>
 
 #define CREATE_TRACE_POINTS
@@ -368,10 +368,10 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
 
 	/* Read current HVIP and VSIE CSRs */
-	csr->vsie = csr_read(CSR_VSIE);
+	csr->vsie = ncsr_read(CSR_VSIE);
 
 	/* Sync-up HVIP.VSSIP bit changes does by Guest */
-	hvip = csr_read(CSR_HVIP);
+	hvip = ncsr_read(CSR_HVIP);
 	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
 		if (hvip & (1UL << IRQ_VS_SOFT)) {
 			if (!test_and_set_bit(IRQ_VS_SOFT,
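Note on the helpers introduced above: ncsr_read() and ncsr_write() are not defined in this file; they come from the newly included <asm/kvm_nacl.h>. Judging from the open-coded fallback visible in kvm_arch_vcpu_load()/kvm_arch_vcpu_put() further down, such a wrapper plausibly looks roughly like the sketch below. The names and exact shape here are assumptions for illustration, not the actual header contents.

/* Sketch only: use the NACL shared-memory copy of a CSR when nested
 * acceleration is available, otherwise access the CSR directly. */
#define ncsr_read_sketch(__c)						\
({									\
	unsigned long __v;						\
	if (kvm_riscv_nacl_available())					\
		__v = nacl_csr_read(nacl_shmem(), __c);			\
	else								\
		__v = csr_read(__c);					\
	__v;								\
})

#define ncsr_write_sketch(__c, __v)					\
do {									\
	if (kvm_riscv_nacl_available())					\
		nacl_csr_write(nacl_shmem(), __c, __v);			\
	else								\
		csr_write(__c, __v);					\
} while (0)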
@@ -568,26 +568,49 @@ static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	void *nsh;
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
 	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
 
-	csr_write(CSR_VSSTATUS, csr->vsstatus);
-	csr_write(CSR_VSIE, csr->vsie);
-	csr_write(CSR_VSTVEC, csr->vstvec);
-	csr_write(CSR_VSSCRATCH, csr->vsscratch);
-	csr_write(CSR_VSEPC, csr->vsepc);
-	csr_write(CSR_VSCAUSE, csr->vscause);
-	csr_write(CSR_VSTVAL, csr->vstval);
-	csr_write(CSR_HEDELEG, cfg->hedeleg);
-	csr_write(CSR_HVIP, csr->hvip);
-	csr_write(CSR_VSATP, csr->vsatp);
-	csr_write(CSR_HENVCFG, cfg->henvcfg);
-	if (IS_ENABLED(CONFIG_32BIT))
-		csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
-	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
-		csr_write(CSR_HSTATEEN0, cfg->hstateen0);
+	if (kvm_riscv_nacl_sync_csr_available()) {
+		nsh = nacl_shmem();
+		nacl_csr_write(nsh, CSR_VSSTATUS, csr->vsstatus);
+		nacl_csr_write(nsh, CSR_VSIE, csr->vsie);
+		nacl_csr_write(nsh, CSR_VSTVEC, csr->vstvec);
+		nacl_csr_write(nsh, CSR_VSSCRATCH, csr->vsscratch);
+		nacl_csr_write(nsh, CSR_VSEPC, csr->vsepc);
+		nacl_csr_write(nsh, CSR_VSCAUSE, csr->vscause);
+		nacl_csr_write(nsh, CSR_VSTVAL, csr->vstval);
+		nacl_csr_write(nsh, CSR_HEDELEG, cfg->hedeleg);
+		nacl_csr_write(nsh, CSR_HVIP, csr->hvip);
+		nacl_csr_write(nsh, CSR_VSATP, csr->vsatp);
+		nacl_csr_write(nsh, CSR_HENVCFG, cfg->henvcfg);
+		if (IS_ENABLED(CONFIG_32BIT))
+			nacl_csr_write(nsh, CSR_HENVCFGH, cfg->henvcfg >> 32);
+		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+			nacl_csr_write(nsh, CSR_HSTATEEN0, cfg->hstateen0);
+			if (IS_ENABLED(CONFIG_32BIT))
+				nacl_csr_write(nsh, CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+		}
+	} else {
+		csr_write(CSR_VSSTATUS, csr->vsstatus);
+		csr_write(CSR_VSIE, csr->vsie);
+		csr_write(CSR_VSTVEC, csr->vstvec);
+		csr_write(CSR_VSSCRATCH, csr->vsscratch);
+		csr_write(CSR_VSEPC, csr->vsepc);
+		csr_write(CSR_VSCAUSE, csr->vscause);
+		csr_write(CSR_VSTVAL, csr->vstval);
+		csr_write(CSR_HEDELEG, cfg->hedeleg);
+		csr_write(CSR_HVIP, csr->hvip);
+		csr_write(CSR_VSATP, csr->vsatp);
+		csr_write(CSR_HENVCFG, cfg->henvcfg);
 		if (IS_ENABLED(CONFIG_32BIT))
-			csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+			csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
+		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+			csr_write(CSR_HSTATEEN0, cfg->hstateen0);
+			if (IS_ENABLED(CONFIG_32BIT))
+				csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+		}
 	}
 
 	kvm_riscv_gstage_update_hgatp(vcpu);
@@ -610,6 +633,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	void *nsh;
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
 
 	vcpu->cpu = -1;
@@ -625,15 +649,28 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 					 vcpu->arch.isa);
 	kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context);
 
-	csr->vsstatus = csr_read(CSR_VSSTATUS);
-	csr->vsie = csr_read(CSR_VSIE);
-	csr->vstvec = csr_read(CSR_VSTVEC);
-	csr->vsscratch = csr_read(CSR_VSSCRATCH);
-	csr->vsepc = csr_read(CSR_VSEPC);
-	csr->vscause = csr_read(CSR_VSCAUSE);
-	csr->vstval = csr_read(CSR_VSTVAL);
-	csr->hvip = csr_read(CSR_HVIP);
-	csr->vsatp = csr_read(CSR_VSATP);
+	if (kvm_riscv_nacl_available()) {
+		nsh = nacl_shmem();
+		csr->vsstatus = nacl_csr_read(nsh, CSR_VSSTATUS);
+		csr->vsie = nacl_csr_read(nsh, CSR_VSIE);
+		csr->vstvec = nacl_csr_read(nsh, CSR_VSTVEC);
+		csr->vsscratch = nacl_csr_read(nsh, CSR_VSSCRATCH);
+		csr->vsepc = nacl_csr_read(nsh, CSR_VSEPC);
+		csr->vscause = nacl_csr_read(nsh, CSR_VSCAUSE);
+		csr->vstval = nacl_csr_read(nsh, CSR_VSTVAL);
+		csr->hvip = nacl_csr_read(nsh, CSR_HVIP);
+		csr->vsatp = nacl_csr_read(nsh, CSR_VSATP);
+	} else {
+		csr->vsstatus = csr_read(CSR_VSSTATUS);
+		csr->vsie = csr_read(CSR_VSIE);
+		csr->vstvec = csr_read(CSR_VSTVEC);
+		csr->vsscratch = csr_read(CSR_VSSCRATCH);
+		csr->vsepc = csr_read(CSR_VSEPC);
+		csr->vscause = csr_read(CSR_VSCAUSE);
+		csr->vstval = csr_read(CSR_VSTVAL);
+		csr->hvip = csr_read(CSR_HVIP);
+		csr->vsatp = csr_read(CSR_VSATP);
+	}
 }
 
 static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
@@ -688,7 +725,7 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
 
-	csr_write(CSR_HVIP, csr->hvip);
+	ncsr_write(CSR_HVIP, csr->hvip);
 	kvm_riscv_vcpu_aia_update_hvip(vcpu);
 }
 
@@ -735,7 +772,9 @@ static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 	kvm_riscv_vcpu_swap_in_guest_state(vcpu);
 	guest_state_enter_irqoff();
 
-	hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);
+	hcntx->hstatus = ncsr_swap(CSR_HSTATUS, gcntx->hstatus);
+
+	nsync_csr(-1UL);
 
 	__kvm_riscv_switch_to(&vcpu->arch);
 
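The nsync_csr(-1UL) call added before __kvm_riscv_switch_to() also comes from <asm/kvm_nacl.h>. The apparent intent, consistent with the staged nacl_csr_write() calls in kvm_arch_vcpu_load(), is to synchronize the staged CSR state between the NACL shared-memory scratch area and the real CSRs in one batch before entering the guest, with -1UL acting as an "all CSRs" request, instead of trapping to the host hypervisor once per CSR access. The snippet below is only a toy model of that staging idea; the structure and helper names are invented for illustration and do not reflect the real SBI NACL shared-memory layout.

/* Toy model only: stage CSR writes in shared memory and mark them dirty,
 * so a later "sync everything" call can flush them to hardware in one go. */
struct nacl_scratch_model {
	unsigned long csr[1024];		/* staged CSR values */
	DECLARE_BITMAP(dirty, 1024);		/* entries still to be synced */
};

static void model_csr_write(struct nacl_scratch_model *s,
			    unsigned int idx, unsigned long val)
{
	s->csr[idx] = val;		/* cheap memory store, no trap */
	set_bit(idx, s->dirty);		/* flushed later by a sync call */
}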
@@ -870,8 +909,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		trap.sepc = vcpu->arch.guest_context.sepc;
 		trap.scause = csr_read(CSR_SCAUSE);
 		trap.stval = csr_read(CSR_STVAL);
-		trap.htval = csr_read(CSR_HTVAL);
-		trap.htinst = csr_read(CSR_HTINST);
+		trap.htval = ncsr_read(CSR_HTVAL);
+		trap.htinst = ncsr_read(CSR_HTINST);
 
 		/* Syncup interrupts state with HW */
 		kvm_riscv_vcpu_sync_interrupts(vcpu);