@@ -90,6 +90,7 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *cntx;
+	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
 
 	/* Mark this VCPU never ran */
 	vcpu->arch.ran_atleast_once = false;
@@ -106,6 +107,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	cntx->hstatus |= HSTATUS_SPVP;
 	cntx->hstatus |= HSTATUS_SPV;
 
+	/* By default, make CY, TM, and IR counters accessible in VU mode */
+	reset_csr->scounteren = 0x7;
+
 	/* Setup VCPU timer */
 	kvm_riscv_vcpu_timer_init(vcpu);
 
@@ -699,6 +703,20 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
 	csr_write(CSR_HVIP, csr->hvip);
 }
 
+/*
+ * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
+ * the vCPU is running.
+ *
+ * This must be noinstr as instrumentation may make use of RCU, and this is not
+ * safe during the EQS.
+ */
+static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+{
+	guest_state_enter_irqoff();
+	__kvm_riscv_switch_to(&vcpu->arch);
+	guest_state_exit_irqoff();
+}
+
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
 	int ret;
@@ -790,9 +808,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 			continue;
 		}
 
-		guest_enter_irqoff();
+		guest_timing_enter_irqoff();
 
-		__kvm_riscv_switch_to(&vcpu->arch);
+		kvm_riscv_vcpu_enter_exit(vcpu);
 
 		vcpu->mode = OUTSIDE_GUEST_MODE;
 		vcpu->stat.exits++;
@@ -812,25 +830,21 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		kvm_riscv_vcpu_sync_interrupts(vcpu);
 
 		/*
-		 * We may have taken a host interrupt in VS/VU-mode (i.e.
-		 * while executing the guest). This interrupt is still
-		 * pending, as we haven't serviced it yet!
+		 * We must ensure that any pending interrupts are taken before
+		 * we exit guest timing so that timer ticks are accounted as
+		 * guest time. Transiently unmask interrupts so that any
+		 * pending interrupts are taken.
 		 *
-		 * We're now back in HS-mode with interrupts disabled
-		 * so enabling the interrupts now will have the effect
-		 * of taking the interrupt again, in HS-mode this time.
+		 * There's no barrier which ensures that pending interrupts are
+		 * recognised, so we just hope that the CPU takes any pending
+		 * interrupts between the enable and disable.
 		 */
 		local_irq_enable();
+		local_irq_disable();
 
-		/*
-		 * We do local_irq_enable() before calling guest_exit() so
-		 * that if a timer interrupt hits while running the guest
-		 * we account that tick as being spent in the guest. We
-		 * enable preemption after calling guest_exit() so that if
-		 * we get preempted we make sure ticks after that is not
-		 * counted as guest time.
-		 */
-		guest_exit();
+		guest_timing_exit_irqoff();
+
+		local_irq_enable();
 
 		preempt_enable();
 
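
For reference, a condensed sketch (not part of the patch) of the entry/exit ordering these hunks establish inside kvm_arch_vcpu_ioctl_run(); the helper name vcpu_run_once_sketch() is invented for illustration, and the run loop, request handling, and trap/exit processing around this sequence are elided.

/*
 * Illustrative sketch only: the helper name is hypothetical and the
 * surrounding run loop and exit handling from vcpu.c are omitted.
 */
static void vcpu_run_once_sketch(struct kvm_vcpu *vcpu)
{
	guest_timing_enter_irqoff();		/* guest time accounting starts here */

	kvm_riscv_vcpu_enter_exit(vcpu);	/* EQS entry, world switch, EQS exit */

	vcpu->mode = OUTSIDE_GUEST_MODE;
	vcpu->stat.exits++;

	kvm_riscv_vcpu_sync_interrupts(vcpu);

	/* briefly unmask IRQs so pending ticks are charged to the guest */
	local_irq_enable();
	local_irq_disable();

	guest_timing_exit_irqoff();		/* guest time accounting ends here */

	local_irq_enable();
	/* preempt_enable() and exit handling follow in the real function */
}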