
Commit 201c8d4

Marc Zyngier authored and oupton committed
KVM: arm64: nv: Add Maintenance Interrupt emulation
Emulating the vGIC means emulating the dreaded Maintenance Interrupt.

This is a two-pronged problem:

- while running L2, getting an MI translates into an MI injected in the
  L1 based on the state of the HW.

- while running L1, we must accurately reflect the state of the MI line,
  based on the in-memory state.

The MI INTID is added to the distributor, as expected on any
virtualisation-capable implementation, and further patches will allow
its configuration.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250225172930.1850838-11-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
1 parent 4b1b97f commit 201c8d4
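For orientation, here is a minimal sketch of how the two prongs described in the commit message map onto the helpers this patch adds. It is illustrative only: the opaque struct and stub declarations stand in for the real KVM definitions, and on_host_maintenance_irq()/on_guest_entry_exit() are hypothetical names for the actual call sites (vgic_maintenance_handler() and kvm_vgic_flush_hwstate()/kvm_vgic_sync_hwstate() in the diff below).

/*
 * Illustrative sketch only: the stubs stand in for the real KVM/vGIC code.
 */
#include <stdbool.h>

struct kvm_vcpu;	/* opaque stand-in for the real vcpu structure */

/* Stubs for helpers that exist in KVM proper (see the diff below). */
bool vcpu_has_nv(struct kvm_vcpu *vcpu);
bool vgic_state_is_nested(struct kvm_vcpu *vcpu);
void vgic_v3_handle_nested_maint_irq(struct kvm_vcpu *vcpu);
void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu);

/* Prong 1: a HW MI fires while L2 runs; turn it into a pending L1 MI. */
static void on_host_maintenance_irq(struct kvm_vcpu *vcpu)
{
	if (vcpu && vgic_state_is_nested(vcpu))
		vgic_v3_handle_nested_maint_irq(vcpu);
}

/* Prong 2: when running L1 (NV enabled, not nested), recompute the MI line. */
static void on_guest_entry_exit(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_nv(vcpu) && !vgic_state_is_nested(vcpu))
		vgic_v3_nested_update_mi(vcpu);
}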

File tree: 6 files changed, +95 -0 lines changed


arch/arm64/kvm/arm.c

Lines changed: 6 additions & 0 deletions
@@ -819,6 +819,12 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 	if (ret)
 		return ret;
 
+	if (vcpu_has_nv(vcpu)) {
+		ret = kvm_vgic_vcpu_nv_init(vcpu);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * This needs to happen after any restriction has been applied
 	 * to the feature set.

arch/arm64/kvm/vgic/vgic-init.c

Lines changed: 29 additions & 0 deletions
@@ -198,6 +198,27 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
 	return 0;
 }
 
+/* Default GICv3 Maintenance Interrupt INTID, as per SBSA */
+#define DEFAULT_MI_INTID	25
+
+int kvm_vgic_vcpu_nv_init(struct kvm_vcpu *vcpu)
+{
+	int ret;
+
+	guard(mutex)(&vcpu->kvm->arch.config_lock);
+
+	/*
+	 * Matching the tradition established with the timers, provide
+	 * a default PPI for the maintenance interrupt. It makes
+	 * things easier to reason about.
+	 */
+	if (vcpu->kvm->arch.vgic.mi_intid == 0)
+		vcpu->kvm->arch.vgic.mi_intid = DEFAULT_MI_INTID;
+	ret = kvm_vgic_set_owner(vcpu, vcpu->kvm->arch.vgic.mi_intid, vcpu);
+
+	return ret;
+}
+
 static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -588,12 +609,20 @@ void kvm_vgic_cpu_down(void)
 
 static irqreturn_t vgic_maintenance_handler(int irq, void *data)
 {
+	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)data;
+
 	/*
 	 * We cannot rely on the vgic maintenance interrupt to be
 	 * delivered synchronously. This means we can only use it to
 	 * exit the VM, and we perform the handling of EOIed
 	 * interrupts on the exit path (see vgic_fold_lr_state).
+	 *
+	 * Of course, NV throws a wrench in this plan, and needs
+	 * something special.
 	 */
+	if (vcpu && vgic_state_is_nested(vcpu))
+		vgic_v3_handle_nested_maint_irq(vcpu);
+
 	return IRQ_HANDLED;
 }
 

arch/arm64/kvm/vgic/vgic-v3-nested.c

Lines changed: 45 additions & 0 deletions
@@ -73,6 +73,24 @@ static DEFINE_PER_CPU(struct shadow_if, shadow_if);
  * interrupt. The L0 active state will be cleared by the HW if the L1
  * interrupt was itself backed by a HW interrupt.
  *
+ * Maintenance Interrupt (MI) management:
+ *
+ * Since the L2 guest runs the vgic in its full glory, MIs get delivered and
+ * used as a handover point between L2 and L1.
+ *
+ * - on delivery of a MI to L0 while L2 is running: make the L1 MI pending,
+ *   and let it rip. This will initiate a vcpu_put() on L2, and allow L1 to
+ *   run and process the MI.
+ *
+ * - L1 MI is a fully virtual interrupt, not linked to the host's MI. Its
+ *   state must be computed at each entry/exit of the guest, much like we do
+ *   it for the PMU interrupt.
+ *
+ * - because most of the ICH_*_EL2 registers live in the VNCR page, the
+ *   quality of emulation is poor: L1 can setup the vgic so that an MI would
+ *   immediately fire, and not observe anything until the next exit. Trying
+ *   to read ICH_MISR_EL2 would do the trick, for example.
+ *
  * System register emulation:
  *
  * We get two classes of registers:
@@ -341,3 +359,30 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
 
 	shadow_if->lr_map = 0;
 }
+
+/*
+ * If we exit a L2 VM with a pending maintenance interrupt from the GIC,
+ * then we need to forward this to L1 so that it can re-sync the appropriate
+ * LRs and sample level triggered interrupts again.
+ */
+void vgic_v3_handle_nested_maint_irq(struct kvm_vcpu *vcpu)
+{
+	bool state = read_sysreg_s(SYS_ICH_MISR_EL2);
+
+	/* This will force a switch back to L1 if the level is high */
+	kvm_vgic_inject_irq(vcpu->kvm, vcpu,
+			    vcpu->kvm->arch.vgic.mi_intid, state, vcpu);
+
+	sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0);
+}
+
+void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu)
+{
+	bool level;
+
+	level = __vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_EL2_En;
+	if (level)
+		level &= vgic_v3_get_misr(vcpu);
+	kvm_vgic_inject_irq(vcpu->kvm, vcpu,
+			    vcpu->kvm->arch.vgic.mi_intid, level, vcpu);
+}

arch/arm64/kvm/vgic/vgic.c

Lines changed: 9 additions & 0 deletions
@@ -878,6 +878,9 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 		return;
 	}
 
+	if (vcpu_has_nv(vcpu))
+		vgic_v3_nested_update_mi(vcpu);
+
 	/* An empty ap_list_head implies used_lrs == 0 */
 	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
 		return;
@@ -921,6 +924,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	 *
 	 * - Otherwise, do exactly *NOTHING*. The guest state is
 	 *   already loaded, and we can carry on with running it.
+	 *
+	 * If we have NV, but are not in a nested state, compute the
+	 * maintenance interrupt state, as it may fire.
 	 */
 	if (vgic_state_is_nested(vcpu)) {
 		if (kvm_vgic_vcpu_pending_irq(vcpu))
@@ -929,6 +935,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 		return;
 	}
 
+	if (vcpu_has_nv(vcpu))
+		vgic_v3_nested_update_mi(vcpu);
+
 	/*
 	 * If there are no virtual interrupts active or pending for this
 	 * VCPU, then there is no work to do and we can bail out without

arch/arm64/kvm/vgic/vgic.h

Lines changed: 2 additions & 0 deletions
@@ -356,5 +356,7 @@ static inline bool kvm_has_gicv3(struct kvm *kvm)
 void vgic_v3_sync_nested(struct kvm_vcpu *vcpu);
 void vgic_v3_load_nested(struct kvm_vcpu *vcpu);
 void vgic_v3_put_nested(struct kvm_vcpu *vcpu);
+void vgic_v3_handle_nested_maint_irq(struct kvm_vcpu *vcpu);
+void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu);
 
 #endif

include/kvm/arm_vgic.h

Lines changed: 4 additions & 0 deletions
@@ -249,6 +249,9 @@ struct vgic_dist {
 
 	int nr_spis;
 
+	/* The GIC maintenance IRQ for nested hypervisors. */
+	u32 mi_intid;
+
 	/* base addresses in guest physical address space: */
 	gpa_t vgic_dist_base;	/* distributor */
 	union {
@@ -369,6 +372,7 @@ extern struct static_key_false vgic_v3_cpuif_trap;
 int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr);
 void kvm_vgic_early_init(struct kvm *kvm);
 int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
+int kvm_vgic_vcpu_nv_init(struct kvm_vcpu *vcpu);
 int kvm_vgic_create(struct kvm *kvm, u32 type);
 void kvm_vgic_destroy(struct kvm *kvm);
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
