Skip to content

Commit 7270cc9

Browse files
author
Marc Zyngier
committed
KVM: arm64: nv: Handle VNCR_EL2 invalidation from MMU notifiers
During an invalidation triggered by an MMU notifier, we need to make sure we can drop the *host* mapping that would have been translated by the stage-2 mapping being invalidated. For the moment, the invalidation is pretty brutal, as we nuke the full IPA range, and therefore any VNCR_EL2 mapping. At some point, we'll be more lightweight, and the code will be able to deal with something more targeted. Reviewed-by: Oliver Upton <oliver.upton@linux.dev> Link: https://lore.kernel.org/r/20250514103501.2225951-12-maz@kernel.org Signed-off-by: Marc Zyngier <maz@kernel.org>
1 parent 2a359e0 commit 7270cc9

File tree

1 file changed

+75
-0
lines changed

1 file changed

+75
-0
lines changed

arch/arm64/kvm/nested.c

Lines changed: 75 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -434,6 +434,30 @@ static unsigned int ttl_to_size(u8 ttl)
434434
return max_size;
435435
}
436436

437+
/*
 * Build a TLBI TTL-style hint from a page-shift and a walk level.
 *
 * The top two bits encode the translation granule (derived from the
 * stage-1 page shift), the bottom two bits the lookup level.  Only the
 * three architected granule sizes are acceptable; anything else is a
 * caller bug.
 */
static u8 pgshift_level_to_ttl(u16 shift, u8 level)
{
	u8 tg;

	if (shift == 12)
		tg = TLBI_TTL_TG_4K;
	else if (shift == 14)
		tg = TLBI_TTL_TG_16K;
	else if (shift == 16)
		tg = TLBI_TTL_TG_64K;
	else
		BUG();	/* no such granule size */

	/* TTL = TG[1:0]:LEVEL[1:0] */
	return (tg << 2) | (level & 3);
}
460+
437461
/*
438462
* Compute the equivalent of the TTL field by parsing the shadow PT. The
439463
* granule size is extracted from the cached VTCR_EL2.TG0 while the level is
@@ -784,6 +808,53 @@ int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2)
784808
return kvm_inject_nested_sync(vcpu, esr_el2);
785809
}
786810

811+
static void invalidate_vncr(struct vncr_tlb *vt)
812+
{
813+
vt->valid = false;
814+
if (vt->cpu != -1)
815+
clear_fixmap(vncr_fixmap(vt->cpu));
816+
}
817+
818+
/*
 * Invalidate any cached VNCR_EL2 pseudo-TLB entry whose backing stage-2
 * translation intersects the IPA range [start, end).
 *
 * Called from MMU-notifier-driven unmap/wp paths with the MMU lock held
 * for write.  Only relevant on NV2-capable guests.
 */
static void kvm_invalidate_vncr_ipa(struct kvm *kvm, u64 start, u64 end)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	lockdep_assert_held_write(&kvm->mmu_lock);

	if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
		return;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
		u64 ipa_start, ipa_end, ipa_size;

		/*
		 * Careful here: We end-up here from an MMU notifier,
		 * and this can race against a vcpu not being onlined
		 * yet, without the pseudo-TLB being allocated.
		 *
		 * Skip those, as they obviously don't participate in
		 * the invalidation at this stage.
		 */
		if (!vt)
			continue;

		if (!vt->valid)
			continue;

		ipa_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
							    vt->wr.level));
		/*
		 * Align the cached output address *down* to the base of
		 * the block it maps.  Note: '& (ipa_size - 1)' would
		 * compute the offset within the block instead of its
		 * base, making the overlap check below bogus.
		 */
		ipa_start = vt->wr.pa & ~(ipa_size - 1);
		ipa_end = ipa_start + ipa_size;

		/* No overlap between [ipa_start, ipa_end) and [start, end)? */
		if (ipa_end <= start || ipa_start >= end)
			continue;

		invalidate_vncr(vt);
	}
}
857+
787858
void kvm_nested_s2_wp(struct kvm *kvm)
788859
{
789860
int i;
@@ -796,6 +867,8 @@ void kvm_nested_s2_wp(struct kvm *kvm)
796867
if (kvm_s2_mmu_valid(mmu))
797868
kvm_stage2_wp_range(mmu, 0, kvm_phys_size(mmu));
798869
}
870+
871+
kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits));
799872
}
800873

801874
void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block)
@@ -810,6 +883,8 @@ void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block)
810883
if (kvm_s2_mmu_valid(mmu))
811884
kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), may_block);
812885
}
886+
887+
kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits));
813888
}
814889

815890
void kvm_nested_s2_flush(struct kvm *kvm)

0 commit comments

Comments
 (0)