
Commit e7ef6ed

Marc Zyngier authored and Oliver Upton committed
KVM: arm64: Enforce NV limits on a per-idregs basis
As we are about to change the way the idreg reset values are computed,
move all the NV limits into a function that initialises one register
at a time. This will be most useful in the upcoming patches.

We take this opportunity to remove the NV_FTR() macro and rely on the
generated names instead.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Link: https://lore.kernel.org/r/20250220134907.554085-9-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
1 parent 179fd7e commit e7ef6ed
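For reference, NV_FTR() was a thin token-pasting wrapper around the generated sysreg field names, which is why every use in the diff below rewrites mechanically to the corresponding ID_AA64*_EL1_* definition. A minimal standalone sketch of the expansion (the mask value here is a stand-in for illustration, not the kernel's generated constant):

	/* Standalone sketch, not kernel code. */
	#include <stdint.h>
	#include <stdio.h>

	#define ID_AA64ISAR0_EL1_TME	(0xfULL << 24)		/* stand-in mask value */
	#define NV_FTR(r, f)		ID_AA64##r##_EL1_##f	/* the removed macro */

	int main(void)
	{
		uint64_t val = ~0ULL;

		/* NV_FTR(ISAR0, TME) token-pastes into ID_AA64ISAR0_EL1_TME... */
		val &= ~NV_FTR(ISAR0, TME);
		/* ...so the patch can simply use the generated name directly. */
		printf("val = %#llx\n", (unsigned long long)val);
		return 0;
	}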

1 file changed: 136 additions, 103 deletions

arch/arm64/kvm/nested.c

@@ -16,9 +16,6 @@
 
 #include "sys_regs.h"
 
-/* Protection against the sysreg repainting madness... */
-#define NV_FTR(r, f)	ID_AA64##r##_EL1_##f
-
 /*
  * Ratio of live shadow S2 MMU per vcpu. This is a trade-off between
  * memory usage and potential number of different sets of S2 PTs in
@@ -807,133 +804,169 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
  * This list should get updated as new features get added to the NV
  * support, and new extension to the architecture.
  */
+static u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
+{
+	switch (reg) {
+	case SYS_ID_AA64ISAR0_EL1:
+		/* Support everything but TME */
+		val &= ~ID_AA64ISAR0_EL1_TME;
+		break;
+
+	case SYS_ID_AA64ISAR1_EL1:
+		/* Support everything but LS64 and Spec Invalidation */
+		val &= ~(ID_AA64ISAR1_EL1_LS64	|
+			 ID_AA64ISAR1_EL1_SPECRES);
+		break;
+
+	case SYS_ID_AA64PFR0_EL1:
+		/* No RME, AMU, MPAM, S-EL2, or RAS */
+		val &= ~(ID_AA64PFR0_EL1_RME	|
+			 ID_AA64PFR0_EL1_AMU	|
+			 ID_AA64PFR0_EL1_MPAM	|
+			 ID_AA64PFR0_EL1_SEL2	|
+			 ID_AA64PFR0_EL1_RAS	|
+			 ID_AA64PFR0_EL1_EL3	|
+			 ID_AA64PFR0_EL1_EL2	|
+			 ID_AA64PFR0_EL1_EL1	|
+			 ID_AA64PFR0_EL1_EL0);
+		/* 64bit only at any EL */
+		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL0, IMP);
+		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL1, IMP);
+		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL2, IMP);
+		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL3, IMP);
+		break;
+
+	case SYS_ID_AA64PFR1_EL1:
+		/* Only support BTI, SSBS, CSV2_frac */
+		val &= (ID_AA64PFR1_EL1_BT	|
+			ID_AA64PFR1_EL1_SSBS	|
+			ID_AA64PFR1_EL1_CSV2_frac);
+		break;
+
+	case SYS_ID_AA64MMFR0_EL1:
+		/* Hide ECV, ExS, Secure Memory */
+		val &= ~(ID_AA64MMFR0_EL1_ECV		|
+			 ID_AA64MMFR0_EL1_EXS		|
+			 ID_AA64MMFR0_EL1_TGRAN4_2	|
+			 ID_AA64MMFR0_EL1_TGRAN16_2	|
+			 ID_AA64MMFR0_EL1_TGRAN64_2	|
+			 ID_AA64MMFR0_EL1_SNSMEM);
+
+		/* Disallow unsupported S2 page sizes */
+		switch (PAGE_SIZE) {
+		case SZ_64K:
+			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, NI);
+			fallthrough;
+		case SZ_16K:
+			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, NI);
+			fallthrough;
+		case SZ_4K:
+			/* Support everything */
+			break;
+		}
+
+		/*
+		 * Since we can't support a guest S2 page size smaller
+		 * than the host's own page size (due to KVM only
+		 * populating its own S2 using the kernel's page
+		 * size), advertise the limitation using FEAT_GTG.
+		 */
+		switch (PAGE_SIZE) {
+		case SZ_4K:
+			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, IMP);
+			fallthrough;
+		case SZ_16K:
+			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, IMP);
+			fallthrough;
+		case SZ_64K:
+			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN64_2, IMP);
+			break;
+		}
+
+		/* Cap PARange to 48bits */
+		val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64MMFR0_EL1, PARANGE, 48);
+		break;
+
+	case SYS_ID_AA64MMFR1_EL1:
+		val &= (ID_AA64MMFR1_EL1_HCX	|
+			ID_AA64MMFR1_EL1_PAN	|
+			ID_AA64MMFR1_EL1_LO	|
+			ID_AA64MMFR1_EL1_HPDS	|
+			ID_AA64MMFR1_EL1_VH	|
+			ID_AA64MMFR1_EL1_VMIDBits);
+		break;
+
+	case SYS_ID_AA64MMFR2_EL1:
+		val &= ~(ID_AA64MMFR2_EL1_BBM	|
+			 ID_AA64MMFR2_EL1_TTL	|
+			 GENMASK_ULL(47, 44)	|
+			 ID_AA64MMFR2_EL1_ST	|
+			 ID_AA64MMFR2_EL1_CCIDX	|
+			 ID_AA64MMFR2_EL1_VARange);
+
+		/* Force TTL support */
+		val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR2_EL1, TTL, IMP);
+		break;
+
+	case SYS_ID_AA64MMFR4_EL1:
+		val = SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY);
+		val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, E2H0, NI_NV1);
+		break;
+
+	case SYS_ID_AA64DFR0_EL1:
+		/* Only limited support for PMU, Debug, BPs, WPs, and HPMN0 */
+		val &= (ID_AA64DFR0_EL1_PMUVer	|
+			ID_AA64DFR0_EL1_WRPs	|
+			ID_AA64DFR0_EL1_BRPs	|
+			ID_AA64DFR0_EL1_DebugVer|
+			ID_AA64DFR0_EL1_HPMN0);
+
+		/* Cap Debug to ARMv8.1 */
+		val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, VHE);
+		break;
+	}
+
+	return val;
+}
+
 static void limit_nv_id_regs(struct kvm *kvm)
 {
-	u64 val, tmp;
+	u64 val;
 
-	/* Support everything but TME */
 	val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64ISAR0_EL1);
-	val &= ~NV_FTR(ISAR0, TME);
+	val = limit_nv_id_reg(kvm, SYS_ID_AA64ISAR0_EL1, val);
 	kvm_set_vm_id_reg(kvm, SYS_ID_AA64ISAR0_EL1, val);
 
-	/* Support everything but Spec Invalidation and LS64 */
 	val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64ISAR1_EL1);
-	val &= ~(NV_FTR(ISAR1, LS64)	|
-		 NV_FTR(ISAR1, SPECRES));
+	val = limit_nv_id_reg(kvm, SYS_ID_AA64ISAR1_EL1, val);
 	kvm_set_vm_id_reg(kvm, SYS_ID_AA64ISAR1_EL1, val);
 
-	/* No AMU, MPAM, S-EL2, or RAS */
 	val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1);
-	val &= ~(GENMASK_ULL(55, 52)	|
-		 NV_FTR(PFR0, AMU)	|
-		 NV_FTR(PFR0, MPAM)	|
-		 NV_FTR(PFR0, SEL2)	|
-		 NV_FTR(PFR0, RAS)	|
-		 NV_FTR(PFR0, EL3)	|
-		 NV_FTR(PFR0, EL2)	|
-		 NV_FTR(PFR0, EL1)	|
-		 NV_FTR(PFR0, EL0));
-	/* 64bit only at any EL */
-	val |= FIELD_PREP(NV_FTR(PFR0, EL0), 0b0001);
-	val |= FIELD_PREP(NV_FTR(PFR0, EL1), 0b0001);
-	val |= FIELD_PREP(NV_FTR(PFR0, EL2), 0b0001);
-	val |= FIELD_PREP(NV_FTR(PFR0, EL3), 0b0001);
+	val = limit_nv_id_reg(kvm, SYS_ID_AA64PFR0_EL1, val);
 	kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, val);
 
-	/* Only support BTI, SSBS, CSV2_frac */
 	val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR1_EL1);
-	val &= (NV_FTR(PFR1, BT)	|
-		NV_FTR(PFR1, SSBS)	|
-		NV_FTR(PFR1, CSV2_frac));
+	val = limit_nv_id_reg(kvm, SYS_ID_AA64PFR1_EL1, val);
 	kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR1_EL1, val);
 
-	/* Hide ECV, ExS, Secure Memory */
 	val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR0_EL1);
-	val &= ~(NV_FTR(MMFR0, ECV)		|
-		 NV_FTR(MMFR0, EXS)		|
-		 NV_FTR(MMFR0, TGRAN4_2)	|
-		 NV_FTR(MMFR0, TGRAN16_2)	|
-		 NV_FTR(MMFR0, TGRAN64_2)	|
-		 NV_FTR(MMFR0, SNSMEM));
-
-	/* Disallow unsupported S2 page sizes */
-	switch (PAGE_SIZE) {
-	case SZ_64K:
-		val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN16_2), 0b0001);
-		fallthrough;
-	case SZ_16K:
-		val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN4_2), 0b0001);
-		fallthrough;
-	case SZ_4K:
-		/* Support everything */
-		break;
-	}
-	/*
-	 * Since we can't support a guest S2 page size smaller than
-	 * the host's own page size (due to KVM only populating its
-	 * own S2 using the kernel's page size), advertise the
-	 * limitation using FEAT_GTG.
-	 */
-	switch (PAGE_SIZE) {
-	case SZ_4K:
-		val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN4_2), 0b0010);
-		fallthrough;
-	case SZ_16K:
-		val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN16_2), 0b0010);
-		fallthrough;
-	case SZ_64K:
-		val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN64_2), 0b0010);
-		break;
-	}
-	/* Cap PARange to 48bits */
-	tmp = FIELD_GET(NV_FTR(MMFR0, PARANGE), val);
-	if (tmp > 0b0101) {
-		val &= ~NV_FTR(MMFR0, PARANGE);
-		val |= FIELD_PREP(NV_FTR(MMFR0, PARANGE), 0b0101);
-	}
+	val = limit_nv_id_reg(kvm, SYS_ID_AA64MMFR0_EL1, val);
 	kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR0_EL1, val);
 
 	val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR1_EL1);
-	val &= (NV_FTR(MMFR1, HCX)	|
-		NV_FTR(MMFR1, PAN)	|
-		NV_FTR(MMFR1, LO)	|
-		NV_FTR(MMFR1, HPDS)	|
-		NV_FTR(MMFR1, VH)	|
-		NV_FTR(MMFR1, VMIDBits));
+	val = limit_nv_id_reg(kvm, SYS_ID_AA64MMFR1_EL1, val);
 	kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR1_EL1, val);
 
 	val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR2_EL1);
-	val &= ~(NV_FTR(MMFR2, BBM)	|
-		 NV_FTR(MMFR2, TTL)	|
-		 GENMASK_ULL(47, 44)	|
-		 NV_FTR(MMFR2, ST)	|
-		 NV_FTR(MMFR2, CCIDX)	|
-		 NV_FTR(MMFR2, VARange));
-
-	/* Force TTL support */
-	val |= FIELD_PREP(NV_FTR(MMFR2, TTL), 0b0001);
+	val = limit_nv_id_reg(kvm, SYS_ID_AA64MMFR2_EL1, val);
 	kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR2_EL1, val);
 
-	val = 0;
-	if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
-		val |= FIELD_PREP(NV_FTR(MMFR4, E2H0),
-				  ID_AA64MMFR4_EL1_E2H0_NI_NV1);
+	val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR4_EL1);
+	val = limit_nv_id_reg(kvm, SYS_ID_AA64MMFR4_EL1, val);
 	kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR4_EL1, val);
 
-	/* Only limited support for PMU, Debug, BPs, WPs, and HPMN0 */
 	val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1);
-	val &= (NV_FTR(DFR0, PMUVer)	|
-		NV_FTR(DFR0, WRPs)	|
-		NV_FTR(DFR0, BRPs)	|
-		NV_FTR(DFR0, DebugVer)	|
-		NV_FTR(DFR0, HPMN0));
-
-	/* Cap Debug to ARMv8.1 */
-	tmp = FIELD_GET(NV_FTR(DFR0, DebugVer), val);
-	if (tmp > 0b0111) {
-		val &= ~NV_FTR(DFR0, DebugVer);
-		val |= FIELD_PREP(NV_FTR(DFR0, DebugVer), 0b0111);
-	}
+	val = limit_nv_id_reg(kvm, SYS_ID_AA64DFR0_EL1, val);
 	kvm_set_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1, val);
 }
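One conversion worth noting: the open-coded PARange and DebugVer capping sequences (FIELD_GET the field, compare against a limit, rewrite it) are replaced by ID_REG_LIMIT_FIELD_ENUM(). A minimal standalone sketch of the capping pattern the macro takes over, assuming the usual ID_AA64MMFR0_EL1.PARange layout (bits [3:0], with 0b0101 encoding a 48-bit PA range):

	/* Standalone sketch, not kernel code. */
	#include <stdint.h>
	#include <stdio.h>

	#define PARANGE_SHIFT	0
	#define PARANGE_MASK	(0xfULL << PARANGE_SHIFT)
	#define PARANGE_48	0x5ULL	/* 0b0101: 48-bit physical address range */

	static uint64_t cap_parange(uint64_t val)
	{
		uint64_t field = (val & PARANGE_MASK) >> PARANGE_SHIFT;

		/* Same read-compare-rewrite dance as the removed open-coded version. */
		if (field > PARANGE_48) {
			val &= ~PARANGE_MASK;
			val |= PARANGE_48 << PARANGE_SHIFT;
		}
		return val;
	}

	int main(void)
	{
		/* A host reporting 52-bit PA (0b0110) gets capped to 48-bit (0b0101). */
		printf("%#llx\n", (unsigned long long)cap_parange(0x6));
		return 0;
	}

Centralising the clamp in the macro means each caller states only the register, field, and limit, rather than repeating this sequence per field.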
