
Commit 13f64f6

Merge branch 'kvm-arm64/nv-idregs' into kvmarm/next
* kvm-arm64/nv-idregs:
  : Changes to exposure of NV features, courtesy of Marc Zyngier
  :
  : Apply NV-specific feature restrictions at reset rather than at the point
  : of KVM_RUN. This makes the true feature set visible to userspace, a
  : necessary step towards save/restore support of NV VMs.
  :
  : Add an additional vCPU feature flag for selecting the E2H0 flavor of NV,
  : such that the VHE-ness of the VM can be applied to the feature set.
  KVM: arm64: selftests: Test that TGRAN*_2 fields are writable
  KVM: arm64: Allow userspace to write ID_AA64MMFR0_EL1.TGRAN*_2
  KVM: arm64: Advertise FEAT_ECV when possible
  KVM: arm64: Make ID_AA64MMFR4_EL1.NV_frac writable
  KVM: arm64: Allow userspace to limit NV support to nVHE
  KVM: arm64: Move NV-specific capping to idreg sanitisation
  KVM: arm64: Enforce NV limits on a per-idregs basis
  KVM: arm64: Make ID_REG_LIMIT_FIELD_ENUM() more widely available
  KVM: arm64: Consolidate idreg callbacks
  KVM: arm64: Advertise NV2 in the boot messages
  KVM: arm64: Mark HCR.EL2.{NV*,AT} RES0 when ID_AA64MMFR4_EL1.NV_frac is 0
  KVM: arm64: Mark HCR.EL2.E2H RES0 when ID_AA64MMFR1_EL1.VH is zero
  KVM: arm64: Hide ID_AA64MMFR2_EL1.NV from guest and userspace
  arm64: cpufeature: Handle NV_frac as a synonym of NV2

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
2 parents 56e3e5c + edfd826 commit 13f64f6
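
The new KVM_ARM_VCPU_HAS_EL2_E2H0 flag is requested through the usual
KVM_ARM_VCPU_INIT feature bitmap. A minimal userspace sketch of opting a vCPU
into the nVHE (E2H RES0) flavour of NV, with a hypothetical helper name;
illustrative only, not part of this commit:

/*
 * Hypothetical example: request nested virt restricted to nVHE.
 * Assumes vm_fd/vcpu_fd were obtained via KVM_CREATE_VM/KVM_CREATE_VCPU.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int init_nested_nvhe_vcpu(int vm_fd, int vcpu_fd)
{
        struct kvm_vcpu_init init;

        /* Start from the host's preferred target for this VM */
        if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init))
                return -1;

        init.features[0] |= (1U << KVM_ARM_VCPU_HAS_EL2) |
                            (1U << KVM_ARM_VCPU_HAS_EL2_E2H0);

        /* Fails with EINVAL when HCR_EL2.NV1 cannot be honoured */
        return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}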

File tree

8 files changed (+233, -171 lines)

arch/arm64/include/asm/kvm_nested.h

Lines changed: 1 addition & 0 deletions
@@ -188,6 +188,7 @@ static inline bool kvm_supported_tlbi_s1e2_op(struct kvm_vcpu *vpcu, u32 instr)
 }
 
 int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu);
+u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val);
 
 #ifdef CONFIG_ARM64_PTR_AUTH
 bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr);

arch/arm64/include/uapi/asm/kvm.h

Lines changed: 1 addition & 0 deletions
@@ -105,6 +105,7 @@ struct kvm_regs {
 #define KVM_ARM_VCPU_PTRAUTH_ADDRESS	5 /* VCPU uses address authentication */
 #define KVM_ARM_VCPU_PTRAUTH_GENERIC	6 /* VCPU uses generic authentication */
 #define KVM_ARM_VCPU_HAS_EL2		7 /* Support nested virtualization */
+#define KVM_ARM_VCPU_HAS_EL2_E2H0	8 /* Limit NV support to E2H RES0 */
 
 struct kvm_vcpu_init {
 	__u32 target;

arch/arm64/kernel/cpufeature.c

Lines changed: 13 additions & 2 deletions
@@ -497,6 +497,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr3[] = {
 
 static const struct arm64_ftr_bits ftr_id_aa64mmfr4[] = {
 	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR4_EL1_E2H0_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR4_EL1_NV_frac_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
 
@@ -2162,7 +2163,7 @@ static bool has_nested_virt_support(const struct arm64_cpu_capabilities *cap,
 	if (kvm_get_mode() != KVM_MODE_NV)
 		return false;
 
-	if (!has_cpuid_feature(cap, scope)) {
+	if (!cpucap_multi_entry_cap_matches(cap, scope)) {
 		pr_warn("unavailable: %s\n", cap->desc);
 		return false;
 	}
@@ -2519,7 +2520,17 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.capability = ARM64_HAS_NESTED_VIRT,
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_nested_virt_support,
-		ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, NV, NV2)
+		.match_list = (const struct arm64_cpu_capabilities []){
+			{
+				.matches = has_cpuid_feature,
+				ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, NV, NV2)
+			},
+			{
+				.matches = has_cpuid_feature,
+				ARM64_CPUID_FIELDS(ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY)
+			},
+			{ /* Sentinel */ }
+		},
 	},
 	{
 		.capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE,
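
With .match_list, ARM64_HAS_NESTED_VIRT is satisfied when ANY entry matches,
i.e. NV is now detected from either ID_AA64MMFR2_EL1.NV or
ID_AA64MMFR4_EL1.NV_frac. A simplified sketch of the OR semantics that
cpucap_multi_entry_cap_matches() provides (illustrative, not the kernel's
exact implementation):

static bool multi_entry_matches(const struct arm64_cpu_capabilities *entry,
                                int scope)
{
        const struct arm64_cpu_capabilities *caps;

        /* Walk match_list until the empty sentinel; any hit wins */
        for (caps = entry->match_list; caps->matches; caps++)
                if (caps->matches(caps, scope))
                        return true;

        return false;
}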

arch/arm64/kvm/arm.c

Lines changed: 3 additions & 2 deletions
@@ -2836,11 +2836,12 @@ static __init int kvm_arm_init(void)
 	if (err)
 		goto out_hyp;
 
-	kvm_info("%s%sVHE mode initialized successfully\n",
+	kvm_info("%s%sVHE%s mode initialized successfully\n",
 		 in_hyp_mode ? "" : (is_protected_kvm_enabled() ?
 				     "Protected " : "Hyp "),
 		 in_hyp_mode ? "" : (cpus_have_final_cap(ARM64_KVM_HVHE) ?
-				     "h" : "n"));
+				     "h" : "n"),
+		 cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) ? "+NV2": "");
 
 	/*
 	 * FIXME: Do something reasonable if kvm_init() fails after pKVM
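
With the extra argument, the boot banner now also advertises nested virt
support. Example outputs derived from the format string above (illustrative):
"VHE+NV2 mode initialized successfully" when ARM64_HAS_NESTED_VIRT is set, or
"Hyp nVHE mode initialized successfully" on a host without NV.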

arch/arm64/kvm/nested.c

Lines changed: 152 additions & 133 deletions
@@ -16,9 +16,6 @@
 
 #include "sys_regs.h"
 
-/* Protection against the sysreg repainting madness... */
-#define NV_FTR(r, f)	ID_AA64##r##_EL1_##f
-
 /*
  * Ratio of live shadow S2 MMU per vcpu. This is a trade-off between
  * memory usage and potential number of different sets of S2 PTs in
@@ -54,6 +51,10 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
 	struct kvm_s2_mmu *tmp;
 	int num_mmus, ret = 0;
 
+	if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features) &&
+	    !cpus_have_final_cap(ARM64_HAS_HCR_NV1))
+		return -EINVAL;
+
 	/*
 	 * Let's treat memory allocation failures as benign: If we fail to
 	 * allocate anything, return an error and keep the allocated array
@@ -807,134 +808,151 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
  * This list should get updated as new features get added to the NV
  * support, and new extension to the architecture.
  */
-static void limit_nv_id_regs(struct kvm *kvm)
+u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
 {
-	u64 val, tmp;
-
-	/* Support everything but TME */
-	val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64ISAR0_EL1);
-	val &= ~NV_FTR(ISAR0, TME);
-	kvm_set_vm_id_reg(kvm, SYS_ID_AA64ISAR0_EL1, val);
-
-	/* Support everything but Spec Invalidation and LS64 */
-	val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64ISAR1_EL1);
-	val &= ~(NV_FTR(ISAR1, LS64) |
-		 NV_FTR(ISAR1, SPECRES));
-	kvm_set_vm_id_reg(kvm, SYS_ID_AA64ISAR1_EL1, val);
-
-	/* No AMU, MPAM, S-EL2, or RAS */
-	val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1);
-	val &= ~(GENMASK_ULL(55, 52) |
-		 NV_FTR(PFR0, AMU) |
-		 NV_FTR(PFR0, MPAM) |
-		 NV_FTR(PFR0, SEL2) |
-		 NV_FTR(PFR0, RAS) |
-		 NV_FTR(PFR0, EL3) |
-		 NV_FTR(PFR0, EL2) |
-		 NV_FTR(PFR0, EL1) |
-		 NV_FTR(PFR0, EL0));
-	/* 64bit only at any EL */
-	val |= FIELD_PREP(NV_FTR(PFR0, EL0), 0b0001);
-	val |= FIELD_PREP(NV_FTR(PFR0, EL1), 0b0001);
-	val |= FIELD_PREP(NV_FTR(PFR0, EL2), 0b0001);
-	val |= FIELD_PREP(NV_FTR(PFR0, EL3), 0b0001);
-	kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, val);
-
-	/* Only support BTI, SSBS, CSV2_frac */
-	val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR1_EL1);
-	val &= (NV_FTR(PFR1, BT) |
-		NV_FTR(PFR1, SSBS) |
-		NV_FTR(PFR1, CSV2_frac));
-	kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR1_EL1, val);
-
-	/* Hide ECV, ExS, Secure Memory */
-	val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR0_EL1);
-	val &= ~(NV_FTR(MMFR0, ECV) |
-		 NV_FTR(MMFR0, EXS) |
-		 NV_FTR(MMFR0, TGRAN4_2) |
-		 NV_FTR(MMFR0, TGRAN16_2) |
-		 NV_FTR(MMFR0, TGRAN64_2) |
-		 NV_FTR(MMFR0, SNSMEM));
-
-	/* Disallow unsupported S2 page sizes */
-	switch (PAGE_SIZE) {
-	case SZ_64K:
-		val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN16_2), 0b0001);
-		fallthrough;
-	case SZ_16K:
-		val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN4_2), 0b0001);
-		fallthrough;
-	case SZ_4K:
-		/* Support everything */
+	switch (reg) {
+	case SYS_ID_AA64ISAR0_EL1:
+		/* Support everything but TME */
+		val &= ~ID_AA64ISAR0_EL1_TME;
 		break;
-	}
-	/*
-	 * Since we can't support a guest S2 page size smaller than
-	 * the host's own page size (due to KVM only populating its
-	 * own S2 using the kernel's page size), advertise the
-	 * limitation using FEAT_GTG.
-	 */
-	switch (PAGE_SIZE) {
-	case SZ_4K:
-		val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN4_2), 0b0010);
-		fallthrough;
-	case SZ_16K:
-		val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN16_2), 0b0010);
-		fallthrough;
-	case SZ_64K:
-		val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN64_2), 0b0010);
+
+	case SYS_ID_AA64ISAR1_EL1:
+		/* Support everything but LS64 and Spec Invalidation */
+		val &= ~(ID_AA64ISAR1_EL1_LS64 |
+			 ID_AA64ISAR1_EL1_SPECRES);
+		break;
+
+	case SYS_ID_AA64PFR0_EL1:
+		/* No RME, AMU, MPAM, S-EL2, or RAS */
+		val &= ~(ID_AA64PFR0_EL1_RME |
+			 ID_AA64PFR0_EL1_AMU |
+			 ID_AA64PFR0_EL1_MPAM |
+			 ID_AA64PFR0_EL1_SEL2 |
+			 ID_AA64PFR0_EL1_RAS |
+			 ID_AA64PFR0_EL1_EL3 |
+			 ID_AA64PFR0_EL1_EL2 |
+			 ID_AA64PFR0_EL1_EL1 |
+			 ID_AA64PFR0_EL1_EL0);
+		/* 64bit only at any EL */
+		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL0, IMP);
+		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL1, IMP);
+		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL2, IMP);
+		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL3, IMP);
+		break;
+
+	case SYS_ID_AA64PFR1_EL1:
+		/* Only support BTI, SSBS, CSV2_frac */
+		val &= (ID_AA64PFR1_EL1_BT |
+			ID_AA64PFR1_EL1_SSBS |
+			ID_AA64PFR1_EL1_CSV2_frac);
+		break;
+
+	case SYS_ID_AA64MMFR0_EL1:
+		/* Hide ExS, Secure Memory */
+		val &= ~(ID_AA64MMFR0_EL1_EXS |
+			 ID_AA64MMFR0_EL1_TGRAN4_2 |
+			 ID_AA64MMFR0_EL1_TGRAN16_2 |
+			 ID_AA64MMFR0_EL1_TGRAN64_2 |
+			 ID_AA64MMFR0_EL1_SNSMEM);
+
+		/* Hide CNTPOFF if present */
+		val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64MMFR0_EL1, ECV, IMP);
+
+		/* Disallow unsupported S2 page sizes */
+		switch (PAGE_SIZE) {
+		case SZ_64K:
+			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, NI);
+			fallthrough;
+		case SZ_16K:
+			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, NI);
+			fallthrough;
+		case SZ_4K:
+			/* Support everything */
+			break;
+		}
+
+		/*
+		 * Since we can't support a guest S2 page size smaller
+		 * than the host's own page size (due to KVM only
+		 * populating its own S2 using the kernel's page
+		 * size), advertise the limitation using FEAT_GTG.
+		 */
+		switch (PAGE_SIZE) {
+		case SZ_4K:
+			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, IMP);
+			fallthrough;
+		case SZ_16K:
+			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, IMP);
+			fallthrough;
+		case SZ_64K:
+			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN64_2, IMP);
+			break;
+		}
+
+		/* Cap PARange to 48bits */
+		val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64MMFR0_EL1, PARANGE, 48);
+		break;
+
+	case SYS_ID_AA64MMFR1_EL1:
+		val &= (ID_AA64MMFR1_EL1_HCX |
+			ID_AA64MMFR1_EL1_PAN |
+			ID_AA64MMFR1_EL1_LO |
+			ID_AA64MMFR1_EL1_HPDS |
+			ID_AA64MMFR1_EL1_VH |
+			ID_AA64MMFR1_EL1_VMIDBits);
+		/* FEAT_E2H0 implies no VHE */
+		if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features))
+			val &= ~ID_AA64MMFR1_EL1_VH;
+		break;
+
+	case SYS_ID_AA64MMFR2_EL1:
+		val &= ~(ID_AA64MMFR2_EL1_BBM |
+			 ID_AA64MMFR2_EL1_TTL |
+			 GENMASK_ULL(47, 44) |
+			 ID_AA64MMFR2_EL1_ST |
+			 ID_AA64MMFR2_EL1_CCIDX |
+			 ID_AA64MMFR2_EL1_VARange);
+
+		/* Force TTL support */
+		val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR2_EL1, TTL, IMP);
+		break;
+
+	case SYS_ID_AA64MMFR4_EL1:
+		/*
+		 * You get EITHER
+		 *
+		 * - FEAT_VHE without FEAT_E2H0
+		 * - FEAT_NV limited to FEAT_NV2
+		 * - HCR_EL2.NV1 being RES0
+		 *
+		 * OR
+		 *
+		 * - FEAT_E2H0 without FEAT_VHE nor FEAT_NV
+		 *
+		 * Life is too short for anything else.
+		 */
+		if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features)) {
+			val = 0;
+		} else {
+			val = SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY);
+			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, E2H0, NI_NV1);
+		}
+		break;
+
+	case SYS_ID_AA64DFR0_EL1:
+		/* Only limited support for PMU, Debug, BPs, WPs, and HPMN0 */
+		val &= (ID_AA64DFR0_EL1_PMUVer |
+			ID_AA64DFR0_EL1_WRPs |
+			ID_AA64DFR0_EL1_BRPs |
+			ID_AA64DFR0_EL1_DebugVer |
+			ID_AA64DFR0_EL1_HPMN0);
+
+		/* Cap Debug to ARMv8.1 */
+		val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, VHE);
 		break;
 	}
-	/* Cap PARange to 48bits */
-	tmp = FIELD_GET(NV_FTR(MMFR0, PARANGE), val);
-	if (tmp > 0b0101) {
-		val &= ~NV_FTR(MMFR0, PARANGE);
-		val |= FIELD_PREP(NV_FTR(MMFR0, PARANGE), 0b0101);
-	}
-	kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR0_EL1, val);
-
-	val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR1_EL1);
-	val &= (NV_FTR(MMFR1, HCX) |
-		NV_FTR(MMFR1, PAN) |
-		NV_FTR(MMFR1, LO) |
-		NV_FTR(MMFR1, HPDS) |
-		NV_FTR(MMFR1, VH) |
-		NV_FTR(MMFR1, VMIDBits));
-	kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR1_EL1, val);
-
-	val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR2_EL1);
-	val &= ~(NV_FTR(MMFR2, BBM) |
-		 NV_FTR(MMFR2, TTL) |
-		 GENMASK_ULL(47, 44) |
-		 NV_FTR(MMFR2, ST) |
-		 NV_FTR(MMFR2, CCIDX) |
-		 NV_FTR(MMFR2, VARange));
-
-	/* Force TTL support */
-	val |= FIELD_PREP(NV_FTR(MMFR2, TTL), 0b0001);
-	kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR2_EL1, val);
-
-	val = 0;
-	if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
-		val |= FIELD_PREP(NV_FTR(MMFR4, E2H0),
-				  ID_AA64MMFR4_EL1_E2H0_NI_NV1);
-	kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR4_EL1, val);
-
-	/* Only limited support for PMU, Debug, BPs, WPs, and HPMN0 */
-	val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1);
-	val &= (NV_FTR(DFR0, PMUVer) |
-		NV_FTR(DFR0, WRPs) |
-		NV_FTR(DFR0, BRPs) |
-		NV_FTR(DFR0, DebugVer) |
-		NV_FTR(DFR0, HPMN0));
-
-	/* Cap Debug to ARMv8.1 */
-	tmp = FIELD_GET(NV_FTR(DFR0, DebugVer), val);
-	if (tmp > 0b0111) {
-		val &= ~NV_FTR(DFR0, DebugVer);
-		val |= FIELD_PREP(NV_FTR(DFR0, DebugVer), 0b0111);
-	}
-	kvm_set_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1, val);
+
+	return val;
 }
 
 u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *vcpu,
@@ -981,8 +999,6 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
 	if (!kvm->arch.sysreg_masks)
 		return -ENOMEM;
 
-	limit_nv_id_regs(kvm);
-
 	/* VTTBR_EL2 */
 	res0 = res1 = 0;
 	if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
@@ -1021,10 +1037,11 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
 		res0 |= HCR_FIEN;
 	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, FWB, IMP))
 		res0 |= HCR_FWB;
-	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, NV2))
-		res0 |= HCR_NV2;
-	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, IMP))
-		res0 |= (HCR_AT | HCR_NV1 | HCR_NV);
+	/* Implementation choice: NV2 is the only supported config */
+	if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
+		res0 |= (HCR_NV2 | HCR_NV | HCR_AT);
+	if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, NI))
+		res0 |= HCR_NV1;
 	if (!(kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
 	      kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
 		res0 |= (HCR_API | HCR_APK);
@@ -1034,6 +1051,8 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
 		res0 |= (HCR_TEA | HCR_TERR);
 	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
 		res0 |= HCR_TLOR;
+	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, VH, IMP))
+		res0 |= HCR_E2H;
 	if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, IMP))
 		res1 |= HCR_E2H;
 	set_sysreg_masks(kvm, HCR_EL2, res0, res1);
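
Most of the open-coded capping deleted above is replaced by
ID_REG_LIMIT_FIELD_ENUM(). Conceptually it reduces to the FIELD_GET()/
FIELD_PREP() pattern visible in the removed PARange/DebugVer code; a
simplified sketch for an unsigned field (illustrative helper, not the kernel
macro itself):

/* Cap an unsigned ID register field at "limit" */
static inline u64 cap_id_field(u64 val, u64 mask, u64 limit)
{
        if (FIELD_GET(mask, val) > limit) {
                val &= ~mask;
                val |= FIELD_PREP(mask, limit);
        }

        return val;
}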
