
Commit 6eb0ed9

Author: Marc Zyngier (committed)

Merge branch kvm-arm64/mte-frac into kvmarm-master/next
* kvm-arm64/mte-frac:
  : .
  : Prevent FEAT_MTE_ASYNC from being accidentally exposed to a guest,
  : courtesy of Ben Horgan. From the cover letter:
  :
  : "The ID_AA64PFR1_EL1.MTE_frac field is currently hidden from KVM.
  : However, when ID_AA64PFR1_EL1.MTE==2, ID_AA64PFR1_EL1.MTE_frac==0
  : indicates that MTE_ASYNC is supported. On a host with
  : ID_AA64PFR1_EL1.MTE==2 but without MTE_ASYNC support a guest with the
  : MTE capability enabled will incorrectly see MTE_ASYNC advertised as
  : supported. This series fixes that."
  : .
  KVM: selftests: Confirm exposing MTE_frac does not break migration
  KVM: arm64: Make MTE_frac masking conditional on MTE capability
  arm64/sysreg: Expose MTE_frac so that it is visible to KVM

Signed-off-by: Marc Zyngier <maz@kernel.org>
2 parents: cb86616 + 6901886
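For context, the distinction the series cares about can be shown with a small standalone C sketch (not part of this merge). The macro names and the sample register value below are made up for illustration; the field positions (MTE at bits [11:8], MTE_frac at bits [43:40]) and the 0 = ASYNC-supported / 0xF = not-implemented encodings follow the architecture:

/*
 * Illustrative sketch only (not part of the commit): decode the MTE and
 * MTE_frac fields of an ID_AA64PFR1_EL1 value. The sample value is made up.
 */
#include <stdint.h>
#include <stdio.h>

#define ID_AA64PFR1_MTE_SHIFT		8	/* MTE,      bits [11:8]  */
#define ID_AA64PFR1_MTE_FRAC_SHIFT	40	/* MTE_frac, bits [43:40] */
#define FIELD(val, shift)		(((val) >> (shift)) & 0xfULL)

int main(void)
{
	/* Hypothetical host value: MTE == 2 (MTE2), MTE_frac == 0xF (ASYNC not implemented). */
	uint64_t pfr1 = (0xfULL << ID_AA64PFR1_MTE_FRAC_SHIFT) |
			(0x2ULL << ID_AA64PFR1_MTE_SHIFT);
	uint64_t mte = FIELD(pfr1, ID_AA64PFR1_MTE_SHIFT);
	uint64_t mte_frac = FIELD(pfr1, ID_AA64PFR1_MTE_FRAC_SHIFT);

	/* MTE_frac only refines the MTE == 2 (FEAT_MTE2) case. */
	if (mte == 2 && mte_frac == 0x0)
		printf("MTE2 with MTE_ASYNC supported\n");
	else if (mte == 2 && mte_frac == 0xf)
		printf("MTE2 without MTE_ASYNC support\n");
	else
		printf("MTE=%llu MTE_frac=%llu\n",
		       (unsigned long long)mte, (unsigned long long)mte_frac);
	return 0;
}

Exposing MTE_frac to a guest that only knows MTE == 2 is exactly where the wrong "ASYNC supported" answer could previously leak through.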

File tree

3 files changed: +103 -3 lines changed


arch/arm64/kernel/cpufeature.c

Lines changed: 1 addition & 0 deletions
@@ -298,6 +298,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_GCS),
 		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_GCS_SHIFT, 4, 0),
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_frac_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
 		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SME_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAM_frac_SHIFT, 4, 0),
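Note on the new entry (not part of the commit text): FTR_HIDDEN keeps MTE_frac out of the EL0 MRS-emulation view while still sanitising the field and making the system-wide value available to in-kernel users such as KVM, and the S_ prefix registers the field as signed, so under FTR_LOWER_SAFE the 0xF (not-implemented) encoding compares as -1 and is picked as the safe value when CPUs disagree.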

arch/arm64/kvm/sys_regs.c

Lines changed: 26 additions & 2 deletions
@@ -1601,13 +1601,14 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
 		val = sanitise_id_aa64pfr0_el1(vcpu, val);
 		break;
 	case SYS_ID_AA64PFR1_EL1:
-		if (!kvm_has_mte(vcpu->kvm))
+		if (!kvm_has_mte(vcpu->kvm)) {
 			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
+			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac);
+		}
 
 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RNDR_trap);
 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac);
 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS);
 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE);
 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
@@ -1954,11 +1955,34 @@ static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
 {
 	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
 	u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
+	u8 mte = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE, hw_val);
+	u8 user_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, user_val);
+	u8 hw_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, hw_val);
 
 	/* See set_id_aa64pfr0_el1 for comment about MPAM */
 	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
 		user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
 
+	/*
+	 * Previously MTE_frac was hidden from guest. However, if the
+	 * hardware supports MTE2 but not MTE_ASYM_FAULT then a value
+	 * of 0 for this field indicates that the hardware supports
+	 * MTE_ASYNC. Whereas, 0xf indicates MTE_ASYNC is not supported.
+	 *
+	 * As KVM must accept values from KVM provided by user-space,
+	 * when ID_AA64PFR1_EL1.MTE is 2 allow user-space to set
+	 * ID_AA64PFR1_EL1.MTE_frac to 0. However, ignore it to avoid
+	 * incorrectly claiming hardware support for MTE_ASYNC in the
+	 * guest.
+	 */
+
+	if (mte == ID_AA64PFR1_EL1_MTE_MTE2 &&
+	    hw_mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI &&
+	    user_mte_frac == ID_AA64PFR1_EL1_MTE_frac_ASYNC) {
+		user_val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK;
+		user_val |= hw_val & ID_AA64PFR1_EL1_MTE_frac_MASK;
+	}
+
 	return set_id_reg(vcpu, rd, user_val);
 }
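The set_id_aa64pfr1_el1() change above shapes what a VMM sees through the one-reg interface. Below is a rough sketch of that interaction, not part of the commit: it assumes an arm64 host, a vcpu fd that was created, initialised and had KVM_CAP_ARM_MTE enabled elsewhere, and the standard KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls; the helper name and the open-coded mask are illustrative.

/*
 * Illustrative sketch, not part of the commit: read ID_AA64PFR1_EL1 for a
 * vcpu and write it back with MTE_frac forced to 0. On an MTE2 host
 * without MTE_ASYNC, KVM now accepts the write but keeps the guest view
 * of MTE_frac at 0xF. arm64-only: ARM64_SYS_REG comes from the arm64
 * UAPI headers pulled in by <linux/kvm.h>.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define PFR1_ID		ARM64_SYS_REG(3, 0, 0, 4, 1)	/* ID_AA64PFR1_EL1 */
#define MTE_FRAC_MASK	(0xfULL << 40)			/* MTE_frac, bits [43:40] */

static int try_set_mte_frac_async(int vcpu_fd)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		.id	= PFR1_ID,
		.addr	= (uint64_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
		return -1;

	val &= ~MTE_FRAC_MASK;	/* claim MTE_frac == 0, i.e. MTE_ASYNC */

	/* Accepted, but ignored when the host cannot back MTE_ASYNC. */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

The selftest below exercises exactly this accept-but-ignore behaviour through the framework's vcpu_get_reg()/__vcpu_set_reg() wrappers.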

tools/testing/selftests/kvm/arm64/set_id_regs.c

Lines changed: 76 additions & 1 deletion
@@ -15,6 +15,8 @@
 #include "test_util.h"
 #include <linux/bitfield.h>
 
+bool have_cap_arm_mte;
+
 enum ftr_type {
 	FTR_EXACT,			/* Use a predefined safe value */
 	FTR_LOWER_SAFE,			/* Smaller value is safe */
@@ -543,6 +545,70 @@ static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu)
 		ksft_test_result_fail("ID_AA64PFR1_EL1.MPAM_frac value should not be ignored\n");
 }
 
+#define MTE_IDREG_TEST 1
+static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
+{
+	uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
+	struct reg_mask_range range = {
+		.addr = (__u64)masks,
+	};
+	uint64_t val;
+	uint64_t mte;
+	uint64_t mte_frac;
+	int idx, err;
+
+	if (!have_cap_arm_mte) {
+		ksft_test_result_skip("MTE capability not supported, nothing to test\n");
+		return;
+	}
+
+	/* Get writable masks for feature ID registers */
+	memset(range.reserved, 0, sizeof(range.reserved));
+	vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
+
+	idx = encoding_to_range_idx(SYS_ID_AA64PFR1_EL1);
+	if ((masks[idx] & ID_AA64PFR1_EL1_MTE_frac_MASK) == ID_AA64PFR1_EL1_MTE_frac_MASK) {
+		ksft_test_result_skip("ID_AA64PFR1_EL1.MTE_frac is officially writable, nothing to test\n");
+		return;
+	}
+
+	/*
+	 * When MTE is supported but MTE_ASYMM is not (ID_AA64PFR1_EL1.MTE == 2)
+	 * ID_AA64PFR1_EL1.MTE_frac == 0xF indicates MTE_ASYNC is unsupported
+	 * and MTE_frac == 0 indicates it is supported.
+	 *
+	 * As MTE_frac was previously unconditionally read as 0, check
+	 * that the set to 0 succeeds but does not change MTE_frac
+	 * from unsupported (0xF) to supported (0).
+	 *
+	 */
+	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
+
+	mte = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), val);
+	mte_frac = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac), val);
+	if (mte != ID_AA64PFR1_EL1_MTE_MTE2 ||
+	    mte_frac != ID_AA64PFR1_EL1_MTE_frac_NI) {
+		ksft_test_result_skip("MTE_ASYNC or MTE_ASYMM are supported, nothing to test\n");
+		return;
+	}
+
+	/* Try to set MTE_frac=0. */
+	val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK;
+	val |= FIELD_PREP(ID_AA64PFR1_EL1_MTE_frac_MASK, 0);
+	err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
+	if (err) {
+		ksft_test_result_fail("ID_AA64PFR1_EL1.MTE_frac=0 was not accepted\n");
+		return;
+	}
+
+	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
+	mte_frac = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac), val);
+	if (mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI)
+		ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac=0 accepted and still 0xF\n");
+	else
+		ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac no longer 0xF\n");
+}
+
 static void test_guest_reg_read(struct kvm_vcpu *vcpu)
 {
 	bool done = false;
@@ -673,6 +739,14 @@ static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu)
 	ksft_test_result_pass("%s\n", __func__);
 }
 
+void kvm_arch_vm_post_create(struct kvm_vm *vm)
+{
+	if (vm_check_cap(vm, KVM_CAP_ARM_MTE)) {
+		vm_enable_cap(vm, KVM_CAP_ARM_MTE, 0);
+		have_cap_arm_mte = true;
+	}
+}
+
 int main(void)
 {
 	struct kvm_vcpu *vcpu;
@@ -701,14 +775,15 @@ int main(void)
 		   ARRAY_SIZE(ftr_id_aa64pfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr0_el1) +
 		   ARRAY_SIZE(ftr_id_aa64mmfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr2_el1) +
 		   ARRAY_SIZE(ftr_id_aa64zfr0_el1) - ARRAY_SIZE(test_regs) + 3 +
-		   MPAM_IDREG_TEST;
+		   MPAM_IDREG_TEST + MTE_IDREG_TEST;
 
 	ksft_set_plan(test_cnt);
 
 	test_vm_ftr_id_regs(vcpu, aarch64_only);
 	test_vcpu_ftr_id_regs(vcpu);
 	test_vcpu_non_ftr_id_regs(vcpu);
 	test_user_set_mpam_reg(vcpu);
+	test_user_set_mte_reg(vcpu);
 
 	test_guest_reg_read(vcpu);
714789

0 commit comments

Comments
 (0)