Skip to content

Commit 24bb181

Browse files
committed
Merge branch kvm-arm64/mpam-ni into kvmarm/next
* kvm-arm64/mpam-ni:
  : Hiding FEAT_MPAM from KVM guests, courtesy of James Morse + Joey Gouly
  :
  : Fix a longstanding bug where FEAT_MPAM was accidentally exposed to KVM
  : guests + the EL2 trap configuration was not explicitly configured. As
  : part of this, bring in skeletal support for initialising the MPAM CPU
  : context so KVM can actually set traps for its guests.
  :
  : Be warned -- if this series leads to boot failures on your system,
  : you're running on turd firmware.
  :
  : As an added bonus (that builds upon the infrastructure added by the MPAM
  : series), allow userspace to configure CTR_EL0.L1Ip, courtesy of Shameer
  : Kolothum.
  KVM: arm64: Make L1Ip feature in CTR_EL0 writable from userspace
  KVM: arm64: selftests: Test ID_AA64PFR0.MPAM isn't completely ignored
  KVM: arm64: Disable MPAM visibility by default and ignore VMM writes
  KVM: arm64: Add a macro for creating filtered sys_reg_descs entries
  KVM: arm64: Fix missing traps of guest accesses to the MPAM registers
  arm64: cpufeature: discover CPU support for MPAM
  arm64: head.S: Initialise MPAM EL2 registers and disable traps
  arm64/sysreg: Convert existing MPAM sysregs and add the remaining entries

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
2 parents 7ccd615 + e9b57d7 commit 24bb181

File tree

14 files changed

+557
-46
lines changed

14 files changed

+557
-46
lines changed

Documentation/arch/arm64/cpu-feature-registers.rst

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -152,6 +152,8 @@ infrastructure:
152152
+------------------------------+---------+---------+
153153
| DIT | [51-48] | y |
154154
+------------------------------+---------+---------+
155+
| MPAM | [43-40] | n |
156+
+------------------------------+---------+---------+
155157
| SVE | [35-32] | y |
156158
+------------------------------+---------+---------+
157159
| GIC | [27-24] | n |

arch/arm64/include/asm/cpu.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@ struct cpuinfo_arm64 {
4646
u64 reg_revidr;
4747
u64 reg_gmid;
4848
u64 reg_smidr;
49+
u64 reg_mpamidr;
4950

5051
u64 reg_id_aa64dfr0;
5152
u64 reg_id_aa64dfr1;

arch/arm64/include/asm/cpucaps.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,11 @@ cpucap_is_possible(const unsigned int cap)
6060
return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI);
6161
case ARM64_WORKAROUND_SPECULATIVE_SSBS:
6262
return IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386);
63+
case ARM64_MPAM:
64+
/*
65+
* KVM MPAM support doesn't rely on the host kernel supporting MPAM.
66+
*/
67+
return true;
6368
}
6469

6570
return true;

arch/arm64/include/asm/cpufeature.h

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -612,6 +612,13 @@ static inline bool id_aa64pfr1_sme(u64 pfr1)
612612
return val > 0;
613613
}
614614

615+
static inline bool id_aa64pfr0_mpam(u64 pfr0)
616+
{
617+
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT);
618+
619+
return val > 0;
620+
}
621+
615622
static inline bool id_aa64pfr1_mte(u64 pfr1)
616623
{
617624
u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT);
@@ -838,6 +845,16 @@ static inline bool system_supports_poe(void)
838845
alternative_has_cap_unlikely(ARM64_HAS_S1POE);
839846
}
840847

848+
static __always_inline bool system_supports_mpam(void)
849+
{
850+
return alternative_has_cap_unlikely(ARM64_MPAM);
851+
}
852+
853+
static __always_inline bool system_supports_mpam_hcr(void)
854+
{
855+
return alternative_has_cap_unlikely(ARM64_MPAM_HCR);
856+
}
857+
841858
int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
842859
bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
843860

arch/arm64/include/asm/el2_setup.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -220,6 +220,19 @@
220220
msr spsr_el2, x0
221221
.endm
222222

223+
.macro __init_el2_mpam
224+
/* Memory Partitioning And Monitoring: disable EL2 traps */
225+
mrs x1, id_aa64pfr0_el1
226+
ubfx x0, x1, #ID_AA64PFR0_EL1_MPAM_SHIFT, #4
227+
cbz x0, .Lskip_mpam_\@ // skip if no MPAM
228+
msr_s SYS_MPAM2_EL2, xzr // use the default partition
229+
// and disable lower traps
230+
mrs_s x0, SYS_MPAMIDR_EL1
231+
tbz x0, #MPAMIDR_EL1_HAS_HCR_SHIFT, .Lskip_mpam_\@ // skip if no MPAMHCR reg
232+
msr_s SYS_MPAMHCR_EL2, xzr // clear TRAP_MPAMIDR_EL1 -> EL2
233+
.Lskip_mpam_\@:
234+
.endm
235+
223236
/**
224237
* Initialize EL2 registers to sane values. This should be called early on all
225238
* cores that were booted in EL2. Note that everything gets initialised as
@@ -237,6 +250,7 @@
237250
__init_el2_stage2
238251
__init_el2_gicv3
239252
__init_el2_hstr
253+
__init_el2_mpam
240254
__init_el2_nvhe_idregs
241255
__init_el2_cptr
242256
__init_el2_fgt

arch/arm64/include/asm/kvm_arm.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,7 @@
103103
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
104104

105105
#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
106+
#define MPAMHCR_HOST_FLAGS 0
106107

107108
/* TCR_EL2 Registers bits */
108109
#define TCR_EL2_DS (1UL << 32)

arch/arm64/include/asm/sysreg.h

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -542,18 +542,6 @@
542542

543543
#define SYS_MAIR_EL2 sys_reg(3, 4, 10, 2, 0)
544544
#define SYS_AMAIR_EL2 sys_reg(3, 4, 10, 3, 0)
545-
#define SYS_MPAMHCR_EL2 sys_reg(3, 4, 10, 4, 0)
546-
#define SYS_MPAMVPMV_EL2 sys_reg(3, 4, 10, 4, 1)
547-
#define SYS_MPAM2_EL2 sys_reg(3, 4, 10, 5, 0)
548-
#define __SYS__MPAMVPMx_EL2(x) sys_reg(3, 4, 10, 6, x)
549-
#define SYS_MPAMVPM0_EL2 __SYS__MPAMVPMx_EL2(0)
550-
#define SYS_MPAMVPM1_EL2 __SYS__MPAMVPMx_EL2(1)
551-
#define SYS_MPAMVPM2_EL2 __SYS__MPAMVPMx_EL2(2)
552-
#define SYS_MPAMVPM3_EL2 __SYS__MPAMVPMx_EL2(3)
553-
#define SYS_MPAMVPM4_EL2 __SYS__MPAMVPMx_EL2(4)
554-
#define SYS_MPAMVPM5_EL2 __SYS__MPAMVPMx_EL2(5)
555-
#define SYS_MPAMVPM6_EL2 __SYS__MPAMVPMx_EL2(6)
556-
#define SYS_MPAMVPM7_EL2 __SYS__MPAMVPMx_EL2(7)
557545

558546
#define SYS_VBAR_EL2 sys_reg(3, 4, 12, 0, 0)
559547
#define SYS_RVBAR_EL2 sys_reg(3, 4, 12, 0, 1)

arch/arm64/kernel/cpufeature.c

Lines changed: 96 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -684,6 +684,14 @@ static const struct arm64_ftr_bits ftr_id_dfr1[] = {
684684
ARM64_FTR_END,
685685
};
686686

687+
static const struct arm64_ftr_bits ftr_mpamidr[] = {
688+
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_PMG_MAX_SHIFT, MPAMIDR_EL1_PMG_MAX_WIDTH, 0),
689+
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_VPMR_MAX_SHIFT, MPAMIDR_EL1_VPMR_MAX_WIDTH, 0),
690+
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_HAS_HCR_SHIFT, 1, 0),
691+
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_PARTID_MAX_SHIFT, MPAMIDR_EL1_PARTID_MAX_WIDTH, 0),
692+
ARM64_FTR_END,
693+
};
694+
687695
/*
688696
* Common ftr bits for a 32bit register with all hidden, strict
689697
* attributes, with 4bit feature fields and a default safe value of
@@ -804,6 +812,9 @@ static const struct __ftr_reg_entry {
804812
ARM64_FTR_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3),
805813
ARM64_FTR_REG(SYS_ID_AA64MMFR4_EL1, ftr_id_aa64mmfr4),
806814

815+
/* Op1 = 0, CRn = 10, CRm = 4 */
816+
ARM64_FTR_REG(SYS_MPAMIDR_EL1, ftr_mpamidr),
817+
807818
/* Op1 = 1, CRn = 0, CRm = 0 */
808819
ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
809820

@@ -1163,6 +1174,9 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
11631174
cpacr_restore(cpacr);
11641175
}
11651176

1177+
if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0))
1178+
init_cpu_ftr_reg(SYS_MPAMIDR_EL1, info->reg_mpamidr);
1179+
11661180
if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
11671181
init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
11681182
}
@@ -1419,6 +1433,11 @@ void update_cpu_features(int cpu,
14191433
cpacr_restore(cpacr);
14201434
}
14211435

1436+
if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) {
1437+
taint |= check_update_ftr_reg(SYS_MPAMIDR_EL1, cpu,
1438+
info->reg_mpamidr, boot->reg_mpamidr);
1439+
}
1440+
14221441
/*
14231442
* The kernel uses the LDGM/STGM instructions and the number of tags
14241443
* they read/write depends on the GMID_EL1.BS field. Check that the
@@ -2377,6 +2396,36 @@ cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
23772396
return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
23782397
}
23792398

2399+
static bool
2400+
test_has_mpam(const struct arm64_cpu_capabilities *entry, int scope)
2401+
{
2402+
if (!has_cpuid_feature(entry, scope))
2403+
return false;
2404+
2405+
/* Check firmware actually enabled MPAM on this cpu. */
2406+
return (read_sysreg_s(SYS_MPAM1_EL1) & MPAM1_EL1_MPAMEN);
2407+
}
2408+
2409+
static void
2410+
cpu_enable_mpam(const struct arm64_cpu_capabilities *entry)
2411+
{
2412+
/*
2413+
* Access by the kernel (at EL1) should use the reserved PARTID
2414+
* which is configured unrestricted. This avoids priority-inversion
2415+
* where latency sensitive tasks have to wait for a task that has
2416+
* been throttled to release the lock.
2417+
*/
2418+
write_sysreg_s(0, SYS_MPAM1_EL1);
2419+
}
2420+
2421+
static bool
2422+
test_has_mpam_hcr(const struct arm64_cpu_capabilities *entry, int scope)
2423+
{
2424+
u64 idr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1);
2425+
2426+
return idr & MPAMIDR_EL1_HAS_HCR;
2427+
}
2428+
23802429
static const struct arm64_cpu_capabilities arm64_features[] = {
23812430
{
23822431
.capability = ARM64_ALWAYS_BOOT,
@@ -2873,6 +2922,20 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
28732922
#endif
28742923
},
28752924
#endif
2925+
{
2926+
.desc = "Memory Partitioning And Monitoring",
2927+
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2928+
.capability = ARM64_MPAM,
2929+
.matches = test_has_mpam,
2930+
.cpu_enable = cpu_enable_mpam,
2931+
ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, MPAM, 1)
2932+
},
2933+
{
2934+
.desc = "Memory Partitioning And Monitoring Virtualisation",
2935+
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2936+
.capability = ARM64_MPAM_HCR,
2937+
.matches = test_has_mpam_hcr,
2938+
},
28762939
{
28772940
.desc = "NV1",
28782941
.capability = ARM64_HAS_HCR_NV1,
@@ -3396,6 +3459,36 @@ static void verify_hyp_capabilities(void)
33963459
}
33973460
}
33983461

3462+
static void verify_mpam_capabilities(void)
3463+
{
3464+
u64 cpu_idr = read_cpuid(ID_AA64PFR0_EL1);
3465+
u64 sys_idr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
3466+
u16 cpu_partid_max, cpu_pmg_max, sys_partid_max, sys_pmg_max;
3467+
3468+
if (FIELD_GET(ID_AA64PFR0_EL1_MPAM_MASK, cpu_idr) !=
3469+
FIELD_GET(ID_AA64PFR0_EL1_MPAM_MASK, sys_idr)) {
3470+
pr_crit("CPU%d: MPAM version mismatch\n", smp_processor_id());
3471+
cpu_die_early();
3472+
}
3473+
3474+
cpu_idr = read_cpuid(MPAMIDR_EL1);
3475+
sys_idr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1);
3476+
if (FIELD_GET(MPAMIDR_EL1_HAS_HCR, cpu_idr) !=
3477+
FIELD_GET(MPAMIDR_EL1_HAS_HCR, sys_idr)) {
3478+
pr_crit("CPU%d: Missing MPAM HCR\n", smp_processor_id());
3479+
cpu_die_early();
3480+
}
3481+
3482+
cpu_partid_max = FIELD_GET(MPAMIDR_EL1_PARTID_MAX, cpu_idr);
3483+
cpu_pmg_max = FIELD_GET(MPAMIDR_EL1_PMG_MAX, cpu_idr);
3484+
sys_partid_max = FIELD_GET(MPAMIDR_EL1_PARTID_MAX, sys_idr);
3485+
sys_pmg_max = FIELD_GET(MPAMIDR_EL1_PMG_MAX, sys_idr);
3486+
if (cpu_partid_max < sys_partid_max || cpu_pmg_max < sys_pmg_max) {
3487+
pr_crit("CPU%d: MPAM PARTID/PMG max values are mismatched\n", smp_processor_id());
3488+
cpu_die_early();
3489+
}
3490+
}
3491+
33993492
/*
34003493
* Run through the enabled system capabilities and enable() it on this CPU.
34013494
* The capabilities were decided based on the available CPUs at the boot time.
@@ -3422,6 +3515,9 @@ static void verify_local_cpu_capabilities(void)
34223515

34233516
if (is_hyp_mode_available())
34243517
verify_hyp_capabilities();
3518+
3519+
if (system_supports_mpam())
3520+
verify_mpam_capabilities();
34253521
}
34263522

34273523
void check_local_cpu_capabilities(void)

arch/arm64/kernel/cpuinfo.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -478,6 +478,9 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
478478
if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
479479
__cpuinfo_store_cpu_32bit(&info->aarch32);
480480

481+
if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0))
482+
info->reg_mpamidr = read_cpuid(MPAMIDR_EL1);
483+
481484
cpuinfo_detect_icache_policy(info);
482485
}
483486

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -204,6 +204,35 @@ static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
204204
__deactivate_fgt(hctxt, vcpu, kvm, HAFGRTR_EL2);
205205
}
206206

207+
static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu)
208+
{
209+
u64 r = MPAM2_EL2_TRAPMPAM0EL1 | MPAM2_EL2_TRAPMPAM1EL1;
210+
211+
if (!system_supports_mpam())
212+
return;
213+
214+
/* trap guest access to MPAMIDR_EL1 */
215+
if (system_supports_mpam_hcr()) {
216+
write_sysreg_s(MPAMHCR_EL2_TRAP_MPAMIDR_EL1, SYS_MPAMHCR_EL2);
217+
} else {
218+
/* From v1.1 TIDR can trap MPAMIDR, set it unconditionally */
219+
r |= MPAM2_EL2_TIDR;
220+
}
221+
222+
write_sysreg_s(r, SYS_MPAM2_EL2);
223+
}
224+
225+
static inline void __deactivate_traps_mpam(void)
226+
{
227+
if (!system_supports_mpam())
228+
return;
229+
230+
write_sysreg_s(0, SYS_MPAM2_EL2);
231+
232+
if (system_supports_mpam_hcr())
233+
write_sysreg_s(MPAMHCR_HOST_FLAGS, SYS_MPAMHCR_EL2);
234+
}
235+
207236
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
208237
{
209238
/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
@@ -244,6 +273,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
244273
}
245274

246275
__activate_traps_hfgxtr(vcpu);
276+
__activate_traps_mpam(vcpu);
247277
}
248278

249279
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
@@ -263,6 +293,7 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
263293
write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
264294

265295
__deactivate_traps_hfgxtr(vcpu);
296+
__deactivate_traps_mpam();
266297
}
267298

268299
static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)

0 commit comments

Comments
 (0)