Commit ea1fda8

Merge tag 'x86_urgent_for_v6.12_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Borislav Petkov:

 - Prevent a certain range of pages which get marked as hypervisor-only,
   to get allocated to a CoCo (SNP) guest which cannot use them and thus
   fail booting

 - Fix the microcode loader on AMD to pay attention to the stepping of a
   patch and to handle the case where a BIOS config option splits the
   machine into logical NUMA nodes per L3 cache slice

 - Disable LAM from being built by default due to security concerns

* tag 'x86_urgent_for_v6.12_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/sev: Ensure that RMP table fixups are reserved
  x86/microcode/AMD: Split load_microcode_amd()
  x86/microcode/AMD: Pay attention to the stepping dynamically
  x86/lam: Disable ADDRESS_MASKING in most cases
2 parents f69a1ac + 88a921a commit ea1fda8

3 files changed: 38 additions & 16 deletions

arch/x86/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -2257,6 +2257,7 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
 config ADDRESS_MASKING
        bool "Linear Address Masking support"
        depends on X86_64
+       depends on COMPILE_TEST || !CPU_MITIGATIONS # wait for LASS
        help
          Linear Address Masking (LAM) modifies the checking that is applied
          to 64-bit linear addresses, allowing software to use of the
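
Background for the help text above: with LAM_U57 the CPU ignores bits 62:57 of a 64-bit user pointer on dereference, so software can carry a small tag there; the added dependency simply keeps the option out of default builds until LASS is available. Below is a minimal, illustrative sketch of that masking only. The macro and helper names are invented for the example, and the sketch does not itself enable LAM (that requires kernel support to turn on for the process); it untags before dereferencing so it runs anywhere.

#include <stdint.h>
#include <stdio.h>

/* Illustration only: with LAM_U57 the tag occupies pointer bits 62:57. */
#define TAG_SHIFT 57
#define TAG_BITS  6ULL
#define TAG_MASK  (((1ULL << TAG_BITS) - 1) << TAG_SHIFT)

/* Hypothetical helper: stash a tag in the bits the CPU would ignore. */
static void *tag_ptr(void *p, uint64_t tag)
{
        return (void *)(((uintptr_t)p & ~TAG_MASK) | ((tag << TAG_SHIFT) & TAG_MASK));
}

/* The masking LAM effectively applies before address translation. */
static void *untag_ptr(void *p)
{
        return (void *)((uintptr_t)p & ~TAG_MASK);
}

int main(void)
{
        int x = 42;
        void *tagged = tag_ptr(&x, 0x2a);

        /* Dereferencing 'tagged' directly only works once LAM is enabled;
         * without it, strip the tag first. */
        printf("%d\n", *(int *)untag_ptr(tagged));
        return 0;
}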

arch/x86/kernel/cpu/microcode/amd.c

Lines changed: 35 additions & 16 deletions
@@ -584,7 +584,7 @@ void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_
        native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
 }
 
-static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
+static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size);
 
 static int __init save_microcode_in_initrd(void)
 {
@@ -605,24 +605,27 @@ static int __init save_microcode_in_initrd(void)
        if (!desc.mc)
                return -EINVAL;
 
-       ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
+       ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
        if (ret > UCODE_UPDATED)
                return -EINVAL;
 
        return 0;
 }
 early_initcall(save_microcode_in_initrd);
 
-static inline bool patch_cpus_equivalent(struct ucode_patch *p, struct ucode_patch *n)
+static inline bool patch_cpus_equivalent(struct ucode_patch *p,
+                                        struct ucode_patch *n,
+                                        bool ignore_stepping)
 {
        /* Zen and newer hardcode the f/m/s in the patch ID */
        if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
                union cpuid_1_eax p_cid = ucode_rev_to_cpuid(p->patch_id);
                union cpuid_1_eax n_cid = ucode_rev_to_cpuid(n->patch_id);
 
-               /* Zap stepping */
-               p_cid.stepping = 0;
-               n_cid.stepping = 0;
+               if (ignore_stepping) {
+                       p_cid.stepping = 0;
+                       n_cid.stepping = 0;
+               }
 
                return p_cid.full == n_cid.full;
        } else {
@@ -644,13 +647,13 @@ static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equi
        WARN_ON_ONCE(!n.patch_id);
 
        list_for_each_entry(p, &microcode_cache, plist)
-               if (patch_cpus_equivalent(p, &n))
+               if (patch_cpus_equivalent(p, &n, false))
                        return p;
 
        return NULL;
 }
 
-static inline bool patch_newer(struct ucode_patch *p, struct ucode_patch *n)
+static inline int patch_newer(struct ucode_patch *p, struct ucode_patch *n)
 {
        /* Zen and newer hardcode the f/m/s in the patch ID */
        if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
@@ -659,6 +662,9 @@ static inline bool patch_newer(struct ucode_patch *p, struct ucode_patch *n)
                zp.ucode_rev = p->patch_id;
                zn.ucode_rev = n->patch_id;
 
+               if (zn.stepping != zp.stepping)
+                       return -1;
+
                return zn.rev > zp.rev;
        } else {
                return n->patch_id > p->patch_id;
@@ -668,10 +674,14 @@ static inline bool patch_newer(struct ucode_patch *p, struct ucode_patch *n)
 static void update_cache(struct ucode_patch *new_patch)
 {
        struct ucode_patch *p;
+       int ret;
 
        list_for_each_entry(p, &microcode_cache, plist) {
-               if (patch_cpus_equivalent(p, new_patch)) {
-                       if (!patch_newer(p, new_patch)) {
+               if (patch_cpus_equivalent(p, new_patch, true)) {
+                       ret = patch_newer(p, new_patch);
+                       if (ret < 0)
+                               continue;
+                       else if (!ret) {
                                /* we already have the latest patch */
                                kfree(new_patch->data);
                                kfree(new_patch);
@@ -944,21 +954,30 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
        return UCODE_OK;
 }
 
-static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
+static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size)
 {
-       struct cpuinfo_x86 *c;
-       unsigned int nid, cpu;
-       struct ucode_patch *p;
        enum ucode_state ret;
 
        /* free old equiv table */
        free_equiv_cpu_table();
 
        ret = __load_microcode_amd(family, data, size);
-       if (ret != UCODE_OK) {
+       if (ret != UCODE_OK)
                cleanup();
+
+       return ret;
+}
+
+static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
+{
+       struct cpuinfo_x86 *c;
+       unsigned int nid, cpu;
+       struct ucode_patch *p;
+       enum ucode_state ret;
+
+       ret = _load_microcode_amd(family, data, size);
+       if (ret != UCODE_OK)
                return ret;
-       }
 
        for_each_node(nid) {
                cpu = cpumask_first(cpumask_of_node(nid));
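
Two things happen in this file, as the hunks show: load_microcode_amd() is split so that save_microcode_in_initrd() calls only the parsing/caching worker _load_microcode_amd(), while the full load_microcode_amd() keeps the per-node application loop; and patch_newer() becomes a three-way comparison so that patches for different steppings coexist in the cache instead of replacing one another. A toy, self-contained model of that new comparison contract follows; the struct and names are stand-ins, not kernel code, and the encoding of family/model/stepping in the real patch ID is handled by ucode_rev_to_cpuid() in the file above.

#include <stdio.h>

/* Stand-in for the relevant fields of struct ucode_patch. */
struct toy_patch {
        unsigned int fam_mod;   /* family/model part of the patch ID
                                   (equivalence on this is checked separately) */
        unsigned int stepping;  /* stepping part of the patch ID */
        unsigned int rev;       /* revision counter */
};

/*
 * Toy version of the new three-way contract:
 *   < 0  -> different stepping, the cache must keep both patches
 *     0  -> cached patch is already as new or newer, drop the incoming one
 *   > 0  -> incoming patch is newer, replace the cached one
 */
static int toy_patch_newer(const struct toy_patch *cached, const struct toy_patch *incoming)
{
        if (incoming->stepping != cached->stepping)
                return -1;
        return incoming->rev > cached->rev;
}

int main(void)
{
        struct toy_patch cached  = { 0xa2, 0, 0x12 };
        struct toy_patch newer   = { 0xa2, 0, 0x13 };
        struct toy_patch otherst = { 0xa2, 1, 0x05 };

        printf("%d\n", toy_patch_newer(&cached, &newer));   /* 1: replace cached patch */
        printf("%d\n", toy_patch_newer(&cached, &otherst)); /* -1: keep both steppings */
        return 0;
}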

arch/x86/virt/svm/sev.c

Lines changed: 2 additions & 0 deletions
@@ -173,6 +173,8 @@ static void __init __snp_fixup_e820_tables(u64 pa)
                e820__range_update(pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
                e820__range_update_table(e820_table_kexec, pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
                e820__range_update_table(e820_table_firmware, pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
+               if (!memblock_is_region_reserved(pa, PMD_SIZE))
+                       memblock_reserve(pa, PMD_SIZE);
        }
 }
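
The e820 updates above change how the range is reported, and the added memblock_reserve() additionally marks the PMD_SIZE region (2 MiB on x86-64) as reserved in the early boot allocator, in line with the merge message's point that these hypervisor-only pages must not be handed to an SNP guest. As a purely illustrative aside, with invented names and an arbitrary address, this toy snippet shows which 2 MiB-aligned region a given physical address falls into:

#include <stdint.h>
#include <stdio.h>

/* 2 MiB, matching the x86-64 PMD_SIZE used for the reservation above. */
#define TOY_PMD_SIZE (2ULL * 1024 * 1024)

int main(void)
{
        uint64_t pa   = 0x7f3a1234ULL;              /* arbitrary example physical address */
        uint64_t base = pa & ~(TOY_PMD_SIZE - 1);   /* 2 MiB-aligned start of its region */

        printf("0x%llx falls in [0x%llx, 0x%llx)\n",
               (unsigned long long)pa,
               (unsigned long long)base,
               (unsigned long long)(base + TOY_PMD_SIZE));
        return 0;
}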
