Skip to content

Commit 1c811d4

Browse files
ardbiesheuvel authored and bp3tk0v committed
x86/sev: Fix position dependent variable references in startup code
The early startup code executes from a 1:1 mapping of memory, which differs from the mapping that the code was linked and/or relocated to run at. The latter mapping is not active yet at this point, and so symbol references that rely on it will fault. Given that the core kernel is built without -fPIC, symbol references are typically emitted as absolute, and so any such references occurring in the early startup code will therefore crash the kernel. While an attempt was made to work around this for the early SEV/SME startup code, by forcing RIP-relative addressing for certain global SEV/SME variables via inline assembly (see snp_cpuid_get_table() for example), RIP-relative addressing must be pervasively enforced for SEV/SME global variables when accessed prior to page table fixups. __startup_64() already handles this issue for select non-SEV/SME global variables using fixup_pointer(), which adjusts the pointer relative to a `physaddr` argument. To avoid having to pass around this `physaddr` argument across all functions needing to apply pointer fixups, introduce a macro RIP_REL_REF() which generates a RIP-relative reference to a given global variable. It is used where necessary to force RIP-relative accesses to global variables. For backporting purposes, this patch makes no attempt at cleaning up other occurrences of this pattern, involving either inline asm or fixup_pointer(). Those will be addressed later. [ bp: Call it "rip_rel_ref" everywhere like other code shortens "rIP-relative reference" and make the asm wrapper __always_inline. ] Co-developed-by: Kevin Loughlin <kevinloughlin@google.com> Signed-off-by: Kevin Loughlin <kevinloughlin@google.com> Signed-off-by: Ard Biesheuvel <ardb@kernel.org> Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de> Cc: <stable@kernel.org> Link: https://lore.kernel.org/all/20240130220845.1978329-1-kevinloughlin@google.com
1 parent f9e6f00 commit 1c811d4

File tree

7 files changed

+51
-36
lines changed

7 files changed

+51
-36
lines changed

arch/x86/coco/core.c

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
#include <asm/processor.h>
1515

1616
enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE;
17-
static u64 cc_mask __ro_after_init;
17+
u64 cc_mask __ro_after_init;
1818

1919
static bool noinstr intel_cc_platform_has(enum cc_attr attr)
2020
{
@@ -148,8 +148,3 @@ u64 cc_mkdec(u64 val)
148148
}
149149
}
150150
EXPORT_SYMBOL_GPL(cc_mkdec);
151-
152-
__init void cc_set_mask(u64 mask)
153-
{
154-
cc_mask = mask;
155-
}

arch/x86/include/asm/asm.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -113,6 +113,20 @@
113113

114114
#endif
115115

116+
#ifndef __ASSEMBLY__
117+
#ifndef __pic__
118+
static __always_inline __pure void *rip_rel_ptr(void *p)
119+
{
120+
asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
121+
122+
return p;
123+
}
124+
#define RIP_REL_REF(var) (*(typeof(&(var)))rip_rel_ptr(&(var)))
125+
#else
126+
#define RIP_REL_REF(var) (var)
127+
#endif
128+
#endif
129+
116130
/*
117131
* Macros to generate condition code outputs from inline assembly,
118132
* The output operand must be type "bool".

arch/x86/include/asm/coco.h

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
#ifndef _ASM_X86_COCO_H
33
#define _ASM_X86_COCO_H
44

5+
#include <asm/asm.h>
56
#include <asm/types.h>
67

78
enum cc_vendor {
@@ -11,9 +12,14 @@ enum cc_vendor {
1112
};
1213

1314
extern enum cc_vendor cc_vendor;
15+
extern u64 cc_mask;
1416

1517
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
16-
void cc_set_mask(u64 mask);
18+
static inline void cc_set_mask(u64 mask)
19+
{
20+
RIP_REL_REF(cc_mask) = mask;
21+
}
22+
1723
u64 cc_mkenc(u64 val);
1824
u64 cc_mkdec(u64 val);
1925
#else

arch/x86/include/asm/mem_encrypt.h

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,8 @@
1515
#include <linux/init.h>
1616
#include <linux/cc_platform.h>
1717

18-
#include <asm/bootparam.h>
18+
#include <asm/asm.h>
19+
struct boot_params;
1920

2021
#ifdef CONFIG_X86_MEM_ENCRYPT
2122
void __init mem_encrypt_init(void);
@@ -58,6 +59,11 @@ void __init mem_encrypt_free_decrypted_mem(void);
5859

5960
void __init sev_es_init_vc_handling(void);
6061

62+
static inline u64 sme_get_me_mask(void)
63+
{
64+
return RIP_REL_REF(sme_me_mask);
65+
}
66+
6167
#define __bss_decrypted __section(".bss..decrypted")
6268

6369
#else /* !CONFIG_AMD_MEM_ENCRYPT */
@@ -89,6 +95,8 @@ early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool en
8995

9096
static inline void mem_encrypt_free_decrypted_mem(void) { }
9197

98+
static inline u64 sme_get_me_mask(void) { return 0; }
99+
92100
#define __bss_decrypted
93101

94102
#endif /* CONFIG_AMD_MEM_ENCRYPT */
@@ -106,11 +114,6 @@ void add_encrypt_protection_map(void);
106114

107115
extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
108116

109-
static inline u64 sme_get_me_mask(void)
110-
{
111-
return sme_me_mask;
112-
}
113-
114117
#endif /* __ASSEMBLY__ */
115118

116119
#endif /* __X86_MEM_ENCRYPT_H__ */

arch/x86/kernel/sev-shared.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -560,9 +560,9 @@ static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_le
560560
leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;
561561

562562
/* Skip post-processing for out-of-range zero leafs. */
563-
if (!(leaf->fn <= cpuid_std_range_max ||
564-
(leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) ||
565-
(leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max)))
563+
if (!(leaf->fn <= RIP_REL_REF(cpuid_std_range_max) ||
564+
(leaf->fn >= 0x40000000 && leaf->fn <= RIP_REL_REF(cpuid_hyp_range_max)) ||
565+
(leaf->fn >= 0x80000000 && leaf->fn <= RIP_REL_REF(cpuid_ext_range_max))))
566566
return 0;
567567
}
568568

@@ -1072,11 +1072,11 @@ static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
10721072
const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
10731073

10741074
if (fn->eax_in == 0x0)
1075-
cpuid_std_range_max = fn->eax;
1075+
RIP_REL_REF(cpuid_std_range_max) = fn->eax;
10761076
else if (fn->eax_in == 0x40000000)
1077-
cpuid_hyp_range_max = fn->eax;
1077+
RIP_REL_REF(cpuid_hyp_range_max) = fn->eax;
10781078
else if (fn->eax_in == 0x80000000)
1079-
cpuid_ext_range_max = fn->eax;
1079+
RIP_REL_REF(cpuid_ext_range_max) = fn->eax;
10801080
}
10811081
}
10821082

arch/x86/kernel/sev.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -748,7 +748,7 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
748748
* This eliminates worries about jump tables or checking boot_cpu_data
749749
* in the cc_platform_has() function.
750750
*/
751-
if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
751+
if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
752752
return;
753753

754754
/*
@@ -767,7 +767,7 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
767767
* This eliminates worries about jump tables or checking boot_cpu_data
768768
* in the cc_platform_has() function.
769769
*/
770-
if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
770+
if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
771771
return;
772772

773773
/* Ask hypervisor to mark the memory pages shared in the RMP table. */

arch/x86/mm/mem_encrypt_identity.c

Lines changed: 12 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -304,7 +304,8 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
304304
* instrumentation or checking boot_cpu_data in the cc_platform_has()
305305
* function.
306306
*/
307-
if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED)
307+
if (!sme_get_me_mask() ||
308+
RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
308309
return;
309310

310311
/*
@@ -541,11 +542,11 @@ void __init sme_enable(struct boot_params *bp)
541542
me_mask = 1UL << (ebx & 0x3f);
542543

543544
/* Check the SEV MSR whether SEV or SME is enabled */
544-
sev_status = __rdmsr(MSR_AMD64_SEV);
545-
feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
545+
RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
546+
feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
546547

547548
/* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */
548-
if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
549+
if (snp && !(msr & MSR_AMD64_SEV_SNP_ENABLED))
549550
snp_abort();
550551

551552
/* Check if memory encryption is enabled */
@@ -571,7 +572,6 @@ void __init sme_enable(struct boot_params *bp)
571572
return;
572573
} else {
573574
/* SEV state cannot be controlled by a command line option */
574-
sme_me_mask = me_mask;
575575
goto out;
576576
}
577577

@@ -590,16 +590,13 @@ void __init sme_enable(struct boot_params *bp)
590590
cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
591591
((u64)bp->ext_cmd_line_ptr << 32));
592592

593-
if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
594-
goto out;
595-
596-
if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
597-
sme_me_mask = me_mask;
593+
if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0 ||
594+
strncmp(buffer, cmdline_on, sizeof(buffer)))
595+
return;
598596

599597
out:
600-
if (sme_me_mask) {
601-
physical_mask &= ~sme_me_mask;
602-
cc_vendor = CC_VENDOR_AMD;
603-
cc_set_mask(sme_me_mask);
604-
}
598+
RIP_REL_REF(sme_me_mask) = me_mask;
599+
physical_mask &= ~me_mask;
600+
cc_vendor = CC_VENDOR_AMD;
601+
cc_set_mask(me_mask);
605602
}

0 commit comments

Comments
 (0)