
Commit 555b684

Merge tag 'x86_mm_for_6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Dave Hansen:

 - Add a warning when memory encryption conversions fail. These operations require VMM cooperation, even in CoCo environments where the VMM is untrusted. While it's _possible_ that memory pressure could trigger the new warning, the odds are that a guest would only see this from an attacking VMM.

 - Simplify page fault code by re-enabling interrupts unconditionally.

 - Avoid truncation issues when pfns are passed in to pfn_to_kaddr() with small (<64-bit) types.

* tag 'x86_mm_for_6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm/cpa: Warn for set_memory_XXcrypted() VMM fails
  x86/mm: Get rid of conditional IF flag handling in page fault path
  x86/mm: Ensure input to pfn_to_kaddr() is treated as a 64-bit type
2 parents (685d982 + 82ace18), commit 555b684

File tree

3 files changed (+32, -20 lines)


arch/x86/include/asm/page.h

Lines changed: 5 additions & 1 deletion
@@ -66,10 +66,14 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
  *  virt_addr_valid(kaddr) returns true.
  */
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 extern bool __virt_addr_valid(unsigned long kaddr);
 #define virt_addr_valid(kaddr)	__virt_addr_valid((unsigned long) (kaddr))
 
+static __always_inline void *pfn_to_kaddr(unsigned long pfn)
+{
+	return __va(pfn << PAGE_SHIFT);
+}
+
 static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
 {
 	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
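
For context on why the macro was replaced: with the old define, the shift is evaluated in whatever type the caller's pfn happens to have, so a sub-64-bit pfn can lose its upper address bits before __va() ever sees the value; the inline helper forces the argument to unsigned long first. Below is a minimal, out-of-tree sketch of that truncation in standalone userspace C; the pfn_to_addr_* helpers are hypothetical stand-ins for pfn_to_kaddr()/__va(), not kernel code.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Macro form: the shift is evaluated in the (possibly 32-bit) type of 'pfn'. */
#define pfn_to_addr_macro(pfn)	((uint64_t)((pfn) << PAGE_SHIFT))

/* Inline form: the argument is widened to 64 bits before the shift. */
static inline uint64_t pfn_to_addr_inline(uint64_t pfn)
{
	return pfn << PAGE_SHIFT;
}

int main(void)
{
	uint32_t pfn = 0x200000;	/* frame at 8 GiB: 0x200000 << 12 = 0x2'0000'0000 */

	/* The 32-bit shift wraps to 0; the upper bits of the address are lost. */
	printf("macro : 0x%llx\n", (unsigned long long)pfn_to_addr_macro(pfn));
	/* The 64-bit shift keeps the full address. */
	printf("inline: 0x%llx\n", (unsigned long long)pfn_to_addr_inline(pfn));
	return 0;
}

Compiled on a 64-bit host, the macro variant prints 0x0 while the inline variant prints 0x200000000.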

arch/x86/mm/fault.c

Lines changed: 14 additions & 13 deletions
@@ -1299,21 +1299,14 @@ void do_user_addr_fault(struct pt_regs *regs,
 		return;
 	}
 
-	/*
-	 * It's safe to allow irq's after cr2 has been saved and the
-	 * vmalloc fault has been handled.
-	 *
-	 * User-mode registers count as a user access even for any
-	 * potential system fault or CPU buglet:
-	 */
-	if (user_mode(regs)) {
-		local_irq_enable();
-		flags |= FAULT_FLAG_USER;
-	} else {
-		if (regs->flags & X86_EFLAGS_IF)
-			local_irq_enable();
+	/* Legacy check - remove this after verifying that it doesn't trigger */
+	if (WARN_ON_ONCE(!(regs->flags & X86_EFLAGS_IF))) {
+		bad_area_nosemaphore(regs, error_code, address);
+		return;
 	}
 
+	local_irq_enable();
+
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 	/*
@@ -1329,6 +1322,14 @@
 	if (error_code & X86_PF_INSTR)
 		flags |= FAULT_FLAG_INSTRUCTION;
 
+	/*
+	 * We set FAULT_FLAG_USER based on the register state, not
+	 * based on X86_PF_USER. User space accesses that cause
+	 * system page faults are still user accesses.
+	 */
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+
 #ifdef CONFIG_X86_64
 	/*
 	 * Faults in the vsyscall page might need emulation. The

arch/x86/mm/pat/set_memory.c

Lines changed: 13 additions & 6 deletions
@@ -2157,7 +2157,7 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 
 	/* Notify hypervisor that we are about to set/clr encryption attribute. */
 	if (!x86_platform.guest.enc_status_change_prepare(addr, numpages, enc))
-		return -EIO;
+		goto vmm_fail;
 
 	ret = __change_page_attr_set_clr(&cpa, 1);
 
@@ -2170,13 +2170,20 @@
 	 */
 	cpa_flush(&cpa, 0);
 
+	if (ret)
+		return ret;
+
 	/* Notify hypervisor that we have successfully set/clr encryption attribute. */
-	if (!ret) {
-		if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
-			ret = -EIO;
-	}
+	if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
+		goto vmm_fail;
 
-	return ret;
+	return 0;
+
+vmm_fail:
+	WARN_ONCE(1, "CPA VMM failure to convert memory (addr=%p, numpages=%d) to %s.\n",
+		  (void *)addr, numpages, enc ? "private" : "shared");
+
+	return -EIO;
 }
 
 static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
