
Commit 9855922

Merge tag 'x86_mm_for_6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Dave Hansen:
 "A pair of small x86/mm updates. The INVPCID one is purely a cleanup.
  The PAT one fixes a real issue, albeit a relatively obscure one
  (graphics device passthrough under Xen). The fix also makes the code
  much more readable.

  Summary:

   - Remove unnecessary "INVPCID single" feature tracking

   - Include PAT in page protection modify mask"

* tag 'x86_mm_for_6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Remove "INVPCID single" feature tracking
  x86/mm: Fix PAT bit missing from page protection modify mask
2 parents: ca96b16 + 54e3d94


4 files changed, 19 insertions(+), 21 deletions(-)


arch/x86/include/asm/cpufeatures.h

Lines changed: 0 additions & 1 deletion
@@ -198,7 +198,6 @@
 #define X86_FEATURE_CAT_L3		( 7*32+ 4) /* Cache Allocation Technology L3 */
 #define X86_FEATURE_CAT_L2		( 7*32+ 5) /* Cache Allocation Technology L2 */
 #define X86_FEATURE_CDP_L3		( 7*32+ 6) /* Code and Data Prioritization L3 */
-#define X86_FEATURE_INVPCID_SINGLE	( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
 #define X86_FEATURE_HW_PSTATE		( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK	( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_XCOMPACTED		( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */

arch/x86/include/asm/pgtable_types.h

Lines changed: 6 additions & 5 deletions
@@ -125,11 +125,12 @@
  * instance, and is *not* included in this mask since
  * pte_modify() does modify it.
  */
-#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
-			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |	\
-			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC |	\
-			 _PAGE_UFFD_WP)
-#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
+#define _COMMON_PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |	\
+				 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |\
+				 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC | \
+				 _PAGE_UFFD_WP)
+#define _PAGE_CHG_MASK	(_COMMON_PAGE_CHG_MASK | _PAGE_PAT)
+#define _HPAGE_CHG_MASK	(_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE)
 
 /*
  * The cache modes defined here are used to translate between pure SW usage
arch/x86/mm/init.c

Lines changed: 0 additions & 9 deletions
@@ -306,15 +306,6 @@ static void setup_pcid(void)
 		 * start_secondary().
 		 */
 		cr4_set_bits(X86_CR4_PCIDE);
-
-		/*
-		 * INVPCID's single-context modes (2/3) only work if we set
-		 * X86_CR4_PCIDE, *and* we INVPCID support.  It's unusable
-		 * on systems that have X86_CR4_PCIDE clear, or that have
-		 * no INVPCID support at all.
-		 */
-		if (boot_cpu_has(X86_FEATURE_INVPCID))
-			setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE);
 	} else {
 		/*
 		 * flush_tlb_all(), as currently implemented, won't work if

arch/x86/mm/tlb.c

Lines changed: 13 additions & 6 deletions
@@ -1142,21 +1142,28 @@ void flush_tlb_one_kernel(unsigned long addr)
  */
 STATIC_NOPV void native_flush_tlb_one_user(unsigned long addr)
 {
-	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+	u32 loaded_mm_asid;
+	bool cpu_pcide;
 
+	/* Flush 'addr' from the kernel PCID: */
 	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
 
+	/* If PTI is off there is no user PCID and nothing to flush. */
 	if (!static_cpu_has(X86_FEATURE_PTI))
 		return;
 
+	loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+	cpu_pcide      = this_cpu_read(cpu_tlbstate.cr4) & X86_CR4_PCIDE;
+
 	/*
-	 * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
-	 * Just use invalidate_user_asid() in case we are called early.
+	 * invpcid_flush_one(pcid>0) will #GP if CR4.PCIDE==0.  Check
+	 * 'cpu_pcide' to ensure that *this* CPU will not trigger those
+	 * #GP's even if called before CR4.PCIDE has been initialized.
 	 */
-	if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
-		invalidate_user_asid(loaded_mm_asid);
-	else
+	if (boot_cpu_has(X86_FEATURE_INVPCID) && cpu_pcide)
 		invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
+	else
+		invalidate_user_asid(loaded_mm_asid);
 }
 
 void flush_tlb_one_user(unsigned long addr)
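For context on the #GP that the new comment refers to: invpcid_flush_one() ultimately issues the INVPCID instruction with an "individual address" descriptor, and the instruction faults when a non-zero PCID is supplied while CR4.PCIDE is still 0. The sketch below is a rough, simplified rendering of that helper (modelled on the kernel's invpcid helpers in arch/x86/include/asm/invpcid.h, not a verbatim copy), which is why the per-CPU 'cpu_pcide' check above is enough to make the early-boot path safe.

/*
 * Rough sketch (not verbatim kernel code): build a {PCID, linear address}
 * descriptor and issue INVPCID type 0 ("individual address").  Per the SDM,
 * this form #GPs if the descriptor carries a non-zero PCID while
 * CR4.PCIDE == 0 - exactly the case the new 'cpu_pcide' check guards against.
 */
static inline void invpcid_flush_one_sketch(unsigned long pcid,
					    unsigned long addr)
{
	struct { unsigned long long desc[2]; } d = { { pcid, addr } };

	asm volatile("invpcid %[desc], %[type]"
		     : : [desc] "m" (d), [type] "r" (0UL) : "memory");
}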
