4 files changed, 19 insertions(+), 21 deletions(-)
@@ -198,7 +198,6 @@
 #define X86_FEATURE_CAT_L3		( 7*32+ 4) /* Cache Allocation Technology L3 */
 #define X86_FEATURE_CAT_L2		( 7*32+ 5) /* Cache Allocation Technology L2 */
 #define X86_FEATURE_CDP_L3		( 7*32+ 6) /* Code and Data Prioritization L3 */
-#define X86_FEATURE_INVPCID_SINGLE	( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
 #define X86_FEATURE_HW_PSTATE		( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK	( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_XCOMPACTED		( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
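
The removed bit was a synthetic (software-defined) feature: its comment, "Effectively INVPCID && CR4.PCIDE=1", describes a combined condition rather than a real CPUID leaf. A minimal sketch of how a test of that condition changes once the bit is gone, using only helpers that appear elsewhere in this diff ('pcid' and 'addr' are placeholder variables):

	/* Before: a single pre-computed cap, force-set once at boot. */
	if (this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
		invpcid_flush_one(pcid, addr);

	/* After: test the real CPUID feature plus *this* CPU's live CR4.PCIDE. */
	if (boot_cpu_has(X86_FEATURE_INVPCID) &&
	    (this_cpu_read(cpu_tlbstate.cr4) & X86_CR4_PCIDE))
		invpcid_flush_one(pcid, addr);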
@@ -125,11 +125,12 @@
  * instance, and is *not* included in this mask since
  * pte_modify() does modify it.
  */
-#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
-			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |	\
-			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC |	\
-			 _PAGE_UFFD_WP)
-#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
+#define _COMMON_PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |	\
+				 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |\
+				 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC | \
+				 _PAGE_UFFD_WP)
+#define _PAGE_CHG_MASK	(_COMMON_PAGE_CHG_MASK | _PAGE_PAT)
+#define _HPAGE_CHG_MASK	(_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE)
 
 /*
  * The cache modes defined here are used to translate between pure SW usage
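
The functional change here is that _PAGE_CHG_MASK now includes _PAGE_PAT (and _HPAGE_CHG_MASK includes _PAGE_PAT_LARGE), so a PAT-selected memory type is preserved when a mapping's protection bits are changed. A simplified, hedged sketch of the pte_modify()-style pattern that consumes this mask; the name and body are illustrative, the real kernel helper has additional details:

	/*
	 * Bits inside _PAGE_CHG_MASK are kept from the existing PTE; bits
	 * outside it are taken from the new protection value.  With
	 * _PAGE_PAT in the mask, the PAT bit now survives this operation
	 * instead of being silently cleared.
	 */
	static inline pte_t sketch_pte_modify(pte_t pte, pgprot_t newprot)
	{
		pteval_t val = pte_val(pte);

		val &= _PAGE_CHG_MASK;				/* preserve PFN, A/D, PAT, ... */
		val |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;	/* apply new protections */
		return __pte(val);
	}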
@@ -306,15 +306,6 @@ static void setup_pcid(void)
		 * start_secondary().
		 */
		cr4_set_bits(X86_CR4_PCIDE);
-
-		/*
-		 * INVPCID's single-context modes (2/3) only work if we set
-		 * X86_CR4_PCIDE, *and* we INVPCID support.  It's unusable
-		 * on systems that have X86_CR4_PCIDE clear, or that have
-		 * no INVPCID support at all.
-		 */
-		if (boot_cpu_has(X86_FEATURE_INVPCID))
-			setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE);
	} else {
		/*
		 * flush_tlb_all(), as currently implemented, won't work if
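
With the boot-time force-set gone, nothing pre-computes "INVPCID and PCIDE are both usable" as a global cap; the caller that cares (see the TLB-flush hunk below) checks the hardware feature and *this* CPU's CR4 shadow at the point of use. A hypothetical one-line helper expressing that per-CPU check, assuming the same cpu_tlbstate.cr4 read used in the hunk below:

	/* Illustrative only: is CR4.PCIDE already set on the current CPU? */
	static inline bool this_cpu_pcide_enabled(void)
	{
		return this_cpu_read(cpu_tlbstate.cr4) & X86_CR4_PCIDE;
	}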
@@ -1142,21 +1142,28 @@ void flush_tlb_one_kernel(unsigned long addr)
  */
 STATIC_NOPV void native_flush_tlb_one_user(unsigned long addr)
 {
-	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+	u32 loaded_mm_asid;
+	bool cpu_pcide;
 
+	/* Flush 'addr' from the kernel PCID: */
 	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
 
+	/* If PTI is off there is no user PCID and nothing to flush. */
 	if (!static_cpu_has(X86_FEATURE_PTI))
 		return;
 
+	loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+	cpu_pcide      = this_cpu_read(cpu_tlbstate.cr4) & X86_CR4_PCIDE;
+
 	/*
-	 * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
-	 * Just use invalidate_user_asid() in case we are called early.
+	 * invpcid_flush_one(pcid>0) will #GP if CR4.PCIDE==0.  Check
+	 * 'cpu_pcide' to ensure that *this* CPU will not trigger those
+	 * #GP's even if called before CR4.PCIDE has been initialized.
	 */
-	if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
-		invalidate_user_asid(loaded_mm_asid);
-	else
+	if (boot_cpu_has(X86_FEATURE_INVPCID) && cpu_pcide)
 		invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
+	else
+		invalidate_user_asid(loaded_mm_asid);
 }
 
 void flush_tlb_one_user(unsigned long addr)
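
Why the cpu_pcide check matters: INVPCID's individual-address form takes a 16-byte memory descriptor of { PCID, linear address }, and the instruction faults with #GP when the descriptor's PCID is non-zero while CR4.PCIDE is 0, which is exactly what the new comment above describes. A hedged sketch (not part of this patch) of the kind of low-level wrapper that invpcid_flush_one() is built on; the name and exact operand constraints are illustrative:

	static inline void invpcid_sketch(unsigned long pcid, unsigned long addr,
					  unsigned long type)
	{
		/* Descriptor: d[0] holds the PCID (low 12 bits), d[1] the address. */
		struct { u64 d[2]; } desc = { { pcid, addr } };

		/*
		 * The "memory" clobber keeps later memory accesses from being
		 * reordered above the flush.  type 0 selects individual-address
		 * invalidation within the given PCID.
		 */
		asm volatile("invpcid %[desc], %[type]"
			     :: [desc] "m" (desc), [type] "r" (type) : "memory");
	}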