75 | 75 | #define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* "centaur_mcr" Centaur MCRs (= MTRRs) */
76 | 76 | #define X86_FEATURE_K8 ( 3*32+ 4) /* Opteron, Athlon64 */
77 | 77 | #define X86_FEATURE_ZEN5 ( 3*32+ 5) /* CPU based on Zen5 microarchitecture */
78 | | -#define X86_FEATURE_P3 ( 3*32+ 6) /* P3 */
79 | | -#define X86_FEATURE_P4 ( 3*32+ 7) /* P4 */
| 78 | +/* Free ( 3*32+ 6) */
| 79 | +/* Free ( 3*32+ 7) */
80 | 80 | #define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* "constant_tsc" TSC ticks at a constant rate */
81 | 81 | #define X86_FEATURE_UP ( 3*32+ 9) /* "up" SMP kernel running on UP */
82 | 82 | #define X86_FEATURE_ART ( 3*32+10) /* "art" Always running timer (ART) */
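Each X86_FEATURE_* value above encodes a position in the kernel's x86_capability bitmap as word*32 + bit. A standalone illustration of that arithmetic, using X86_FEATURE_ART from this hunk (the program below is illustrative only, not kernel code):

```c
#include <stdio.h>

/* Value copied from the hunk above: word 3, bit 10 */
#define X86_FEATURE_ART (3*32 + 10)

int main(void)
{
	unsigned int feature = X86_FEATURE_ART;

	/* Decompose the encoded value back into (word, bit) */
	printf("X86_FEATURE_ART = %u -> word %u, bit %u\n",
	       feature, feature / 32, feature % 32);
	/* Expected output: X86_FEATURE_ART = 106 -> word 3, bit 10 */
	return 0;
}
```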
329 | 329 | #define X86_FEATURE_CLZERO (13*32+ 0) /* "clzero" CLZERO instruction */
330 | 330 | #define X86_FEATURE_IRPERF (13*32+ 1) /* "irperf" Instructions Retired Count */
331 | 331 | #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* "xsaveerptr" Always save/restore FP error pointers */
| 332 | +#define X86_FEATURE_INVLPGB (13*32+ 3) /* INVLPGB and TLBSYNC instructions supported */
332 | 333 | #define X86_FEATURE_RDPRU (13*32+ 4) /* "rdpru" Read processor register at user level */
333 | 334 | #define X86_FEATURE_WBNOINVD (13*32+ 9) /* "wbnoinvd" WBNOINVD instruction */
334 | 335 | #define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
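Once a CPUID-derived bit such as X86_FEATURE_INVLPGB is defined here, kernel code gates on it through the cpufeature helpers instead of re-reading CPUID. A hedged sketch, assuming the usual helpers; the wrapper name broadcast_tlb_flush_available() is made up for illustration:

```c
#include <linux/types.h>
#include <asm/cpufeature.h>

/*
 * Illustrative helper: report whether broadcast TLB invalidation
 * (INVLPGB/TLBSYNC) was detected during early CPUID enumeration.
 */
static inline bool broadcast_tlb_flush_available(void)
{
	return boot_cpu_has(X86_FEATURE_INVLPGB);
}
```

In hot paths, cpu_feature_enabled(X86_FEATURE_INVLPGB) is generally preferred, since it can resolve to a compile-time constant when the feature is disabled in the build configuration.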
377 | 378 | #define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* "v_spec_ctrl" Virtual SPEC_CTRL */
378 | 379 | #define X86_FEATURE_VNMI (15*32+25) /* "vnmi" Virtual NMI */
379 | 380 | #define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* SVME addr check */
| 381 | +#define X86_FEATURE_IDLE_HLT (15*32+30) /* IDLE HLT intercept */
380 | 382 |
381 | 383 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
382 | 384 | #define X86_FEATURE_AVX512VBMI (16*32+ 1) /* "avx512vbmi" AVX512 Vector Bit Manipulation instructions*/

434 | 436 | #define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* Speculative Store Bypass Disable */
435 | 437 |
436 | 438 | /* AMD-defined memory encryption features, CPUID level 0x8000001f (EAX), word 19 */
437 | | -#define X86_FEATURE_SME (19*32+ 0) /* "sme" AMD Secure Memory Encryption */
438 | | -#define X86_FEATURE_SEV (19*32+ 1) /* "sev" AMD Secure Encrypted Virtualization */
| 439 | +#define X86_FEATURE_SME (19*32+ 0) /* "sme" Secure Memory Encryption */
| 440 | +#define X86_FEATURE_SEV (19*32+ 1) /* "sev" Secure Encrypted Virtualization */
439 | 441 | #define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* VM Page Flush MSR is supported */
440 | | -#define X86_FEATURE_SEV_ES (19*32+ 3) /* "sev_es" AMD Secure Encrypted Virtualization - Encrypted State */
441 | | -#define X86_FEATURE_SEV_SNP (19*32+ 4) /* "sev_snp" AMD Secure Encrypted Virtualization - Secure Nested Paging */
| 442 | +#define X86_FEATURE_SEV_ES (19*32+ 3) /* "sev_es" Secure Encrypted Virtualization - Encrypted State */
| 443 | +#define X86_FEATURE_SEV_SNP (19*32+ 4) /* "sev_snp" Secure Encrypted Virtualization - Secure Nested Paging */
442 | 444 | #define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* Virtual TSC_AUX */
443 | | -#define X86_FEATURE_SME_COHERENT (19*32+10) /* AMD hardware-enforced cache coherency */
444 | | -#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* "debug_swap" AMD SEV-ES full debug state swap support */
| 445 | +#define X86_FEATURE_SME_COHERENT (19*32+10) /* hardware-enforced cache coherency */
| 446 | +#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* "debug_swap" SEV-ES full debug state swap support */
| 447 | +#define X86_FEATURE_RMPREAD (19*32+21) /* RMPREAD instruction */
| 448 | +#define X86_FEATURE_SEGMENTED_RMP (19*32+23) /* Segmented RMP support */
445 | 449 | #define X86_FEATURE_SVSM (19*32+28) /* "svsm" SVSM present */
| 450 | +#define X86_FEATURE_HV_INUSE_WR_ALLOWED (19*32+30) /* Allow Write to in-use hypervisor-owned pages */
446 | 451 |
447 | 452 | /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
448 | 453 | #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */
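Word 19 is filled from CPUID leaf 0x8000001f EAX (per the header comment in this hunk), so the bit half of each define is the bit position within that register: SME is bit 0, SEV bit 1, SEV-ES bit 3, SEV-SNP bit 4. A small userspace probe of that leaf, assuming a GCC/Clang toolchain that provides <cpuid.h>; the program is illustrative only:

```c
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID 0x8000001f EAX is what capability word 19 is filled from */
	if (!__get_cpuid_count(0x8000001f, 0, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x8000001f not supported");
		return 1;
	}

	/* Bit numbers come from the (19*32+N) encodings above */
	printf("SME:     %u\n", (eax >> 0) & 1);
	printf("SEV:     %u\n", (eax >> 1) & 1);
	printf("SEV-ES:  %u\n", (eax >> 3) & 1);
	printf("SEV-SNP: %u\n", (eax >> 4) & 1);
	return 0;
}
```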
455 | 460 | #define X86_FEATURE_SBPB (20*32+27) /* Selective Branch Prediction Barrier */
456 | 461 | #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
457 | 462 | #define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */
| 463 | +#define X86_FEATURE_SRSO_USER_KERNEL_NO (20*32+30) /* CPU is not affected by SRSO across user/kernel boundaries */
| 464 | +#define X86_FEATURE_SRSO_BP_SPEC_REDUCE (20*32+31) /*
| 465 | + * BP_CFG[BpSpecReduce] can be used to mitigate SRSO for VMs.
| 466 | + * (SRSO_MSR_FIX in the official doc).
| 467 | + */
458 | 468 |
459 | 469 | /*
460 | 470 | * Extended auxiliary flags: Linux defined - for features scattered in various
470 | 480 | #define X86_FEATURE_AMD_FAST_CPPC (21*32 + 5) /* Fast CPPC */
471 | 481 | #define X86_FEATURE_AMD_HETEROGENEOUS_CORES (21*32 + 6) /* Heterogeneous Core Topology */
472 | 482 | #define X86_FEATURE_AMD_WORKLOAD_CLASS (21*32 + 7) /* Workload Classification */
| 483 | +#define X86_FEATURE_PREFER_YMM (21*32 + 8) /* Avoid ZMM registers due to downclocking */
473 | 484 |
474 | 485 | /*
475 | 486 | * BUG word(s)
521 | 532 | #define X86_BUG_RFDS X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
522 | 533 | #define X86_BUG_BHI X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */
523 | 534 | #define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
| 535 | +#define X86_BUG_SPECTRE_V2_USER X86_BUG(1*32 + 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */
524 | 536 | #endif /* _ASM_X86_CPUFEATURES_H */
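X86_BUG_* entries sit in the same capability bitmap, offset past the feature words by the X86_BUG() macro, and mitigation code tests them with boot_cpu_has_bug(). A hedged kernel-side sketch; the function name below is made up for illustration:

```c
#include <linux/init.h>
#include <asm/cpufeature.h>

/*
 * Illustrative mitigation-selection stub: bail out early when the boot
 * CPU is not marked as affected by the new spectre_v2_user bug bit.
 */
static void __init spectre_v2_user_example_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2_USER))
		return;

	/* ... pick an IBPB/STIBP based mitigation here ... */
}
```

The quoted "spectre_v2_user" string appears to follow the same quoted-comment naming convention as the feature words, so it is the name the bugs: line of /proc/cpuinfo would report on affected CPUs.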