
Commit 14dcf78

Merge branch 'for-next/cpus_have_const_cap' into for-next/core
* for-next/cpus_have_const_cap: (38 commits)
  : cpus_have_const_cap() removal
  arm64: Remove cpus_have_const_cap()
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_REPEAT_TLBI
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_NVIDIA_CARMEL_CNP
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_CAVIUM_23154
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_2645198
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_1742098
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_1542419
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_843419
  arm64: Avoid cpus_have_const_cap() for ARM64_UNMAP_KERNEL_AT_EL0
  arm64: Avoid cpus_have_const_cap() for ARM64_{SVE,SME,SME2,FA64}
  arm64: Avoid cpus_have_const_cap() for ARM64_SPECTRE_V2
  arm64: Avoid cpus_have_const_cap() for ARM64_SSBS
  arm64: Avoid cpus_have_const_cap() for ARM64_MTE
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_TLB_RANGE
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_WFXT
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_RNG
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_EPAN
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_PAN
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_GIC_PRIO_MASKING
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_DIT
  ...
2 parents 2baca17 + e8d4006 commit 14dcf78


50 files changed, +430 -287 lines changed
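
Note: across the series, callers move from cpus_have_const_cap() (a runtime bitmap test before capabilities are finalized, a patched branch afterwards) to helpers whose timing is explicit, with Kconfig knowledge centralized in cpucap_is_possible(). A minimal before/after sketch of the common caller pattern, using a hypothetical ARM64_HAS_FOO cap and CONFIG_ARM64_FOO option for illustration only:

/* Before this series (hypothetical caller): the Kconfig guard lives in
 * the caller, and cpus_have_const_cap() may fall back to a runtime
 * bitmap test before capabilities are finalized. */
static inline bool system_supports_foo(void)
{
        return IS_ENABLED(CONFIG_ARM64_FOO) &&
               cpus_have_const_cap(ARM64_HAS_FOO);
}

/* After this series: the Kconfig knowledge moves into
 * cpucap_is_possible() and the check is a single patched branch. */
static inline bool system_supports_foo(void)
{
        return alternative_has_cap_unlikely(ARM64_HAS_FOO);
}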

arch/arm/xen/enlighten.c

Lines changed: 16 additions & 9 deletions
@@ -164,9 +164,6 @@ static int xen_starting_cpu(unsigned int cpu)
         BUG_ON(err);
         per_cpu(xen_vcpu, cpu) = vcpup;
 
-        if (!xen_kernel_unmapped_at_usr())
-                xen_setup_runstate_info(cpu);
-
 after_register_vcpu_info:
         enable_percpu_irq(xen_events_irq, 0);
         return 0;
@@ -523,9 +520,6 @@ static int __init xen_guest_init(void)
                 return -EINVAL;
         }
 
-        if (!xen_kernel_unmapped_at_usr())
-                xen_time_setup_guest();
-
         if (xen_initial_domain())
                 pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
 
@@ -535,7 +529,13 @@ static int __init xen_guest_init(void)
 }
 early_initcall(xen_guest_init);
 
-static int __init xen_pm_init(void)
+static int xen_starting_runstate_cpu(unsigned int cpu)
+{
+        xen_setup_runstate_info(cpu);
+        return 0;
+}
+
+static int __init xen_late_init(void)
 {
         if (!xen_domain())
                 return -ENODEV;
@@ -548,9 +548,16 @@ static int __init xen_pm_init(void)
                 do_settimeofday64(&ts);
         }
 
-        return 0;
+        if (xen_kernel_unmapped_at_usr())
+                return 0;
+
+        xen_time_setup_guest();
+
+        return cpuhp_setup_state(CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
+                                 "arm/xen_runstate:starting",
+                                 xen_starting_runstate_cpu, NULL);
 }
-late_initcall(xen_pm_init);
+late_initcall(xen_late_init);
 
 
 /* empty stubs */
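
For context (not part of the patch): cpuhp_setup_state() with a CPUHP_AP_*_STARTING state runs the startup callback on every CPU that is already online and again on each CPU that comes online later, so moving xen_setup_runstate_info() into the callback keeps per-CPU runstate setup working across hotplug. A sketch of the pattern used above, error handling elided:

static int xen_starting_runstate_cpu(unsigned int cpu)
{
        /* Invoked on @cpu itself as it is brought online. */
        xen_setup_runstate_info(cpu);
        return 0;
}

/* NULL teardown callback: nothing needs undoing when a CPU goes offline. */
cpuhp_setup_state(CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
                  "arm/xen_runstate:starting",
                  xen_starting_runstate_cpu, NULL);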

arch/arm64/include/asm/Kbuild

Lines changed: 1 addition & 1 deletion
@@ -6,5 +6,5 @@ generic-y += qspinlock.h
 generic-y += parport.h
 generic-y += user.h
 
-generated-y += cpucaps.h
+generated-y += cpucap-defs.h
 generated-y += sysreg-defs.h

arch/arm64/include/asm/alternative-macros.h

Lines changed: 4 additions & 4 deletions
@@ -226,8 +226,8 @@ alternative_endif
 static __always_inline bool
 alternative_has_cap_likely(const unsigned long cpucap)
 {
-        compiletime_assert(cpucap < ARM64_NCAPS,
-                           "cpucap must be < ARM64_NCAPS");
+        if (!cpucap_is_possible(cpucap))
+                return false;
 
         asm_volatile_goto(
         ALTERNATIVE_CB("b %l[l_no]", %[cpucap], alt_cb_patch_nops)
@@ -244,8 +244,8 @@ alternative_has_cap_likely(const unsigned long cpucap)
 static __always_inline bool
 alternative_has_cap_unlikely(const unsigned long cpucap)
 {
-        compiletime_assert(cpucap < ARM64_NCAPS,
-                           "cpucap must be < ARM64_NCAPS");
+        if (!cpucap_is_possible(cpucap))
+                return false;
 
         asm_volatile_goto(
         ALTERNATIVE("nop", "b %l[l_yes]", %[cpucap])

arch/arm64/include/asm/arch_gicv3.h

Lines changed: 8 additions & 0 deletions
@@ -79,6 +79,14 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
         return 0x3ff;
 }
 
+static u64 __maybe_unused gic_read_iar(void)
+{
+        if (alternative_has_cap_unlikely(ARM64_WORKAROUND_CAVIUM_23154))
+                return gic_read_iar_cavium_thunderx();
+        else
+                return gic_read_iar_common();
+}
+
 static inline void gic_write_ctlr(u32 val)
 {
         write_sysreg_s(val, SYS_ICC_CTLR_EL1);
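
The wrapper above lets interrupt entry code call gic_read_iar() unconditionally; the alternative is patched so only parts with the Cavium ThunderX erratum take the workaround path. A rough caller sketch, for illustration only:

/* Hypothetical acknowledge path. */
u64 irqnr = gic_read_iar();

if (irqnr != 0x3ff)                     /* 1023: no pending interrupt */
        handle_pending_irq(irqnr);      /* hypothetical handler */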

arch/arm64/include/asm/archrandom.h

Lines changed: 1 addition & 1 deletion
@@ -63,7 +63,7 @@ static __always_inline bool __cpu_has_rng(void)
 {
         if (unlikely(!system_capabilities_finalized() && !preemptible()))
                 return this_cpu_has_cap(ARM64_HAS_RNG);
-        return cpus_have_const_cap(ARM64_HAS_RNG);
+        return alternative_has_cap_unlikely(ARM64_HAS_RNG);
 }
 
 static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)

arch/arm64/include/asm/cacheflush.h

Lines changed: 1 addition & 1 deletion
@@ -132,7 +132,7 @@ void flush_dcache_folio(struct folio *);
 
 static __always_inline void icache_inval_all_pou(void)
 {
-        if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
+        if (alternative_has_cap_unlikely(ARM64_HAS_CACHE_DIC))
                 return;
 
         asm("ic ialluis");

arch/arm64/include/asm/cpucaps.h

Lines changed: 67 additions & 0 deletions
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_CPUCAPS_H
+#define __ASM_CPUCAPS_H
+
+#include <asm/cpucap-defs.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+/*
+ * Check whether a cpucap is possible at compiletime.
+ */
+static __always_inline bool
+cpucap_is_possible(const unsigned int cap)
+{
+        compiletime_assert(__builtin_constant_p(cap),
+                           "cap must be a constant");
+        compiletime_assert(cap < ARM64_NCAPS,
+                           "cap must be < ARM64_NCAPS");
+
+        switch (cap) {
+        case ARM64_HAS_PAN:
+                return IS_ENABLED(CONFIG_ARM64_PAN);
+        case ARM64_HAS_EPAN:
+                return IS_ENABLED(CONFIG_ARM64_EPAN);
+        case ARM64_SVE:
+                return IS_ENABLED(CONFIG_ARM64_SVE);
+        case ARM64_SME:
+        case ARM64_SME2:
+        case ARM64_SME_FA64:
+                return IS_ENABLED(CONFIG_ARM64_SME);
+        case ARM64_HAS_CNP:
+                return IS_ENABLED(CONFIG_ARM64_CNP);
+        case ARM64_HAS_ADDRESS_AUTH:
+        case ARM64_HAS_GENERIC_AUTH:
+                return IS_ENABLED(CONFIG_ARM64_PTR_AUTH);
+        case ARM64_HAS_GIC_PRIO_MASKING:
+                return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI);
+        case ARM64_MTE:
+                return IS_ENABLED(CONFIG_ARM64_MTE);
+        case ARM64_BTI:
+                return IS_ENABLED(CONFIG_ARM64_BTI);
+        case ARM64_HAS_TLB_RANGE:
+                return IS_ENABLED(CONFIG_ARM64_TLB_RANGE);
+        case ARM64_UNMAP_KERNEL_AT_EL0:
+                return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0);
+        case ARM64_WORKAROUND_843419:
+                return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419);
+        case ARM64_WORKAROUND_1742098:
+                return IS_ENABLED(CONFIG_ARM64_ERRATUM_1742098);
+        case ARM64_WORKAROUND_2645198:
+                return IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198);
+        case ARM64_WORKAROUND_2658417:
+                return IS_ENABLED(CONFIG_ARM64_ERRATUM_2658417);
+        case ARM64_WORKAROUND_CAVIUM_23154:
+                return IS_ENABLED(CONFIG_CAVIUM_ERRATUM_23154);
+        case ARM64_WORKAROUND_NVIDIA_CARMEL_CNP:
+                return IS_ENABLED(CONFIG_NVIDIA_CARMEL_CNP_ERRATUM);
+        case ARM64_WORKAROUND_REPEAT_TLBI:
+                return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI);
+        }
+
+        return true;
+}
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_CPUCAPS_H */
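
The assertions above turn misuse into build failures, and Kconfig-gated caps fold to compile-time constants. A small illustrative sketch (not from the patch; the callee is hypothetical):

/* With CONFIG_ARM64_MTE=n, cpucap_is_possible(ARM64_MTE) is constant
 * false, so anything guarded by it becomes dead code. */
if (cpucap_is_possible(ARM64_MTE))
        setup_mte_state();              /* hypothetical callee */

/* Both of these now fail the build instead of misbehaving at runtime
 * (hypothetical misuses):
 *
 *      cpucap_is_possible(some_runtime_variable);      // not a constant
 *      cpucap_is_possible(ARM64_NCAPS);                // out of range
 */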

arch/arm64/include/asm/cpufeature.h

Lines changed: 41 additions & 55 deletions
@@ -440,6 +440,11 @@ unsigned long cpu_get_elf_hwcap2(void);
 #define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
 #define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))
 
+static __always_inline bool boot_capabilities_finalized(void)
+{
+        return alternative_has_cap_likely(ARM64_ALWAYS_BOOT);
+}
+
 static __always_inline bool system_capabilities_finalized(void)
 {
         return alternative_has_cap_likely(ARM64_ALWAYS_SYSTEM);
@@ -452,6 +457,8 @@ static __always_inline bool system_capabilities_finalized(void)
  */
 static __always_inline bool cpus_have_cap(unsigned int num)
 {
+        if (__builtin_constant_p(num) && !cpucap_is_possible(num))
+                return false;
         if (num >= ARM64_NCAPS)
                 return false;
         return arch_test_bit(num, system_cpucaps);
@@ -460,55 +467,37 @@ static __always_inline bool cpus_have_cap(unsigned int num)
 /*
  * Test for a capability without a runtime check.
  *
- * Before capabilities are finalized, this returns false.
- * After capabilities are finalized, this is patched to avoid a runtime check.
+ * Before boot capabilities are finalized, this will BUG().
+ * After boot capabilities are finalized, this is patched to avoid a runtime
+ * check.
  *
  * @num must be a compile-time constant.
  */
-static __always_inline bool __cpus_have_const_cap(int num)
+static __always_inline bool cpus_have_final_boot_cap(int num)
 {
-        if (num >= ARM64_NCAPS)
-                return false;
-        return alternative_has_cap_unlikely(num);
+        if (boot_capabilities_finalized())
+                return alternative_has_cap_unlikely(num);
+        else
+                BUG();
 }
 
 /*
  * Test for a capability without a runtime check.
  *
- * Before capabilities are finalized, this will BUG().
- * After capabilities are finalized, this is patched to avoid a runtime check.
+ * Before system capabilities are finalized, this will BUG().
+ * After system capabilities are finalized, this is patched to avoid a runtime
+ * check.
  *
  * @num must be a compile-time constant.
  */
 static __always_inline bool cpus_have_final_cap(int num)
 {
         if (system_capabilities_finalized())
-                return __cpus_have_const_cap(num);
+                return alternative_has_cap_unlikely(num);
         else
                 BUG();
 }
 
-/*
- * Test for a capability, possibly with a runtime check for non-hyp code.
- *
- * For hyp code, this behaves the same as cpus_have_final_cap().
- *
- * For non-hyp code:
- * Before capabilities are finalized, this behaves as cpus_have_cap().
- * After capabilities are finalized, this is patched to avoid a runtime check.
- *
- * @num must be a compile-time constant.
- */
-static __always_inline bool cpus_have_const_cap(int num)
-{
-        if (is_hyp_code())
-                return cpus_have_final_cap(num);
-        else if (system_capabilities_finalized())
-                return __cpus_have_const_cap(num);
-        else
-                return cpus_have_cap(num);
-}
-
 static inline int __attribute_const__
 cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
 {
@@ -628,7 +617,9 @@ static inline bool id_aa64pfr1_mte(u64 pfr1)
         return val >= ID_AA64PFR1_EL1_MTE_MTE2;
 }
 
-void __init setup_cpu_features(void);
+void __init setup_system_features(void);
+void __init setup_user_features(void);
+
 void check_local_cpu_capabilities(void);
 
 u64 read_sanitised_ftr_reg(u32 id);
@@ -737,13 +728,12 @@ static inline bool system_supports_mixed_endian(void)
 
 static __always_inline bool system_supports_fpsimd(void)
 {
-        return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
+        return alternative_has_cap_likely(ARM64_HAS_FPSIMD);
 }
 
 static inline bool system_uses_hw_pan(void)
 {
-        return IS_ENABLED(CONFIG_ARM64_PAN) &&
-                cpus_have_const_cap(ARM64_HAS_PAN);
+        return alternative_has_cap_unlikely(ARM64_HAS_PAN);
 }
 
 static inline bool system_uses_ttbr0_pan(void)
@@ -754,26 +744,22 @@ static inline bool system_uses_ttbr0_pan(void)
 
 static __always_inline bool system_supports_sve(void)
 {
-        return IS_ENABLED(CONFIG_ARM64_SVE) &&
-                cpus_have_const_cap(ARM64_SVE);
+        return alternative_has_cap_unlikely(ARM64_SVE);
 }
 
 static __always_inline bool system_supports_sme(void)
 {
-        return IS_ENABLED(CONFIG_ARM64_SME) &&
-                cpus_have_const_cap(ARM64_SME);
+        return alternative_has_cap_unlikely(ARM64_SME);
 }
 
 static __always_inline bool system_supports_sme2(void)
 {
-        return IS_ENABLED(CONFIG_ARM64_SME) &&
-                cpus_have_const_cap(ARM64_SME2);
+        return alternative_has_cap_unlikely(ARM64_SME2);
 }
 
 static __always_inline bool system_supports_fa64(void)
 {
-        return IS_ENABLED(CONFIG_ARM64_SME) &&
-                cpus_have_const_cap(ARM64_SME_FA64);
+        return alternative_has_cap_unlikely(ARM64_SME_FA64);
 }
 
 static __always_inline bool system_supports_tpidr2(void)
@@ -783,20 +769,17 @@ static __always_inline bool system_supports_tpidr2(void)
 
 static __always_inline bool system_supports_cnp(void)
 {
-        return IS_ENABLED(CONFIG_ARM64_CNP) &&
-                cpus_have_const_cap(ARM64_HAS_CNP);
+        return alternative_has_cap_unlikely(ARM64_HAS_CNP);
 }
 
 static inline bool system_supports_address_auth(void)
 {
-        return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
-                cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
+        return cpus_have_final_boot_cap(ARM64_HAS_ADDRESS_AUTH);
 }
 
 static inline bool system_supports_generic_auth(void)
 {
-        return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
-                cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
+        return alternative_has_cap_unlikely(ARM64_HAS_GENERIC_AUTH);
 }
 
 static inline bool system_has_full_ptr_auth(void)
@@ -806,14 +789,12 @@ static inline bool system_has_full_ptr_auth(void)
 
 static __always_inline bool system_uses_irq_prio_masking(void)
 {
-        return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
-                cpus_have_const_cap(ARM64_HAS_GIC_PRIO_MASKING);
+        return alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
 }
 
 static inline bool system_supports_mte(void)
 {
-        return IS_ENABLED(CONFIG_ARM64_MTE) &&
-                cpus_have_const_cap(ARM64_MTE);
+        return alternative_has_cap_unlikely(ARM64_MTE);
 }
 
 static inline bool system_has_prio_mask_debugging(void)
@@ -824,13 +805,18 @@ static inline bool system_has_prio_mask_debugging(void)
 
 static inline bool system_supports_bti(void)
 {
-        return IS_ENABLED(CONFIG_ARM64_BTI) && cpus_have_const_cap(ARM64_BTI);
+        return cpus_have_final_cap(ARM64_BTI);
+}
+
+static inline bool system_supports_bti_kernel(void)
+{
+        return IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) &&
+                cpus_have_final_boot_cap(ARM64_BTI);
 }
 
 static inline bool system_supports_tlb_range(void)
 {
-        return IS_ENABLED(CONFIG_ARM64_TLB_RANGE) &&
-                cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
+        return alternative_has_cap_unlikely(ARM64_HAS_TLB_RANGE);
 }
 
 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
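
Taken together, the cpufeature.h changes leave callers with a small menu instead of the old catch-all cpus_have_const_cap(). A rough guide, sketched with a hypothetical ARM64_HAS_FOO cap and foo_fast_path() callee:

/* Cheapest: patched once system caps are finalized; reads as false
 * before then, so only use where an early false is harmless. */
if (alternative_has_cap_unlikely(ARM64_HAS_FOO))
        foo_fast_path();

/* BUG()s if used before system capabilities are finalized. */
if (cpus_have_final_cap(ARM64_HAS_FOO))
        foo_fast_path();

/* BUG()s if used before boot CPU capabilities are finalized;
 * usable earlier than cpus_have_final_cap(). */
if (cpus_have_final_boot_cap(ARM64_HAS_FOO))
        foo_fast_path();

/* Always safe, at the cost of a runtime bitmap test. */
if (cpus_have_cap(ARM64_HAS_FOO))
        foo_fast_path();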
