@@ -440,6 +440,11 @@ unsigned long cpu_get_elf_hwcap2(void);
 #define cpu_set_named_feature(name)	cpu_set_feature(cpu_feature(name))
 #define cpu_have_named_feature(name)	cpu_have_feature(cpu_feature(name))
 
+static __always_inline bool boot_capabilities_finalized(void)
+{
+	return alternative_has_cap_likely(ARM64_ALWAYS_BOOT);
+}
+
 static __always_inline bool system_capabilities_finalized(void)
 {
 	return alternative_has_cap_likely(ARM64_ALWAYS_SYSTEM);
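The new boot_capabilities_finalized() mirrors the existing system_capabilities_finalized() and relies on the same observable property of the alternatives machinery: an alternative_has_cap_*() check reads as false until the corresponding cpucap alternatives have been applied, and reflects the detected capability afterwards. The sketch below is a conceptual model only, not the asm-goto implementation in asm/alternative-macros.h; model_alternative_has_cap() and its arguments are invented for illustration.

/*
 * Conceptual model of the behaviour the two *_finalized() helpers rely
 * on.  The real check is a branch that gets patched in place once the
 * ARM64_ALWAYS_BOOT / ARM64_ALWAYS_SYSTEM alternatives are applied.
 */
static bool model_alternative_has_cap(unsigned int cap,
				      bool alternatives_applied,
				      const unsigned long *caps)
{
	if (!alternatives_applied)
		return false;		/* unpatched: reads as "cap not present" */

	return test_bit(cap, caps);	/* patched: reflects the detected cap */
}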
@@ -452,6 +457,8 @@ static __always_inline bool system_capabilities_finalized(void)
  */
 static __always_inline bool cpus_have_cap(unsigned int num)
 {
+	if (__builtin_constant_p(num) && !cpucap_is_possible(num))
+		return false;
 	if (num >= ARM64_NCAPS)
 		return false;
 	return arch_test_bit(num, system_cpucaps);
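Adding the __builtin_constant_p()/cpucap_is_possible() test up front lets the compiler fold cpus_have_cap() to false for capabilities whose Kconfig option is disabled, which is what allows the per-helper IS_ENABLED() guards to be dropped later in this diff. Below is a minimal sketch of what a cpucap_is_possible()-style helper looks like; the name example_cpucap_is_possible() and the handful of cases shown are illustrative only, and the real table in asm/cpucaps.h covers far more capabilities.

/*
 * Illustrative sketch, not the kernel's cpucap_is_possible(): for a
 * compile-time-constant cap this reduces to an IS_ENABLED() check, so
 * "impossible" caps constant-fold to false in every caller.
 */
static __always_inline bool example_cpucap_is_possible(const unsigned int cap)
{
	compiletime_assert(__builtin_constant_p(cap),
			   "cap must be a compile-time constant");

	switch (cap) {
	case ARM64_HAS_PAN:
		return IS_ENABLED(CONFIG_ARM64_PAN);
	case ARM64_SVE:
		return IS_ENABLED(CONFIG_ARM64_SVE);
	case ARM64_MTE:
		return IS_ENABLED(CONFIG_ARM64_MTE);
	}

	return true;
}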
@@ -460,55 +467,37 @@ static __always_inline bool cpus_have_cap(unsigned int num)
 /*
  * Test for a capability without a runtime check.
  *
- * Before capabilities are finalized, this returns false.
- * After capabilities are finalized, this is patched to avoid a runtime check.
+ * Before boot capabilities are finalized, this will BUG().
+ * After boot capabilities are finalized, this is patched to avoid a runtime
+ * check.
  *
  * @num must be a compile-time constant.
  */
-static __always_inline bool __cpus_have_const_cap(int num)
+static __always_inline bool cpus_have_final_boot_cap(int num)
 {
-	if (num >= ARM64_NCAPS)
-		return false;
-	return alternative_has_cap_unlikely(num);
+	if (boot_capabilities_finalized())
+		return alternative_has_cap_unlikely(num);
+	else
+		BUG();
 }
 
 /*
  * Test for a capability without a runtime check.
  *
- * Before capabilities are finalized, this will BUG().
- * After capabilities are finalized, this is patched to avoid a runtime check.
+ * Before system capabilities are finalized, this will BUG().
+ * After system capabilities are finalized, this is patched to avoid a runtime
+ * check.
  *
  * @num must be a compile-time constant.
  */
 static __always_inline bool cpus_have_final_cap(int num)
 {
 	if (system_capabilities_finalized())
-		return __cpus_have_const_cap(num);
+		return alternative_has_cap_unlikely(num);
 	else
 		BUG();
 }
 
-/*
- * Test for a capability, possibly with a runtime check for non-hyp code.
- *
- * For hyp code, this behaves the same as cpus_have_final_cap().
- *
- * For non-hyp code:
- * Before capabilities are finalized, this behaves as cpus_have_cap().
- * After capabilities are finalized, this is patched to avoid a runtime check.
- *
- * @num must be a compile-time constant.
- */
-static __always_inline bool cpus_have_const_cap(int num)
-{
-	if (is_hyp_code())
-		return cpus_have_final_cap(num);
-	else if (system_capabilities_finalized())
-		return __cpus_have_const_cap(num);
-	else
-		return cpus_have_cap(num);
-}
-
 static inline int __attribute_const__
 cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
 {
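With cpus_have_const_cap() removed, a caller picks the cheapest helper whose precondition it can guarantee: cpus_have_cap() works at any time but tests the bitmap at runtime, cpus_have_final_boot_cap() may only be used for boot-CPU-scope capabilities once the boot CPU's caps are finalized, and cpus_have_final_cap() only once the whole system's caps are finalized. The two callers below are invented for illustration and are not part of this patch; the capability choices follow the conversions made elsewhere in this diff.

/* Illustrative only: runs after all secondary CPUs are up, so the
 * system-wide caps are final and the check is a patched branch. */
static void example_late_caller(void)
{
	if (cpus_have_final_cap(ARM64_HAS_PAN))
		pr_debug("PAN is available system-wide\n");
}

/* Illustrative only: runs once the boot CPU's caps are finalized, possibly
 * before secondaries are up; only valid for boot-CPU-scope capabilities. */
static void example_early_caller(void)
{
	if (cpus_have_final_boot_cap(ARM64_HAS_ADDRESS_AUTH))
		pr_debug("pointer authentication usable from early boot\n");
}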
@@ -628,7 +617,9 @@ static inline bool id_aa64pfr1_mte(u64 pfr1)
 	return val >= ID_AA64PFR1_EL1_MTE_MTE2;
 }
 
-void __init setup_cpu_features(void);
+void __init setup_system_features(void);
+void __init setup_user_features(void);
+
 void check_local_cpu_capabilities(void);
 
 u64 read_sanitised_ftr_reg(u32 id);
@@ -737,13 +728,12 @@ static inline bool system_supports_mixed_endian(void)
737
728
738
729
static __always_inline bool system_supports_fpsimd (void )
739
730
{
740
- return ! cpus_have_const_cap ( ARM64_HAS_NO_FPSIMD );
731
+ return alternative_has_cap_likely ( ARM64_HAS_FPSIMD );
741
732
}
742
733
743
734
static inline bool system_uses_hw_pan (void )
744
735
{
745
- return IS_ENABLED (CONFIG_ARM64_PAN ) &&
746
- cpus_have_const_cap (ARM64_HAS_PAN );
736
+ return alternative_has_cap_unlikely (ARM64_HAS_PAN );
747
737
}
748
738
749
739
static inline bool system_uses_ttbr0_pan (void )
@@ -754,26 +744,22 @@ static inline bool system_uses_ttbr0_pan(void)
 
 static __always_inline bool system_supports_sve(void)
 {
-	return IS_ENABLED(CONFIG_ARM64_SVE) &&
-		cpus_have_const_cap(ARM64_SVE);
+	return alternative_has_cap_unlikely(ARM64_SVE);
 }
 
 static __always_inline bool system_supports_sme(void)
 {
-	return IS_ENABLED(CONFIG_ARM64_SME) &&
-		cpus_have_const_cap(ARM64_SME);
+	return alternative_has_cap_unlikely(ARM64_SME);
 }
 
 static __always_inline bool system_supports_sme2(void)
 {
-	return IS_ENABLED(CONFIG_ARM64_SME) &&
-		cpus_have_const_cap(ARM64_SME2);
+	return alternative_has_cap_unlikely(ARM64_SME2);
 }
 
 static __always_inline bool system_supports_fa64(void)
 {
-	return IS_ENABLED(CONFIG_ARM64_SME) &&
-		cpus_have_const_cap(ARM64_SME_FA64);
+	return alternative_has_cap_unlikely(ARM64_SME_FA64);
 }
 
 static __always_inline bool system_supports_tpidr2(void)
@@ -783,20 +769,17 @@ static __always_inline bool system_supports_tpidr2(void)
 
 static __always_inline bool system_supports_cnp(void)
 {
-	return IS_ENABLED(CONFIG_ARM64_CNP) &&
-		cpus_have_const_cap(ARM64_HAS_CNP);
+	return alternative_has_cap_unlikely(ARM64_HAS_CNP);
 }
 
 static inline bool system_supports_address_auth(void)
 {
-	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
-		cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
+	return cpus_have_final_boot_cap(ARM64_HAS_ADDRESS_AUTH);
 }
 
 static inline bool system_supports_generic_auth(void)
 {
-	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
-		cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
+	return alternative_has_cap_unlikely(ARM64_HAS_GENERIC_AUTH);
 }
 
 static inline bool system_has_full_ptr_auth(void)
@@ -806,14 +789,12 @@ static inline bool system_has_full_ptr_auth(void)
 
 static __always_inline bool system_uses_irq_prio_masking(void)
 {
-	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
-		cpus_have_const_cap(ARM64_HAS_GIC_PRIO_MASKING);
+	return alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
 }
 
 static inline bool system_supports_mte(void)
 {
-	return IS_ENABLED(CONFIG_ARM64_MTE) &&
-		cpus_have_const_cap(ARM64_MTE);
+	return alternative_has_cap_unlikely(ARM64_MTE);
 }
 
 static inline bool system_has_prio_mask_debugging(void)
@@ -824,13 +805,18 @@ static inline bool system_has_prio_mask_debugging(void)
 
 static inline bool system_supports_bti(void)
 {
-	return IS_ENABLED(CONFIG_ARM64_BTI) && cpus_have_const_cap(ARM64_BTI);
+	return cpus_have_final_cap(ARM64_BTI);
+}
+
+static inline bool system_supports_bti_kernel(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) &&
+		cpus_have_final_boot_cap(ARM64_BTI);
 }
 
 static inline bool system_supports_tlb_range(void)
 {
-	return IS_ENABLED(CONFIG_ARM64_TLB_RANGE) &&
-		cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
+	return alternative_has_cap_unlikely(ARM64_HAS_TLB_RANGE);
 }
 
 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
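The BTI change is the one conversion that splits a helper in two: system_supports_bti() keeps a system-wide finalized check for userspace BTI, while the new system_supports_bti_kernel() pairs the CONFIG_ARM64_BTI_KERNEL gate with a boot-CPU-scope check so decisions about kernel text can be made before all CPUs are online. The caller below is invented purely to illustrate the intended split and is not part of this patch.

/* Illustrative only, not part of this patch. */
static void example_report_bti(void)
{
	/* Userspace BTI: system-wide capability, valid once all CPUs are up. */
	if (system_supports_bti())
		pr_info("BTI available to userspace\n");

	/* Kernel BTI: Kconfig-gated and finalized with the boot CPU's caps. */
	if (system_supports_bti_kernel())
		pr_info("kernel text is BTI-protected\n");
}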