@@ -8,8 +8,10 @@
 
 #include <linux/acpi.h>
 #include <linux/bitmap.h>
+#include <linux/cpu.h>
 #include <linux/cpuhotplug.h>
 #include <linux/ctype.h>
+#include <linux/jump_label.h>
 #include <linux/log2.h>
 #include <linux/memory.h>
 #include <linux/module.h>
@@ -44,6 +46,8 @@ struct riscv_isainfo hart_isa[NR_CPUS];
 /* Performance information */
 DEFINE_PER_CPU(long, misaligned_access_speed);
 
+static cpumask_t fast_misaligned_access;
+
 /**
  * riscv_isa_extension_base() - Get base extension word
  *
@@ -643,6 +647,16 @@ static int check_unaligned_access(void *param)
                 (speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
 
         per_cpu(misaligned_access_speed, cpu) = speed;
+
+        /*
+         * Set the value of fast_misaligned_access of a CPU. These operations
+         * are atomic to avoid race conditions.
+         */
+        if (speed == RISCV_HWPROBE_MISALIGNED_FAST)
+                cpumask_set_cpu(cpu, &fast_misaligned_access);
+        else
+                cpumask_clear_cpu(cpu, &fast_misaligned_access);
+
         return 0;
 }
 
@@ -655,13 +669,69 @@ static void check_unaligned_access_nonboot_cpu(void *param)
                 check_unaligned_access(pages[cpu]);
 }
 
+DEFINE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
+
+static void modify_unaligned_access_branches(cpumask_t *mask, int weight)
+{
+        if (cpumask_weight(mask) == weight)
+                static_branch_enable_cpuslocked(&fast_misaligned_access_speed_key);
+        else
+                static_branch_disable_cpuslocked(&fast_misaligned_access_speed_key);
+}
+
+static void set_unaligned_access_static_branches_except_cpu(int cpu)
+{
+        /*
+         * Same as set_unaligned_access_static_branches, except excludes the
+         * given CPU from the result. When a CPU is hotplugged into an offline
+         * state, this function is called before the CPU is set to offline in
+         * the cpumask, and thus the CPU needs to be explicitly excluded.
+         */
+
+        cpumask_t fast_except_me;
+
+        cpumask_and(&fast_except_me, &fast_misaligned_access, cpu_online_mask);
+        cpumask_clear_cpu(cpu, &fast_except_me);
+
+        modify_unaligned_access_branches(&fast_except_me, num_online_cpus() - 1);
+}
+
+static void set_unaligned_access_static_branches(void)
+{
+        /*
+         * This will be called after check_unaligned_access_all_cpus so the
+         * result of unaligned access speed for all CPUs will be available.
+         *
+         * To avoid the number of online cpus changing between reading
+         * cpu_online_mask and calling num_online_cpus, cpus_read_lock must be
+         * held before calling this function.
+         */
+
+        cpumask_t fast_and_online;
+
+        cpumask_and(&fast_and_online, &fast_misaligned_access, cpu_online_mask);
+
+        modify_unaligned_access_branches(&fast_and_online, num_online_cpus());
+}
+
+static int lock_and_set_unaligned_access_static_branch(void)
+{
+        cpus_read_lock();
+        set_unaligned_access_static_branches();
+        cpus_read_unlock();
+
+        return 0;
+}
+
+arch_initcall_sync(lock_and_set_unaligned_access_static_branch);
+
 static int riscv_online_cpu(unsigned int cpu)
 {
         static struct page *buf;
 
         /* We are already set since the last check */
         if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
-                return 0;
+                goto exit;
 
         buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
         if (!buf) {
@@ -671,6 +741,17 @@ static int riscv_online_cpu(unsigned int cpu)
 
         check_unaligned_access(buf);
         __free_pages(buf, MISALIGNED_BUFFER_ORDER);
+
+exit:
+        set_unaligned_access_static_branches();
+
+        return 0;
+}
+
+static int riscv_offline_cpu(unsigned int cpu)
+{
+        set_unaligned_access_static_branches_except_cpu(cpu);
+
         return 0;
 }
 
@@ -705,9 +786,12 @@ static int check_unaligned_access_all_cpus(void)
         /* Check core 0. */
         smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
 
-        /* Setup hotplug callback for any new CPUs that come online. */
+        /*
+         * Setup hotplug callbacks for any new CPUs that come online or go
+         * offline.
+         */
         cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
-                                  riscv_online_cpu, NULL);
+                                  riscv_online_cpu, riscv_offline_cpu);
 
 out:
         unaligned_emulation_finish();
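
Note: the hunks above only define and toggle fast_misaligned_access_speed_key; no consumer of the key appears in this diff. For illustration, code elsewhere in the kernel could test the key with static_branch_likely() from <linux/jump_label.h>, which is patched into a nearly free jump instead of a load-and-branch. A minimal sketch, assuming a hypothetical accessor name (has_fast_misaligned_accesses is not part of this commit):

#include <linux/jump_label.h>

/* Key defined in cpufeature.c by the patch above. */
DECLARE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);

/* Hypothetical accessor: true only while every online CPU probed as fast. */
static __always_inline bool has_fast_misaligned_accesses(void)
{
        return static_branch_likely(&fast_misaligned_access_speed_key);
}

Because modify_unaligned_access_branches() enables the key only when cpumask_weight() equals the online CPU count, such a fast path is taken only when all online CPUs handle misaligned accesses quickly, and the hotplug callbacks re-evaluate that condition as CPUs come and go.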