@@ -25,6 +25,9 @@ static DEFINE_MUTEX(em_pd_mutex);
 
 static void em_cpufreq_update_efficiencies(struct device *dev,
					    struct em_perf_state *table);
+static void em_check_capacity_update(void);
+static void em_update_workfn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(em_update_work, em_update_workfn);
 
 static bool _is_cpu_device(struct device *dev)
 {
@@ -583,6 +586,10 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
 
 unlock:
 	mutex_unlock(&em_pd_mutex);
+
+	if (_is_cpu_device(dev))
+		em_check_capacity_update();
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(em_dev_register_perf_domain);
@@ -618,3 +625,125 @@ void em_dev_unregister_perf_domain(struct device *dev)
 	mutex_unlock(&em_pd_mutex);
 }
 EXPORT_SYMBOL_GPL(em_dev_unregister_perf_domain);
+
+/*
+ * Adjustment of CPU performance values after boot, when all CPUs' capacities
+ * are correctly calculated.
+ */
+static void em_adjust_new_capacity(struct device *dev,
+				   struct em_perf_domain *pd,
+				   u64 max_cap)
+{
+	struct em_perf_table __rcu *em_table;
+	struct em_perf_state *ps, *new_ps;
+	int ret, ps_size;
+
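+	/* Allocate a new table; it is published to readers via RCU later on */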
+	em_table = em_table_alloc(pd);
+	if (!em_table) {
+		dev_warn(dev, "EM: allocation failed\n");
+		return;
+	}
+
+	new_ps = em_table->state;
+
+	rcu_read_lock();
+	ps = em_perf_state_from_pd(pd);
+	/* Initialize data based on old table */
+	ps_size = sizeof(struct em_perf_state) * pd->nr_perf_states;
+	memcpy(new_ps, ps, ps_size);
+
+	rcu_read_unlock();
+
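+	/* Re-derive the performance values for the updated CPU capacity */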
+	em_init_performance(dev, pd, new_ps, pd->nr_perf_states);
+	ret = em_compute_costs(dev, new_ps, NULL, pd->nr_perf_states,
+			       pd->flags);
+	if (ret) {
+		dev_warn(dev, "EM: compute costs failed\n");
+		em_table_free(em_table);
+		return;
+	}
+
+	ret = em_dev_update_perf_domain(dev, em_table);
+	if (ret)
+		dev_warn(dev, "EM: update failed %d\n", ret);
+
+	/*
+	 * This is a one-time update, so give up the ownership in this updater.
+	 * The EM framework has incremented the usage counter and from now on
+	 * will keep the reference (then free the memory when needed).
+	 */
+	em_table_free(em_table);
+}
+
+static void em_check_capacity_update(void)
+{
+	cpumask_var_t cpu_done_mask;
+	struct em_perf_state *table;
+	struct em_perf_domain *pd;
+	unsigned long cpu_capacity;
+	int cpu;
+
+	if (!zalloc_cpumask_var(&cpu_done_mask, GFP_KERNEL)) {
+		pr_warn("no free memory\n");
+		return;
+	}
+
+	/* Check if the CPU capacity has changed, then update the EM */
+	for_each_possible_cpu(cpu) {
+		struct cpufreq_policy *policy;
+		unsigned long em_max_perf;
+		struct device *dev;
+		int nr_states;
+
+		if (cpumask_test_cpu(cpu, cpu_done_mask))
+			continue;
+
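+		/* cpufreq not ready yet for this CPU; retry from the delayed work */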
+		policy = cpufreq_cpu_get(cpu);
+		if (!policy) {
+			pr_debug("Accessing cpu%d policy failed\n", cpu);
+			schedule_delayed_work(&em_update_work,
+					      msecs_to_jiffies(1000));
+			break;
+		}
+		cpufreq_cpu_put(policy);
+
+		pd = em_cpu_get(cpu);
+		if (!pd || em_is_artificial(pd))
+			continue;
+
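+		/* All CPUs of a perf domain share the table; mark them as done */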
+		cpumask_or(cpu_done_mask, cpu_done_mask,
+			   em_span_cpus(pd));
+
+		nr_states = pd->nr_perf_states;
+		cpu_capacity = arch_scale_cpu_capacity(cpu);
+
+		rcu_read_lock();
+		table = em_perf_state_from_pd(pd);
+		em_max_perf = table[nr_states - 1].performance;
+		rcu_read_unlock();
+
+		/*
+		 * Check if the CPU capacity has been adjusted during boot
+		 * and trigger the update for new performance values.
+		 */
+		if (em_max_perf == cpu_capacity)
+			continue;
+
+		pr_debug("updating cpu%d cpu_cap=%lu old capacity=%lu\n",
+			 cpu, cpu_capacity, em_max_perf);
+
+		dev = get_cpu_device(cpu);
+		em_adjust_new_capacity(dev, pd, cpu_capacity);
+	}
+
+	free_cpumask_var(cpu_done_mask);
+}
+
+static void em_update_workfn(struct work_struct *work)
+{
+	em_check_capacity_update();
+}