 #include <linux/cpufreq.h>
 #include <linux/init.h>
 #include <linux/percpu.h>
+#include <linux/sched/isolation.h>
 
 #include <asm/cpu.h>
 #include <asm/cputype.h>
@@ -88,18 +89,28 @@ int __init parse_acpi_topology(void)
  * initialized.
  */
 static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale) = 1UL << (2 * SCHED_CAPACITY_SHIFT);
-static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
-static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
 static cpumask_var_t amu_fie_cpus;
 
+struct amu_cntr_sample {
+	u64		arch_const_cycles_prev;
+	u64		arch_core_cycles_prev;
+	unsigned long	last_scale_update;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct amu_cntr_sample, cpu_amu_samples);
+
 void update_freq_counters_refs(void)
 {
-	this_cpu_write(arch_core_cycles_prev, read_corecnt());
-	this_cpu_write(arch_const_cycles_prev, read_constcnt());
+	struct amu_cntr_sample *amu_sample = this_cpu_ptr(&cpu_amu_samples);
+
+	amu_sample->arch_core_cycles_prev = read_corecnt();
+	amu_sample->arch_const_cycles_prev = read_constcnt();
 }
 
 static inline bool freq_counters_valid(int cpu)
 {
+	struct amu_cntr_sample *amu_sample = per_cpu_ptr(&cpu_amu_samples, cpu);
+
 	if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
 		return false;
 
@@ -108,8 +119,8 @@ static inline bool freq_counters_valid(int cpu)
 		return false;
 	}
 
-	if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
-		     !per_cpu(arch_core_cycles_prev, cpu))) {
+	if (unlikely(!amu_sample->arch_const_cycles_prev ||
+		     !amu_sample->arch_core_cycles_prev)) {
 		pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
 		return false;
 	}
@@ -152,17 +163,22 @@ void freq_inv_set_max_ratio(int cpu, u64 max_rate)
 
 static void amu_scale_freq_tick(void)
 {
+	struct amu_cntr_sample *amu_sample = this_cpu_ptr(&cpu_amu_samples);
 	u64 prev_core_cnt, prev_const_cnt;
 	u64 core_cnt, const_cnt, scale;
 
-	prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
-	prev_core_cnt = this_cpu_read(arch_core_cycles_prev);
+	prev_const_cnt = amu_sample->arch_const_cycles_prev;
+	prev_core_cnt = amu_sample->arch_core_cycles_prev;
 
 	update_freq_counters_refs();
 
-	const_cnt = this_cpu_read(arch_const_cycles_prev);
-	core_cnt = this_cpu_read(arch_core_cycles_prev);
+	const_cnt = amu_sample->arch_const_cycles_prev;
+	core_cnt = amu_sample->arch_core_cycles_prev;
 
+	/*
+	 * This should not happen unless the AMUs have been reset and the
+	 * counter values have not been restored - unlikely
+	 */
 	if (unlikely(core_cnt <= prev_core_cnt ||
 		     const_cnt <= prev_const_cnt))
 		return;
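The statements elided between this hunk and the next are where the tick handler turns the two counter deltas into `arch_freq_scale`. As a rough illustration of that arithmetic only (a stand-alone sketch, not the kernel code; `compute_freq_scale` is a made-up helper, and the macros are redefined locally so it compiles in user space), the core-cycle delta over the constant-cycle delta is weighted by the per-CPU `arch_max_freq_scale` ratio, which this diff shows being initialized with `2 * SCHED_CAPACITY_SHIFT` fractional bits:

```c
/* Stand-alone sketch of the frequency-invariance arithmetic, assuming
 * scale ~= (delta_core / delta_const) * max_freq_scale in fixed point. */
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

static unsigned long compute_freq_scale(uint64_t delta_core, uint64_t delta_const,
					 uint64_t max_freq_scale)
{
	/* max_freq_scale carries 2 * SCHED_CAPACITY_SHIFT fractional bits */
	uint64_t scale = delta_core * max_freq_scale;

	scale >>= SCHED_CAPACITY_SHIFT;
	scale /= delta_const;	/* delta_const > 0, guarded by the check above */

	/* clamp to full capacity, as the next hunk does with min_t() */
	return scale < SCHED_CAPACITY_SCALE ? (unsigned long)scale : SCHED_CAPACITY_SCALE;
}

int main(void)
{
	/* e.g. the core ran at ~80% of the reference rate over the last tick */
	printf("%lu\n", compute_freq_scale(800000, 1000000,
					   1ULL << (2 * SCHED_CAPACITY_SHIFT)));
	return 0;	/* prints 819, i.e. ~0.8 * 1024 */
}
```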
@@ -182,13 +198,88 @@ static void amu_scale_freq_tick(void)
 
 	scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
 	this_cpu_write(arch_freq_scale, (unsigned long)scale);
+
+	amu_sample->last_scale_update = jiffies;
 }
 
 static struct scale_freq_data amu_sfd = {
 	.source = SCALE_FREQ_SOURCE_ARCH,
 	.set_freq_scale = amu_scale_freq_tick,
 };
 
+static __always_inline bool amu_fie_cpu_supported(unsigned int cpu)
+{
+	return cpumask_available(amu_fie_cpus) &&
+		cpumask_test_cpu(cpu, amu_fie_cpus);
+}
+
+#define AMU_SAMPLE_EXP_MS	20
+
+int arch_freq_get_on_cpu(int cpu)
+{
+	struct amu_cntr_sample *amu_sample;
+	unsigned int start_cpu = cpu;
+	unsigned long last_update;
+	unsigned int freq = 0;
+	u64 scale;
+
+	if (!amu_fie_cpu_supported(cpu) || !arch_scale_freq_ref(cpu))
+		return -EOPNOTSUPP;
+
+	while (1) {
+
+		amu_sample = per_cpu_ptr(&cpu_amu_samples, cpu);
+
+		last_update = amu_sample->last_scale_update;
+
+		/*
+		 * For those CPUs that are in full dynticks mode, or those that have
+		 * not seen tick for a while, try an alternative source for the counters
+		 * (and thus freq scale), if available, for given policy: this boils
+		 * down to identifying an active cpu within the same freq domain, if any.
+		 */
+		if (!housekeeping_cpu(cpu, HK_TYPE_TICK) ||
+		    time_is_before_jiffies(last_update + msecs_to_jiffies(AMU_SAMPLE_EXP_MS))) {
+			struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+			int ref_cpu = cpu;
+
+			if (!policy)
+				return -EINVAL;
+
+			if (!cpumask_intersects(policy->related_cpus,
+						housekeeping_cpumask(HK_TYPE_TICK))) {
+				cpufreq_cpu_put(policy);
+				return -EOPNOTSUPP;
+			}
+
+			do {
+				ref_cpu = cpumask_next_wrap(ref_cpu, policy->cpus,
+							    start_cpu, true);
+
+			} while (ref_cpu < nr_cpu_ids && idle_cpu(ref_cpu));
+
+			cpufreq_cpu_put(policy);
+
+			if (ref_cpu >= nr_cpu_ids)
+				/* No alternative to pull info from */
+				return -EAGAIN;
+
+			cpu = ref_cpu;
+		} else {
+			break;
+		}
+	}
+	/*
+	 * Reversed computation to the one used to determine
+	 * the arch_freq_scale value
+	 * (see amu_scale_freq_tick for details)
+	 */
+	scale = arch_scale_freq_capacity(cpu);
+	freq = scale * arch_scale_freq_ref(cpu);
+	freq >>= SCHED_CAPACITY_SHIFT;
+	return freq;
+}
+
 static void amu_fie_setup(const struct cpumask *cpus)
 {
 	int cpu;
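The tail of `arch_freq_get_on_cpu()` simply runs the tick-time computation in reverse: the current scale is multiplied by the reference frequency and shifted back down. A small worked example of that arithmetic (the numbers and the 2 GHz reference are illustrative assumptions, not taken from the patch):

```c
/* Illustrative-only arithmetic for the tail of arch_freq_get_on_cpu():
 * freq = (scale * reference_freq) >> SCHED_CAPACITY_SHIFT. */
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10

int main(void)
{
	unsigned long scale = 819;		/* ~80% of SCHED_CAPACITY_SCALE (1024) */
	unsigned long freq_ref_khz = 2000000;	/* assumed 2 GHz reference frequency, in kHz */
	unsigned long freq = (scale * freq_ref_khz) >> SCHED_CAPACITY_SHIFT;

	printf("%lu kHz\n", freq);	/* ~1599609 kHz, i.e. roughly 1.6 GHz */
	return 0;
}
```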