Skip to content

Commit f73d925

Browse files
anna-marialx and KAGA-KOKO
authored and committed
timers: Add get next timer interrupt functionality for remote CPUs
To prepare for the conversion of the NOHZ timer placement to a pull-at-expiry-time model, it is required to have functionality available for getting the next timer interrupt on a remote CPU. Locking of the timer bases and retrieving the information about the next timer interrupt are split into separate functions. This is required to be compliant with lock ordering when the new model is in place. Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Frederic Weisbecker <frederic@kernel.org> Link: https://lore.kernel.org/r/20240221090548.36600-14-anna-maria@linutronix.de
1 parent 70b4cf8 commit f73d925

File tree

2 files changed

+100
-5
lines changed

2 files changed

+100
-5
lines changed

kernel/time/tick-internal.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,11 @@
88
#include "timekeeping.h"
99
#include "tick-sched.h"
1010

11+
/*
 * struct timer_events - Storage for the next timer expiry values
 * @local:	Expiry value of the next local timer
 * @global:	Expiry value of the next global timer
 *
 * Filled in by fetch_next_timer_interrupt(); an empty queue is
 * represented by KTIME_MAX in the corresponding field.
 */
struct timer_events {
	u64 local;
	u64 global;
};
15+
1116
#ifdef CONFIG_GENERIC_CLOCKEVENTS
1217

1318
# define TICK_DO_TIMER_NONE -1
@@ -154,6 +159,11 @@ extern unsigned long tick_nohz_active;
154159
extern void timers_update_nohz(void);
155160
# ifdef CONFIG_SMP
156161
extern struct static_key_false timers_migration_enabled;
162+
extern void fetch_next_timer_interrupt_remote(unsigned long basej, u64 basem,
163+
struct timer_events *tevt,
164+
unsigned int cpu);
165+
extern void timer_lock_remote_bases(unsigned int cpu);
166+
extern void timer_unlock_remote_bases(unsigned int cpu);
157167
# endif
158168
#else /* CONFIG_NO_HZ_COMMON */
159169
static inline void timers_update_nohz(void) { }

kernel/time/timer.c

Lines changed: 90 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -266,11 +266,6 @@ struct timer_base {
266266

267267
static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
268268

269-
struct timer_events {
270-
u64 local;
271-
u64 global;
272-
};
273-
274269
#ifdef CONFIG_NO_HZ_COMMON
275270

276271
static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
@@ -2058,6 +2053,21 @@ static unsigned long fetch_next_timer_interrupt(unsigned long basej, u64 basem,
20582053
if (time_before(nextevt, basej))
20592054
nextevt = basej;
20602055
tevt->local = basem + (u64)(nextevt - basej) * TICK_NSEC;
2056+
2057+
/*
2058+
* This is required for the remote check only but it doesn't
2059+
* hurt, when it is done for both call sites:
2060+
*
2061+
* * The remote callers will only take care of the global timers
2062+
* as local timers will be handled by CPU itself. When not
2063+
* updating tevt->global with the already missed first global
2064+
* timer, it is possible that it will be missed completely.
2065+
*
2066+
* * The local callers will ignore the tevt->global anyway, when
2067+
* nextevt is max. one tick away.
2068+
*/
2069+
if (!local_first)
2070+
tevt->global = tevt->local;
20612071
return nextevt;
20622072
}
20632073

@@ -2076,6 +2086,81 @@ static unsigned long fetch_next_timer_interrupt(unsigned long basej, u64 basem,
20762086
return nextevt;
20772087
}
20782088

2089+
# ifdef CONFIG_SMP
2090+
/**
2091+
* fetch_next_timer_interrupt_remote() - Store next timers into @tevt
2092+
* @basej: base time jiffies
2093+
* @basem: base time clock monotonic
2094+
* @tevt: Pointer to the storage for the expiry values
2095+
* @cpu: Remote CPU
2096+
*
2097+
* Stores the next pending local and global timer expiry values in the
2098+
* struct pointed to by @tevt. If a queue is empty the corresponding
2099+
* field is set to KTIME_MAX. If local event expires before global
2100+
* event, global event is set to KTIME_MAX as well.
2101+
*
2102+
* Caller needs to make sure timer base locks are held (use
2103+
* timer_lock_remote_bases() for this purpose).
2104+
*/
2105+
void fetch_next_timer_interrupt_remote(unsigned long basej, u64 basem,
2106+
struct timer_events *tevt,
2107+
unsigned int cpu)
2108+
{
2109+
struct timer_base *base_local, *base_global;
2110+
2111+
/* Preset local / global events */
2112+
tevt->local = tevt->global = KTIME_MAX;
2113+
2114+
base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu);
2115+
base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
2116+
2117+
lockdep_assert_held(&base_local->lock);
2118+
lockdep_assert_held(&base_global->lock);
2119+
2120+
fetch_next_timer_interrupt(basej, basem, base_local, base_global, tevt);
2121+
}
2122+
2123+
/**
2124+
* timer_unlock_remote_bases - unlock timer bases of cpu
2125+
* @cpu: Remote CPU
2126+
*
2127+
* Unlocks the remote timer bases.
2128+
*/
2129+
void timer_unlock_remote_bases(unsigned int cpu)
2130+
__releases(timer_bases[BASE_LOCAL]->lock)
2131+
__releases(timer_bases[BASE_GLOBAL]->lock)
2132+
{
2133+
struct timer_base *base_local, *base_global;
2134+
2135+
base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu);
2136+
base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
2137+
2138+
raw_spin_unlock(&base_global->lock);
2139+
raw_spin_unlock(&base_local->lock);
2140+
}
2141+
2142+
/**
2143+
* timer_lock_remote_bases - lock timer bases of cpu
2144+
* @cpu: Remote CPU
2145+
*
2146+
* Locks the remote timer bases.
2147+
*/
2148+
void timer_lock_remote_bases(unsigned int cpu)
2149+
__acquires(timer_bases[BASE_LOCAL]->lock)
2150+
__acquires(timer_bases[BASE_GLOBAL]->lock)
2151+
{
2152+
struct timer_base *base_local, *base_global;
2153+
2154+
base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu);
2155+
base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
2156+
2157+
lockdep_assert_irqs_disabled();
2158+
2159+
raw_spin_lock(&base_local->lock);
2160+
raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
2161+
}
2162+
# endif /* CONFIG_SMP */
2163+
20792164
static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
20802165
bool *idle)
20812166
{

0 commit comments

Comments
 (0)