@@ -682,19 +682,20 @@ static void timekeeping_update_from_shadow(struct tk_data *tkd, unsigned int act
 }
 
 /**
- * timekeeping_forward - update clock to given cycle now value
+ * timekeeping_forward_now - update clock to the current time
  * @tk: Pointer to the timekeeper to update
- * @cycle_now: Current clocksource read value
  *
  * Forward the current clock to update its state since the last call to
  * update_wall_time(). This is useful before significant clock changes,
  * as it avoids having to deal with this time offset explicitly.
  */
-static void timekeeping_forward(struct timekeeper *tk, u64 cycle_now)
+static void timekeeping_forward_now(struct timekeeper *tk)
 {
-        u64 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask,
-                                      tk->tkr_mono.clock->max_raw_delta);
+        u64 cycle_now, delta;
+
+        cycle_now = tk_clock_read(&tk->tkr_mono);
+        delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask,
+                                  tk->tkr_mono.clock->max_raw_delta);
 
         tk->tkr_mono.cycle_last = cycle_now;
         tk->tkr_raw.cycle_last = cycle_now;
 
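For context on what the restored body computes: tk_clock_read() samples the clocksource, and clocksource_delta() turns the two raw readouts into an elapsed cycle count. Below is a minimal userspace sketch of the wrap-safe masked subtraction involved; the function name is invented, and the in-kernel helper additionally uses max_raw_delta to reject readouts that appear to have gone backwards.

#include <stdint.h>

/*
 * Illustrative only: elapsed cycles between two readouts of a
 * free-running counter whose valid bits are given by 'mask'.
 * The masking keeps the subtraction correct across a counter wrap.
 */
static uint64_t demo_cycle_delta(uint64_t now, uint64_t last, uint64_t mask)
{
        return (now - last) & mask;
}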
@@ -709,21 +710,6 @@ static void timekeeping_forward(struct timekeeper *tk, u64 cycle_now)
         }
 }
 
-/**
- * timekeeping_forward_now - update clock to the current time
- * @tk: Pointer to the timekeeper to update
- *
- * Forward the current clock to update its state since the last call to
- * update_wall_time(). This is useful before significant clock changes,
- * as it avoids having to deal with this time offset explicitly.
- */
-static void timekeeping_forward_now(struct timekeeper *tk)
-{
-        u64 cycle_now = tk_clock_read(&tk->tkr_mono);
-
-        timekeeping_forward(tk, cycle_now);
-}
-
 /**
  * ktime_get_real_ts64 - Returns the time of day in a timespec64.
  * @ts: pointer to the timespec to be set
@@ -2165,54 +2151,6 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
         return offset;
 }
 
-static u64 timekeeping_accumulate(struct timekeeper *tk, u64 offset,
-                                  enum timekeeping_adv_mode mode,
-                                  unsigned int *clock_set)
-{
-        int shift = 0, maxshift;
-
-        /*
-         * TK_ADV_FREQ indicates that adjtimex(2) directly set the
-         * frequency or the tick length.
-         *
-         * Accumulate the offset, so that the new multiplier starts from
-         * now. This is required as otherwise for offsets, which are
-         * smaller than tk::cycle_interval, timekeeping_adjust() could set
-         * xtime_nsec backwards, which subsequently causes time going
-         * backwards in the coarse time getters. But even for the case
-         * where offset is greater than tk::cycle_interval the periodic
-         * accumulation does not have much value.
-         *
-         * Also reset tk::ntp_error as it does not make sense to keep the
-         * old accumulated error around in this case.
-         */
-        if (mode == TK_ADV_FREQ) {
-                timekeeping_forward(tk, tk->tkr_mono.cycle_last + offset);
-                tk->ntp_error = 0;
-                return 0;
-        }
-
-        /*
-         * With NO_HZ we may have to accumulate many cycle_intervals
-         * (think "ticks") worth of time at once. To do this efficiently,
-         * we calculate the largest doubling multiple of cycle_intervals
-         * that is smaller than the offset. We then accumulate that
-         * chunk in one go, and then try to consume the next smaller
-         * doubled multiple.
-         */
-        shift = ilog2(offset) - ilog2(tk->cycle_interval);
-        shift = max(0, shift);
-        /* Bound shift to one less than what overflows tick_length */
-        maxshift = (64 - (ilog2(ntp_tick_length()) + 1)) - 1;
-        shift = min(shift, maxshift);
-        while (offset >= tk->cycle_interval) {
-                offset = logarithmic_accumulation(tk, offset, shift, clock_set);
-                if (offset < tk->cycle_interval << shift)
-                        shift--;
-        }
-        return offset;
-}
-
 /*
  * timekeeping_advance - Updates the timekeeper to the current time and
  * current NTP tick length
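A quick worked example for the "Bound shift" line in the helper removed above (and re-added below), with invented numbers: during accumulation the NTP tick length gets shifted left by shift, so the bound must keep that product inside a u64. If ntp_tick_length() occupied 52 bits (ilog2() returning 51), then maxshift = (64 - (51 + 1)) - 1 = 11, capping a single pass at a 2^11-tick chunk with one spare bit of headroom.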
@@ -2222,6 +2160,7 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode)
         struct timekeeper *tk = &tk_core.shadow_timekeeper;
         struct timekeeper *real_tk = &tk_core.timekeeper;
         unsigned int clock_set = 0;
+        int shift = 0, maxshift;
         u64 offset;
 
         guard(raw_spinlock_irqsave)(&tk_core.lock);
@@ -2238,7 +2177,24 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode)
         if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
                 return false;
 
-        offset = timekeeping_accumulate(tk, offset, mode, &clock_set);
+        /*
+         * With NO_HZ we may have to accumulate many cycle_intervals
+         * (think "ticks") worth of time at once. To do this efficiently,
+         * we calculate the largest doubling multiple of cycle_intervals
+         * that is smaller than the offset. We then accumulate that
+         * chunk in one go, and then try to consume the next smaller
+         * doubled multiple.
+         */
+        shift = ilog2(offset) - ilog2(tk->cycle_interval);
+        shift = max(0, shift);
+        /* Bound shift to one less than what overflows tick_length */
+        maxshift = (64 - (ilog2(ntp_tick_length()) + 1)) - 1;
+        shift = min(shift, maxshift);
+        while (offset >= tk->cycle_interval) {
+                offset = logarithmic_accumulation(tk, offset, shift, &clock_set);
+                if (offset < tk->cycle_interval << shift)
+                        shift--;
+        }
 
         /* Adjust the multiplier to correct NTP error */
         timekeeping_adjust(tk, offset);
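To make the doubling strategy concrete, here is a small standalone sketch of the same loop on plain integers. The interval and offset values and the demo_ilog2() helper are invented for illustration, and the offset >= chunk guard stands in for the early return that logarithmic_accumulation() performs when the offset does not cover a full shifted interval (the maxshift cap is omitted here).

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's ilog2(): index of the highest set bit. */
static int demo_ilog2(uint64_t v)
{
        return 63 - __builtin_clzll(v);
}

int main(void)
{
        uint64_t cycle_interval = 1000; /* cycles per tick (invented) */
        uint64_t offset = 70000;        /* pending cycles (invented) */
        int shift = demo_ilog2(offset) - demo_ilog2(cycle_interval);

        if (shift < 0)
                shift = 0;

        while (offset >= cycle_interval) {
                uint64_t chunk = cycle_interval << shift;

                /* Consume a chunk only when the offset covers it. */
                if (offset >= chunk) {
                        offset -= chunk;
                        printf("consumed %llu, remaining %llu\n",
                               (unsigned long long)chunk,
                               (unsigned long long)offset);
                }
                /* Once the offset no longer covers it, halve the chunk. */
                if (offset < chunk)
                        shift--;
        }
        return 0;
}

With these numbers the sketch consumes chunks of 64000, 4000 and 2000 cycles, finishing in a handful of iterations instead of seventy tick-sized ones.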