@@ -29,7 +29,8 @@ struct timekeeper {
u32 mult;
/* The shift value of the current clocksource. */
int shift;
-
+ /* cycle value at last accumulation point */
+ cycle_t cycle_last;
/* Number of clock cycles in one NTP interval. */
cycle_t cycle_interval;
/* Number of clock shifted nano seconds in one NTP interval. */
@@ -142,7 +143,8 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
old_clock = tk->clock;
tk->clock = clock;
- clock->cycle_last = clock->read(clock);
+ tk->cycle_last = clock->read(clock);
+ clock->cycle_last = tk->cycle_last;
/* Do the ns -> cycle conversion first, using original mult */
tmp = NTP_INTERVAL_LENGTH;
@@ -195,7 +197,7 @@ static inline s64 timekeeping_get_ns(struct timekeeper *tk)
cycle_now = clock->read(clock);
/* calculate the delta since the last update_wall_time: */
- cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+ cycle_delta = (cycle_now - tk->cycle_last) & clock->mask;
nsec = cycle_delta * tk->mult + tk->xtime_nsec;
return nsec >> tk->shift;
@@ -211,7 +213,7 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
cycle_now = clock->read(clock);
/* calculate the delta since the last update_wall_time: */
- cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+ cycle_delta = (cycle_now - tk->cycle_last) & clock->mask;
/* return delta convert to nanoseconds. */
return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
@@ -259,8 +261,9 @@ static void timekeeping_forward_now(struct timekeeper *tk)
clock = tk->clock;
cycle_now = clock->read(clock);
- cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
- clock->cycle_last = cycle_now;
+ cycle_delta = (cycle_now - tk->cycle_last) & clock->mask;
+ tk->cycle_last = cycle_now;
+ tk->clock->cycle_last = cycle_now;
tk->xtime_nsec += cycle_delta * tk->mult;
@@ -760,7 +763,8 @@ static void timekeeping_resume(void)
__timekeeping_inject_sleeptime(&ts);
}
/* re-base the last cycle value */
- timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
+ timekeeper.cycle_last = timekeeper.clock->read(timekeeper.clock);
+ timekeeper.clock->cycle_last = timekeeper.cycle_last;
timekeeper.ntp_error = 0;
timekeeping_suspended = 0;
@@ -1026,7 +1030,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
/* Accumulate one shifted interval */
offset -= tk->cycle_interval << shift;
- tk->clock->cycle_last += tk->cycle_interval << shift;
+ tk->cycle_last += tk->cycle_interval << shift;
tk->xtime_nsec += tk->xtime_interval << shift;
while (tk->xtime_nsec >= nsecps) {
@@ -1079,7 +1083,7 @@ static void update_wall_time(void)
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
offset = shadow_tk.cycle_interval;
#else
- offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
+ offset = (clock->read(clock) - shadow_tk.cycle_last) & clock->mask;
#endif
/*
@@ -1153,6 +1157,7 @@ static void update_wall_time(void)
timekeeper = shadow_tk;
+ timekeeper.clock->cycle_last = timekeeper.cycle_last;
timekeeping_update(&timekeeper, false);
out:
The clocksource cycle_last value is problematic for working on shadow
copies of the timekeeper, because the clocksource is global. Since it's
mostly used only for timekeeping, move cycle_last into the timekeeper.

Unfortunately there are some uses for cycle_last outside of timekeeping
(such as tsc_read, which checks that we haven't switched to a core whose
TSC is behind the last read), so we keep the clocksource cycle_last
updated as well.

CC: Ingo Molnar <mingo@elte.hu>
CC: Thomas Gleixner <tglx@linutronix.de>
CC: Eric Dumazet <eric.dumazet@gmail.com>
CC: Richard Cochran <richardcochran@gmail.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
---
 kernel/time/timekeeping.c |   23 ++++++++++++++---------
 1 files changed, 14 insertions(+), 9 deletions(-)
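
[Editor's note: for readers unfamiliar with the shadow-copy pattern this
patch enables, below is a minimal userspace sketch, not kernel code. All
names here (fake_read, update_wall_time_sketch, the simplified struct
definitions) are illustrative stand-ins, not the kernel's actual types or
API. It shows why the timekeeper needs its own cycle_last: accumulation
happens entirely on a private copy, and the global clocksource copy is
only re-synced when the shadow is published, mirroring the final hunk.]

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t cycle_t;

	struct clocksource {
		cycle_t cycle_last;	/* kept for non-timekeeping users */
		cycle_t (*read)(struct clocksource *cs);
	};

	struct timekeeper {
		struct clocksource *clock;
		cycle_t cycle_last;	/* timekeeping's own authoritative copy */
	};

	/* Fake counter standing in for a hardware clocksource. */
	static cycle_t fake_counter;
	static cycle_t fake_read(struct clocksource *cs)
	{
		(void)cs;
		return fake_counter += 1000;	/* pretend time advances */
	}

	static struct clocksource fake_cs = { .read = fake_read };
	static struct timekeeper timekeeper = { .clock = &fake_cs };

	/* Hypothetical stand-in for update_wall_time(): accumulate on a
	 * shadow copy, then publish it and mirror cycle_last back into
	 * the global clocksource. */
	static void update_wall_time_sketch(void)
	{
		struct timekeeper shadow_tk = timekeeper;	/* private copy */
		struct clocksource *clock = shadow_tk.clock;

		/* The delta is computed against the shadow's cycle_last,
		 * so nothing global is touched while we accumulate. */
		cycle_t offset = clock->read(clock) - shadow_tk.cycle_last;
		shadow_tk.cycle_last += offset;

		/* Publish the shadow, then re-sync the clocksource copy
		 * for its remaining outside users (e.g. the tsc_read
		 * consistency check). */
		timekeeper = shadow_tk;
		timekeeper.clock->cycle_last = timekeeper.cycle_last;
	}

	int main(void)
	{
		update_wall_time_sketch();
		printf("tk.cycle_last=%llu cs.cycle_last=%llu\n",
		       (unsigned long long)timekeeper.cycle_last,
		       (unsigned long long)fake_cs.cycle_last);
		return 0;
	}

If an interrupt or another CPU reads the live timekeeper while the shadow
is being accumulated, it still sees a consistent cycle_last, which is the
property a global clocksource->cycle_last could not provide.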