--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -110,7 +110,8 @@ static int __init start_sync_check_timer(void)
if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
return 0;
- timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
+ timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn,
+ TIMER_PINNED);
tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
add_timer(&tsc_sync_check_timer);
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -523,7 +523,7 @@ static inline void clocksource_start_watchdog(void)
{
if (watchdog_running || !watchdog || list_empty(&watchdog_list))
return;
- timer_setup(&watchdog_timer, clocksource_watchdog, 0);
+ timer_setup(&watchdog_timer, clocksource_watchdog, TIMER_PINNED);
watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
watchdog_running = 1;
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1670,10 +1670,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
dwork->cpu = cpu;
timer->expires = jiffies + delay;
- if (unlikely(cpu != WORK_CPU_UNBOUND))
+ if (unlikely(cpu != WORK_CPU_UNBOUND)) {
+ timer->flags |= TIMER_PINNED;
add_timer_on(timer, cpu);
- else
+ } else {
+ timer->flags &= ~TIMER_PINNED;
add_timer(timer);
+ }
}
/**
The hierarchical timer pull model at expiry time is now in place. Timers
which should expire on a dedicated CPU need the TIMER_PINNED flag.
Otherwise they will be queued on the dedicated CPU, but in the global
timer base, and could therefore expire on other CPUs as well. Only timers
with the TIMER_PINNED flag end up in the local timer base.

Therefore add the missing TIMER_PINNED flag for those users of
add_timer_on() which do not set it.

Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
---
 arch/x86/kernel/tsc_sync.c | 3 ++-
 kernel/time/clocksource.c  | 2 +-
 kernel/workqueue.c         | 7 +++++--
 3 files changed, 8 insertions(+), 4 deletions(-)
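
For reference, a minimal sketch (not part of the patch; the names
example_timer, example_timer_fn, example_start_on_cpu and
EXAMPLE_INTERVAL are hypothetical) of the pattern the change enforces: a
timer that must fire on a specific CPU sets TIMER_PINNED at setup time
and is then armed with add_timer_on(), so it is queued in that CPU's
local timer base instead of the global one.

#include <linux/timer.h>
#include <linux/jiffies.h>

#define EXAMPLE_INTERVAL	(HZ)	/* hypothetical re-arm period */

static struct timer_list example_timer;

static void example_timer_fn(struct timer_list *t)
{
	/* With TIMER_PINNED this runs on the CPU the timer was pinned to. */
	mod_timer(t, jiffies + EXAMPLE_INTERVAL);
}

static void example_start_on_cpu(int cpu)
{
	/* Pin the timer so the pull model cannot expire it elsewhere. */
	timer_setup(&example_timer, example_timer_fn, TIMER_PINNED);
	example_timer.expires = jiffies + EXAMPLE_INTERVAL;
	add_timer_on(&example_timer, cpu);
}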