
clocksource: Move watchdog downgrade to a work queue thread

Move the downgrade of an unstable clocksource from the timer interrupt
context into the process context of a work queue thread. This is
needed to be able to do the clocksource switch with stop_machine.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Acked-by: John Stultz <johnstul@us.ibm.com>
Cc: Daniel Walker <dwalker@fifo99.com>
LKML-Reference: <20090814134809.354926067@de.ibm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Martin Schwidefsky 2009-08-14 15:47:25 +02:00 committed by Thomas Gleixner
parent fb63a0ebe6
commit c55c87c892
2 changed files with 40 additions and 17 deletions

--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h

@@ -213,6 +213,7 @@ extern struct clocksource *clock;	/* current clocksource */
 #define CLOCK_SOURCE_WATCHDOG			0x10
 #define CLOCK_SOURCE_VALID_FOR_HRES		0x20
+#define CLOCK_SOURCE_UNSTABLE			0x40
 
 /* simplify initialization of mask field */
 #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)

--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c

@@ -143,10 +143,13 @@ fs_initcall(clocksource_done_booting);
 static LIST_HEAD(watchdog_list);
 static struct clocksource *watchdog;
 static struct timer_list watchdog_timer;
+static struct work_struct watchdog_work;
 static DEFINE_SPINLOCK(watchdog_lock);
 static cycle_t watchdog_last;
 static int watchdog_running;
 
+static void clocksource_watchdog_work(struct work_struct *work);
+
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  */
@@ -158,15 +161,16 @@ static void clocksource_unstable(struct clocksource *cs, int64_t delta)
 	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
 	       cs->name, delta);
 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
-	clocksource_change_rating(cs, 0);
-	list_del(&cs->wd_list);
+	cs->flags |= CLOCK_SOURCE_UNSTABLE;
+	schedule_work(&watchdog_work);
 }
 
 static void clocksource_watchdog(unsigned long data)
 {
-	struct clocksource *cs, *tmp;
+	struct clocksource *cs;
 	cycle_t csnow, wdnow;
 	int64_t wd_nsec, cs_nsec;
+	int next_cpu;
 
 	spin_lock(&watchdog_lock);
 	if (!watchdog_running)
@@ -176,7 +180,12 @@ static void clocksource_watchdog(unsigned long data)
 	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
 	watchdog_last = wdnow;
 
-	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
+	list_for_each_entry(cs, &watchdog_list, wd_list) {
+
+		/* Clocksource already marked unstable? */
+		if (cs->flags & CLOCK_SOURCE_UNSTABLE)
+			continue;
+
 		csnow = cs->read(cs);
 
 		/* Clocksource initialized ? */
@@ -207,19 +216,15 @@ static void clocksource_watchdog(unsigned long data)
 		}
 	}
 
-	if (!list_empty(&watchdog_list)) {
-		/*
-		 * Cycle through CPUs to check if the CPUs stay
-		 * synchronized to each other.
-		 */
-		int next_cpu = cpumask_next(raw_smp_processor_id(),
-					    cpu_online_mask);
-
-		if (next_cpu >= nr_cpu_ids)
-			next_cpu = cpumask_first(cpu_online_mask);
-		watchdog_timer.expires += WATCHDOG_INTERVAL;
-		add_timer_on(&watchdog_timer, next_cpu);
-	}
+	/*
+	 * Cycle through CPUs to check if the CPUs stay synchronized
+	 * to each other.
+	 */
+	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
+	if (next_cpu >= nr_cpu_ids)
+		next_cpu = cpumask_first(cpu_online_mask);
+	watchdog_timer.expires += WATCHDOG_INTERVAL;
+	add_timer_on(&watchdog_timer, next_cpu);
 out:
 	spin_unlock(&watchdog_lock);
 }
@@ -228,6 +233,7 @@ static inline void clocksource_start_watchdog(void)
 {
 	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
 		return;
+	INIT_WORK(&watchdog_work, clocksource_watchdog_work);
 	init_timer(&watchdog_timer);
 	watchdog_timer.function = clocksource_watchdog;
 	watchdog_last = watchdog->read(watchdog);
@@ -313,6 +319,22 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
 	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
+static void clocksource_watchdog_work(struct work_struct *work)
+{
+	struct clocksource *cs, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&watchdog_lock, flags);
+	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list)
+		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
+			list_del_init(&cs->wd_list);
+			clocksource_change_rating(cs, 0);
+		}
+	/* Check if the watchdog timer needs to be stopped. */
+	clocksource_stop_watchdog();
+	spin_unlock_irqrestore(&watchdog_lock, flags);
+}
+
 #else /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
 static void clocksource_enqueue_watchdog(struct clocksource *cs)