| From 76bdf0c3151369f5b15af4baac2708e59347d664 Mon Sep 17 00:00:00 2001 |
| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Mon, 13 Dec 2010 16:33:39 +0100 |
| Subject: [PATCH] x86: Convert mce timer to hrtimer |
| |
| commit 76bdf0c3151369f5b15af4baac2708e59347d664 in tip. |
| |
| mce_timer is started in atomic contexts of cpu bringup. This results |
| in might_sleep() warnings on RT. Convert mce_timer to a hrtimer to |
| avoid this. |
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| |
| diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c |
| index a8aacd4..2b4aba9 100644 |
| --- a/arch/x86/kernel/cpu/mcheck/mce.c |
| +++ b/arch/x86/kernel/cpu/mcheck/mce.c |
| @@ -35,6 +35,7 @@ |
| #include <linux/fs.h> |
| #include <linux/mm.h> |
| #include <linux/debugfs.h> |
| +#include <linux/jiffies.h> |
| |
| #include <asm/processor.h> |
| #include <asm/hw_irq.h> |
| @@ -1131,17 +1132,14 @@ void mce_log_therm_throt_event(__u64 status) |
| * poller finds an MCE, poll 2x faster. When the poller finds no more |
| * errors, poll 2x slower (up to check_interval seconds). |
| */ |
| -static int check_interval = 5 * 60; /* 5 minutes */ |
| +static unsigned long check_interval = 5 * 60; /* 5 minutes */ |
| |
| -static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */ |
| -static DEFINE_PER_CPU(struct timer_list, mce_timer); |
| +static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ |
| +static DEFINE_PER_CPU(struct hrtimer, mce_timer); |
| |
| -static void mce_start_timer(unsigned long data) |
| +static enum hrtimer_restart mce_start_timer(struct hrtimer *timer) |
| { |
| - struct timer_list *t = &per_cpu(mce_timer, data); |
| - int *n; |
| - |
| - WARN_ON(smp_processor_id() != data); |
| + unsigned long *n; |
| |
| if (mce_available(&current_cpu_data)) { |
| machine_check_poll(MCP_TIMESTAMP, |
| @@ -1154,12 +1152,13 @@ static void mce_start_timer(unsigned long data) |
| */ |
| n = &__get_cpu_var(mce_next_interval); |
| if (mce_notify_irq()) |
| - *n = max(*n/2, HZ/100); |
| + *n = max(*n/2, HZ/100UL); |
| else |
| - *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ)); |
| + *n = min(*n*2, round_jiffies_relative(check_interval*HZ)); |
| |
| - t->expires = jiffies + *n; |
| - add_timer_on(t, smp_processor_id()); |
| + hrtimer_forward(timer, timer->base->get_time(), |
| + ns_to_ktime(jiffies_to_usecs(*n) * 1000)); |
| + return HRTIMER_RESTART; |
| } |
| |
| static void mce_do_trigger(struct work_struct *work) |
| @@ -1385,10 +1384,11 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) |
| |
| static void __mcheck_cpu_init_timer(void) |
| { |
| - struct timer_list *t = &__get_cpu_var(mce_timer); |
| - int *n = &__get_cpu_var(mce_next_interval); |
| + struct hrtimer *t = &__get_cpu_var(mce_timer); |
| + unsigned long *n = &__get_cpu_var(mce_next_interval); |
| |
| - setup_timer(t, mce_start_timer, smp_processor_id()); |
| + hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| + t->function = mce_start_timer; |
| |
| if (mce_ignore_ce) |
| return; |
| @@ -1396,8 +1396,9 @@ static void __mcheck_cpu_init_timer(void) |
| *n = check_interval * HZ; |
| if (!*n) |
| return; |
| - t->expires = round_jiffies(jiffies + *n); |
| - add_timer_on(t, smp_processor_id()); |
| + |
| + hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(*n) * 1000), |
| + 0 , HRTIMER_MODE_REL_PINNED); |
| } |
| |
| /* Handle unconfigured int18 (should never happen) */ |
| @@ -1711,7 +1712,7 @@ static int mce_resume(struct sys_device *dev) |
| |
| static void mce_cpu_restart(void *data) |
| { |
| - del_timer_sync(&__get_cpu_var(mce_timer)); |
| + hrtimer_cancel(&__get_cpu_var(mce_timer)); |
| if (!mce_available(&current_cpu_data)) |
| return; |
| __mcheck_cpu_init_generic(); |
| @@ -1730,7 +1731,7 @@ static void mce_disable_ce(void *all) |
| if (!mce_available(&current_cpu_data)) |
| return; |
| if (all) |
| - del_timer_sync(&__get_cpu_var(mce_timer)); |
| + hrtimer_cancel(&__get_cpu_var(mce_timer)); |
| cmci_clear(); |
| } |
| |
| @@ -1962,6 +1963,8 @@ static void __cpuinit mce_disable_cpu(void *h) |
| if (!mce_available(&current_cpu_data)) |
| return; |
| |
| + hrtimer_cancel(&__get_cpu_var(mce_timer)); |
| + |
| if (!(action & CPU_TASKS_FROZEN)) |
| cmci_clear(); |
| for (i = 0; i < banks; i++) { |
| @@ -1988,6 +1991,7 @@ static void __cpuinit mce_reenable_cpu(void *h) |
| if (b->init) |
| wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); |
| } |
| + __mcheck_cpu_init_timer(); |
| } |
| |
| /* Get notified when a cpu comes on/off. Be hotplug friendly. */ |
| @@ -1995,7 +1999,6 @@ static int __cpuinit |
| mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) |
| { |
| unsigned int cpu = (unsigned long)hcpu; |
| - struct timer_list *t = &per_cpu(mce_timer, cpu); |
| |
| switch (action) { |
| case CPU_ONLINE: |
| @@ -2012,16 +2015,10 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) |
| break; |
| case CPU_DOWN_PREPARE: |
| case CPU_DOWN_PREPARE_FROZEN: |
| - del_timer_sync(t); |
| smp_call_function_single(cpu, mce_disable_cpu, &action, 1); |
| break; |
| case CPU_DOWN_FAILED: |
| case CPU_DOWN_FAILED_FROZEN: |
| - if (!mce_ignore_ce && check_interval) { |
| - t->expires = round_jiffies(jiffies + |
| - __get_cpu_var(mce_next_interval)); |
| - add_timer_on(t, cpu); |
| - } |
| smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); |
| break; |
| case CPU_POST_DEAD: |
| -- |
| 1.7.1.1 |
| |