| From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| Date: Wed, 21 Aug 2013 17:48:46 +0200 |
| Subject: genirq: Do not invoke the affinity callback via a workqueue on RT |
| |
| Joe Korty reported, that __irq_set_affinity_locked() schedules a |
| workqueue while holding a rawlock which results in a might_sleep() |
| warning. |
| This patch moves the invocation into a process context so that we only |
| wakeup() a process while holding the lock. |
| |
| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| --- |
| include/linux/interrupt.h | 2 + |
| kernel/irq/manage.c | 79 ++++++++++++++++++++++++++++++++++++++++++++-- |
| 2 files changed, 78 insertions(+), 3 deletions(-) |
| |
| --- a/include/linux/interrupt.h |
| +++ b/include/linux/interrupt.h |
| @@ -217,6 +217,7 @@ extern void resume_device_irqs(void); |
| * @irq: Interrupt to which notification applies |
| * @kref: Reference count, for internal use |
| * @work: Work item, for internal use |
| + * @list: List item for deferred callbacks |
| * @notify: Function to be called on change. This will be |
| * called in process context. |
| * @release: Function to be called on release. This will be |
| @@ -228,6 +229,7 @@ struct irq_affinity_notify { |
| unsigned int irq; |
| struct kref kref; |
| struct work_struct work; |
| + struct list_head list; |
| void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); |
| void (*release)(struct kref *ref); |
| }; |
| --- a/kernel/irq/manage.c |
| +++ b/kernel/irq/manage.c |
| @@ -181,6 +181,62 @@ static inline void |
| irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } |
| #endif |
| |
| +#ifdef CONFIG_PREEMPT_RT_FULL |
| +static void _irq_affinity_notify(struct irq_affinity_notify *notify); |
| +static struct task_struct *set_affinity_helper; |
| +static LIST_HEAD(affinity_list); |
| +static DEFINE_RAW_SPINLOCK(affinity_list_lock); |
| + |
| +static int set_affinity_thread(void *unused) |
| +{ |
| + while (1) { |
| + struct irq_affinity_notify *notify; |
| + int empty; |
| + |
| + set_current_state(TASK_INTERRUPTIBLE); |
| + |
| + raw_spin_lock_irq(&affinity_list_lock); |
| + empty = list_empty(&affinity_list); |
| + raw_spin_unlock_irq(&affinity_list_lock); |
| + |
| + if (empty) |
| + schedule(); |
| + if (kthread_should_stop()) |
| + break; |
| + set_current_state(TASK_RUNNING); |
| +try_next: |
| + notify = NULL; |
| + |
| + raw_spin_lock_irq(&affinity_list_lock); |
| + if (!list_empty(&affinity_list)) { |
| + notify = list_first_entry(&affinity_list, |
| + struct irq_affinity_notify, list); |
| + list_del_init(¬ify->list); |
| + } |
| + raw_spin_unlock_irq(&affinity_list_lock); |
| + |
| + if (!notify) |
| + continue; |
| + _irq_affinity_notify(notify); |
| + goto try_next; |
| + } |
| + return 0; |
| +} |
| + |
| +static void init_helper_thread(void) |
| +{ |
| + if (set_affinity_helper) |
| + return; |
| + set_affinity_helper = kthread_run(set_affinity_thread, NULL, |
| + "affinity-cb"); |
| + WARN_ON(IS_ERR(set_affinity_helper)); |
| +} |
| +#else |
| + |
| +static inline void init_helper_thread(void) { } |
| + |
| +#endif |
| + |
| int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, |
| bool force) |
| { |
| @@ -220,7 +276,17 @@ int irq_set_affinity_locked(struct irq_d |
| |
| if (desc->affinity_notify) { |
| kref_get(&desc->affinity_notify->kref); |
| + |
| +#ifdef CONFIG_PREEMPT_RT_FULL |
| + raw_spin_lock(&affinity_list_lock); |
| + if (list_empty(&desc->affinity_notify->list)) |
| + list_add_tail(&desc->affinity_notify->list, |
| + &affinity_list); |
| + raw_spin_unlock(&affinity_list_lock); |
| + wake_up_process(set_affinity_helper); |
| +#else |
| schedule_work(&desc->affinity_notify->work); |
| +#endif |
| } |
| irqd_set(data, IRQD_AFFINITY_SET); |
| |
| @@ -258,10 +324,8 @@ int irq_set_affinity_hint(unsigned int i |
| } |
| EXPORT_SYMBOL_GPL(irq_set_affinity_hint); |
| |
| -static void irq_affinity_notify(struct work_struct *work) |
| +static void _irq_affinity_notify(struct irq_affinity_notify *notify) |
| { |
| - struct irq_affinity_notify *notify = |
| - container_of(work, struct irq_affinity_notify, work); |
| struct irq_desc *desc = irq_to_desc(notify->irq); |
| cpumask_var_t cpumask; |
| unsigned long flags; |
| @@ -283,6 +347,13 @@ static void irq_affinity_notify(struct w |
| kref_put(¬ify->kref, notify->release); |
| } |
| |
| +static void irq_affinity_notify(struct work_struct *work) |
| +{ |
| + struct irq_affinity_notify *notify = |
| + container_of(work, struct irq_affinity_notify, work); |
| + _irq_affinity_notify(notify); |
| +} |
| + |
| /** |
| * irq_set_affinity_notifier - control notification of IRQ affinity changes |
| * @irq: Interrupt for which to enable/disable notification |
| @@ -312,6 +383,8 @@ irq_set_affinity_notifier(unsigned int i |
| notify->irq = irq; |
| kref_init(¬ify->kref); |
| INIT_WORK(¬ify->work, irq_affinity_notify); |
| + INIT_LIST_HEAD(¬ify->list); |
| + init_helper_thread(); |
| } |
| |
| raw_spin_lock_irqsave(&desc->lock, flags); |