| From 76666dbbdd40e963e7df84c123fc9aea4a2bcc69 Mon Sep 17 00:00:00 2001 |
| From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| Date: Wed, 21 Aug 2013 17:48:46 +0200 |
| Subject: [PATCH] genirq: do not invoke the affinity callback via a workqueue |
| |
| Joe Korty reported, that __irq_set_affinity_locked() schedules a |
| workqueue while holding a rawlock which results in a might_sleep() |
| warning. |
| This patch moves the invokation into a process context so that we only |
| wakeup() a process while holding the lock. |
| |
| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| --- |
| include/linux/interrupt.h | 1 |
| kernel/irq/manage.c | 79 ++++++++++++++++++++++++++++++++++++++++++++-- |
| 2 files changed, 77 insertions(+), 3 deletions(-) |
| |
| --- a/include/linux/interrupt.h |
| +++ b/include/linux/interrupt.h |
| @@ -261,6 +261,7 @@ struct irq_affinity_notify { |
| unsigned int irq; |
| struct kref kref; |
| struct work_struct work; |
| + struct list_head list; |
| void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); |
| void (*release)(struct kref *ref); |
| }; |
| --- a/kernel/irq/manage.c |
| +++ b/kernel/irq/manage.c |
| @@ -164,6 +164,62 @@ int irq_do_set_affinity(struct irq_data |
| return ret; |
| } |
| |
| +#ifdef CONFIG_PREEMPT_RT_FULL |
| +static void _irq_affinity_notify(struct irq_affinity_notify *notify); |
| +static struct task_struct *set_affinity_helper; |
| +static LIST_HEAD(affinity_list); |
| +static DEFINE_RAW_SPINLOCK(affinity_list_lock); |
| + |
| +static int set_affinity_thread(void *unused) |
| +{ |
| + while (1) { |
| + struct irq_affinity_notify *notify; |
| + int empty; |
| + |
| + set_current_state(TASK_INTERRUPTIBLE); |
| + |
| + raw_spin_lock_irq(&affinity_list_lock); |
| + empty = list_empty(&affinity_list); |
| + raw_spin_unlock_irq(&affinity_list_lock); |
| + |
| + if (empty) |
| + schedule(); |
| + if (kthread_should_stop()) |
| + break; |
| + set_current_state(TASK_RUNNING); |
| +try_next: |
| + notify = NULL; |
| + |
| + raw_spin_lock_irq(&affinity_list_lock); |
| + if (!list_empty(&affinity_list)) { |
| + notify = list_first_entry(&affinity_list, |
| + struct irq_affinity_notify, list); |
| + list_del_init(¬ify->list); |
| + } |
| + raw_spin_unlock_irq(&affinity_list_lock); |
| + |
| + if (!notify) |
| + continue; |
| + _irq_affinity_notify(notify); |
| + goto try_next; |
| + } |
| + return 0; |
| +} |
| + |
| +static void init_helper_thread(void) |
| +{ |
| + if (set_affinity_helper) |
| + return; |
| + set_affinity_helper = kthread_run(set_affinity_thread, NULL, |
| + "affinity-cb"); |
| + WARN_ON(IS_ERR(set_affinity_helper)); |
| +} |
| +#else |
| + |
| +static inline void init_helper_thread(void) { } |
| + |
| +#endif |
| + |
| int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) |
| { |
| struct irq_chip *chip = irq_data_get_irq_chip(data); |
| @@ -182,7 +238,17 @@ int __irq_set_affinity_locked(struct irq |
| |
| if (desc->affinity_notify) { |
| kref_get(&desc->affinity_notify->kref); |
| + |
| +#ifdef CONFIG_PREEMPT_RT_FULL |
| + raw_spin_lock(&affinity_list_lock); |
| + if (list_empty(&desc->affinity_notify->list)) |
| + list_add_tail(&desc->affinity_notify->list, |
| + &affinity_list); |
| + raw_spin_unlock(&affinity_list_lock); |
| + wake_up_process(set_affinity_helper); |
| +#else |
| schedule_work(&desc->affinity_notify->work); |
| +#endif |
| } |
| irqd_set(data, IRQD_AFFINITY_SET); |
| |
| @@ -223,10 +289,8 @@ int irq_set_affinity_hint(unsigned int i |
| } |
| EXPORT_SYMBOL_GPL(irq_set_affinity_hint); |
| |
| -static void irq_affinity_notify(struct work_struct *work) |
| +static void _irq_affinity_notify(struct irq_affinity_notify *notify) |
| { |
| - struct irq_affinity_notify *notify = |
| - container_of(work, struct irq_affinity_notify, work); |
| struct irq_desc *desc = irq_to_desc(notify->irq); |
| cpumask_var_t cpumask; |
| unsigned long flags; |
| @@ -248,6 +312,13 @@ out: |
| kref_put(¬ify->kref, notify->release); |
| } |
| |
| +static void irq_affinity_notify(struct work_struct *work) |
| +{ |
| + struct irq_affinity_notify *notify = |
| + container_of(work, struct irq_affinity_notify, work); |
| + _irq_affinity_notify(notify); |
| +} |
| + |
| /** |
| * irq_set_affinity_notifier - control notification of IRQ affinity changes |
| * @irq: Interrupt for which to enable/disable notification |
| @@ -277,6 +348,8 @@ irq_set_affinity_notifier(unsigned int i |
| notify->irq = irq; |
| kref_init(¬ify->kref); |
| INIT_WORK(¬ify->work, irq_affinity_notify); |
| + INIT_LIST_HEAD(¬ify->list); |
| + init_helper_thread(); |
| } |
| |
| raw_spin_lock_irqsave(&desc->lock, flags); |