Subject: workqueue: Use local irq lock instead of irq disable regions
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:42:26 +0200

Use a local_irq_lock as a replacement for irq-off regions. We keep the
semantics of irq-off with regard to the pool->lock and remain preemptible.
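
For illustration, a minimal sketch of the pattern this patch applies,
assuming the locallock API that <linux/locallock.h> provides in the RT
patchset. The lock name and function below are made up for the example;
only the DEFINE_LOCAL_IRQ_LOCK()/local_lock_irqsave() calls are the
real API:

	#include <linux/locallock.h>

	static DEFINE_LOCAL_IRQ_LOCK(example_lock);

	static void pend_example(void)
	{
		unsigned long flags;

		/*
		 * On !RT this maps to local_irq_save(), i.e. the old
		 * behaviour.  On RT it instead acquires a per-CPU
		 * sleeping spinlock: other users of example_lock on
		 * this CPU are still excluded, but the critical
		 * section remains preemptible.
		 */
		local_lock_irqsave(example_lock, flags);
		/* ... state that used to be protected by irq-off ... */
		local_unlock_irqrestore(example_lock, flags);
	}

Where the per-CPU lock has to nest around a regular spinlock, the
combined local_spin_lock_irq() helper takes both in one go, as the
put_pwq_unlocked() hunk below shows.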

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/workqueue.c |   40 +++++++++++++++++++++++++++-------------
 1 file changed, 27 insertions(+), 13 deletions(-)

--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -49,6 +49,7 @@
 #include <linux/uaccess.h>
 #include <linux/sched/isolation.h>
 #include <linux/nmi.h>
+#include <linux/locallock.h>

 #include "workqueue_internal.h"

@@ -352,6 +353,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient
 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);

+static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
+
 static int worker_thread(void *__worker);
 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);

@@ -1105,9 +1108,11 @@ static void put_pwq_unlocked(struct pool
		 * As both pwqs and pools are RCU protected, the
		 * following lock operations are safe.
		 */
-		spin_lock_irq(&pwq->pool->lock);
+		rcu_read_lock();
+		local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
		put_pwq(pwq);
-		spin_unlock_irq(&pwq->pool->lock);
+		local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
+		rcu_read_unlock();
	}
 }

@@ -1211,7 +1216,7 @@ static int try_to_grab_pending(struct wo
	struct worker_pool *pool;
	struct pool_workqueue *pwq;

-	local_irq_save(*flags);
+	local_lock_irqsave(pendingb_lock, *flags);

	/* try to steal the timer if it exists */
	if (is_dwork) {
@@ -1275,7 +1280,7 @@ static int try_to_grab_pending(struct wo
	spin_unlock(&pool->lock);
 fail:
	rcu_read_unlock();
-	local_irq_restore(*flags);
+	local_unlock_irqrestore(pendingb_lock, *flags);
	if (work_is_canceling(work))
		return -ENOENT;
	cpu_relax();
@@ -1380,7 +1385,13 @@ static void __queue_work(int cpu, struct
	 * queued or lose PENDING.  Grabbing PENDING and queueing should
	 * happen with IRQ disabled.
	 */
+#ifndef CONFIG_PREEMPT_RT_FULL
+	/*
+	 * nort: On RT the "interrupts-disabled" rule has been replaced with
+	 * pendingb_lock.
+	 */
	lockdep_assert_irqs_disabled();
+#endif

	debug_work_activate(work);

@@ -1486,14 +1497,14 @@ bool queue_work_on(int cpu, struct workq
	bool ret = false;
	unsigned long flags;

-	local_irq_save(flags);
+	local_lock_irqsave(pendingb_lock, flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = true;
	}

-	local_irq_restore(flags);
+	local_unlock_irqrestore(pendingb_lock, flags);
	return ret;
 }
 EXPORT_SYMBOL(queue_work_on);
@@ -1502,8 +1513,11 @@ void delayed_work_timer_fn(struct timer_
 {
	struct delayed_work *dwork = from_timer(dwork, t, timer);

+	/* XXX */
+	/* local_lock(pendingb_lock); */
	/* should have been called from irqsafe timer with irq already off */
	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
+	/* local_unlock(pendingb_lock); */
 }
 EXPORT_SYMBOL(delayed_work_timer_fn);

@@ -1558,14 +1572,14 @@ bool queue_delayed_work_on(int cpu, stru
	unsigned long flags;

	/* read the comment in __queue_work() */
-	local_irq_save(flags);
+	local_lock_irqsave(pendingb_lock, flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		ret = true;
	}

-	local_irq_restore(flags);
+	local_unlock_irqrestore(pendingb_lock, flags);
	return ret;
 }
 EXPORT_SYMBOL(queue_delayed_work_on);
@@ -1600,7 +1614,7 @@ bool mod_delayed_work_on(int cpu, struct

	if (likely(ret >= 0)) {
		__queue_delayed_work(cpu, wq, dwork, delay);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(pendingb_lock, flags);
	}

	/* -ENOENT from try_to_grab_pending() becomes %true */
@@ -2937,7 +2951,7 @@ static bool __cancel_work_timer(struct w

	/* tell other tasks trying to grab @work to back off */
	mark_work_canceling(work);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pendingb_lock, flags);

	/*
	 * This allows canceling during early boot.  We know that @work
@@ -2998,10 +3012,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
  */
 bool flush_delayed_work(struct delayed_work *dwork)
 {
-	local_irq_disable();
+	local_lock_irq(pendingb_lock);
	if (del_timer_sync(&dwork->timer))
		__queue_work(dwork->cpu, dwork->wq, &dwork->work);
-	local_irq_enable();
+	local_unlock_irq(pendingb_lock);
	return flush_work(&dwork->work);
 }
 EXPORT_SYMBOL(flush_delayed_work);
@@ -3019,7 +3033,7 @@ static bool __cancel_work(struct work_st
		return false;

	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pendingb_lock, flags);
	return ret;
 }
