| Subject: completion: Use simple wait queues |
| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Fri, 11 Jan 2013 11:23:51 +0100 |
| |
| Completions have no long-lasting callbacks and therefore do not need |
| the complex waitqueue variant. Use simple waitqueues which reduces the |
| contention on the waitqueue lock. |
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| --- |
| drivers/net/wireless/intersil/orinoco/orinoco_usb.c | 2 - |
| drivers/usb/gadget/function/f_fs.c | 2 - |
| drivers/usb/gadget/legacy/inode.c | 4 +- |
| include/linux/completion.h | 9 ++--- |
| include/linux/suspend.h | 6 +++ |
| include/linux/swait.h | 1 |
| kernel/power/hibernate.c | 7 ++++ |
| kernel/power/suspend.c | 5 +++ |
| kernel/sched/completion.c | 32 ++++++++++---------- |
| kernel/sched/core.c | 10 +++++- |
| kernel/sched/swait.c | 20 ++++++++++++ |
| 11 files changed, 71 insertions(+), 27 deletions(-) |
| |
| --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c |
| +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c |
| @@ -696,7 +696,7 @@ static void ezusb_req_ctx_wait(struct ez |
| while (!ctx->done.done && msecs--) |
| udelay(1000); |
| } else { |
| - wait_event_interruptible(ctx->done.wait, |
| + swait_event_interruptible(ctx->done.wait, |
| ctx->done.done); |
| } |
| break; |
| --- a/drivers/usb/gadget/function/f_fs.c |
| +++ b/drivers/usb/gadget/function/f_fs.c |
| @@ -1594,7 +1594,7 @@ static void ffs_data_put(struct ffs_data |
| pr_info("%s(): freeing\n", __func__); |
| ffs_data_clear(ffs); |
| BUG_ON(waitqueue_active(&ffs->ev.waitq) || |
| - waitqueue_active(&ffs->ep0req_completion.wait)); |
| + swait_active(&ffs->ep0req_completion.wait)); |
| kfree(ffs->dev_name); |
| kfree(ffs); |
| } |
| --- a/drivers/usb/gadget/legacy/inode.c |
| +++ b/drivers/usb/gadget/legacy/inode.c |
| @@ -345,7 +345,7 @@ ep_io (struct ep_data *epdata, void *buf |
| spin_unlock_irq (&epdata->dev->lock); |
| |
| if (likely (value == 0)) { |
| - value = wait_event_interruptible (done.wait, done.done); |
| + value = swait_event_interruptible (done.wait, done.done); |
| if (value != 0) { |
| spin_lock_irq (&epdata->dev->lock); |
| if (likely (epdata->ep != NULL)) { |
| @@ -354,7 +354,7 @@ ep_io (struct ep_data *epdata, void *buf |
| usb_ep_dequeue (epdata->ep, epdata->req); |
| spin_unlock_irq (&epdata->dev->lock); |
| |
| - wait_event (done.wait, done.done); |
| + swait_event (done.wait, done.done); |
| if (epdata->status == -ECONNRESET) |
| epdata->status = -EINTR; |
| } else { |
| --- a/include/linux/completion.h |
| +++ b/include/linux/completion.h |
| @@ -7,8 +7,7 @@ |
| * Atomic wait-for-completion handler data structures. |
| * See kernel/sched/completion.c for details. |
| */ |
| - |
| -#include <linux/wait.h> |
| +#include <linux/swait.h> |
| |
| /* |
| * struct completion - structure used to maintain state for a "completion" |
| @@ -24,11 +23,11 @@ |
| */ |
| struct completion { |
| unsigned int done; |
| - wait_queue_head_t wait; |
| + struct swait_queue_head wait; |
| }; |
| |
| #define COMPLETION_INITIALIZER(work) \ |
| - { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } |
| + { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) } |
| |
| #define COMPLETION_INITIALIZER_ONSTACK(work) \ |
| ({ init_completion(&work); work; }) |
| @@ -73,7 +72,7 @@ struct completion { |
| static inline void init_completion(struct completion *x) |
| { |
| x->done = 0; |
| - init_waitqueue_head(&x->wait); |
| + init_swait_queue_head(&x->wait); |
| } |
| |
| /** |
| --- a/include/linux/suspend.h |
| +++ b/include/linux/suspend.h |
| @@ -193,6 +193,12 @@ struct platform_freeze_ops { |
| void (*end)(void); |
| }; |
| |
| +#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION) |
| +extern bool pm_in_action; |
| +#else |
| +# define pm_in_action false |
| +#endif |
| + |
| #ifdef CONFIG_SUSPEND |
| /** |
| * suspend_set_ops - set platform dependent suspend operations |
| --- a/include/linux/swait.h |
| +++ b/include/linux/swait.h |
| @@ -87,6 +87,7 @@ static inline int swait_active(struct sw |
| extern void swake_up(struct swait_queue_head *q); |
| extern void swake_up_all(struct swait_queue_head *q); |
| extern void swake_up_locked(struct swait_queue_head *q); |
| +extern void swake_up_all_locked(struct swait_queue_head *q); |
| |
| extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); |
| extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state); |
| --- a/kernel/power/hibernate.c |
| +++ b/kernel/power/hibernate.c |
| @@ -679,6 +679,10 @@ static int load_image_and_restore(void) |
| return error; |
| } |
| |
| +#ifndef CONFIG_SUSPEND |
| +bool pm_in_action; |
| +#endif |
| + |
| /** |
| * hibernate - Carry out system hibernation, including saving the image. |
| */ |
| @@ -692,6 +696,8 @@ int hibernate(void) |
| return -EPERM; |
| } |
| |
| + pm_in_action = true; |
| + |
| lock_system_sleep(); |
| /* The snapshot device should not be opened while we're running */ |
| if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { |
| @@ -769,6 +775,7 @@ int hibernate(void) |
| atomic_inc(&snapshot_device_available); |
| Unlock: |
| unlock_system_sleep(); |
| + pm_in_action = false; |
| return error; |
| } |
| |
| --- a/kernel/power/suspend.c |
| +++ b/kernel/power/suspend.c |
| @@ -546,6 +546,8 @@ static int enter_state(suspend_state_t s |
| return error; |
| } |
| |
| +bool pm_in_action; |
| + |
| /** |
| * pm_suspend - Externally visible function for suspending the system. |
| * @state: System sleep state to enter. |
| @@ -560,6 +562,8 @@ int pm_suspend(suspend_state_t state) |
| if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX) |
| return -EINVAL; |
| |
| + pm_in_action = true; |
| + |
| error = enter_state(state); |
| if (error) { |
| suspend_stats.fail++; |
| @@ -567,6 +571,7 @@ int pm_suspend(suspend_state_t state) |
| } else { |
| suspend_stats.success++; |
| } |
| + pm_in_action = false; |
| return error; |
| } |
| EXPORT_SYMBOL(pm_suspend); |
| --- a/kernel/sched/completion.c |
| +++ b/kernel/sched/completion.c |
| @@ -31,11 +31,11 @@ void complete(struct completion *x) |
| { |
| unsigned long flags; |
| |
| - spin_lock_irqsave(&x->wait.lock, flags); |
| + raw_spin_lock_irqsave(&x->wait.lock, flags); |
| if (x->done != UINT_MAX) |
| x->done++; |
| - __wake_up_locked(&x->wait, TASK_NORMAL, 1); |
| - spin_unlock_irqrestore(&x->wait.lock, flags); |
| + swake_up_locked(&x->wait); |
| + raw_spin_unlock_irqrestore(&x->wait.lock, flags); |
| } |
| EXPORT_SYMBOL(complete); |
| |
| @@ -52,10 +52,10 @@ void complete_all(struct completion *x) |
| { |
| unsigned long flags; |
| |
| - spin_lock_irqsave(&x->wait.lock, flags); |
| + raw_spin_lock_irqsave(&x->wait.lock, flags); |
| x->done = UINT_MAX; |
| - __wake_up_locked(&x->wait, TASK_NORMAL, 0); |
| - spin_unlock_irqrestore(&x->wait.lock, flags); |
| + swake_up_all_locked(&x->wait); |
| + raw_spin_unlock_irqrestore(&x->wait.lock, flags); |
| } |
| EXPORT_SYMBOL(complete_all); |
| |
| @@ -64,20 +64,20 @@ do_wait_for_common(struct completion *x, |
| long (*action)(long), long timeout, int state) |
| { |
| if (!x->done) { |
| - DECLARE_WAITQUEUE(wait, current); |
| + DECLARE_SWAITQUEUE(wait); |
| |
| - __add_wait_queue_tail_exclusive(&x->wait, &wait); |
| + __prepare_to_swait(&x->wait, &wait); |
| do { |
| if (signal_pending_state(state, current)) { |
| timeout = -ERESTARTSYS; |
| break; |
| } |
| __set_current_state(state); |
| - spin_unlock_irq(&x->wait.lock); |
| + raw_spin_unlock_irq(&x->wait.lock); |
| timeout = action(timeout); |
| - spin_lock_irq(&x->wait.lock); |
| + raw_spin_lock_irq(&x->wait.lock); |
| } while (!x->done && timeout); |
| - __remove_wait_queue(&x->wait, &wait); |
| + __finish_swait(&x->wait, &wait); |
| if (!x->done) |
| return timeout; |
| } |
| @@ -92,9 +92,9 @@ static inline long __sched |
| { |
| might_sleep(); |
| |
| - spin_lock_irq(&x->wait.lock); |
| + raw_spin_lock_irq(&x->wait.lock); |
| timeout = do_wait_for_common(x, action, timeout, state); |
| - spin_unlock_irq(&x->wait.lock); |
| + raw_spin_unlock_irq(&x->wait.lock); |
| return timeout; |
| } |
| |
| @@ -280,12 +280,12 @@ bool try_wait_for_completion(struct comp |
| if (!READ_ONCE(x->done)) |
| return 0; |
| |
| - spin_lock_irqsave(&x->wait.lock, flags); |
| + raw_spin_lock_irqsave(&x->wait.lock, flags); |
| if (!x->done) |
| ret = 0; |
| else if (x->done != UINT_MAX) |
| x->done--; |
| - spin_unlock_irqrestore(&x->wait.lock, flags); |
| + raw_spin_unlock_irqrestore(&x->wait.lock, flags); |
| return ret; |
| } |
| EXPORT_SYMBOL(try_wait_for_completion); |
| @@ -314,7 +314,7 @@ bool completion_done(struct completion * |
| * after it's acquired the lock. |
| */ |
| smp_rmb(); |
| - spin_unlock_wait(&x->wait.lock); |
| + raw_spin_unlock_wait(&x->wait.lock); |
| return true; |
| } |
| EXPORT_SYMBOL(completion_done); |
| --- a/kernel/sched/core.c |
| +++ b/kernel/sched/core.c |
| @@ -7527,7 +7527,10 @@ void migrate_disable(void) |
| return; |
| } |
| #ifdef CONFIG_SCHED_DEBUG |
| - WARN_ON_ONCE(p->migrate_disable_atomic); |
| + if (unlikely(p->migrate_disable_atomic)) { |
| + tracing_off(); |
| + WARN_ON_ONCE(1); |
| + } |
| #endif |
| |
| if (p->migrate_disable) { |
| @@ -7557,7 +7560,10 @@ void migrate_enable(void) |
| } |
| |
| #ifdef CONFIG_SCHED_DEBUG |
| - WARN_ON_ONCE(p->migrate_disable_atomic); |
| + if (unlikely(p->migrate_disable_atomic)) { |
| + tracing_off(); |
| + WARN_ON_ONCE(1); |
| + } |
| #endif |
| |
| WARN_ON_ONCE(p->migrate_disable <= 0); |
| --- a/kernel/sched/swait.c |
| +++ b/kernel/sched/swait.c |
| @@ -1,5 +1,6 @@ |
| #include <linux/sched/signal.h> |
| #include <linux/swait.h> |
| +#include <linux/suspend.h> |
| |
| void __init_swait_queue_head(struct swait_queue_head *q, const char *name, |
| struct lock_class_key *key) |
| @@ -29,6 +30,25 @@ void swake_up_locked(struct swait_queue_ |
| } |
| EXPORT_SYMBOL(swake_up_locked); |
| |
| +void swake_up_all_locked(struct swait_queue_head *q) |
| +{ |
| + struct swait_queue *curr; |
| + int wakes = 0; |
| + |
| + while (!list_empty(&q->task_list)) { |
| + |
| + curr = list_first_entry(&q->task_list, typeof(*curr), |
| + task_list); |
| + wake_up_process(curr->task); |
| + list_del_init(&curr->task_list); |
| + wakes++; |
| + } |
| + if (pm_in_action) |
| + return; |
| + WARN(wakes > 2, "complete_all() with %d waiters\n", wakes); |
| +} |
| +EXPORT_SYMBOL(swake_up_all_locked); |
| + |
| void swake_up(struct swait_queue_head *q) |
| { |
| unsigned long flags; |