| Subject: workqueue: Use normal rcu |
| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Wed, 24 Jul 2013 15:26:54 +0200 |
| |
| There is no need for sched RCU here. The undocumented reason why sched |
| RCU is used is to avoid a few explicit rcu_read_lock()/unlock() pairs |
| by abusing the fact that sched RCU reader-side critical sections are |
| also protected by preempt- or irq-disabled regions. Convert the code |
| to plain RCU and make the read-side critical sections explicit. |
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| --- |
| kernel/workqueue.c | 85 +++++++++++++++++++++++++++++------------------------ |
| 1 file changed, 47 insertions(+), 38 deletions(-) |
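| |
| For illustration only (a sketch, not part of the patch itself): the |
| implicit sched RCU reader pattern described above versus the explicit |
| reader used from here on. "ptr" and "p" are placeholder names, not |
| identifiers from workqueue.c: |
| |
|   /* implicit sched-RCU reader: relies on preemption being disabled */ |
|   preempt_disable(); |
|   p = rcu_dereference_sched(ptr); |
|   /* ... access *p ... */ |
|   preempt_enable(); |
| |
|   /* explicit reader, required once plain RCU protects the data */ |
|   rcu_read_lock(); |
|   p = rcu_dereference(ptr); |
|   /* ... access *p ... */ |
|   rcu_read_unlock(); |
| |
| On the update side, call_rcu_sched() is replaced by call_rcu() so the |
| grace period pairs with the explicit readers. |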
| |
| --- a/kernel/workqueue.c |
| +++ b/kernel/workqueue.c |
| @@ -128,11 +128,11 @@ enum { |
| * |
| * PL: wq_pool_mutex protected. |
| * |
| - * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads. |
| + * PR: wq_pool_mutex protected for writes. RCU protected for reads. |
| * |
| * WQ: wq->mutex protected. |
| * |
| - * WR: wq->mutex protected for writes. Sched-RCU protected for reads. |
| + * WR: wq->mutex protected for writes. RCU protected for reads. |
| * |
| * MD: wq_mayday_lock protected. |
| */ |
| @@ -177,7 +177,7 @@ struct worker_pool { |
| atomic_t nr_running ____cacheline_aligned_in_smp; |
| |
| /* |
| - * Destruction of pool is sched-RCU protected to allow dereferences |
| + * Destruction of pool is RCU protected to allow dereferences |
| * from get_work_pool(). |
| */ |
| struct rcu_head rcu; |
| @@ -206,7 +206,7 @@ struct pool_workqueue { |
| /* |
| * Release of unbound pwq is punted to system_wq. See put_pwq() |
| * and pwq_unbound_release_workfn() for details. pool_workqueue |
| - * itself is also sched-RCU protected so that the first pwq can be |
| + * itself is also RCU protected so that the first pwq can be |
| * determined without grabbing wq->mutex. |
| */ |
| struct work_struct unbound_release_work; |
| @@ -314,14 +314,14 @@ static void copy_workqueue_attrs(struct |
| #include <trace/events/workqueue.h> |
| |
| #define assert_rcu_or_pool_mutex() \ |
| - rcu_lockdep_assert(rcu_read_lock_sched_held() || \ |
| + rcu_lockdep_assert(rcu_read_lock_held() || \ |
| lockdep_is_held(&wq_pool_mutex), \ |
| - "sched RCU or wq_pool_mutex should be held") |
| + "RCU or wq_pool_mutex should be held") |
| |
| #define assert_rcu_or_wq_mutex(wq) \ |
| - rcu_lockdep_assert(rcu_read_lock_sched_held() || \ |
| + rcu_lockdep_assert(rcu_read_lock_held() || \ |
| lockdep_is_held(&wq->mutex), \ |
| - "sched RCU or wq->mutex should be held") |
| + "RCU or wq->mutex should be held") |
| |
| #ifdef CONFIG_LOCKDEP |
| #define assert_manager_or_pool_lock(pool) \ |
| @@ -343,7 +343,7 @@ static void copy_workqueue_attrs(struct |
| * @pool: iteration cursor |
| * @pi: integer used for iteration |
| * |
| - * This must be called either with wq_pool_mutex held or sched RCU read |
| + * This must be called either with wq_pool_mutex held or RCU read |
| * locked. If the pool needs to be used beyond the locking in effect, the |
| * caller is responsible for guaranteeing that the pool stays online. |
| * |
| @@ -376,7 +376,7 @@ static void copy_workqueue_attrs(struct |
| * @pwq: iteration cursor |
| * @wq: the target workqueue |
| * |
| - * This must be called either with wq->mutex held or sched RCU read locked. |
| + * This must be called either with wq->mutex held or RCU read locked. |
| * If the pwq needs to be used beyond the locking in effect, the caller is |
| * responsible for guaranteeing that the pwq stays online. |
| * |
| @@ -524,7 +524,7 @@ static int worker_pool_assign_id(struct |
| * @wq: the target workqueue |
| * @node: the node ID |
| * |
| - * This must be called either with pwq_lock held or sched RCU read locked. |
| + * This must be called either with pwq_lock held or RCU read locked. |
| * If the pwq needs to be used beyond the locking in effect, the caller is |
| * responsible for guaranteeing that the pwq stays online. |
| */ |
| @@ -628,8 +628,8 @@ static struct pool_workqueue *get_work_p |
| * Return the worker_pool @work was last associated with. %NULL if none. |
| * |
| * Pools are created and destroyed under wq_pool_mutex, and allows read |
| - * access under sched-RCU read lock. As such, this function should be |
| - * called under wq_pool_mutex or with preemption disabled. |
| + * access under RCU read lock. As such, this function should be |
| + * called under wq_pool_mutex or inside an rcu_read_lock() region. |
| * |
| * All fields of the returned pool are accessible as long as the above |
| * mentioned locking is in effect. If the returned pool needs to be used |
| @@ -1060,7 +1060,7 @@ static void put_pwq_unlocked(struct pool |
| { |
| if (pwq) { |
| /* |
| - * As both pwqs and pools are sched-RCU protected, the |
| + * As both pwqs and pools are RCU protected, the |
| * following lock operations are safe. |
| */ |
| spin_lock_irq(&pwq->pool->lock); |
| @@ -1184,6 +1184,7 @@ static int try_to_grab_pending(struct wo |
| if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) |
| return 0; |
| |
| + rcu_read_lock(); |
| /* |
| * The queueing is in progress, or it is already queued. Try to |
| * steal it from ->worklist without clearing WORK_STRUCT_PENDING. |
| @@ -1222,10 +1223,12 @@ static int try_to_grab_pending(struct wo |
| set_work_pool_and_keep_pending(work, pool->id); |
| |
| spin_unlock(&pool->lock); |
| + rcu_read_unlock(); |
| return 1; |
| } |
| spin_unlock(&pool->lock); |
| fail: |
| + rcu_read_unlock(); |
| local_irq_restore(*flags); |
| if (work_is_canceling(work)) |
| return -ENOENT; |
| @@ -1306,6 +1309,8 @@ static void __queue_work(int cpu, struct |
| if (unlikely(wq->flags & __WQ_DRAINING) && |
| WARN_ON_ONCE(!is_chained_work(wq))) |
| return; |
| + |
| + rcu_read_lock(); |
| retry: |
| if (req_cpu == WORK_CPU_UNBOUND) |
| cpu = raw_smp_processor_id(); |
| @@ -1362,10 +1367,8 @@ retry: |
| /* pwq determined, queue */ |
| trace_workqueue_queue_work(req_cpu, pwq, work); |
| |
| - if (WARN_ON(!list_empty(&work->entry))) { |
| - spin_unlock(&pwq->pool->lock); |
| - return; |
| - } |
| + if (WARN_ON(!list_empty(&work->entry))) |
| + goto out; |
| |
| pwq->nr_in_flight[pwq->work_color]++; |
| work_flags = work_color_to_flags(pwq->work_color); |
| @@ -1381,7 +1384,9 @@ retry: |
| |
| insert_work(pwq, work, worklist, work_flags); |
| |
| +out: |
| spin_unlock(&pwq->pool->lock); |
| + rcu_read_unlock(); |
| } |
| |
| /** |
| @@ -2763,14 +2768,14 @@ static bool start_flush_work(struct work |
| |
| might_sleep(); |
| |
| - local_irq_disable(); |
| + rcu_read_lock(); |
| pool = get_work_pool(work); |
| if (!pool) { |
| - local_irq_enable(); |
| + rcu_read_unlock(); |
| return false; |
| } |
| |
| - spin_lock(&pool->lock); |
| + spin_lock_irq(&pool->lock); |
| /* see the comment in try_to_grab_pending() with the same code */ |
| pwq = get_work_pwq(work); |
| if (pwq) { |
| @@ -2797,10 +2802,11 @@ static bool start_flush_work(struct work |
| else |
| lock_map_acquire_read(&pwq->wq->lockdep_map); |
| lock_map_release(&pwq->wq->lockdep_map); |
| - |
| + rcu_read_unlock(); |
| return true; |
| already_gone: |
| spin_unlock_irq(&pool->lock); |
| + rcu_read_unlock(); |
| return false; |
| } |
| |
| @@ -3111,7 +3117,8 @@ static ssize_t wq_pool_ids_show(struct d |
| const char *delim = ""; |
| int node, written = 0; |
| |
| - rcu_read_lock_sched(); |
| + get_online_cpus(); |
| + rcu_read_lock(); |
| for_each_node(node) { |
| written += scnprintf(buf + written, PAGE_SIZE - written, |
| "%s%d:%d", delim, node, |
| @@ -3119,7 +3126,8 @@ static ssize_t wq_pool_ids_show(struct d |
| delim = " "; |
| } |
| written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); |
| - rcu_read_unlock_sched(); |
| + rcu_read_unlock(); |
| + put_online_cpus(); |
| |
| return written; |
| } |
| @@ -3482,7 +3490,7 @@ static void rcu_free_pool(struct rcu_hea |
| * put_unbound_pool - put a worker_pool |
| * @pool: worker_pool to put |
| * |
| - * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU |
| + * Put @pool. If its refcnt reaches zero, it gets destroyed in an RCU |
| * safe manner. get_unbound_pool() calls this function on its failure path |
| * and this function should be able to release pools which went through, |
| * successfully or not, init_worker_pool(). |
| @@ -3529,8 +3537,8 @@ static void put_unbound_pool(struct work |
| del_timer_sync(&pool->idle_timer); |
| del_timer_sync(&pool->mayday_timer); |
| |
| - /* sched-RCU protected to allow dereferences from get_work_pool() */ |
| - call_rcu_sched(&pool->rcu, rcu_free_pool); |
| + /* RCU protected to allow dereferences from get_work_pool() */ |
| + call_rcu(&pool->rcu, rcu_free_pool); |
| } |
| |
| /** |
| @@ -3640,7 +3648,7 @@ static void pwq_unbound_release_workfn(s |
| put_unbound_pool(pool); |
| mutex_unlock(&wq_pool_mutex); |
| |
| - call_rcu_sched(&pwq->rcu, rcu_free_pwq); |
| + call_rcu(&pwq->rcu, rcu_free_pwq); |
| |
| /* |
| * If we're the last pwq going away, @wq is already dead and no one |
| @@ -4338,7 +4346,8 @@ bool workqueue_congested(int cpu, struct |
| struct pool_workqueue *pwq; |
| bool ret; |
| |
| - rcu_read_lock_sched(); |
| + rcu_read_lock(); |
| + preempt_disable(); |
| |
| if (cpu == WORK_CPU_UNBOUND) |
| cpu = smp_processor_id(); |
| @@ -4349,7 +4358,8 @@ bool workqueue_congested(int cpu, struct |
| pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); |
| |
| ret = !list_empty(&pwq->delayed_works); |
| - rcu_read_unlock_sched(); |
| + preempt_enable(); |
| + rcu_read_unlock(); |
| |
| return ret; |
| } |
| @@ -4375,16 +4385,15 @@ unsigned int work_busy(struct work_struc |
| if (work_pending(work)) |
| ret |= WORK_BUSY_PENDING; |
| |
| - local_irq_save(flags); |
| + rcu_read_lock(); |
| pool = get_work_pool(work); |
| if (pool) { |
| - spin_lock(&pool->lock); |
| + spin_lock_irqsave(&pool->lock, flags); |
| if (find_worker_executing_work(pool, work)) |
| ret |= WORK_BUSY_RUNNING; |
| - spin_unlock(&pool->lock); |
| + spin_unlock_irqrestore(&pool->lock, flags); |
| } |
| - local_irq_restore(flags); |
| - |
| + rcu_read_unlock(); |
| return ret; |
| } |
| EXPORT_SYMBOL_GPL(work_busy); |
| @@ -4829,16 +4838,16 @@ bool freeze_workqueues_busy(void) |
| * nr_active is monotonically decreasing. It's safe |
| * to peek without lock. |
| */ |
| - rcu_read_lock_sched(); |
| + rcu_read_lock(); |
| for_each_pwq(pwq, wq) { |
| WARN_ON_ONCE(pwq->nr_active < 0); |
| if (pwq->nr_active) { |
| busy = true; |
| - rcu_read_unlock_sched(); |
| + rcu_read_unlock(); |
| goto out_unlock; |
| } |
| } |
| - rcu_read_unlock_sched(); |
| + rcu_read_unlock(); |
| } |
| out_unlock: |
| mutex_unlock(&wq_pool_mutex); |