| From 2aca31e23f90adcb6e54cff43260298d06c5ae0b Mon Sep 17 00:00:00 2001 |
| From: Roman Penyaev <rpenyaev@suse.de> |
| Date: Sat, 21 Mar 2020 18:22:30 -0700 |
| Subject: [PATCH] epoll: fix possible lost wakeup on epoll_ctl() path |
| |
| commit 1b53734bd0b2feed8e7761771b2e76fc9126ea0c upstream. |
| |
| This fixes possible lost wakeup introduced by commit a218cc491420. |
| Originally modifications to ep->wq were serialized by ep->wq.lock, but |
| in commit a218cc491420 ("epoll: use rwlock in order to reduce |
| ep_poll_callback() contention") a new rw lock was introduced in order to |
| relax fd event path, i.e. callers of ep_poll_callback() function. |
| |
| After the change ep_modify and ep_insert (both are called on epoll_ctl() |
| path) were switched to ep->lock, but ep_poll (epoll_wait) was using |
| ep->wq.lock on wqueue list modification. |
| |
| The bug doesn't lead to any wqueue list corruptions, because wake up |
| path and list modifications were serialized by ep->wq.lock internally, |
| but the actual waitqueue_active() check prior to the wake_up() call can be |
| reordered with modifications of the ep ready list, thus the wake up can be lost. |
| |
| And yes, can be healed by explicit smp_mb(): |
| |
| list_add_tail(&epi->rdllink, &ep->rdllist); |
| smp_mb(); |
| if (waitqueue_active(&ep->wq)) |
| wake_up(&ep->wq); |
| |
| But let's make it simple, thus the current patch replaces ep->wq.lock with |
| the ep->lock for wqueue modifications, thus the wake up path always observes |
| activeness of the wqueue correctly. |
| |
| Fixes: a218cc491420 ("epoll: use rwlock in order to reduce ep_poll_callback() contention") |
| Reported-by: Max Neunhoeffer <max@arangodb.com> |
| Signed-off-by: Roman Penyaev <rpenyaev@suse.de> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| Tested-by: Max Neunhoeffer <max@arangodb.com> |
| Cc: Jakub Kicinski <kuba@kernel.org> |
| Cc: Christopher Kohlhoff <chris.kohlhoff@clearpool.io> |
| Cc: Davidlohr Bueso <dbueso@suse.de> |
| Cc: Jason Baron <jbaron@akamai.com> |
| Cc: Jes Sorensen <jes.sorensen@gmail.com> |
| Cc: <stable@vger.kernel.org> [5.1+] |
| Link: http://lkml.kernel.org/r/20200214170211.561524-1-rpenyaev@suse.de |
| References: https://bugzilla.kernel.org/show_bug.cgi?id=205933 |
| Bisected-by: Max Neunhoeffer <max@arangodb.com> |
| Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> |
| Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com> |
| |
| diff --git a/fs/eventpoll.c b/fs/eventpoll.c |
| index 0f9c073d78d5..abe19510cd58 100644 |
| --- a/fs/eventpoll.c |
| +++ b/fs/eventpoll.c |
| @@ -1881,9 +1881,9 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, |
| waiter = true; |
| init_waitqueue_entry(&wait, current); |
| |
| - spin_lock_irq(&ep->wq.lock); |
| + write_lock_irq(&ep->lock); |
| __add_wait_queue_exclusive(&ep->wq, &wait); |
| - spin_unlock_irq(&ep->wq.lock); |
| + write_unlock_irq(&ep->lock); |
| } |
| |
| for (;;) { |
| @@ -1931,9 +1931,9 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, |
| goto fetch_events; |
| |
| if (waiter) { |
| - spin_lock_irq(&ep->wq.lock); |
| + write_lock_irq(&ep->lock); |
| __remove_wait_queue(&ep->wq, &wait); |
| - spin_unlock_irq(&ep->wq.lock); |
| + write_unlock_irq(&ep->lock); |
| } |
| |
| return res; |
| -- |
| 2.7.4 |
| |