| From 87fb7d0cb7f8eff414a58846c364b5ade6311d1f Mon Sep 17 00:00:00 2001 |
| From: "J. Bruce Fields" <bfields@redhat.com> |
| Date: Fri, 17 Aug 2012 17:31:53 -0400 |
| Subject: [PATCH] svcrpc: fix svc_xprt_enqueue/svc_recv busy-looping |
| |
| commit d10f27a750312ed5638c876e4bd6aa83664cccd8 upstream. |
| |
| The rpc server tries to ensure that there will be room to send a reply |
| before it receives a request. |
| |
| It does this by tracking, in xpt_reserved, an upper bound on the total |
| size of the replies that it has already committed to for the socket. |
| |
| Currently it is adding in the estimate for a new reply *before* it |
| checks whether there is space available. If it finds that there is not |
| space, it then subtracts the estimate back out. |
| |
| This may lead the subsequent svc_xprt_enqueue to decide that there is |
| space after all. |
| |
| The result is a svc_recv() that will repeatedly return -EAGAIN, causing |
| server threads to loop without doing any actual work. |
| |
| Reported-by: Michael Tokarev <mjt@tls.msk.ru> |
| Tested-by: Michael Tokarev <mjt@tls.msk.ru> |
| Signed-off-by: J. Bruce Fields <bfields@redhat.com> |
| Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com> |
| --- |
| net/sunrpc/svc_xprt.c | 7 ++----- |
| 1 file changed, 2 insertions(+), 5 deletions(-) |
| |
| diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c |
| index 957a7e88e827..afa0bceb67ad 100644 |
| --- a/net/sunrpc/svc_xprt.c |
| +++ b/net/sunrpc/svc_xprt.c |
| @@ -310,7 +310,6 @@ static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp) |
| */ |
| void svc_xprt_enqueue(struct svc_xprt *xprt) |
| { |
| - struct svc_serv *serv = xprt->xpt_server; |
| struct svc_pool *pool; |
| struct svc_rqst *rqstp; |
| int cpu; |
| @@ -384,8 +383,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) |
| rqstp, rqstp->rq_xprt); |
| rqstp->rq_xprt = xprt; |
| svc_xprt_get(xprt); |
| - rqstp->rq_reserved = serv->sv_max_mesg; |
| - atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); |
| pool->sp_stats.threads_woken++; |
| BUG_ON(xprt->xpt_pool != pool); |
| wake_up(&rqstp->rq_wait); |
| @@ -663,8 +660,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) |
| if (xprt) { |
| rqstp->rq_xprt = xprt; |
| svc_xprt_get(xprt); |
| - rqstp->rq_reserved = serv->sv_max_mesg; |
| - atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); |
| } else { |
| /* No data pending. Go to sleep */ |
| svc_thread_enqueue(pool, rqstp); |
| @@ -754,6 +749,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) |
| } else |
| len = xprt->xpt_ops->xpo_recvfrom(rqstp); |
| dprintk("svc: got len=%d\n", len); |
| + rqstp->rq_reserved = serv->sv_max_mesg; |
| + atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); |
| } |
| |
| /* No data, incomplete (TCP) read, or accept() */ |
| -- |
| 1.8.5.2 |
| |