From d5039b9b4833a5e7d8cabb87ba6c54f9ade9b022 Mon Sep 17 00:00:00 2001
From: Josef Bacik <jbacik@fb.com>
Date: Thu, 19 Jan 2017 16:08:49 -0500
Subject: nbd: only set MSG_MORE when we have more to send
[ Upstream commit d61b7f972dab2a7d187c38254845546dfc8eed85 ]
A user noticed that write performance was horrible over loopback, and we
traced it to an inversion of when we need to set MSG_MORE. It should be
set when we have more bvecs to send, not when we are on the last bvec.
This patch took the test from 20 iops to 78k iops.
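
For background: on a TCP socket, MSG_MORE tells the stack that more data
is about to follow, so it may hold back a partial segment and coalesce
it with the next send. Setting it on the *last* piece of a request
therefore leaves the tail of the write sitting in the socket buffer
until a timer or later traffic flushes it, which is consistent with the
iops collapse described above. A minimal userspace sketch of the
intended pattern follows (fd, hdr, bufs, and nbufs are hypothetical
illustration names, not driver code):

  #include <sys/socket.h>
  #include <sys/uio.h>

  /*
   * Sketch only: send a request header followed by nbufs data buffers.
   * MSG_MORE is set on every piece except the final one, so the stack
   * may coalesce the pieces but still pushes the last segment to the
   * wire immediately. send() error handling is omitted for brevity.
   */
  static void send_request(int fd, const void *hdr, size_t hdr_len,
                           const struct iovec *bufs, int nbufs)
  {
          int i;

          /* Header is always followed by data when nbufs > 0. */
          send(fd, hdr, hdr_len, nbufs ? MSG_MORE : 0);

          for (i = 0; i < nbufs; i++) {
                  /* More to come -> MSG_MORE; final piece -> 0. */
                  int flags = (i == nbufs - 1) ? 0 : MSG_MORE;

                  send(fd, bufs[i].iov_base, bufs[i].iov_len, flags);
          }
  }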
Signed-off-by: Josef Bacik <jbacik@fb.com>
Fixes: 429a787be679 ("nbd: fix use-after-free of rq/bio in the xmit path")
Signed-off-by: Jens Axboe <axboe@fb.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
drivers/block/nbd.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4d30da269060..42a53956aefe 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -269,7 +269,7 @@ static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
 {
 	struct request *req = blk_mq_rq_from_pdu(cmd);
-	int result, flags;
+	int result;
 	struct nbd_request request;
 	unsigned long size = blk_rq_bytes(req);
 	struct bio *bio;
@@ -309,7 +309,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
 	if (type != NBD_CMD_WRITE)
 		return 0;
 
-	flags = 0;
 	bio = req->bio;
 	while (bio) {
 		struct bio *next = bio->bi_next;
@@ -318,9 +317,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
 
 		bio_for_each_segment(bvec, bio, iter) {
 			bool is_last = !next && bio_iter_last(bvec, iter);
+			int flags = is_last ? 0 : MSG_MORE;
 
-			if (is_last)
-				flags = MSG_MORE;
 			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
 				cmd, bvec.bv_len);
 			result = sock_send_bvec(nbd, &bvec, flags);
--
2.17.1