From c16cb4e2a4b1a487ca7feae5931dfb22ac495b76 Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@kernel.dk>
Date: Tue, 12 Aug 2025 08:30:11 -0600
Subject: io_uring/net: commit partial buffers on retry

From: Jens Axboe <axboe@kernel.dk>

Commit 41b70df5b38bc80967d2e0ed55cc3c3896bba781 upstream.

Ring provided buffers are potentially only valid within the single
execution context in which they were acquired. io_uring deals with this
and invalidates them on retry. But on the networking side, if
MSG_WAITALL is set, or if the socket is of the streaming type and too
little was processed, then the request will hang on to the buffer rather
than recycle or commit it. This is problematic for two reasons:

1) If someone unregisters the provided buffer ring before a later retry,
   then the req->buf_list will no longer be valid.

2) If multiple sockets are using the same buffer group, then multiple
   receives can consume the same memory. This can cause data corruption
   in the application, as either receive could land in the same
   userspace buffer (see the sketch below).
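
As an illustration of 2) (editorial, not part of the original report):
a minimal userspace sketch that shares one buffer ring between two
sockets, assuming liburing 2.4+'s buf-ring convenience helpers. BGID,
NBUFS, BUF_SIZE and the function names are made up for the example.

	#include <liburing.h>
	#include <stdlib.h>
	#include <sys/socket.h>

	#define BGID		0
	#define NBUFS		8
	#define BUF_SIZE	4096

	static void queue_recv(struct io_uring *ring, int fd)
	{
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

		/* let the kernel pick a buffer from group BGID at recv time */
		io_uring_prep_recv(sqe, fd, NULL, 0, MSG_WAITALL);
		sqe->flags |= IOSQE_BUFFER_SELECT;
		sqe->buf_group = BGID;
	}

	/* Two sockets, one buffer group. Before this fix, a partial
	 * MSG_WAITALL recv on sock_a that got retried could hang on to a
	 * buffer that the recv on sock_b was handed as well, so both
	 * receives could land in the same userspace memory. */
	static int setup_and_submit(struct io_uring *ring, int sock_a, int sock_b)
	{
		struct io_uring_buf_ring *br;
		int i, ret;

		br = io_uring_setup_buf_ring(ring, NBUFS, BGID, 0, &ret);
		if (!br)
			return ret;
		for (i = 0; i < NBUFS; i++)
			io_uring_buf_ring_add(br, malloc(BUF_SIZE), BUF_SIZE, i,
					      io_uring_buf_ring_mask(NBUFS), i);
		io_uring_buf_ring_advance(br, NBUFS);

		queue_recv(ring, sock_a);
		queue_recv(ring, sock_b);
		return io_uring_submit(ring);
	}
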
Fix this by disallowing partial retries from pinning a provided buffer
across multiple executions: if ring provided buffers are used and some
data has already been transferred, commit the buffer before the retry
rather than holding on to it.
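
As a reading aid, here is the helper this patch adds, with editorial
comments that restate the reasoning above (the comments are not part of
the patch; io_kbuf_recycle_ring() is defined in io_uring/kbuf.h in this
tree):

	static void io_net_kbuf_recyle(struct io_kiocb *req)
	{
		/* some data has already been transferred for this request */
		req->flags |= REQ_F_PARTIAL_IO;
		/* If a ring provided buffer is attached, commit it now
		 * instead of pinning it across the retry: the same buffer
		 * can then not be handed out twice, and a later unregister
		 * of the buffer ring cannot leave req->buf_list dangling. */
		if (req->flags & REQ_F_BUFFER_RING)
			io_kbuf_recycle_ring(req);
	}
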
Cc: stable@vger.kernel.org
Reported-by: pt x <superman.xpt@gmail.com>
Fixes: c56e022c0a27 ("io_uring: add support for user mapped provided buffer ring")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 io_uring/net.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -351,6 +351,13 @@ static int io_setup_async_addr(struct io
 	return -EAGAIN;
 }
 
+static void io_net_kbuf_recyle(struct io_kiocb *req)
+{
+	req->flags |= REQ_F_PARTIAL_IO;
+	if (req->flags & REQ_F_BUFFER_RING)
+		io_kbuf_recycle_ring(req);
+}
+
 int io_sendmsg_prep_async(struct io_kiocb *req)
 {
 	int ret;
@@ -442,7 +449,7 @@ int io_sendmsg(struct io_kiocb *req, uns
 			kmsg->msg.msg_controllen = 0;
 			kmsg->msg.msg_control = NULL;
 			sr->done_io += ret;
-			req->flags |= REQ_F_PARTIAL_IO;
+			io_net_kbuf_recyle(req);
 			return io_setup_async_msg(req, kmsg, issue_flags);
 		}
 		if (ret == -ERESTARTSYS)
@@ -521,7 +528,7 @@ int io_send(struct io_kiocb *req, unsign
 			sr->len -= ret;
 			sr->buf += ret;
 			sr->done_io += ret;
-			req->flags |= REQ_F_PARTIAL_IO;
+			io_net_kbuf_recyle(req);
 			return io_setup_async_addr(req, &__address, issue_flags);
 		}
 		if (ret == -ERESTARTSYS)
@@ -891,7 +898,7 @@ retry_multishot:
 		}
 		if (ret > 0 && io_net_retry(sock, flags)) {
 			sr->done_io += ret;
-			req->flags |= REQ_F_PARTIAL_IO;
+			io_net_kbuf_recyle(req);
 			return io_setup_async_msg(req, kmsg, issue_flags);
 		}
 		if (ret == -ERESTARTSYS)
@@ -991,7 +998,7 @@ retry_multishot:
 			sr->len -= ret;
 			sr->buf += ret;
 			sr->done_io += ret;
-			req->flags |= REQ_F_PARTIAL_IO;
+			io_net_kbuf_recyle(req);
 			return -EAGAIN;
 		}
 		if (ret == -ERESTARTSYS)
@@ -1235,7 +1242,7 @@ int io_send_zc(struct io_kiocb *req, uns
 			zc->len -= ret;
 			zc->buf += ret;
 			zc->done_io += ret;
-			req->flags |= REQ_F_PARTIAL_IO;
+			io_net_kbuf_recyle(req);
 			return io_setup_async_addr(req, &__address, issue_flags);
 		}
 		if (ret == -ERESTARTSYS)
@@ -1306,7 +1313,7 @@ int io_sendmsg_zc(struct io_kiocb *req,
 
 		if (ret > 0 && io_net_retry(sock, flags)) {
 			sr->done_io += ret;
-			req->flags |= REQ_F_PARTIAL_IO;
+			io_net_kbuf_recyle(req);
 			return io_setup_async_msg(req, kmsg, issue_flags);
 		}
 		if (ret == -ERESTARTSYS)