| From 9f43f31b479282293b797c2b8d64b750fec8455a Mon Sep 17 00:00:00 2001 |
| From: Jens Axboe <axboe@kernel.dk> |
| Date: Wed, 4 Dec 2019 08:53:43 -0700 |
| Subject: [PATCH] io_uring: ensure req->submit is copied when req is deferred |
| MIME-Version: 1.0 |
| Content-Type: text/plain; charset=UTF-8 |
| Content-Transfer-Encoding: 8bit |
| |
| There's an issue with deferred requests through drain, where if we do |
| need to defer, we're not copying over the sqe_submit state correctly. |
| This can result in using uninitialized data when we then later go and |
| submit the deferred request, like this check in __io_submit_sqe(): |
| |
| if (unlikely(s->index >= ctx->sq_entries)) |
| return -EINVAL; |
| |
| with 's' being uninitialized, we can randomly fail this check. Fix this |
| by copying sqe_submit state when we defer a request. |
| |
| This fix is not needed in mainline, because the issue was fixed there |
| as part of a cleanup series before anyone realized we had it. That |
| series removed the separate states of ->index vs ->submit.sqe, but it |
| is not something I was comfortable putting into stable, hence this |
| much simpler addition. |
| Here's the patch in the series that fixes the same issue: |
| |
| commit cf6fd4bd559ee61a4454b161863c8de6f30f8dca |
| Author: Pavel Begunkov <asml.silence@gmail.com> |
| Date: Mon Nov 25 23:14:39 2019 +0300 |
| |
| io_uring: inline struct sqe_submit |
| |
| Reported-by: Andres Freund <andres@anarazel.de> |
| Reported-by: Tomáš Chaloupka |
| Signed-off-by: Jens Axboe <axboe@kernel.dk> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com> |
| |
| diff --git a/fs/io_uring.c b/fs/io_uring.c |
| index f9104032ff2f..abfbc55e8782 100644 |
| --- a/fs/io_uring.c |
| +++ b/fs/io_uring.c |
| @@ -1620,7 +1620,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
| } |
| |
| static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req, |
| - const struct io_uring_sqe *sqe) |
| + struct sqe_submit *s) |
| { |
| struct io_uring_sqe *sqe_copy; |
| |
| @@ -1638,7 +1638,8 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req, |
| return 0; |
| } |
| |
| - memcpy(sqe_copy, sqe, sizeof(*sqe_copy)); |
| + memcpy(&req->submit, s, sizeof(*s)); |
| + memcpy(sqe_copy, s->sqe, sizeof(*sqe_copy)); |
| req->submit.sqe = sqe_copy; |
| |
| INIT_WORK(&req->work, io_sq_wq_submit_work); |
| @@ -1953,7 +1954,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s, |
| if (unlikely(ret)) |
| goto out; |
| |
| - ret = io_req_defer(ctx, req, s->sqe); |
| + ret = io_req_defer(ctx, req, s); |
| if (ret) { |
| if (ret == -EIOCBQUEUED) |
| ret = 0; |
| -- |
| 2.7.4 |
| |