| From 6d816e088c359866f9867057e04f244c608c42fe Mon Sep 17 00:00:00 2001 |
| From: Jens Axboe <axboe@kernel.dk> |
| Date: Tue, 11 Aug 2020 08:04:14 -0600 |
| Subject: io_uring: hold 'ctx' reference around task_work queue + execute |
| |
| From: Jens Axboe <axboe@kernel.dk> |
| |
| commit 6d816e088c359866f9867057e04f244c608c42fe upstream. |
| |
| We're holding the request reference, but we need to go one higher |
| to ensure that the ctx remains valid after the request has finished. |
| If the ring is closed with pending task_work inflight, and the |
| given io_kiocb finishes sync during issue, then we need a reference |
| to the ring itself around the task_work execution cycle. |
| |
| Cc: stable@vger.kernel.org # v5.7+ |
| Reported-by: syzbot+9b260fc33297966f5a8e@syzkaller.appspotmail.com |
| Signed-off-by: Jens Axboe <axboe@kernel.dk> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| |
| --- |
| fs/io_uring.c | 6 ++++++ |
| 1 file changed, 6 insertions(+) |
| |
| --- a/fs/io_uring.c |
| +++ b/fs/io_uring.c |
| @@ -4214,6 +4214,8 @@ static int __io_async_wake(struct io_kio |
| tsk = req->task; |
| req->result = mask; |
| init_task_work(&req->task_work, func); |
| + percpu_ref_get(&req->ctx->refs); |
| + |
| /* |
| * If this fails, then the task is exiting. When a task exits, the |
| * work gets canceled, so just cancel this request as well instead |
| @@ -4313,6 +4315,7 @@ static void io_poll_task_handler(struct |
| static void io_poll_task_func(struct callback_head *cb) |
| { |
| struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); |
| + struct io_ring_ctx *ctx = req->ctx; |
| struct io_kiocb *nxt = NULL; |
| |
| io_poll_task_handler(req, &nxt); |
| @@ -4323,6 +4326,7 @@ static void io_poll_task_func(struct cal |
| __io_queue_sqe(nxt, NULL); |
| mutex_unlock(&ctx->uring_lock); |
| } |
| + percpu_ref_put(&ctx->refs); |
| } |
| |
| static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode, |
| @@ -4439,6 +4443,7 @@ static void io_async_task_func(struct ca |
| |
| if (io_poll_rewait(req, &apoll->poll)) { |
| spin_unlock_irq(&ctx->completion_lock); |
| + percpu_ref_put(&ctx->refs); |
| return; |
| } |
| |
| @@ -4476,6 +4481,7 @@ end_req: |
| |
| kfree(apoll->double_poll); |
| kfree(apoll); |
| + percpu_ref_put(&ctx->refs); |
| } |
| |
| static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync, |