From 5bd2a0f4a0636485f1188cabc95f7ab8254382af Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@kernel.dk>
Date: Mon, 25 Nov 2019 08:52:30 -0700
Subject: [PATCH] io_uring: async workers should inherit the user creds
[ Upstream commit 181e448d8709e517c9c7b523fcd209f24eb38ca7 ]
If we don't inherit the original task creds, then we can confuse users
like fuse that pass creds in the request header. See the link below for the
identical aio issue.
Link: https://lore.kernel.org/linux-fsdevel/26f0d78e-99ca-2f1b-78b9-433088053a61@scylladb.com/T/#u
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
[PG: use v5.3.11-stable version of "backport".]
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
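
The snippet below is a minimal sketch, not part of the patch, of the
prepare_creds()/override_creds()/revert_creds() lifecycle this change applies
to io_uring's async paths: the ring captures the submitting task's credentials
at setup time, every async worker runs requests under those credentials, and
the reference is dropped at teardown. The struct example_ctx and example_*
functions are hypothetical names used only for illustration; the cred calls
are the <linux/cred.h> interfaces used in the diff.

#include <linux/cred.h>
#include <linux/errno.h>

/* Hypothetical stand-in for io_ring_ctx: only the creds field matters here. */
struct example_ctx {
	struct cred *creds;
};

/* At ring setup: snapshot the submitting task's credentials. */
static int example_setup(struct example_ctx *ctx)
{
	ctx->creds = prepare_creds();
	if (!ctx->creds)
		return -ENOMEM;
	return 0;
}

/* In each async worker: issue the request under the submitter's creds. */
static void example_do_work(struct example_ctx *ctx)
{
	const struct cred *old_cred = override_creds(ctx->creds);

	/* ... process the request; e.g. fuse now sees the original user ... */

	revert_creds(old_cred);
}

/* At ring teardown: drop the reference taken by prepare_creds(). */
static void example_free(struct example_ctx *ctx)
{
	if (ctx->creds)
		put_cred(ctx->creds);
}

Note that revert_creds() must run on every exit path of a worker, which is why
io_poll_complete_work() in the diff replaces its early return with a goto to a
new out: label.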
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5f5d809d529a..d56a78fba43d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -258,6 +258,8 @@ struct io_ring_ctx {
struct user_struct *user;
+ struct cred *creds;
+
struct completion ctx_done;
struct {
@@ -1468,8 +1470,11 @@ static void io_poll_complete_work(struct work_struct *work)
struct io_poll_iocb *poll = &req->poll;
struct poll_table_struct pt = { ._key = poll->events };
struct io_ring_ctx *ctx = req->ctx;
+ const struct cred *old_cred;
__poll_t mask = 0;
+ old_cred = override_creds(ctx->creds);
+
if (!READ_ONCE(poll->canceled))
mask = vfs_poll(poll->file, &pt) & poll->events;
@@ -1484,7 +1489,7 @@ static void io_poll_complete_work(struct work_struct *work)
if (!mask && !READ_ONCE(poll->canceled)) {
add_wait_queue(poll->head, &poll->wait);
spin_unlock_irq(&ctx->completion_lock);
- return;
+ goto out;
}
list_del_init(&req->list);
io_poll_complete(ctx, req, mask);
@@ -1492,6 +1497,8 @@ static void io_poll_complete_work(struct work_struct *work)
io_cqring_ev_posted(ctx);
io_put_req(req);
+out:
+ revert_creds(old_cred);
}
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
@@ -1734,10 +1741,12 @@ static void io_sq_wq_submit_work(struct work_struct *work)
struct io_ring_ctx *ctx = req->ctx;
struct mm_struct *cur_mm = NULL;
struct async_list *async_list;
+ const struct cred *old_cred;
LIST_HEAD(req_list);
mm_segment_t old_fs;
int ret;
+ old_cred = override_creds(ctx->creds);
async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
restart:
do {
@@ -1845,6 +1854,7 @@ static void io_sq_wq_submit_work(struct work_struct *work)
unuse_mm(cur_mm);
mmput(cur_mm);
}
+ revert_creds(old_cred);
}
/*
@@ -2108,6 +2118,7 @@ static int io_sq_thread(void *data)
struct sqe_submit sqes[IO_IOPOLL_BATCH];
struct io_ring_ctx *ctx = data;
struct mm_struct *cur_mm = NULL;
+ const struct cred *old_cred;
mm_segment_t old_fs;
DEFINE_WAIT(wait);
unsigned inflight;
@@ -2117,6 +2128,7 @@ static int io_sq_thread(void *data)
old_fs = get_fs();
set_fs(USER_DS);
+ old_cred = override_creds(ctx->creds);
timeout = inflight = 0;
while (!kthread_should_park()) {
@@ -2235,6 +2247,7 @@ static int io_sq_thread(void *data)
unuse_mm(cur_mm);
mmput(cur_mm);
}
+ revert_creds(old_cred);
kthread_parkme();
@@ -2898,6 +2911,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_unaccount_mem(ctx->user,
ring_pages(ctx->sq_entries, ctx->cq_entries));
free_uid(ctx->user);
+ if (ctx->creds)
+ put_cred(ctx->creds);
kfree(ctx);
}
@@ -3175,6 +3190,12 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
ctx->account_mem = account_mem;
ctx->user = user;
+ ctx->creds = prepare_creds();
+ if (!ctx->creds) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
ret = io_allocate_scq_urings(ctx, p);
if (ret)
goto err;
--
2.7.4