| From foo@baz Thu Dec 21 09:02:40 CET 2017 |
| From: Sagi Grimberg <sagi@grimberg.me> |
| Date: Mon, 13 Mar 2017 13:27:51 +0200 |
| Subject: nvme-loop: handle cpu unplug when re-establishing the controller |
| |
| From: Sagi Grimberg <sagi@grimberg.me> |
| |
| |
| [ Upstream commit 945dd5bacc8978439af276976b5dcbbd42333dbc ] |
| |
| If a cpu unplug event has occurred, we need to take the minimum |
| of the provided nr_io_queues and the number of online cpus, |
| otherwise we won't be able to connect them as blk-mq mapping |
| won't dispatch to those queues. |
| |
| Reviewed-by: Christoph Hellwig <hch@lst.de> |
| Signed-off-by: Sagi Grimberg <sagi@grimberg.me> |
| Signed-off-by: Sasha Levin <alexander.levin@verizon.com> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| --- |
| drivers/nvme/target/loop.c | 88 +++++++++++++++++++++++++-------------------- |
| 1 file changed, 50 insertions(+), 38 deletions(-) |
| |
| --- a/drivers/nvme/target/loop.c |
| +++ b/drivers/nvme/target/loop.c |
| @@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event |
| static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl, |
| struct nvme_loop_iod *iod, unsigned int queue_idx) |
| { |
| - BUG_ON(queue_idx >= ctrl->queue_count); |
| - |
| iod->req.cmd = &iod->cmd; |
| iod->req.rsp = &iod->rsp; |
| iod->queue = &ctrl->queues[queue_idx]; |
| @@ -314,6 +312,43 @@ free_ctrl: |
| kfree(ctrl); |
| } |
| |
| +static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) |
| +{ |
| + int i; |
| + |
| + for (i = 1; i < ctrl->queue_count; i++) |
| + nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); |
| +} |
| + |
| +static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) |
| +{ |
| + struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; |
| + unsigned int nr_io_queues; |
| + int ret, i; |
| + |
| + nr_io_queues = min(opts->nr_io_queues, num_online_cpus()); |
| + ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); |
| + if (ret || !nr_io_queues) |
| + return ret; |
| + |
| + dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues); |
| + |
| + for (i = 1; i <= nr_io_queues; i++) { |
| + ctrl->queues[i].ctrl = ctrl; |
| + ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); |
| + if (ret) |
| + goto out_destroy_queues; |
| + |
| + ctrl->queue_count++; |
| + } |
| + |
| + return 0; |
| + |
| +out_destroy_queues: |
| + nvme_loop_destroy_io_queues(ctrl); |
| + return ret; |
| +} |
| + |
| static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) |
| { |
| int error; |
| @@ -385,17 +420,13 @@ out_free_sq: |
| |
| static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) |
| { |
| - int i; |
| - |
| nvme_stop_keep_alive(&ctrl->ctrl); |
| |
| if (ctrl->queue_count > 1) { |
| nvme_stop_queues(&ctrl->ctrl); |
| blk_mq_tagset_busy_iter(&ctrl->tag_set, |
| nvme_cancel_request, &ctrl->ctrl); |
| - |
| - for (i = 1; i < ctrl->queue_count; i++) |
| - nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); |
| + nvme_loop_destroy_io_queues(ctrl); |
| } |
| |
| if (ctrl->ctrl.state == NVME_CTRL_LIVE) |
| @@ -467,19 +498,14 @@ static void nvme_loop_reset_ctrl_work(st |
| if (ret) |
| goto out_disable; |
| |
| - for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) { |
| - ctrl->queues[i].ctrl = ctrl; |
| - ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); |
| - if (ret) |
| - goto out_free_queues; |
| - |
| - ctrl->queue_count++; |
| - } |
| + ret = nvme_loop_init_io_queues(ctrl); |
| + if (ret) |
| + goto out_destroy_admin; |
| |
| - for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) { |
| + for (i = 1; i < ctrl->queue_count; i++) { |
| ret = nvmf_connect_io_queue(&ctrl->ctrl, i); |
| if (ret) |
| - goto out_free_queues; |
| + goto out_destroy_io; |
| } |
| |
| changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); |
| @@ -492,9 +518,9 @@ static void nvme_loop_reset_ctrl_work(st |
| |
| return; |
| |
| -out_free_queues: |
| - for (i = 1; i < ctrl->queue_count; i++) |
| - nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); |
| +out_destroy_io: |
| + nvme_loop_destroy_io_queues(ctrl); |
| +out_destroy_admin: |
| nvme_loop_destroy_admin_queue(ctrl); |
| out_disable: |
| dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); |
| @@ -533,25 +559,12 @@ static const struct nvme_ctrl_ops nvme_l |
| |
| static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) |
| { |
| - struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; |
| int ret, i; |
| |
| - ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues); |
| - if (ret || !opts->nr_io_queues) |
| + ret = nvme_loop_init_io_queues(ctrl); |
| + if (ret) |
| return ret; |
| |
| - dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", |
| - opts->nr_io_queues); |
| - |
| - for (i = 1; i <= opts->nr_io_queues; i++) { |
| - ctrl->queues[i].ctrl = ctrl; |
| - ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); |
| - if (ret) |
| - goto out_destroy_queues; |
| - |
| - ctrl->queue_count++; |
| - } |
| - |
| memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); |
| ctrl->tag_set.ops = &nvme_loop_mq_ops; |
| ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; |
| @@ -575,7 +588,7 @@ static int nvme_loop_create_io_queues(st |
| goto out_free_tagset; |
| } |
| |
| - for (i = 1; i <= opts->nr_io_queues; i++) { |
| + for (i = 1; i < ctrl->queue_count; i++) { |
| ret = nvmf_connect_io_queue(&ctrl->ctrl, i); |
| if (ret) |
| goto out_cleanup_connect_q; |
| @@ -588,8 +601,7 @@ out_cleanup_connect_q: |
| out_free_tagset: |
| blk_mq_free_tag_set(&ctrl->tag_set); |
| out_destroy_queues: |
| - for (i = 1; i < ctrl->queue_count; i++) |
| - nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); |
| + nvme_loop_destroy_io_queues(ctrl); |
| return ret; |
| } |
| |