| From 2a34c0872adf252f23a6fef2d051a169ac796cef Mon Sep 17 00:00:00 2001 |
| From: Ming Lei <ming.lei@canonical.com> |
| Date: Tue, 21 Apr 2015 10:00:20 +0800 |
| Subject: blk-mq: fix CPU hotplug handling |
| |
| From: Ming Lei <ming.lei@canonical.com> |
| |
| commit 2a34c0872adf252f23a6fef2d051a169ac796cef upstream. |
| |
| hctx->tags has to be set to NULL in blk_mq_map_swqueue() when the hw queue |
| is to be unmapped, regardless of whether set->tags[hctx->queue_num] is NULL, |
| because the shared tags may already have been freed from another request queue. |
| |
| The same situation has to be considered when handling CPU online too. |
| An unmapped hw queue can be remapped after the CPU topology has changed, |
| so we need to allocate tags for the hw queue in blk_mq_map_swqueue(). Tag |
| allocation for the hw queue can then be removed from the hctx CPU online |
| notifier, and it is reasonable to do that after the mapping is updated. |
| |
| Reported-by: Dongsu Park <dongsu.park@profitbricks.com> |
| Tested-by: Dongsu Park <dongsu.park@profitbricks.com> |
| Signed-off-by: Ming Lei <ming.lei@canonical.com> |
| Signed-off-by: Jens Axboe <axboe@fb.com> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| |
| --- |
| block/blk-mq.c | 34 +++++++++++++--------------------- |
| 1 file changed, 13 insertions(+), 21 deletions(-) |
| |
| --- a/block/blk-mq.c |
| +++ b/block/blk-mq.c |
| @@ -1573,22 +1573,6 @@ static int blk_mq_hctx_cpu_offline(struc |
| return NOTIFY_OK; |
| } |
| |
| -static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu) |
| -{ |
| - struct request_queue *q = hctx->queue; |
| - struct blk_mq_tag_set *set = q->tag_set; |
| - |
| - if (set->tags[hctx->queue_num]) |
| - return NOTIFY_OK; |
| - |
| - set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num); |
| - if (!set->tags[hctx->queue_num]) |
| - return NOTIFY_STOP; |
| - |
| - hctx->tags = set->tags[hctx->queue_num]; |
| - return NOTIFY_OK; |
| -} |
| - |
| static int blk_mq_hctx_notify(void *data, unsigned long action, |
| unsigned int cpu) |
| { |
| @@ -1596,8 +1580,11 @@ static int blk_mq_hctx_notify(void *data |
| |
| if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) |
| return blk_mq_hctx_cpu_offline(hctx, cpu); |
| - else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) |
| - return blk_mq_hctx_cpu_online(hctx, cpu); |
| + |
| + /* |
| + * In case of CPU online, tags may be reallocated |
| + * in blk_mq_map_swqueue() after mapping is updated. |
| + */ |
| |
| return NOTIFY_OK; |
| } |
| @@ -1779,6 +1766,7 @@ static void blk_mq_map_swqueue(struct re |
| unsigned int i; |
| struct blk_mq_hw_ctx *hctx; |
| struct blk_mq_ctx *ctx; |
| + struct blk_mq_tag_set *set = q->tag_set; |
| |
| queue_for_each_hw_ctx(q, hctx, i) { |
| cpumask_clear(hctx->cpumask); |
| @@ -1805,16 +1793,20 @@ static void blk_mq_map_swqueue(struct re |
| * disable it and free the request entries. |
| */ |
| if (!hctx->nr_ctx) { |
| - struct blk_mq_tag_set *set = q->tag_set; |
| - |
| if (set->tags[i]) { |
| blk_mq_free_rq_map(set, set->tags[i], i); |
| set->tags[i] = NULL; |
| - hctx->tags = NULL; |
| } |
| + hctx->tags = NULL; |
| continue; |
| } |
| |
| + /* unmapped hw queue can be remapped after CPU topo changed */ |
| + if (!set->tags[i]) |
| + set->tags[i] = blk_mq_init_rq_map(set, i); |
| + hctx->tags = set->tags[i]; |
| + WARN_ON(!hctx->tags); |
| + |
| /* |
| * Initialize batch roundrobin counts |
| */ |