From f5bbbbe4d63577026f908a809f22f5fd5a90ea1f Mon Sep 17 00:00:00 2001
From: Jianchao Wang <jianchao.w.wang@oracle.com>
Date: Tue, 21 Aug 2018 15:15:04 +0800
Subject: blk-mq: sync the update nr_hw_queues with blk_mq_queue_tag_busy_iter

From: Jianchao Wang <jianchao.w.wang@oracle.com>

commit f5bbbbe4d63577026f908a809f22f5fd5a90ea1f upstream.

For blk-mq, part_in_flight/rw invokes blk_mq_in_flight/rw to account
for in-flight requests. These helpers access queue_hw_ctx and
nr_hw_queues without any protection, so a panic occurs when an update
of nr_hw_queues runs concurrently with blk_mq_in_flight/rw.
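
To illustrate, the race can be sketched as the following timeline
(simplified; not verbatim kernel code or a real crash log):

  CPU0: __blk_mq_update_nr_hw_queues    CPU1: blk_mq_in_flight/rw
  -----------------------------------   ------------------------------
  blk_mq_freeze_queue(q)
  reallocate queue_hw_ctx,              blk_mq_queue_tag_busy_iter()
  update nr_hw_queues                     iterates with a stale
                                          nr_hw_queues over freed
                                          queue_hw_ctx -> panic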

Before updating nr_hw_queues, the queue is frozen, so we can use
q_usage_counter to avoid the race; percpu_ref_is_zero is used here
so that we will not miss any in-flight request. The accesses to
nr_hw_queues and queue_hw_ctx in blk_mq_queue_tag_busy_iter are
under an RCU read-side critical section, so
__blk_mq_update_nr_hw_queues can use synchronize_rcu to ensure the
zeroed q_usage_counter is globally visible.
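
As a minimal sketch of the resulting reader/updater pairing (abridged
from the hunks below; the per-queue freeze loop and other details are
omitted):

	/* Reader side, in blk_mq_queue_tag_busy_iter(): */
	rcu_read_lock();
	if (percpu_ref_is_zero(&q->q_usage_counter)) {
		/* Queue is frozen; an update may be in progress. */
		rcu_read_unlock();
		return;
	}
	/* ... iterate over nr_hw_queues / queue_hw_ctx ... */
	rcu_read_unlock();

	/* Updater side, in blk_mq_update_nr_hw_queues(): */
	blk_mq_freeze_queue(q);		/* drains q_usage_counter to zero */
	/* ... update nr_hw_queues and queue_hw_ctx ... */
	blk_mq_unfreeze_queue(q);
	synchronize_rcu();		/* sync with the RCU read side above */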

Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Cc: Giuliano Procida <gprocida@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 block/blk-mq-tag.c |   14 +++++++++++++-
 block/blk-mq.c     |    4 ++++
 2 files changed, 17 insertions(+), 1 deletion(-)

--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -336,6 +336,18 @@ void blk_mq_queue_tag_busy_iter(struct r
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
+	/*
+	 * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
+	 * queue_hw_ctx after freezing the queue. So we can use
+	 * q_usage_counter to avoid racing with it. It will use
+	 * synchronize_rcu to ensure all of the users go out of the
+	 * critical section below and see the zeroed q_usage_counter.
+	 */
+	rcu_read_lock();
+	if (percpu_ref_is_zero(&q->q_usage_counter)) {
+		rcu_read_unlock();
+		return;
+	}
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		struct blk_mq_tags *tags = hctx->tags;
@@ -351,7 +363,7 @@ void blk_mq_queue_tag_busy_iter(struct r
 		bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
 		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
 	}
-
+	rcu_read_unlock();
 }
 
 static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2346,6 +2346,10 @@ void blk_mq_update_nr_hw_queues(struct b
 
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
+	/*
+	 * Sync with blk_mq_queue_tag_busy_iter.
+	 */
+	synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 