path: root/block
author	Ming Lei <ming.lei@redhat.com>	2018-01-06 16:27:40 +0800
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-04-13 19:48:11 +0200
commit	3bab65f29e6bf4615519b1e6dba5684cc8b53a72 (patch)
tree	ffd75d4bd04fcf025d6ef1b198f7dc1bcb6a9417 /block
parent	5736369b54e3a4dc00592df922fbd8d739fb5be2 (diff)
blk-mq: fix race between updating nr_hw_queues and switching io sched
[ Upstream commit fb350e0ad99359768e1e80b4784692031ec340e4 ]

In both elevator_switch_mq() and blk_mq_update_nr_hw_queues(), sched tags
can be allocated and q->nr_hw_queues is used, so a race is inevitable.
For example, blk_mq_init_sched() may trigger a use-after-free on an hctx
that blk_mq_realloc_hw_ctxs() has already freed when nr_hw_queues is
decreased.

This patch fixes the race by holding q->sysfs_lock.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reported-by: Yi Zhang <yi.zhang@redhat.com>
Tested-by: Yi Zhang <yi.zhang@redhat.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
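To make the race concrete, below is a minimal userspace sketch of the locking
pattern the patch applies: two threads model the racing paths from the commit
message, with a pthread mutex standing in for q->sysfs_lock. The names
queue, hctx, realloc_hw_ctxs and init_sched only echo the kernel identifiers
for readability; this is an illustration of the idea, not kernel code.

/*
 * Illustrative sketch only (build with: cc -pthread race.c).
 * One thread "reallocates" a hw-context array (as blk_mq_realloc_hw_ctxs()
 * does when nr_hw_queues shrinks); the other "switches schedulers" and walks
 * the same array (as blk_mq_init_sched() does).  Without both paths holding
 * the same mutex, the walker can dereference a context freed by the shrinker.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct hctx { int id; };

struct queue {
	pthread_mutex_t sysfs_lock;	/* serializes both update paths */
	struct hctx **hctxs;
	int nr_hw_queues;
};

/* Shrink the hw-context array, freeing the tail entries. */
static void realloc_hw_ctxs(struct queue *q, int new_nr)
{
	pthread_mutex_lock(&q->sysfs_lock);
	for (int i = new_nr; i < q->nr_hw_queues; i++) {
		free(q->hctxs[i]);
		q->hctxs[i] = NULL;
	}
	q->nr_hw_queues = new_nr;
	pthread_mutex_unlock(&q->sysfs_lock);
}

/* "Switch scheduler": touch every live hw context. */
static void init_sched(struct queue *q)
{
	pthread_mutex_lock(&q->sysfs_lock);	/* without this: use-after-free */
	for (int i = 0; i < q->nr_hw_queues; i++)
		printf("init sched tags for hctx %d\n", q->hctxs[i]->id);
	pthread_mutex_unlock(&q->sysfs_lock);
}

static void *shrink_thread(void *arg) { realloc_hw_ctxs(arg, 2); return NULL; }
static void *switch_thread(void *arg) { init_sched(arg); return NULL; }

int main(void)
{
	struct queue q = { .sysfs_lock = PTHREAD_MUTEX_INITIALIZER,
			   .nr_hw_queues = 4 };
	pthread_t a, b;

	q.hctxs = calloc(4, sizeof(*q.hctxs));
	for (int i = 0; i < 4; i++) {
		q.hctxs[i] = malloc(sizeof(struct hctx));
		q.hctxs[i]->id = i;
	}

	pthread_create(&a, NULL, shrink_thread, &q);
	pthread_create(&b, NULL, switch_thread, &q);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

The patch reuses q->sysfs_lock rather than introducing a new lock: since only
the hctx-reallocation side is changed below, the implication is that the
scheduler-switch side already runs under that mutex, so taking it around the
reallocation is enough to serialize both updaters.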
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq.c	4
1 file changed, 4 insertions, 0 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 84c1efc70d3b..fe24429c6278 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1907,6 +1907,9 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
 
 	blk_mq_sysfs_unregister(q);
+
+	/* protect against switching io scheduler */
+	mutex_lock(&q->sysfs_lock);
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int node;
 
@@ -1956,6 +1959,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		}
 	}
 	q->nr_hw_queues = i;
+	mutex_unlock(&q->sysfs_lock);
 	blk_mq_sysfs_register(q);
 }