author	Stefan Agner <stefan.agner@toradex.com>	2018-01-24 15:03:41 +0100
committer	Stefan Agner <stefan.agner@toradex.com>	2018-01-24 15:03:41 +0100
commit	0710597796a48009846e1aa72644c27e846296a9 (patch)
tree	fbeb9a6ed9d1248fa022a549bcd50a7750de8dbe /block
parent	953c6e30c9701fda69ef08e2476c541dc4fb1453 (diff)
parent	7bbc6ca4887794cc44b41412a35bdfbe0cbd1c50 (diff)
Merge tag 'v4.9.76' into 4.9-1.0.x-imx-stable-merge
This is the 4.9.76 stable release.

Resolved conflicts:
        drivers/clk/imx/clk-imx6q.c
        drivers/net/ethernet/freescale/fec_main.c
Diffstat (limited to 'block')
-rw-r--r--	block/badblocks.c	2
-rw-r--r--	block/blk-core.c	4
-rw-r--r--	block/blk-mq-sysfs.c	4
-rw-r--r--	block/blk-mq-tag.c	3
-rw-r--r--	block/blk-mq.c	4
-rw-r--r--	block/blk-mq.h	1
6 files changed, 11 insertions, 7 deletions
diff --git a/block/badblocks.c b/block/badblocks.c
index 6ebcef282314..2fe6c117ac96 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -178,7 +178,7 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
         if (bb->shift < 0)
                 /* badblocks are disabled */
-                return 0;
+                return 1;
         if (bb->shift) {
                 /* round the start down, and the end up */
diff --git a/block/blk-core.c b/block/blk-core.c
index b1c76aa73492..23daf40be371 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -527,8 +527,8 @@ void blk_set_queue_dying(struct request_queue *q)
                 blk_queue_for_each_rl(rl, q) {
                         if (rl->rq_pool) {
-                                wake_up(&rl->wait[BLK_RW_SYNC]);
-                                wake_up(&rl->wait[BLK_RW_ASYNC]);
+                                wake_up_all(&rl->wait[BLK_RW_SYNC]);
+                                wake_up_all(&rl->wait[BLK_RW_ASYNC]);
                         }
                 }
         }
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 01fb455d3377..8c0894e0713b 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -429,7 +429,7 @@ void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
         kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
 }
-static void blk_mq_sysfs_init(struct request_queue *q)
+void blk_mq_sysfs_init(struct request_queue *q)
 {
         struct blk_mq_ctx *ctx;
         int cpu;
@@ -449,8 +449,6 @@ int blk_mq_register_dev(struct device *dev, struct request_queue *q)
         blk_mq_disable_hotplug();
-        blk_mq_sysfs_init(q);
-
         ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
         if (ret < 0)
                 goto out;
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index dcf5ce3ba4bf..4bc701b32ce2 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -311,6 +311,9 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
         for (i = 0; i < set->nr_hw_queues; i++) {
                 struct blk_mq_tags *tags = set->tags[i];
+                if (!tags)
+                        continue;
+
                 for (j = 0; j < tags->nr_tags; j++) {
                         if (!tags->rqs[j])
                                 continue;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7b597ec4e9c5..10f8f94b7f20 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1707,7 +1707,6 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
                 struct blk_mq_hw_ctx *hctx;
-                memset(__ctx, 0, sizeof(*__ctx));
                 __ctx->cpu = i;
                 spin_lock_init(&__ctx->lock);
                 INIT_LIST_HEAD(&__ctx->rq_list);
@@ -1970,6 +1969,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
         if (!q->queue_ctx)
                 goto err_exit;
+        /* init q->mq_kobj and sw queues' kobjects */
+        blk_mq_sysfs_init(q);
+
         q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
                                                 GFP_KERNEL, set->numa_node);
         if (!q->queue_hw_ctx)
diff --git a/block/blk-mq.h b/block/blk-mq.h
index e5d25249028c..c55bcf67b956 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -50,6 +50,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
 /*
  * sysfs helpers
  */
+extern void blk_mq_sysfs_init(struct request_queue *q);
 extern int blk_mq_sysfs_register(struct request_queue *q);
 extern void blk_mq_sysfs_unregister(struct request_queue *q);
 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);