Diffstat (limited to 'block')
-rw-r--r--   block/bio-integrity.c       17
-rw-r--r--   block/blk-core.c           108
-rw-r--r--   block/blk-integrity.c      192
-rw-r--r--   block/blk-lib.c             31
-rw-r--r--   block/blk-merge.c           32
-rw-r--r--   block/blk-mq-sysfs.c         6
-rw-r--r--   block/blk-mq-tag.c           5
-rw-r--r--   block/blk-mq.c             162
-rw-r--r--   block/blk-mq.h               2
-rw-r--r--   block/blk-sysfs.c            4
-rw-r--r--   block/blk.h                 23
-rw-r--r--   block/elevator.c             2
-rw-r--r--   block/genhd.c                2
-rw-r--r--   block/partition-generic.c    1
-rw-r--r--   block/t10-pi.c              16
15 files changed, 353 insertions, 250 deletions
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 14b8faf8b09d..f6325d573c10 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -32,6 +32,11 @@
static struct kmem_cache *bip_slab;
static struct workqueue_struct *kintegrityd_wq;
+void blk_flush_integrity(void)
+{
+ flush_workqueue(kintegrityd_wq);
+}
+
/**
* bio_integrity_alloc - Allocate integrity payload and attach it to bio
* @bio: bio to attach integrity metadata to
@@ -177,11 +182,11 @@ bool bio_integrity_enabled(struct bio *bio)
if (bi == NULL)
return false;
- if (bio_data_dir(bio) == READ && bi->verify_fn != NULL &&
+ if (bio_data_dir(bio) == READ && bi->profile->verify_fn != NULL &&
(bi->flags & BLK_INTEGRITY_VERIFY))
return true;
- if (bio_data_dir(bio) == WRITE && bi->generate_fn != NULL &&
+ if (bio_data_dir(bio) == WRITE && bi->profile->generate_fn != NULL &&
(bi->flags & BLK_INTEGRITY_GENERATE))
return true;
@@ -202,7 +207,7 @@ EXPORT_SYMBOL(bio_integrity_enabled);
static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
unsigned int sectors)
{
- return sectors >> (ilog2(bi->interval) - 9);
+ return sectors >> (bi->interval_exp - 9);
}
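
Note: with the interval stored as a log2 exponent, the per-interval math above becomes a plain shift: one protection interval covers 2^(interval_exp - 9) 512-byte sectors. A standalone sketch of the arithmetic (illustrative names, not kernel code):

	#include <stdio.h>

	static unsigned int intervals(unsigned int interval_exp,
				      unsigned int sectors)
	{
		/* one interval spans 2^(interval_exp - 9) sectors */
		return sectors >> (interval_exp - 9);
	}

	int main(void)
	{
		printf("%u\n", intervals(12, 64));	/* 4096-byte interval -> 8 */
		printf("%u\n", intervals(9, 64));	/* 512-byte interval -> 64 */
		return 0;
	}
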
static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
@@ -229,7 +234,7 @@ static int bio_integrity_process(struct bio *bio,
bip->bip_vec->bv_offset;
iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
- iter.interval = bi->interval;
+ iter.interval = 1 << bi->interval_exp;
iter.seed = bip_get_seed(bip);
iter.prot_buf = prot_buf;
@@ -340,7 +345,7 @@ int bio_integrity_prep(struct bio *bio)
/* Auto-generate integrity metadata if this is a write */
if (bio_data_dir(bio) == WRITE)
- bio_integrity_process(bio, bi->generate_fn);
+ bio_integrity_process(bio, bi->profile->generate_fn);
return 0;
}
@@ -361,7 +366,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
struct bio *bio = bip->bip_bio;
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
- bio->bi_error = bio_integrity_process(bio, bi->verify_fn);
+ bio->bi_error = bio_integrity_process(bio, bi->profile->verify_fn);
/* Restore original bio completion handler */
bio->bi_end_io = bip->bip_end_io;
diff --git a/block/blk-core.c b/block/blk-core.c
index 2eb722d48773..89eec7965870 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -554,29 +554,30 @@ void blk_cleanup_queue(struct request_queue *q)
* Drain all requests queued before DYING marking. Set the DEAD flag to
* prevent q->request_fn() from being invoked after draining has finished.
*/
- if (q->mq_ops) {
- blk_mq_freeze_queue(q);
- spin_lock_irq(lock);
- } else {
- spin_lock_irq(lock);
+ blk_freeze_queue(q);
+ spin_lock_irq(lock);
+ if (!q->mq_ops)
__blk_drain_queue(q, true);
- }
queue_flag_set(QUEUE_FLAG_DEAD, q);
spin_unlock_irq(lock);
+ /* for synchronous bio-based drivers, finish in-flight integrity I/O */
+ blk_flush_integrity();
+
/* @q won't process any more request, flush async actions */
del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
blk_sync_queue(q);
if (q->mq_ops)
blk_mq_free_queue(q);
+ percpu_ref_exit(&q->q_usage_counter);
spin_lock_irq(lock);
if (q->queue_lock != &q->__queue_lock)
q->queue_lock = &q->__queue_lock;
spin_unlock_irq(lock);
- bdi_destroy(&q->backing_dev_info);
+ bdi_unregister(&q->backing_dev_info);
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
@@ -629,6 +630,40 @@ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
}
EXPORT_SYMBOL(blk_alloc_queue);
+int blk_queue_enter(struct request_queue *q, gfp_t gfp)
+{
+ while (true) {
+ int ret;
+
+ if (percpu_ref_tryget_live(&q->q_usage_counter))
+ return 0;
+
+ if (!(gfp & __GFP_WAIT))
+ return -EBUSY;
+
+ ret = wait_event_interruptible(q->mq_freeze_wq,
+ !atomic_read(&q->mq_freeze_depth) ||
+ blk_queue_dying(q));
+ if (blk_queue_dying(q))
+ return -ENODEV;
+ if (ret)
+ return ret;
+ }
+}
+
+void blk_queue_exit(struct request_queue *q)
+{
+ percpu_ref_put(&q->q_usage_counter);
+}
+
+static void blk_queue_usage_counter_release(struct percpu_ref *ref)
+{
+ struct request_queue *q =
+ container_of(ref, struct request_queue, q_usage_counter);
+
+ wake_up_all(&q->mq_freeze_wq);
+}
+
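
Note: blk_queue_enter()/blk_queue_exit() generalize the old mq-only gate: percpu_ref_tryget_live() fails once a freeze has killed the ref, and callers either sleep on mq_freeze_wq or bail out. A caller-side sketch of the pattern (hypothetical helper, not part of this patch):

	static int submit_while_live(struct request_queue *q, struct bio *bio)
	{
		int ret = blk_queue_enter(q, __GFP_WAIT);	/* may sleep */

		if (ret)
			return ret;	/* -ENODEV if dying, or wait error */
		q->make_request_fn(q, bio);
		blk_queue_exit(q);	/* drop the q_usage_counter ref */
		return 0;
	}
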
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
struct request_queue *q;
@@ -690,11 +725,22 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
init_waitqueue_head(&q->mq_freeze_wq);
- if (blkcg_init_queue(q))
+ /*
+ * Init percpu_ref in atomic mode so that it's faster to shut down.
+ * See blk_register_queue() for details.
+ */
+ if (percpu_ref_init(&q->q_usage_counter,
+ blk_queue_usage_counter_release,
+ PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
goto fail_bdi;
+ if (blkcg_init_queue(q))
+ goto fail_ref;
+
return q;
+fail_ref:
+ percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
bdi_destroy(&q->backing_dev_info);
fail_split:
@@ -1594,6 +1640,30 @@ out:
return ret;
}
+unsigned int blk_plug_queued_count(struct request_queue *q)
+{
+ struct blk_plug *plug;
+ struct request *rq;
+ struct list_head *plug_list;
+ unsigned int ret = 0;
+
+ plug = current->plug;
+ if (!plug)
+ goto out;
+
+ if (q->mq_ops)
+ plug_list = &plug->mq_list;
+ else
+ plug_list = &plug->list;
+
+ list_for_each_entry(rq, plug_list, queuelist) {
+ if (rq->q == q)
+ ret++;
+ }
+out:
+ return ret;
+}
+
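
Note: with merging disabled, blk_attempt_plug_merge() is never called, so nothing used to maintain request_count; blk_plug_queued_count() recomputes it so the plug-flush heuristic still triggers. The heuristic it feeds looks like this (mirroring the blk_sq_make_request() hunk further down):

	if (request_count >= BLK_MAX_REQUEST_COUNT) {
		blk_flush_plug_list(plug, false);	/* cap per-task plug depth */
		trace_block_plug(q);
	}
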
void init_request_from_bio(struct request *req, struct bio *bio)
{
req->cmd_type = REQ_TYPE_FS;
@@ -1641,9 +1711,11 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
* Check if we can merge with the plugged list before grabbing
* any locks.
*/
- if (!blk_queue_nomerges(q) &&
- blk_attempt_plug_merge(q, bio, &request_count, NULL))
- return;
+ if (!blk_queue_nomerges(q)) {
+ if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+ return;
+ } else
+ request_count = blk_plug_queued_count(q);
spin_lock_irq(q->queue_lock);
@@ -1966,9 +2038,19 @@ void generic_make_request(struct bio *bio)
do {
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
- q->make_request_fn(q, bio);
+ if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
+
+ q->make_request_fn(q, bio);
+
+ blk_queue_exit(q);
- bio = bio_list_pop(current->bio_list);
+ bio = bio_list_pop(current->bio_list);
+ } else {
+ struct bio *bio_next = bio_list_pop(current->bio_list);
+
+ bio_io_error(bio);
+ bio = bio_next;
+ }
} while (bio);
current->bio_list = NULL; /* deactivate */
}
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 75f29cf70188..d69c5c79f98e 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -30,10 +30,6 @@
#include "blk.h"
-static struct kmem_cache *integrity_cachep;
-
-static const char *bi_unsupported_name = "unsupported";
-
/**
* blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
* @q: request queue
@@ -146,40 +142,40 @@ EXPORT_SYMBOL(blk_rq_map_integrity_sg);
*/
int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
{
- struct blk_integrity *b1 = gd1->integrity;
- struct blk_integrity *b2 = gd2->integrity;
+ struct blk_integrity *b1 = &gd1->queue->integrity;
+ struct blk_integrity *b2 = &gd2->queue->integrity;
- if (!b1 && !b2)
+ if (!b1->profile && !b2->profile)
return 0;
- if (!b1 || !b2)
+ if (!b1->profile || !b2->profile)
return -1;
- if (b1->interval != b2->interval) {
+ if (b1->interval_exp != b2->interval_exp) {
pr_err("%s: %s/%s protection interval %u != %u\n",
__func__, gd1->disk_name, gd2->disk_name,
- b1->interval, b2->interval);
+ 1 << b1->interval_exp, 1 << b2->interval_exp);
return -1;
}
if (b1->tuple_size != b2->tuple_size) {
- printk(KERN_ERR "%s: %s/%s tuple sz %u != %u\n", __func__,
+ pr_err("%s: %s/%s tuple sz %u != %u\n", __func__,
gd1->disk_name, gd2->disk_name,
b1->tuple_size, b2->tuple_size);
return -1;
}
if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) {
- printk(KERN_ERR "%s: %s/%s tag sz %u != %u\n", __func__,
+ pr_err("%s: %s/%s tag sz %u != %u\n", __func__,
gd1->disk_name, gd2->disk_name,
b1->tag_size, b2->tag_size);
return -1;
}
- if (strcmp(b1->name, b2->name)) {
- printk(KERN_ERR "%s: %s/%s type %s != %s\n", __func__,
+ if (b1->profile != b2->profile) {
+ pr_err("%s: %s/%s type %s != %s\n", __func__,
gd1->disk_name, gd2->disk_name,
- b1->name, b2->name);
+ b1->profile->name, b2->profile->name);
return -1;
}
@@ -249,8 +245,8 @@ struct integrity_sysfs_entry {
static ssize_t integrity_attr_show(struct kobject *kobj, struct attribute *attr,
char *page)
{
- struct blk_integrity *bi =
- container_of(kobj, struct blk_integrity, kobj);
+ struct gendisk *disk = container_of(kobj, struct gendisk, integrity_kobj);
+ struct blk_integrity *bi = &disk->queue->integrity;
struct integrity_sysfs_entry *entry =
container_of(attr, struct integrity_sysfs_entry, attr);
@@ -261,8 +257,8 @@ static ssize_t integrity_attr_store(struct kobject *kobj,
struct attribute *attr, const char *page,
size_t count)
{
- struct blk_integrity *bi =
- container_of(kobj, struct blk_integrity, kobj);
+ struct gendisk *disk = container_of(kobj, struct gendisk, integrity_kobj);
+ struct blk_integrity *bi = &disk->queue->integrity;
struct integrity_sysfs_entry *entry =
container_of(attr, struct integrity_sysfs_entry, attr);
ssize_t ret = 0;
@@ -275,18 +271,21 @@ static ssize_t integrity_attr_store(struct kobject *kobj,
static ssize_t integrity_format_show(struct blk_integrity *bi, char *page)
{
- if (bi != NULL && bi->name != NULL)
- return sprintf(page, "%s\n", bi->name);
+ if (bi->profile && bi->profile->name)
+ return sprintf(page, "%s\n", bi->profile->name);
else
return sprintf(page, "none\n");
}
static ssize_t integrity_tag_size_show(struct blk_integrity *bi, char *page)
{
- if (bi != NULL)
- return sprintf(page, "%u\n", bi->tag_size);
- else
- return sprintf(page, "0\n");
+ return sprintf(page, "%u\n", bi->tag_size);
+}
+
+static ssize_t integrity_interval_show(struct blk_integrity *bi, char *page)
+{
+ return sprintf(page, "%u\n",
+ bi->interval_exp ? 1 << bi->interval_exp : 0);
}
static ssize_t integrity_verify_store(struct blk_integrity *bi,
@@ -343,6 +342,11 @@ static struct integrity_sysfs_entry integrity_tag_size_entry = {
.show = integrity_tag_size_show,
};
+static struct integrity_sysfs_entry integrity_interval_entry = {
+ .attr = { .name = "protection_interval_bytes", .mode = S_IRUGO },
+ .show = integrity_interval_show,
+};
+
static struct integrity_sysfs_entry integrity_verify_entry = {
.attr = { .name = "read_verify", .mode = S_IRUGO | S_IWUSR },
.show = integrity_verify_show,
@@ -363,6 +367,7 @@ static struct integrity_sysfs_entry integrity_device_entry = {
static struct attribute *integrity_attrs[] = {
&integrity_format_entry.attr,
&integrity_tag_size_entry.attr,
+ &integrity_interval_entry.attr,
&integrity_verify_entry.attr,
&integrity_generate_entry.attr,
&integrity_device_entry.attr,
@@ -374,114 +379,89 @@ static const struct sysfs_ops integrity_ops = {
.store = &integrity_attr_store,
};
-static int __init blk_dev_integrity_init(void)
-{
- integrity_cachep = kmem_cache_create("blkdev_integrity",
- sizeof(struct blk_integrity),
- 0, SLAB_PANIC, NULL);
- return 0;
-}
-subsys_initcall(blk_dev_integrity_init);
-
-static void blk_integrity_release(struct kobject *kobj)
-{
- struct blk_integrity *bi =
- container_of(kobj, struct blk_integrity, kobj);
-
- kmem_cache_free(integrity_cachep, bi);
-}
-
static struct kobj_type integrity_ktype = {
.default_attrs = integrity_attrs,
.sysfs_ops = &integrity_ops,
- .release = blk_integrity_release,
};
-bool blk_integrity_is_initialized(struct gendisk *disk)
+static int blk_integrity_nop_fn(struct blk_integrity_iter *iter)
{
- struct blk_integrity *bi = blk_get_integrity(disk);
-
- return (bi && bi->name && strcmp(bi->name, bi_unsupported_name) != 0);
+ return 0;
}
-EXPORT_SYMBOL(blk_integrity_is_initialized);
+
+static struct blk_integrity_profile nop_profile = {
+ .name = "nop",
+ .generate_fn = blk_integrity_nop_fn,
+ .verify_fn = blk_integrity_nop_fn,
+};
/**
* blk_integrity_register - Register a gendisk as being integrity-capable
* @disk: struct gendisk pointer to make integrity-aware
- * @template: optional integrity profile to register
+ * @template: block integrity profile to register
*
- * Description: When a device needs to advertise itself as being able
- * to send/receive integrity metadata it must use this function to
- * register the capability with the block layer. The template is a
- * blk_integrity struct with values appropriate for the underlying
- * hardware. If template is NULL the new profile is allocated but
- * not filled out. See Documentation/block/data-integrity.txt.
+ * Description: When a device needs to advertise itself as being able to
+ * send/receive integrity metadata it must use this function to register
+ * the capability with the block layer. The template is a blk_integrity
+ * struct with values appropriate for the underlying hardware. See
+ * Documentation/block/data-integrity.txt.
*/
-int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
+void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
{
- struct blk_integrity *bi;
+ struct blk_integrity *bi = &disk->queue->integrity;
- BUG_ON(disk == NULL);
+ bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |
+ template->flags;
+ bi->interval_exp = ilog2(queue_logical_block_size(disk->queue));
+ bi->profile = template->profile ? template->profile : &nop_profile;
+ bi->tuple_size = template->tuple_size;
+ bi->tag_size = template->tag_size;
- if (disk->integrity == NULL) {
- bi = kmem_cache_alloc(integrity_cachep,
- GFP_KERNEL | __GFP_ZERO);
- if (!bi)
- return -1;
-
- if (kobject_init_and_add(&bi->kobj, &integrity_ktype,
- &disk_to_dev(disk)->kobj,
- "%s", "integrity")) {
- kmem_cache_free(integrity_cachep, bi);
- return -1;
- }
-
- kobject_uevent(&bi->kobj, KOBJ_ADD);
-
- bi->flags |= BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE;
- bi->interval = queue_logical_block_size(disk->queue);
- disk->integrity = bi;
- } else
- bi = disk->integrity;
-
- /* Use the provided profile as template */
- if (template != NULL) {
- bi->name = template->name;
- bi->generate_fn = template->generate_fn;
- bi->verify_fn = template->verify_fn;
- bi->tuple_size = template->tuple_size;
- bi->tag_size = template->tag_size;
- bi->flags |= template->flags;
- } else
- bi->name = bi_unsupported_name;
-
- disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
-
- return 0;
+ blk_integrity_revalidate(disk);
}
EXPORT_SYMBOL(blk_integrity_register);
/**
- * blk_integrity_unregister - Remove block integrity profile
- * @disk: disk whose integrity profile to deallocate
+ * blk_integrity_unregister - Unregister block integrity profile
+ * @disk: disk whose integrity profile to unregister
*
- * Description: This function frees all memory used by the block
- * integrity profile. To be called at device teardown.
+ * Description: This function unregisters the integrity capability from
+ * a block device.
*/
void blk_integrity_unregister(struct gendisk *disk)
{
- struct blk_integrity *bi;
+ blk_integrity_revalidate(disk);
+ memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
+}
+EXPORT_SYMBOL(blk_integrity_unregister);
+
+void blk_integrity_revalidate(struct gendisk *disk)
+{
+ struct blk_integrity *bi = &disk->queue->integrity;
- if (!disk || !disk->integrity)
+ if (!(disk->flags & GENHD_FL_UP))
return;
- disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES;
+ if (bi->profile)
+ disk->queue->backing_dev_info.capabilities |=
+ BDI_CAP_STABLE_WRITES;
+ else
+ disk->queue->backing_dev_info.capabilities &=
+ ~BDI_CAP_STABLE_WRITES;
+}
+
+void blk_integrity_add(struct gendisk *disk)
+{
+ if (kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
+ &disk_to_dev(disk)->kobj, "%s", "integrity"))
+ return;
- bi = disk->integrity;
+ kobject_uevent(&disk->integrity_kobj, KOBJ_ADD);
+}
- kobject_uevent(&bi->kobj, KOBJ_REMOVE);
- kobject_del(&bi->kobj);
- kobject_put(&bi->kobj);
- disk->integrity = NULL;
+void blk_integrity_del(struct gendisk *disk)
+{
+ kobject_uevent(&disk->integrity_kobj, KOBJ_REMOVE);
+ kobject_del(&disk->integrity_kobj);
+ kobject_put(&disk->integrity_kobj);
}
-EXPORT_SYMBOL(blk_integrity_unregister);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index bd40292e5009..9ebf65379556 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -26,13 +26,6 @@ static void bio_batch_end_io(struct bio *bio)
bio_put(bio);
}
-/*
- * Ensure that max discard sectors doesn't overflow bi_size and hopefully
- * it is of the proper granularity as long as the granularity is a power
- * of two.
- */
-#define MAX_BIO_SECTORS ((1U << 31) >> 9)
-
/**
* blkdev_issue_discard - queue a discard
* @bdev: blockdev to issue discard for
@@ -50,6 +43,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
int type = REQ_WRITE | REQ_DISCARD;
+ unsigned int granularity;
+ int alignment;
struct bio_batch bb;
struct bio *bio;
int ret = 0;
@@ -61,6 +56,10 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
+ /* Zero-sector (unknown) and one-sector granularities are the same. */
+ granularity = max(q->limits.discard_granularity >> 9, 1U);
+ alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
+
if (flags & BLKDEV_DISCARD_SECURE) {
if (!blk_queue_secdiscard(q))
return -EOPNOTSUPP;
@@ -74,7 +73,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
blk_start_plug(&plug);
while (nr_sects) {
unsigned int req_sects;
- sector_t end_sect;
+ sector_t end_sect, tmp;
bio = bio_alloc(gfp_mask, 1);
if (!bio) {
@@ -82,8 +81,22 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
break;
}
- req_sects = min_t(sector_t, nr_sects, MAX_BIO_SECTORS);
+ /* Make sure bi_size doesn't overflow */
+ req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
+
+ /*
+ * If splitting a request, and the next starting sector would be
+ * misaligned, stop the discard at the previous aligned sector.
+ */
end_sect = sector + req_sects;
+ tmp = end_sect;
+ if (req_sects < nr_sects &&
+ sector_div(tmp, granularity) != alignment) {
+ end_sect = end_sect - alignment;
+ sector_div(end_sect, granularity);
+ end_sect = end_sect * granularity + alignment;
+ req_sects = end_sect - sector;
+ }
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_batch_end_io;
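
Note: the alignment fix-up rounds a tentative discard end down to the previous sector congruent to `alignment` modulo `granularity`, so the next chunk starts aligned. A standalone sketch of the rounding (illustrative; plain division in place of sector_div()):

	#include <stdio.h>

	static unsigned long long align_end(unsigned long long end_sect,
					    unsigned int granularity,
					    unsigned int alignment)
	{
		/* previous sector with end_sect % granularity == alignment */
		return (end_sect - alignment) / granularity * granularity
			+ alignment;
	}

	int main(void)
	{
		/* granularity 8, alignment 2: 103 rounds down to 98 */
		printf("%llu\n", align_end(103, 8, 2));
		return 0;
	}
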
diff --git a/block/blk-merge.c b/block/blk-merge.c
index c4e9c37f3e38..de5716d8e525 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -11,13 +11,16 @@
static struct bio *blk_bio_discard_split(struct request_queue *q,
struct bio *bio,
- struct bio_set *bs)
+ struct bio_set *bs,
+ unsigned *nsegs)
{
unsigned int max_discard_sectors, granularity;
int alignment;
sector_t tmp;
unsigned split_sectors;
+ *nsegs = 1;
+
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
@@ -51,8 +54,11 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
static struct bio *blk_bio_write_same_split(struct request_queue *q,
struct bio *bio,
- struct bio_set *bs)
+ struct bio_set *bs,
+ unsigned *nsegs)
{
+ *nsegs = 1;
+
if (!q->limits.max_write_same_sectors)
return NULL;
@@ -64,7 +70,8 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q,
static struct bio *blk_bio_segment_split(struct request_queue *q,
struct bio *bio,
- struct bio_set *bs)
+ struct bio_set *bs,
+ unsigned *segs)
{
struct bio_vec bv, bvprv, *bvprvp = NULL;
struct bvec_iter iter;
@@ -106,24 +113,35 @@ new_segment:
sectors += bv.bv_len >> 9;
}
+ *segs = nsegs;
return NULL;
split:
+ *segs = nsegs;
return bio_split(bio, sectors, GFP_NOIO, bs);
}
void blk_queue_split(struct request_queue *q, struct bio **bio,
struct bio_set *bs)
{
- struct bio *split;
+ struct bio *split, *res;
+ unsigned nsegs;
if ((*bio)->bi_rw & REQ_DISCARD)
- split = blk_bio_discard_split(q, *bio, bs);
+ split = blk_bio_discard_split(q, *bio, bs, &nsegs);
else if ((*bio)->bi_rw & REQ_WRITE_SAME)
- split = blk_bio_write_same_split(q, *bio, bs);
+ split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
else
- split = blk_bio_segment_split(q, *bio, q->bio_split);
+ split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
+
+ /* physical segments can be figured out during splitting */
+ res = split ? split : *bio;
+ res->bi_phys_segments = nsegs;
+ bio_set_flag(res, BIO_SEG_VALID);
if (split) {
+ /* there is no chance to merge the split bio */
+ split->bi_rw |= REQ_NOMERGE;
+
bio_chain(split, *bio);
generic_make_request(*bio);
*bio = split;
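
Note: since splitting walks every bvec anyway, the segment count falls out for free; stashing it in bi_phys_segments and setting BIO_SEG_VALID lets later code skip a second walk. A consumer-side sketch (illustrative, not the in-tree helper):

	static unsigned segments_cached(struct request_queue *q, struct bio *bio)
	{
		if (!bio_flagged(bio, BIO_SEG_VALID))
			blk_recount_segments(q, bio);	/* slow path: re-walk */
		return bio->bi_phys_segments;
	}
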
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 788fffd9b409..6f57a110289c 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -413,12 +413,6 @@ static void blk_mq_sysfs_init(struct request_queue *q)
kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
}
-/* see blk_register_queue() */
-void blk_mq_finish_init(struct request_queue *q)
-{
- percpu_ref_switch_to_percpu(&q->mq_usage_counter);
-}
-
int blk_mq_register_disk(struct gendisk *disk)
{
struct device *dev = disk_to_dev(disk);
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index ed96474d75cb..60ac684c8b8c 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -75,6 +75,10 @@ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
struct blk_mq_bitmap_tags *bt;
int i, wake_index;
+ /*
+ * Make sure all changes prior to this are visible to other CPUs.
+ */
+ smp_mb();
bt = &tags->bitmap_tags;
wake_index = atomic_read(&bt->wake_index);
for (i = 0; i < BT_WAIT_QUEUES; i++) {
@@ -641,6 +645,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
{
bt_free(&tags->bitmap_tags);
bt_free(&tags->breserved_tags);
+ free_cpumask_var(tags->cpumask);
kfree(tags);
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d921cd5177f5..1c27b3eaef64 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -78,47 +78,13 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}
-static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
-{
- while (true) {
- int ret;
-
- if (percpu_ref_tryget_live(&q->mq_usage_counter))
- return 0;
-
- if (!(gfp & __GFP_WAIT))
- return -EBUSY;
-
- ret = wait_event_interruptible(q->mq_freeze_wq,
- !atomic_read(&q->mq_freeze_depth) ||
- blk_queue_dying(q));
- if (blk_queue_dying(q))
- return -ENODEV;
- if (ret)
- return ret;
- }
-}
-
-static void blk_mq_queue_exit(struct request_queue *q)
-{
- percpu_ref_put(&q->mq_usage_counter);
-}
-
-static void blk_mq_usage_counter_release(struct percpu_ref *ref)
-{
- struct request_queue *q =
- container_of(ref, struct request_queue, mq_usage_counter);
-
- wake_up_all(&q->mq_freeze_wq);
-}
-
void blk_mq_freeze_queue_start(struct request_queue *q)
{
int freeze_depth;
freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
if (freeze_depth == 1) {
- percpu_ref_kill(&q->mq_usage_counter);
+ percpu_ref_kill(&q->q_usage_counter);
blk_mq_run_hw_queues(q, false);
}
}
@@ -126,18 +92,34 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
+ wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
/*
* Guarantee no request is in use, so we can change any data structure of
* the queue afterward.
*/
-void blk_mq_freeze_queue(struct request_queue *q)
+void blk_freeze_queue(struct request_queue *q)
{
+ /*
+ * In the !blk_mq case we are only calling this to kill the
+ * q_usage_counter, otherwise this increases the freeze depth
+ * and waits for it to return to zero. For this reason there is
+ * no blk_unfreeze_queue(), and blk_freeze_queue() is not
+ * exported to drivers as the only user for unfreeze is blk_mq.
+ */
blk_mq_freeze_queue_start(q);
blk_mq_freeze_queue_wait(q);
}
+
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+ /*
+ * ...just an alias to keep freeze and unfreeze actions balanced
+ * in the blk_mq_* namespace
+ */
+ blk_freeze_queue(q);
+}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -147,7 +129,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
WARN_ON_ONCE(freeze_depth < 0);
if (!freeze_depth) {
- percpu_ref_reinit(&q->mq_usage_counter);
+ percpu_ref_reinit(&q->q_usage_counter);
wake_up_all(&q->mq_freeze_wq);
}
}
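
Note: freezes nest via mq_freeze_depth; only the outermost pair touches q_usage_counter. An illustrative pairing:

	blk_mq_freeze_queue(q);		/* depth 0->1: kill ref, wait for drain */
	blk_mq_freeze_queue(q);		/* depth 1->2: ref already dead, returns */
	blk_mq_unfreeze_queue(q);	/* depth 2->1: no-op */
	blk_mq_unfreeze_queue(q);	/* depth 1->0: reinit ref, wake waiters */
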
@@ -256,7 +238,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
struct blk_mq_alloc_data alloc_data;
int ret;
- ret = blk_mq_queue_enter(q, gfp);
+ ret = blk_queue_enter(q, gfp);
if (ret)
return ERR_PTR(ret);
@@ -279,7 +261,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
}
blk_mq_put_ctx(ctx);
if (!rq) {
- blk_mq_queue_exit(q);
+ blk_queue_exit(q);
return ERR_PTR(-EWOULDBLOCK);
}
return rq;
@@ -298,7 +280,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
blk_mq_put_tag(hctx, tag, &ctx->last_tag);
- blk_mq_queue_exit(q);
+ blk_queue_exit(q);
}
void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
@@ -990,18 +972,25 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
}
EXPORT_SYMBOL(blk_mq_delay_queue);
-static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
- struct request *rq, bool at_head)
+static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx,
+ struct request *rq,
+ bool at_head)
{
- struct blk_mq_ctx *ctx = rq->mq_ctx;
-
trace_block_rq_insert(hctx->queue, rq);
if (at_head)
list_add(&rq->queuelist, &ctx->rq_list);
else
list_add_tail(&rq->queuelist, &ctx->rq_list);
+}
+
+static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
+ struct request *rq, bool at_head)
+{
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+ __blk_mq_insert_req_list(hctx, ctx, rq, at_head);
blk_mq_hctx_mark_pending(hctx, ctx);
}
@@ -1057,8 +1046,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
rq = list_first_entry(list, struct request, queuelist);
list_del_init(&rq->queuelist);
rq->mq_ctx = ctx;
- __blk_mq_insert_request(hctx, rq, false);
+ __blk_mq_insert_req_list(hctx, ctx, rq, false);
}
+ blk_mq_hctx_mark_pending(hctx, ctx);
spin_unlock(&ctx->lock);
blk_mq_run_hw_queue(hctx, from_schedule);
@@ -1140,7 +1130,7 @@ static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx,
struct request *rq, struct bio *bio)
{
- if (!hctx_allow_merges(hctx)) {
+ if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
blk_mq_bio_to_request(rq, bio);
spin_lock(&ctx->lock);
insert_rq:
@@ -1177,11 +1167,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
int rw = bio_data_dir(bio);
struct blk_mq_alloc_data alloc_data;
- if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
- bio_io_error(bio);
- return NULL;
- }
-
+ blk_queue_enter_live(q);
ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
@@ -1268,9 +1254,12 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_queue_split(q, &bio, q->bio_split);
- if (!is_flush_fua && !blk_queue_nomerges(q) &&
- blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
- return;
+ if (!is_flush_fua && !blk_queue_nomerges(q)) {
+ if (blk_attempt_plug_merge(q, bio, &request_count,
+ &same_queue_rq))
+ return;
+ } else
+ request_count = blk_plug_queued_count(q);
rq = blk_mq_map_request(q, bio, &data);
if (unlikely(!rq))
@@ -1377,7 +1366,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
plug = current->plug;
if (plug) {
blk_mq_bio_to_request(rq, bio);
- if (list_empty(&plug->mq_list))
+ if (!request_count)
trace_block_plug(q);
else if (request_count >= BLK_MAX_REQUEST_COUNT) {
blk_flush_plug_list(plug, false);
@@ -1684,7 +1673,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
INIT_LIST_HEAD(&hctx->dispatch);
hctx->queue = q;
hctx->queue_num = hctx_idx;
- hctx->flags = set->flags;
+ hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
blk_mq_hctx_notify, hctx);
@@ -1871,27 +1860,26 @@ static void blk_mq_map_swqueue(struct request_queue *q,
}
}
-static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
+static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
struct blk_mq_hw_ctx *hctx;
- struct request_queue *q;
- bool shared;
int i;
- if (set->tag_list.next == set->tag_list.prev)
- shared = false;
- else
- shared = true;
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (shared)
+ hctx->flags |= BLK_MQ_F_TAG_SHARED;
+ else
+ hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+ }
+}
+
+static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+{
+ struct request_queue *q;
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_freeze_queue(q);
-
- queue_for_each_hw_ctx(q, hctx, i) {
- if (shared)
- hctx->flags |= BLK_MQ_F_TAG_SHARED;
- else
- hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
- }
+ queue_set_hctx_shared(q, shared);
blk_mq_unfreeze_queue(q);
}
}
@@ -1902,7 +1890,12 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
mutex_lock(&set->tag_list_lock);
list_del_init(&q->tag_set_list);
- blk_mq_update_tag_set_depth(set);
+ if (list_is_singular(&set->tag_list)) {
+ /* just transitioned to unshared */
+ set->flags &= ~BLK_MQ_F_TAG_SHARED;
+ /* update existing queue */
+ blk_mq_update_tag_set_depth(set, false);
+ }
mutex_unlock(&set->tag_list_lock);
}
@@ -1912,8 +1905,17 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
q->tag_set = set;
mutex_lock(&set->tag_list_lock);
+
+ /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
+ if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
+ set->flags |= BLK_MQ_F_TAG_SHARED;
+ /* update existing queue */
+ blk_mq_update_tag_set_depth(set, true);
+ }
+ if (set->flags & BLK_MQ_F_TAG_SHARED)
+ queue_set_hctx_shared(q, true);
list_add_tail(&q->tag_set_list, &set->tag_list);
- blk_mq_update_tag_set_depth(set);
+
mutex_unlock(&set->tag_list_lock);
}
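
Note: the shared-tag state now transitions exactly at the 1<->2 queue boundary. A hypothetical driver-side fragment that exercises it (error handling omitted):

	q1 = blk_mq_init_queue(&set);	/* first user: set stays unshared */
	q2 = blk_mq_init_queue(&set);	/* second user: BLK_MQ_F_TAG_SHARED is
					 * set and q1's hctxs are updated under
					 * a queue freeze before q2 is added */
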
@@ -2000,14 +2002,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
hctxs[i]->queue_num = i;
}
- /*
- * Init percpu_ref in atomic mode so that it's faster to shutdown.
- * See blk_register_queue() for details.
- */
- if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
- PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
- goto err_hctxs;
-
setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
@@ -2088,8 +2082,6 @@ void blk_mq_free_queue(struct request_queue *q)
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
blk_mq_free_hw_queues(q, set);
-
- percpu_ref_exit(&q->mq_usage_counter);
}
/* Basically redo blk_mq_init_queue with queue frozen */
@@ -2307,10 +2299,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
int i;
for (i = 0; i < set->nr_hw_queues; i++) {
- if (set->tags[i]) {
+ if (set->tags[i])
blk_mq_free_rq_map(set, set->tags[i], i);
- free_cpumask_var(set->tags[i]->cpumask);
- }
}
kfree(set->tags);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index f4fea7964910..b44dce165761 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -29,8 +29,6 @@ void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
-void blk_mq_clone_flush_request(struct request *flush_rq,
- struct request *orig_rq);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 3e44a9da2a13..31849e328b45 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -540,6 +540,7 @@ static void blk_release_queue(struct kobject *kobj)
struct request_queue *q =
container_of(kobj, struct request_queue, kobj);
+ bdi_exit(&q->backing_dev_info);
blkcg_exit_queue(q);
if (q->elevator) {
@@ -599,9 +600,8 @@ int blk_register_queue(struct gendisk *disk)
*/
if (!blk_queue_init_done(q)) {
queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+ percpu_ref_switch_to_percpu(&q->q_usage_counter);
blk_queue_bypass_end(q);
- if (q->mq_ops)
- blk_mq_finish_init(q);
}
ret = blk_trace_init_sysfs(dev);
diff --git a/block/blk.h b/block/blk.h
index 98614ad37c81..da722eb786df 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -72,6 +72,28 @@ void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
unsigned int nr_bytes, unsigned int bidi_bytes);
+int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+void blk_queue_exit(struct request_queue *q);
+void blk_freeze_queue(struct request_queue *q);
+
+static inline void blk_queue_enter_live(struct request_queue *q)
+{
+ /*
+ * Given that running in generic_make_request() context
+ * guarantees that a live reference against q_usage_counter has
+ * been established, further references under that same context
+ * need not check that the queue has been frozen (marked dead).
+ */
+ percpu_ref_get(&q->q_usage_counter);
+}
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+void blk_flush_integrity(void);
+#else
+static inline void blk_flush_integrity(void)
+{
+}
+#endif
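
Note: blk_queue_enter_live() is for contexts that already hold a q_usage_counter reference, e.g. inside a make_request_fn called from generic_make_request(); the nested grab cannot race with a freeze. Sketch of the pattern (mirroring blk_mq_map_request() above):

	blk_queue_enter_live(q);	/* caller already holds a live ref */
	/* ... allocate and set up the request ... */
	blk_queue_exit(q);		/* dropped when the request is freed */
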
void blk_rq_timed_out_timer(unsigned long data);
unsigned long blk_rq_timeout(unsigned long timeout);
@@ -86,6 +108,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
unsigned int *request_count,
struct request **same_queue_rq);
+unsigned int blk_plug_queued_count(struct request_queue *q);
void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
diff --git a/block/elevator.c b/block/elevator.c
index 84d63943f2de..c3555c9c672f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -420,7 +420,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
* noxmerges: Only simple one-hit cache try
* merges: All merge tries attempted
*/
- if (blk_queue_nomerges(q))
+ if (blk_queue_nomerges(q) || !bio_mergeable(bio))
return ELEVATOR_NO_MERGE;
/*
diff --git a/block/genhd.c b/block/genhd.c
index 0c706f33a599..e5cafa51567c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -630,6 +630,7 @@ void add_disk(struct gendisk *disk)
WARN_ON(retval);
disk_add_events(disk);
+ blk_integrity_add(disk);
}
EXPORT_SYMBOL(add_disk);
@@ -638,6 +639,7 @@ void del_gendisk(struct gendisk *disk)
struct disk_part_iter piter;
struct hd_struct *part;
+ blk_integrity_del(disk);
disk_del_events(disk);
/* invalidate stuff */
diff --git a/block/partition-generic.c b/block/partition-generic.c
index e7711133284e..3b030157ec85 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -428,6 +428,7 @@ rescan:
if (disk->fops->revalidate_disk)
disk->fops->revalidate_disk(disk);
+ blk_integrity_revalidate(disk);
check_disk_size_change(disk, bdev);
bdev->bd_invalidated = 0;
if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
diff --git a/block/t10-pi.c b/block/t10-pi.c
index 24d6e9715318..2c97912335a9 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -160,38 +160,30 @@ static int t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
return t10_pi_verify(iter, t10_pi_ip_fn, 3);
}
-struct blk_integrity t10_pi_type1_crc = {
+struct blk_integrity_profile t10_pi_type1_crc = {
.name = "T10-DIF-TYPE1-CRC",
.generate_fn = t10_pi_type1_generate_crc,
.verify_fn = t10_pi_type1_verify_crc,
- .tuple_size = sizeof(struct t10_pi_tuple),
- .tag_size = 0,
};
EXPORT_SYMBOL(t10_pi_type1_crc);
-struct blk_integrity t10_pi_type1_ip = {
+struct blk_integrity_profile t10_pi_type1_ip = {
.name = "T10-DIF-TYPE1-IP",
.generate_fn = t10_pi_type1_generate_ip,
.verify_fn = t10_pi_type1_verify_ip,
- .tuple_size = sizeof(struct t10_pi_tuple),
- .tag_size = 0,
};
EXPORT_SYMBOL(t10_pi_type1_ip);
-struct blk_integrity t10_pi_type3_crc = {
+struct blk_integrity_profile t10_pi_type3_crc = {
.name = "T10-DIF-TYPE3-CRC",
.generate_fn = t10_pi_type3_generate_crc,
.verify_fn = t10_pi_type3_verify_crc,
- .tuple_size = sizeof(struct t10_pi_tuple),
- .tag_size = 0,
};
EXPORT_SYMBOL(t10_pi_type3_crc);
-struct blk_integrity t10_pi_type3_ip = {
+struct blk_integrity_profile t10_pi_type3_ip = {
.name = "T10-DIF-TYPE3-IP",
.generate_fn = t10_pi_type3_generate_ip,
.verify_fn = t10_pi_type3_verify_ip,
- .tuple_size = sizeof(struct t10_pi_tuple),
- .tag_size = 0,
};
EXPORT_SYMBOL(t10_pi_type3_ip);
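
Note: with tuple_size and tag_size gone from the profiles, a driver now supplies them in a blk_integrity template and points .profile at one of these objects. A hedged sketch of registration under the reworked API (the disk variable and flag choice are illustrative):

	struct blk_integrity bi = {
		.profile	= &t10_pi_type1_crc,
		.tuple_size	= sizeof(struct t10_pi_tuple),
		.tag_size	= 0,
		.flags		= BLK_INTEGRITY_DEVICE_CAPABLE,
	};

	blk_integrity_register(disk, &bi);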