author     Linus Torvalds <torvalds@linux-foundation.org>  2014-01-30 11:19:05 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-01-30 11:19:05 -0800
commit     f568849edac8611d603e00bd6cbbcfea09395ae6 (patch)
tree       b9472d640fe5d87426d38c9d81d946cf197ad3fb /drivers/md
parent     d9894c228b11273e720bb63ba120d1d326fe9d94 (diff)
parent     675675ada486dde5bf9aa51665e90706bff11a35 (diff)
Merge branch 'for-3.14/core' of git://git.kernel.dk/linux-block
Pull core block IO changes from Jens Axboe:
 "The major piece in here is the immutable bio_vecs series from Kent,
  the rest is fairly minor.  It was supposed to go in last round, but
  various issues pushed it to this release instead.  The pull request
  contains:

   - Various smaller blk-mq fixes from different folks.  Nothing major
     here, just minor fixes and cleanups.

   - Fix for a memory leak in the error path in the block ioctl code
     from Christian Engelmayer.

   - Header export fix from CaiZhiyong.

   - Finally the immutable biovec changes from Kent Overstreet.  This
     enables some nice future work on making arbitrarily sized bios
     possible, and splitting more efficient.  Related fixes to immutable
     bio_vecs:

      - dm-cache immutable fixup from Mike Snitzer.
      - btrfs immutable fixup from Muthu Kumar.
      - bio-integrity fix from Nic Bellinger, which is also going to
        stable"

* 'for-3.14/core' of git://git.kernel.dk/linux-block: (44 commits)
  xtensa: fixup simdisk driver to work with immutable bio_vecs
  block/blk-mq-cpu.c: use hotcpu_notifier()
  blk-mq: for_each_* macro correctness
  block: Fix memory leak in rw_copy_check_uvector() handling
  bio-integrity: Fix bio_integrity_verify segment start bug
  block: remove unrelated header files and export symbol
  blk-mq: uses page->list incorrectly
  blk-mq: use __smp_call_function_single directly
  btrfs: fix missing increment of bi_remaining
  Revert "block: Warn and free bio if bi_end_io is not set"
  block: Warn and free bio if bi_end_io is not set
  blk-mq: fix initializing request's start time
  block: blk-mq: don't export blk_mq_free_queue()
  block: blk-mq: make blk_sync_queue support mq
  block: blk-mq: support draining mq queue
  dm cache: increment bi_remaining when bi_end_io is restored
  block: fixup for generic bio chaining
  block: Really silence spurious compiler warnings
  block: Silence spurious compiler warnings
  block: Kill bio_pair_split()
  ...
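Nearly every drivers/md hunk below is driven by one mechanical change from the immutable biovec series: the iteration state that used to live directly in struct bio (bi_sector, bi_size, bi_idx) moves into an embedded iterator, bio->bi_iter. A minimal sketch of that iterator's shape, paraphrased from the 3.14-era block headers (the field comments are explanatory, not the kernel's own):

	/* Sketch of the iterator added by the immutable biovec series;
	 * paraphrased, not a verbatim copy of include/linux/bio.h. */
	struct bvec_iter {
		sector_t	bi_sector;	/* device address, in 512-byte sectors */
		unsigned int	bi_size;	/* residual I/O count, in bytes */
		unsigned int	bi_idx;		/* current index into bi_io_vec */
		unsigned int	bi_bvec_done;	/* bytes completed in current bvec */
	};

Because all position state lives in this iterator rather than in the bio_vec array, the array never needs to be modified while a bio is advanced, cloned, or split; that is what allows the diff below to delete workarounds such as bcache's bch_bio_split() and bch_generic_make_request_hack().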
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bcache/bcache.h            2
-rw-r--r--  drivers/md/bcache/btree.c             8
-rw-r--r--  drivers/md/bcache/debug.c            21
-rw-r--r--  drivers/md/bcache/io.c              196
-rw-r--r--  drivers/md/bcache/journal.c          12
-rw-r--r--  drivers/md/bcache/movinggc.c          4
-rw-r--r--  drivers/md/bcache/request.c         131
-rw-r--r--  drivers/md/bcache/super.c            20
-rw-r--r--  drivers/md/bcache/util.c              4
-rw-r--r--  drivers/md/bcache/writeback.c         6
-rw-r--r--  drivers/md/bcache/writeback.h         2
-rw-r--r--  drivers/md/dm-bio-record.h           37
-rw-r--r--  drivers/md/dm-bufio.c                 2
-rw-r--r--  drivers/md/dm-cache-policy-mq.c       4
-rw-r--r--  drivers/md/dm-cache-target.c         28
-rw-r--r--  drivers/md/dm-crypt.c                64
-rw-r--r--  drivers/md/dm-delay.c                 7
-rw-r--r--  drivers/md/dm-flakey.c                7
-rw-r--r--  drivers/md/dm-io.c                   37
-rw-r--r--  drivers/md/dm-linear.c                3
-rw-r--r--  drivers/md/dm-raid1.c                20
-rw-r--r--  drivers/md/dm-region-hash.c           3
-rw-r--r--  drivers/md/dm-snap.c                 19
-rw-r--r--  drivers/md/dm-stripe.c               13
-rw-r--r--  drivers/md/dm-switch.c                4
-rw-r--r--  drivers/md/dm-thin.c                 30
-rw-r--r--  drivers/md/dm-verity.c               62
-rw-r--r--  drivers/md/dm.c                     189
-rw-r--r--  drivers/md/faulty.c                  19
-rw-r--r--  drivers/md/linear.c                  96
-rw-r--r--  drivers/md/md.c                      12
-rw-r--r--  drivers/md/multipath.c               13
-rw-r--r--  drivers/md/raid0.c                   79
-rw-r--r--  drivers/md/raid1.c                   73
-rw-r--r--  drivers/md/raid10.c                 194
-rw-r--r--  drivers/md/raid5.c                   84
36 files changed, 569 insertions, 936 deletions
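Two patterns account for most of the diff that follows: direct accesses to bio->bi_sector and bio->bi_size become bio->bi_iter.bi_sector and bio->bi_iter.bi_size, and bio_for_each_segment() callers switch from an integer index yielding struct bio_vec pointers to a struct bvec_iter yielding bio_vec values. A hypothetical before/after sketch of the iteration change (the example function names are mine, not from the patch):

	/* Old style: integer index, pointers into the bio's bi_io_vec array. */
	static unsigned sum_segment_lengths_old(struct bio *bio)
	{
		struct bio_vec *bv;
		unsigned bytes = 0;
		int i;

		bio_for_each_segment(bv, bio, i)
			bytes += bv->bv_len;
		return bytes;
	}

	/* New style: bio_vec yielded by value through a bvec_iter, so the
	 * walk starts from the bio's current position and never mutates
	 * the underlying (now shareable) bio_vec array. */
	static unsigned sum_segment_lengths_new(struct bio *bio)
	{
		struct bio_vec bv;
		struct bvec_iter iter;
		unsigned bytes = 0;

		bio_for_each_segment(bv, bio, iter)
			bytes += bv.bv_len;
		return bytes;
	}

Code that genuinely needs pointers into the array it owns (for example, to free pages it allocated itself) uses bio_for_each_segment_all() instead, as several bcache hunks below show.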
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 754f43177483..dbdbca5a9591 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -280,7 +280,6 @@ struct bcache_device {
unsigned long sectors_dirty_last;
long sectors_dirty_derivative;
- mempool_t *unaligned_bvec;
struct bio_set *bio_split;
unsigned data_csum:1;
@@ -902,7 +901,6 @@ void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
void bch_bbio_free(struct bio *, struct cache_set *);
struct bio *bch_bbio_alloc(struct cache_set *);
-struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
void bch_generic_make_request(struct bio *, struct bio_split_pool *);
void __bch_submit_bbio(struct bio *, struct cache_set *);
void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 31bb53fcc67a..946ecd3b048b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -299,7 +299,7 @@ void bch_btree_node_read(struct btree *b)
bio = bch_bbio_alloc(b->c);
bio->bi_rw = REQ_META|READ_SYNC;
- bio->bi_size = KEY_SIZE(&b->key) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
bio->bi_end_io = btree_node_read_endio;
bio->bi_private = &cl;
@@ -362,7 +362,7 @@ static void btree_node_write_done(struct closure *cl)
struct bio_vec *bv;
int n;
- __bio_for_each_segment(bv, b->bio, n, 0)
+ bio_for_each_segment_all(bv, b->bio, n)
__free_page(bv->bv_page);
__btree_node_write_done(cl);
@@ -395,7 +395,7 @@ static void do_btree_node_write(struct btree *b)
b->bio->bi_end_io = btree_node_write_endio;
b->bio->bi_private = cl;
b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
- b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
+ b->bio->bi_iter.bi_size = set_blocks(i, b->c) * block_bytes(b->c);
bch_bio_map(b->bio, i);
/*
@@ -421,7 +421,7 @@ static void do_btree_node_write(struct btree *b)
struct bio_vec *bv;
void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
- bio_for_each_segment(bv, b->bio, j)
+ bio_for_each_segment_all(bv, b->bio, j)
memcpy(page_address(bv->bv_page),
base + j * PAGE_SIZE, PAGE_SIZE);
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 264fcfbd6290..03cb4d114e16 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -173,7 +173,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
char name[BDEVNAME_SIZE];
struct bio *check;
- struct bio_vec *bv;
+ struct bio_vec bv, *bv2;
+ struct bvec_iter iter;
int i;
check = bio_clone(bio, GFP_NOIO);
@@ -185,23 +186,23 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
submit_bio_wait(READ_SYNC, check);
- bio_for_each_segment(bv, bio, i) {
- void *p1 = kmap_atomic(bv->bv_page);
- void *p2 = page_address(check->bi_io_vec[i].bv_page);
+ bio_for_each_segment(bv, bio, iter) {
+ void *p1 = kmap_atomic(bv.bv_page);
+ void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
- cache_set_err_on(memcmp(p1 + bv->bv_offset,
- p2 + bv->bv_offset,
- bv->bv_len),
+ cache_set_err_on(memcmp(p1 + bv.bv_offset,
+ p2 + bv.bv_offset,
+ bv.bv_len),
dc->disk.c,
"verify failed at dev %s sector %llu",
bdevname(dc->bdev, name),
- (uint64_t) bio->bi_sector);
+ (uint64_t) bio->bi_iter.bi_sector);
kunmap_atomic(p1);
}
- bio_for_each_segment_all(bv, check, i)
- __free_page(bv->bv_page);
+ bio_for_each_segment_all(bv2, check, i)
+ __free_page(bv2->bv_page);
out_put:
bio_put(check);
}
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9056632995b1..fa028fa82df4 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -11,178 +11,40 @@
#include <linux/blkdev.h>
-static void bch_bi_idx_hack_endio(struct bio *bio, int error)
-{
- struct bio *p = bio->bi_private;
-
- bio_endio(p, error);
- bio_put(bio);
-}
-
-static void bch_generic_make_request_hack(struct bio *bio)
-{
- if (bio->bi_idx) {
- struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
-
- memcpy(clone->bi_io_vec,
- bio_iovec(bio),
- bio_segments(bio) * sizeof(struct bio_vec));
-
- clone->bi_sector = bio->bi_sector;
- clone->bi_bdev = bio->bi_bdev;
- clone->bi_rw = bio->bi_rw;
- clone->bi_vcnt = bio_segments(bio);
- clone->bi_size = bio->bi_size;
-
- clone->bi_private = bio;
- clone->bi_end_io = bch_bi_idx_hack_endio;
-
- bio = clone;
- }
-
- /*
- * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
- * bios might have had more than that (before we split them per device
- * limitations).
- *
- * To be taken out once immutable bvec stuff is in.
- */
- bio->bi_max_vecs = bio->bi_vcnt;
-
- generic_make_request(bio);
-}
-
-/**
- * bch_bio_split - split a bio
- * @bio: bio to split
- * @sectors: number of sectors to split from the front of @bio
- * @gfp: gfp mask
- * @bs: bio set to allocate from
- *
- * Allocates and returns a new bio which represents @sectors from the start of
- * @bio, and updates @bio to represent the remaining sectors.
- *
- * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
- * unchanged.
- *
- * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
- * bvec boundry; it is the caller's responsibility to ensure that @bio is not
- * freed before the split.
- */
-struct bio *bch_bio_split(struct bio *bio, int sectors,
- gfp_t gfp, struct bio_set *bs)
-{
- unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
- struct bio_vec *bv;
- struct bio *ret = NULL;
-
- BUG_ON(sectors <= 0);
-
- if (sectors >= bio_sectors(bio))
- return bio;
-
- if (bio->bi_rw & REQ_DISCARD) {
- ret = bio_alloc_bioset(gfp, 1, bs);
- if (!ret)
- return NULL;
- idx = 0;
- goto out;
- }
-
- bio_for_each_segment(bv, bio, idx) {
- vcnt = idx - bio->bi_idx;
-
- if (!nbytes) {
- ret = bio_alloc_bioset(gfp, vcnt, bs);
- if (!ret)
- return NULL;
-
- memcpy(ret->bi_io_vec, bio_iovec(bio),
- sizeof(struct bio_vec) * vcnt);
-
- break;
- } else if (nbytes < bv->bv_len) {
- ret = bio_alloc_bioset(gfp, ++vcnt, bs);
- if (!ret)
- return NULL;
-
- memcpy(ret->bi_io_vec, bio_iovec(bio),
- sizeof(struct bio_vec) * vcnt);
-
- ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
- bv->bv_offset += nbytes;
- bv->bv_len -= nbytes;
- break;
- }
-
- nbytes -= bv->bv_len;
- }
-out:
- ret->bi_bdev = bio->bi_bdev;
- ret->bi_sector = bio->bi_sector;
- ret->bi_size = sectors << 9;
- ret->bi_rw = bio->bi_rw;
- ret->bi_vcnt = vcnt;
- ret->bi_max_vecs = vcnt;
-
- bio->bi_sector += sectors;
- bio->bi_size -= sectors << 9;
- bio->bi_idx = idx;
-
- if (bio_integrity(bio)) {
- if (bio_integrity_clone(ret, bio, gfp)) {
- bio_put(ret);
- return NULL;
- }
-
- bio_integrity_trim(ret, 0, bio_sectors(ret));
- bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
- }
-
- return ret;
-}
-
static unsigned bch_bio_max_sectors(struct bio *bio)
{
- unsigned ret = bio_sectors(bio);
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
- unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
- queue_max_segments(q));
+ struct bio_vec bv;
+ struct bvec_iter iter;
+ unsigned ret = 0, seg = 0;
if (bio->bi_rw & REQ_DISCARD)
- return min(ret, q->limits.max_discard_sectors);
-
- if (bio_segments(bio) > max_segments ||
- q->merge_bvec_fn) {
- struct bio_vec *bv;
- int i, seg = 0;
-
- ret = 0;
-
- bio_for_each_segment(bv, bio, i) {
- struct bvec_merge_data bvm = {
- .bi_bdev = bio->bi_bdev,
- .bi_sector = bio->bi_sector,
- .bi_size = ret << 9,
- .bi_rw = bio->bi_rw,
- };
-
- if (seg == max_segments)
- break;
+ return min(bio_sectors(bio), q->limits.max_discard_sectors);
+
+ bio_for_each_segment(bv, bio, iter) {
+ struct bvec_merge_data bvm = {
+ .bi_bdev = bio->bi_bdev,
+ .bi_sector = bio->bi_iter.bi_sector,
+ .bi_size = ret << 9,
+ .bi_rw = bio->bi_rw,
+ };
+
+ if (seg == min_t(unsigned, BIO_MAX_PAGES,
+ queue_max_segments(q)))
+ break;
- if (q->merge_bvec_fn &&
- q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
- break;
+ if (q->merge_bvec_fn &&
+ q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+ break;
- seg++;
- ret += bv->bv_len >> 9;
- }
+ seg++;
+ ret += bv.bv_len >> 9;
}
ret = min(ret, queue_max_sectors(q));
WARN_ON(!ret);
- ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9);
+ ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);
return ret;
}
@@ -193,7 +55,7 @@ static void bch_bio_submit_split_done(struct closure *cl)
s->bio->bi_end_io = s->bi_end_io;
s->bio->bi_private = s->bi_private;
- bio_endio(s->bio, 0);
+ bio_endio_nodec(s->bio, 0);
closure_debug_destroy(&s->cl);
mempool_free(s, s->p->bio_split_hook);
@@ -232,19 +94,19 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
bio_get(bio);
do {
- n = bch_bio_split(bio, bch_bio_max_sectors(bio),
- GFP_NOIO, s->p->bio_split);
+ n = bio_next_split(bio, bch_bio_max_sectors(bio),
+ GFP_NOIO, s->p->bio_split);
n->bi_end_io = bch_bio_submit_split_endio;
n->bi_private = &s->cl;
closure_get(&s->cl);
- bch_generic_make_request_hack(n);
+ generic_make_request(n);
} while (n != bio);
continue_at(&s->cl, bch_bio_submit_split_done, NULL);
submit:
- bch_generic_make_request_hack(bio);
+ generic_make_request(bio);
}
/* Bios with headers */
@@ -272,8 +134,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
struct bbio *b = container_of(bio, struct bbio, bio);
- bio->bi_sector = PTR_OFFSET(&b->key, 0);
- bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
+ bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
+ bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
b->submit_time_us = local_clock_us();
closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ecdaa671bd50..7eafdf09a0ae 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -51,10 +51,10 @@ reread: left = ca->sb.bucket_size - offset;
len = min_t(unsigned, left, PAGE_SECTORS * 8);
bio_reset(bio);
- bio->bi_sector = bucket + offset;
+ bio->bi_iter.bi_sector = bucket + offset;
bio->bi_bdev = ca->bdev;
bio->bi_rw = READ;
- bio->bi_size = len << 9;
+ bio->bi_iter.bi_size = len << 9;
bio->bi_end_io = journal_read_endio;
bio->bi_private = &cl;
@@ -437,13 +437,13 @@ static void do_journal_discard(struct cache *ca)
atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
bio_init(bio);
- bio->bi_sector = bucket_to_sector(ca->set,
+ bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
bio->bi_bdev = ca->bdev;
bio->bi_rw = REQ_WRITE|REQ_DISCARD;
bio->bi_max_vecs = 1;
bio->bi_io_vec = bio->bi_inline_vecs;
- bio->bi_size = bucket_bytes(ca);
+ bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = journal_discard_endio;
closure_get(&ca->set->cl);
@@ -608,10 +608,10 @@ static void journal_write_unlocked(struct closure *cl)
atomic_long_add(sectors, &ca->meta_sectors_written);
bio_reset(bio);
- bio->bi_sector = PTR_OFFSET(k, i);
+ bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
bio->bi_bdev = ca->bdev;
bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
- bio->bi_size = sectors << 9;
+ bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
bio->bi_private = w;
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index f2f0998c4a91..052bd24d24b4 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -86,7 +86,7 @@ static void moving_init(struct moving_io *io)
bio_get(bio);
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
- bio->bi_size = KEY_SIZE(&io->w->key) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
PAGE_SECTORS);
bio->bi_private = &io->cl;
@@ -102,7 +102,7 @@ static void write_moving(struct closure *cl)
if (!op->error) {
moving_init(io);
- io->bio.bio.bi_sector = KEY_START(&io->w->key);
+ io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
op->write_prio = 1;
op->bio = &io->bio.bio;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 61bcfc21d2a0..c906571997d7 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -197,14 +197,14 @@ static bool verify(struct cached_dev *dc, struct bio *bio)
static void bio_csum(struct bio *bio, struct bkey *k)
{
- struct bio_vec *bv;
+ struct bio_vec bv;
+ struct bvec_iter iter;
uint64_t csum = 0;
- int i;
- bio_for_each_segment(bv, bio, i) {
- void *d = kmap(bv->bv_page) + bv->bv_offset;
- csum = bch_crc64_update(csum, d, bv->bv_len);
- kunmap(bv->bv_page);
+ bio_for_each_segment(bv, bio, iter) {
+ void *d = kmap(bv.bv_page) + bv.bv_offset;
+ csum = bch_crc64_update(csum, d, bv.bv_len);
+ kunmap(bv.bv_page);
}
k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
@@ -260,7 +260,7 @@ static void bch_data_invalidate(struct closure *cl)
struct bio *bio = op->bio;
pr_debug("invalidating %i sectors from %llu",
- bio_sectors(bio), (uint64_t) bio->bi_sector);
+ bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
while (bio_sectors(bio)) {
unsigned sectors = min(bio_sectors(bio),
@@ -269,11 +269,11 @@ static void bch_data_invalidate(struct closure *cl)
if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
goto out;
- bio->bi_sector += sectors;
- bio->bi_size -= sectors << 9;
+ bio->bi_iter.bi_sector += sectors;
+ bio->bi_iter.bi_size -= sectors << 9;
bch_keylist_add(&op->insert_keys,
- &KEY(op->inode, bio->bi_sector, sectors));
+ &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
}
op->insert_data_done = true;
@@ -363,14 +363,14 @@ static void bch_data_insert_start(struct closure *cl)
k = op->insert_keys.top;
bkey_init(k);
SET_KEY_INODE(k, op->inode);
- SET_KEY_OFFSET(k, bio->bi_sector);
+ SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
op->write_point, op->write_prio,
op->writeback))
goto err;
- n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
+ n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
n->bi_end_io = bch_data_insert_endio;
n->bi_private = cl;
@@ -521,7 +521,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
(bio->bi_rw & REQ_WRITE)))
goto skip;
- if (bio->bi_sector & (c->sb.block_size - 1) ||
+ if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
bio_sectors(bio) & (c->sb.block_size - 1)) {
pr_debug("skipping unaligned io");
goto skip;
@@ -545,8 +545,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
spin_lock(&dc->io_lock);
- hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
- if (i->last == bio->bi_sector &&
+ hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
+ if (i->last == bio->bi_iter.bi_sector &&
time_before(jiffies, i->jiffies))
goto found;
@@ -555,8 +555,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
add_sequential(task);
i->sequential = 0;
found:
- if (i->sequential + bio->bi_size > i->sequential)
- i->sequential += bio->bi_size;
+ if (i->sequential + bio->bi_iter.bi_size > i->sequential)
+ i->sequential += bio->bi_iter.bi_size;
i->last = bio_end_sector(bio);
i->jiffies = jiffies + msecs_to_jiffies(5000);
@@ -605,7 +605,6 @@ struct search {
unsigned insert_bio_sectors;
unsigned recoverable:1;
- unsigned unaligned_bvec:1;
unsigned write:1;
unsigned read_dirty_data:1;
@@ -649,15 +648,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
struct bkey *bio_key;
unsigned ptr;
- if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
+ if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
return MAP_CONTINUE;
if (KEY_INODE(k) != s->iop.inode ||
- KEY_START(k) > bio->bi_sector) {
+ KEY_START(k) > bio->bi_iter.bi_sector) {
unsigned bio_sectors = bio_sectors(bio);
unsigned sectors = KEY_INODE(k) == s->iop.inode
? min_t(uint64_t, INT_MAX,
- KEY_START(k) - bio->bi_sector)
+ KEY_START(k) - bio->bi_iter.bi_sector)
: INT_MAX;
int ret = s->d->cache_miss(b, s, bio, sectors);
@@ -679,14 +678,14 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
if (KEY_DIRTY(k))
s->read_dirty_data = true;
- n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
- KEY_OFFSET(k) - bio->bi_sector),
- GFP_NOIO, s->d->bio_split);
+ n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
+ KEY_OFFSET(k) - bio->bi_iter.bi_sector),
+ GFP_NOIO, s->d->bio_split);
bio_key = &container_of(n, struct bbio, bio)->key;
bch_bkey_copy_single_ptr(bio_key, k, ptr);
- bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
+ bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
n->bi_end_io = bch_cache_read_endio;
@@ -713,7 +712,7 @@ static void cache_lookup(struct closure *cl)
struct bio *bio = &s->bio.bio;
int ret = bch_btree_map_keys(&s->op, s->iop.c,
- &KEY(s->iop.inode, bio->bi_sector, 0),
+ &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
cache_lookup_fn, MAP_END_KEY);
if (ret == -EAGAIN)
continue_at(cl, cache_lookup, bcache_wq);
@@ -758,10 +757,12 @@ static void bio_complete(struct search *s)
static void do_bio_hook(struct search *s)
{
struct bio *bio = &s->bio.bio;
- memcpy(bio, s->orig_bio, sizeof(struct bio));
+ bio_init(bio);
+ __bio_clone_fast(bio, s->orig_bio);
bio->bi_end_io = request_endio;
bio->bi_private = &s->cl;
+
atomic_set(&bio->bi_cnt, 3);
}
@@ -773,9 +774,6 @@ static void search_free(struct closure *cl)
if (s->iop.bio)
bio_put(s->iop.bio);
- if (s->unaligned_bvec)
- mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
-
closure_debug_destroy(cl);
mempool_free(s, s->d->c->search);
}
@@ -783,7 +781,6 @@ static void search_free(struct closure *cl)
static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
struct search *s;
- struct bio_vec *bv;
s = mempool_alloc(d->c->search, GFP_NOIO);
memset(s, 0, offsetof(struct search, iop.insert_keys));
@@ -802,15 +799,6 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
s->start_time = jiffies;
do_bio_hook(s);
- if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
- bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
- memcpy(bv, bio_iovec(bio),
- sizeof(struct bio_vec) * bio_segments(bio));
-
- s->bio.bio.bi_io_vec = bv;
- s->unaligned_bvec = 1;
- }
-
return s;
}
@@ -849,26 +837,13 @@ static void cached_dev_read_error(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
struct bio *bio = &s->bio.bio;
- struct bio_vec *bv;
- int i;
if (s->recoverable) {
/* Retry from the backing device: */
trace_bcache_read_retry(s->orig_bio);
s->iop.error = 0;
- bv = s->bio.bio.bi_io_vec;
do_bio_hook(s);
- s->bio.bio.bi_io_vec = bv;
-
- if (!s->unaligned_bvec)
- bio_for_each_segment(bv, s->orig_bio, i)
- bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
- else
- memcpy(s->bio.bio.bi_io_vec,
- bio_iovec(s->orig_bio),
- sizeof(struct bio_vec) *
- bio_segments(s->orig_bio));
/* XXX: invalidate cache */
@@ -893,9 +868,9 @@ static void cached_dev_read_done(struct closure *cl)
if (s->iop.bio) {
bio_reset(s->iop.bio);
- s->iop.bio->bi_sector = s->cache_miss->bi_sector;
+ s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
- s->iop.bio->bi_size = s->insert_bio_sectors << 9;
+ s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
bch_bio_map(s->iop.bio, NULL);
bio_copy_data(s->cache_miss, s->iop.bio);
@@ -904,8 +879,7 @@ static void cached_dev_read_done(struct closure *cl)
s->cache_miss = NULL;
}
- if (verify(dc, &s->bio.bio) && s->recoverable &&
- !s->unaligned_bvec && !s->read_dirty_data)
+ if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
bch_data_verify(dc, s->orig_bio);
bio_complete(s);
@@ -945,7 +919,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct bio *miss, *cache_bio;
if (s->cache_miss || s->iop.bypass) {
- miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+ miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
goto out_submit;
}
@@ -959,7 +933,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
s->iop.replace_key = KEY(s->iop.inode,
- bio->bi_sector + s->insert_bio_sectors,
+ bio->bi_iter.bi_sector + s->insert_bio_sectors,
s->insert_bio_sectors);
ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
@@ -968,7 +942,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
s->iop.replace = true;
- miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+ miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
/* btree_search_recurse()'s btree iterator is no good anymore */
ret = miss == bio ? MAP_DONE : -EINTR;
@@ -979,9 +953,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
if (!cache_bio)
goto out_submit;
- cache_bio->bi_sector = miss->bi_sector;
- cache_bio->bi_bdev = miss->bi_bdev;
- cache_bio->bi_size = s->insert_bio_sectors << 9;
+ cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
+ cache_bio->bi_bdev = miss->bi_bdev;
+ cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
cache_bio->bi_end_io = request_endio;
cache_bio->bi_private = &s->cl;
@@ -1031,7 +1005,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
struct closure *cl = &s->cl;
struct bio *bio = &s->bio.bio;
- struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+ struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
@@ -1087,8 +1061,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
closure_bio_submit(flush, cl, s->d);
}
} else {
- s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
- dc->disk.bio_split);
+ s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
closure_bio_submit(bio, cl, s->d);
}
@@ -1126,13 +1099,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
part_stat_unlock();
bio->bi_bdev = dc->bdev;
- bio->bi_sector += dc->sb.data_offset;
+ bio->bi_iter.bi_sector += dc->sb.data_offset;
if (cached_dev_get(dc)) {
s = search_alloc(bio, d);
trace_bcache_request_start(s->d, bio);
- if (!bio->bi_size) {
+ if (!bio->bi_iter.bi_size) {
/*
* can't call bch_journal_meta from under
* generic_make_request
@@ -1204,24 +1177,24 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
static int flash_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned sectors)
{
- struct bio_vec *bv;
- int i;
+ struct bio_vec bv;
+ struct bvec_iter iter;
/* Zero fill bio */
- bio_for_each_segment(bv, bio, i) {
- unsigned j = min(bv->bv_len >> 9, sectors);
+ bio_for_each_segment(bv, bio, iter) {
+ unsigned j = min(bv.bv_len >> 9, sectors);
- void *p = kmap(bv->bv_page);
- memset(p + bv->bv_offset, 0, j << 9);
- kunmap(bv->bv_page);
+ void *p = kmap(bv.bv_page);
+ memset(p + bv.bv_offset, 0, j << 9);
+ kunmap(bv.bv_page);
sectors -= j;
}
- bio_advance(bio, min(sectors << 9, bio->bi_size));
+ bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));
- if (!bio->bi_size)
+ if (!bio->bi_iter.bi_size)
return MAP_DONE;
return MAP_CONTINUE;
@@ -1255,7 +1228,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
trace_bcache_request_start(s->d, bio);
- if (!bio->bi_size) {
+ if (!bio->bi_iter.bi_size) {
/*
* can't call bch_journal_meta from under
* generic_make_request
@@ -1265,7 +1238,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
bcache_wq);
} else if (rw) {
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
- &KEY(d->id, bio->bi_sector, 0),
+ &KEY(d->id, bio->bi_iter.bi_sector, 0),
&KEY(d->id, bio_end_sector(bio), 0));
s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index c57bfa071a57..93d593f957f6 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -233,9 +233,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
unsigned i;
- bio->bi_sector = SB_SECTOR;
- bio->bi_rw = REQ_SYNC|REQ_META;
- bio->bi_size = SB_SIZE;
+ bio->bi_iter.bi_sector = SB_SECTOR;
+ bio->bi_rw = REQ_SYNC|REQ_META;
+ bio->bi_iter.bi_size = SB_SIZE;
bch_bio_map(bio, NULL);
out->offset = cpu_to_le64(sb->offset);
@@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
struct bio *bio = bch_bbio_alloc(c);
bio->bi_rw = REQ_SYNC|REQ_META|rw;
- bio->bi_size = KEY_SIZE(k) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
bio->bi_end_io = uuid_endio;
bio->bi_private = cl;
@@ -503,10 +503,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
closure_init_stack(cl);
- bio->bi_sector = bucket * ca->sb.bucket_size;
- bio->bi_bdev = ca->bdev;
- bio->bi_rw = REQ_SYNC|REQ_META|rw;
- bio->bi_size = bucket_bytes(ca);
+ bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
+ bio->bi_bdev = ca->bdev;
+ bio->bi_rw = REQ_SYNC|REQ_META|rw;
+ bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = prio_endio;
bio->bi_private = ca;
@@ -739,8 +739,6 @@ static void bcache_device_free(struct bcache_device *d)
}
bio_split_pool_free(&d->bio_split_hook);
- if (d->unaligned_bvec)
- mempool_destroy(d->unaligned_bvec);
if (d->bio_split)
bioset_free(d->bio_split);
if (is_vmalloc_addr(d->full_dirty_stripes))
@@ -793,8 +791,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
return minor;
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
- !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
- sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
bio_split_pool_init(&d->bio_split_hook) ||
!(d->disk = alloc_disk(1))) {
ida_simple_remove(&bcache_minor, minor);
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index bb37618e7664..db3ae4c2b223 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -224,10 +224,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
void bch_bio_map(struct bio *bio, void *base)
{
- size_t size = bio->bi_size;
+ size_t size = bio->bi_iter.bi_size;
struct bio_vec *bv = bio->bi_io_vec;
- BUG_ON(!bio->bi_size);
+ BUG_ON(!bio->bi_iter.bi_size);
BUG_ON(bio->bi_vcnt);
bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 6c44fe059c27..f4300e4c0114 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -111,7 +111,7 @@ static void dirty_init(struct keybuf_key *w)
if (!io->dc->writeback_percent)
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
- bio->bi_size = KEY_SIZE(&w->key) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
bio->bi_private = w;
bio->bi_io_vec = bio->bi_inline_vecs;
@@ -184,7 +184,7 @@ static void write_dirty(struct closure *cl)
dirty_init(w);
io->bio.bi_rw = WRITE;
- io->bio.bi_sector = KEY_START(&w->key);
+ io->bio.bi_iter.bi_sector = KEY_START(&w->key);
io->bio.bi_bdev = io->dc->bdev;
io->bio.bi_end_io = dirty_endio;
@@ -253,7 +253,7 @@ static void read_dirty(struct cached_dev *dc)
io->dc = dc;
dirty_init(w);
- io->bio.bi_sector = PTR_OFFSET(&w->key, 0);
+ io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
&w->key, 0)->bdev;
io->bio.bi_rw = READ;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index c9ddcf4614b9..e2f8598937ac 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -50,7 +50,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
return false;
if (dc->partial_stripes_expensive &&
- bcache_dev_stripe_dirty(dc, bio->bi_sector,
+ bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
bio_sectors(bio)))
return true;
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index 3a8cfa2645c7..dd3646111561 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -17,55 +17,24 @@
* original bio state.
*/
-struct dm_bio_vec_details {
-#if PAGE_SIZE < 65536
- __u16 bv_len;
- __u16 bv_offset;
-#else
- unsigned bv_len;
- unsigned bv_offset;
-#endif
-};
-
struct dm_bio_details {
- sector_t bi_sector;
struct block_device *bi_bdev;
- unsigned int bi_size;
- unsigned short bi_idx;
unsigned long bi_flags;
- struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES];
+ struct bvec_iter bi_iter;
};
static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
{
- unsigned i;
-
- bd->bi_sector = bio->bi_sector;
bd->bi_bdev = bio->bi_bdev;
- bd->bi_size = bio->bi_size;
- bd->bi_idx = bio->bi_idx;
bd->bi_flags = bio->bi_flags;
-
- for (i = 0; i < bio->bi_vcnt; i++) {
- bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len;
- bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset;
- }
+ bd->bi_iter = bio->bi_iter;
}
static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
{
- unsigned i;
-
- bio->bi_sector = bd->bi_sector;
bio->bi_bdev = bd->bi_bdev;
- bio->bi_size = bd->bi_size;
- bio->bi_idx = bd->bi_idx;
bio->bi_flags = bd->bi_flags;
-
- for (i = 0; i < bio->bi_vcnt; i++) {
- bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len;
- bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset;
- }
+ bio->bi_iter = bd->bi_iter;
}
#endif
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 9ed42125514b..66c5d130c8c2 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -540,7 +540,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
bio_init(&b->bio);
b->bio.bi_io_vec = b->bio_vec;
b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
- b->bio.bi_sector = block << b->c->sectors_per_block_bits;
+ b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
b->bio.bi_bdev = b->c->bdev;
b->bio.bi_end_io = end_io;
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 930e8c3d73e9..1e018e986610 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -72,7 +72,7 @@ static enum io_pattern iot_pattern(struct io_tracker *t)
static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
- if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
+ if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
t->nr_seq_samples++;
else {
/*
@@ -87,7 +87,7 @@ static void iot_update_stats(struct io_tracker *t, struct bio *bio)
t->nr_rand_samples++;
}
- t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
+ t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
}
static void iot_check_for_pattern_switch(struct io_tracker *t)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 09334c275c79..ffd472e015ca 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -85,6 +85,12 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
{
bio->bi_end_io = h->bi_end_io;
bio->bi_private = h->bi_private;
+
+ /*
+ * Must bump bi_remaining to allow bio to complete with
+ * restored bi_end_io.
+ */
+ atomic_inc(&bio->bi_remaining);
}
/*----------------------------------------------------------------*/
@@ -664,15 +670,17 @@ static void remap_to_origin(struct cache *cache, struct bio *bio)
static void remap_to_cache(struct cache *cache, struct bio *bio,
dm_cblock_t cblock)
{
- sector_t bi_sector = bio->bi_sector;
+ sector_t bi_sector = bio->bi_iter.bi_sector;
bio->bi_bdev = cache->cache_dev->bdev;
if (!block_size_is_power_of_two(cache))
- bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
- sector_div(bi_sector, cache->sectors_per_block);
+ bio->bi_iter.bi_sector =
+ (from_cblock(cblock) * cache->sectors_per_block) +
+ sector_div(bi_sector, cache->sectors_per_block);
else
- bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
- (bi_sector & (cache->sectors_per_block - 1));
+ bio->bi_iter.bi_sector =
+ (from_cblock(cblock) << cache->sectors_per_block_shift) |
+ (bi_sector & (cache->sectors_per_block - 1));
}
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
@@ -712,7 +720,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{
- sector_t block_nr = bio->bi_sector;
+ sector_t block_nr = bio->bi_iter.bi_sector;
if (!block_size_is_power_of_two(cache))
(void) sector_div(block_nr, cache->sectors_per_block);
@@ -1027,7 +1035,7 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
{
return (bio_data_dir(bio) == WRITE) &&
- (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
+ (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}
static void avoid_copy(struct dm_cache_migration *mg)
@@ -1252,7 +1260,7 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
size_t pb_data_size = get_per_bio_data_size(cache);
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
- BUG_ON(bio->bi_size);
+ BUG_ON(bio->bi_iter.bi_size);
if (!pb->req_nr)
remap_to_origin(cache, bio);
else
@@ -1275,9 +1283,9 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
*/
static void process_discard_bio(struct cache *cache, struct bio *bio)
{
- dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
+ dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
cache->discard_block_size);
- dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
+ dm_block_t end_block = bio_end_sector(bio);
dm_block_t b;
end_block = block_div(end_block, cache->discard_block_size);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 81b0fa660452..784695d22fde 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -39,10 +39,8 @@ struct convert_context {
struct completion restart;
struct bio *bio_in;
struct bio *bio_out;
- unsigned int offset_in;
- unsigned int offset_out;
- unsigned int idx_in;
- unsigned int idx_out;
+ struct bvec_iter iter_in;
+ struct bvec_iter iter_out;
sector_t cc_sector;
atomic_t cc_pending;
};
@@ -826,10 +824,10 @@ static void crypt_convert_init(struct crypt_config *cc,
{
ctx->bio_in = bio_in;
ctx->bio_out = bio_out;
- ctx->offset_in = 0;
- ctx->offset_out = 0;
- ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
- ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
+ if (bio_in)
+ ctx->iter_in = bio_in->bi_iter;
+ if (bio_out)
+ ctx->iter_out = bio_out->bi_iter;
ctx->cc_sector = sector + cc->iv_offset;
init_completion(&ctx->restart);
}
@@ -857,8 +855,8 @@ static int crypt_convert_block(struct crypt_config *cc,
struct convert_context *ctx,
struct ablkcipher_request *req)
{
- struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
- struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+ struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
+ struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
struct dm_crypt_request *dmreq;
u8 *iv;
int r;
@@ -869,24 +867,15 @@ static int crypt_convert_block(struct crypt_config *cc,
dmreq->iv_sector = ctx->cc_sector;
dmreq->ctx = ctx;
sg_init_table(&dmreq->sg_in, 1);
- sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
- bv_in->bv_offset + ctx->offset_in);
+ sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
+ bv_in.bv_offset);
sg_init_table(&dmreq->sg_out, 1);
- sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
- bv_out->bv_offset + ctx->offset_out);
+ sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
+ bv_out.bv_offset);
- ctx->offset_in += 1 << SECTOR_SHIFT;
- if (ctx->offset_in >= bv_in->bv_len) {
- ctx->offset_in = 0;
- ctx->idx_in++;
- }
-
- ctx->offset_out += 1 << SECTOR_SHIFT;
- if (ctx->offset_out >= bv_out->bv_len) {
- ctx->offset_out = 0;
- ctx->idx_out++;
- }
+ bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
+ bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
if (cc->iv_gen_ops) {
r = cc->iv_gen_ops->generator(cc, iv, dmreq);
@@ -937,8 +926,7 @@ static int crypt_convert(struct crypt_config *cc,
atomic_set(&ctx->cc_pending, 1);
- while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
- ctx->idx_out < ctx->bio_out->bi_vcnt) {
+ while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
crypt_alloc_req(cc, ctx);
@@ -1021,7 +1009,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
size -= len;
}
- if (!clone->bi_size) {
+ if (!clone->bi_iter.bi_size) {
bio_put(clone);
return NULL;
}
@@ -1161,7 +1149,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
crypt_inc_pending(io);
clone_init(io, clone);
- clone->bi_sector = cc->start + io->sector;
+ clone->bi_iter.bi_sector = cc->start + io->sector;
generic_make_request(clone);
return 0;
@@ -1207,9 +1195,9 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
}
/* crypt_convert should have filled the clone bio */
- BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+ BUG_ON(io->ctx.iter_out.bi_size);
- clone->bi_sector = cc->start + io->sector;
+ clone->bi_iter.bi_sector = cc->start + io->sector;
if (async)
kcryptd_queue_io(io);
@@ -1224,7 +1212,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
struct dm_crypt_io *new_io;
int crypt_finished;
unsigned out_of_pages = 0;
- unsigned remaining = io->base_bio->bi_size;
+ unsigned remaining = io->base_bio->bi_iter.bi_size;
sector_t sector = io->sector;
int r;
@@ -1246,9 +1234,9 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
}
io->ctx.bio_out = clone;
- io->ctx.idx_out = 0;
+ io->ctx.iter_out = clone->bi_iter;
- remaining -= clone->bi_size;
+ remaining -= clone->bi_iter.bi_size;
sector += bio_sectors(clone);
crypt_inc_pending(io);
@@ -1290,8 +1278,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
crypt_inc_pending(new_io);
crypt_convert_init(cc, &new_io->ctx, NULL,
io->base_bio, sector);
- new_io->ctx.idx_in = io->ctx.idx_in;
- new_io->ctx.offset_in = io->ctx.offset_in;
+ new_io->ctx.iter_in = io->ctx.iter_in;
/*
* Fragments after the first use the base_io
@@ -1869,11 +1856,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
bio->bi_bdev = cc->dev->bdev;
if (bio_sectors(bio))
- bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector = cc->start +
+ dm_target_offset(ti, bio->bi_iter.bi_sector);
return DM_MAPIO_REMAPPED;
}
- io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));
+ io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
if (bio_data_dir(io->base_bio) == READ) {
if (kcryptd_io_read(io, GFP_NOWAIT))
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index a8a511c053a5..42c3a27a14cc 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -277,14 +277,15 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
bio->bi_bdev = dc->dev_write->bdev;
if (bio_sectors(bio))
- bio->bi_sector = dc->start_write +
- dm_target_offset(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector = dc->start_write +
+ dm_target_offset(ti, bio->bi_iter.bi_sector);
return delay_bio(dc, dc->write_delay, bio);
}
bio->bi_bdev = dc->dev_read->bdev;
- bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector = dc->start_read +
+ dm_target_offset(ti, bio->bi_iter.bi_sector);
return delay_bio(dc, dc->read_delay, bio);
}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index c80a0ec5f126..b257e46876d3 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -248,7 +248,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
bio->bi_bdev = fc->dev->bdev;
if (bio_sectors(bio))
- bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector =
+ flakey_map_sector(ti, bio->bi_iter.bi_sector);
}
static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
@@ -265,8 +266,8 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
"(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
- (bio_data_dir(bio) == WRITE) ? 'w' : 'r',
- bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes);
+ (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
+ (unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
}
}
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2a20986a2fec..b2b8a10e8427 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -201,26 +201,29 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
/*
* Functions for getting the pages from a bvec.
*/
-static void bvec_get_page(struct dpages *dp,
+static void bio_get_page(struct dpages *dp,
struct page **p, unsigned long *len, unsigned *offset)
{
- struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
- *p = bvec->bv_page;
- *len = bvec->bv_len;
- *offset = bvec->bv_offset;
+ struct bio *bio = dp->context_ptr;
+ struct bio_vec bvec = bio_iovec(bio);
+ *p = bvec.bv_page;
+ *len = bvec.bv_len;
+ *offset = bvec.bv_offset;
}
-static void bvec_next_page(struct dpages *dp)
+static void bio_next_page(struct dpages *dp)
{
- struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
- dp->context_ptr = bvec + 1;
+ struct bio *bio = dp->context_ptr;
+ struct bio_vec bvec = bio_iovec(bio);
+
+ bio_advance(bio, bvec.bv_len);
}
-static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
+static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
- dp->get_page = bvec_get_page;
- dp->next_page = bvec_next_page;
- dp->context_ptr = bvec;
+ dp->get_page = bio_get_page;
+ dp->next_page = bio_next_page;
+ dp->context_ptr = bio;
}
/*
@@ -304,14 +307,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
- bio->bi_sector = where->sector + (where->count - remaining);
+ bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
bio->bi_end_io = endio;
store_io_and_region_in_bio(bio, io, region);
if (rw & REQ_DISCARD) {
num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
- bio->bi_size = num_sectors << SECTOR_SHIFT;
+ bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
remaining -= num_sectors;
} else if (rw & REQ_WRITE_SAME) {
/*
@@ -320,7 +323,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
dp->get_page(dp, &page, &len, &offset);
bio_add_page(bio, page, logical_block_size, offset);
num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
- bio->bi_size = num_sectors << SECTOR_SHIFT;
+ bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
offset = 0;
remaining -= num_sectors;
@@ -457,8 +460,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
break;
- case DM_IO_BVEC:
- bvec_dp_init(dp, io_req->mem.ptr.bvec);
+ case DM_IO_BIO:
+ bio_dp_init(dp, io_req->mem.ptr.bio);
break;
case DM_IO_VMA:
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 4f99d267340c..53e848c10939 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -85,7 +85,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
bio->bi_bdev = lc->dev->bdev;
if (bio_sectors(bio))
- bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector =
+ linear_map_sector(ti, bio->bi_iter.bi_sector);
}
static int linear_map(struct dm_target *ti, struct bio *bio)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9584443c5614..f284e0bfb25f 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -432,7 +432,7 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
region_t region = dm_rh_bio_to_region(ms->rh, bio);
if (log->type->in_sync(log, region, 0))
- return choose_mirror(ms, bio->bi_sector) ? 1 : 0;
+ return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;
return 0;
}
@@ -442,15 +442,15 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
*/
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
- if (unlikely(!bio->bi_size))
+ if (unlikely(!bio->bi_iter.bi_size))
return 0;
- return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
+ return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
}
static void map_bio(struct mirror *m, struct bio *bio)
{
bio->bi_bdev = m->dev->bdev;
- bio->bi_sector = map_sector(m, bio);
+ bio->bi_iter.bi_sector = map_sector(m, bio);
}
static void map_region(struct dm_io_region *io, struct mirror *m,
@@ -526,8 +526,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
struct dm_io_region io;
struct dm_io_request io_req = {
.bi_rw = READ,
- .mem.type = DM_IO_BVEC,
- .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+ .mem.type = DM_IO_BIO,
+ .mem.ptr.bio = bio,
.notify.fn = read_callback,
.notify.context = bio,
.client = m->ms->io_client,
@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
* We can only read balance if the region is in sync.
*/
if (likely(region_in_sync(ms, region, 1)))
- m = choose_mirror(ms, bio->bi_sector);
+ m = choose_mirror(ms, bio->bi_iter.bi_sector);
else if (m && atomic_read(&m->error_count))
m = NULL;
@@ -629,8 +629,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
struct mirror *m;
struct dm_io_request io_req = {
.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
- .mem.type = DM_IO_BVEC,
- .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+ .mem.type = DM_IO_BIO,
+ .mem.ptr.bio = bio,
.notify.fn = write_callback,
.notify.context = bio,
.client = ms->io_client,
@@ -1181,7 +1181,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
* The region is in-sync and we can perform reads directly.
* Store enough information so we can retry if it fails.
*/
- m = choose_mirror(ms, bio->bi_sector);
+ m = choose_mirror(ms, bio->bi_iter.bi_sector);
if (unlikely(!m))
return -EIO;
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 69732e03eb34..b929fd5f4984 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -126,7 +126,8 @@ EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
{
- return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
+ return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
+ rh->target_begin);
}
EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 717718558bd9..ebddef5237e4 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1438,6 +1438,7 @@ out:
if (full_bio) {
full_bio->bi_end_io = pe->full_bio_end_io;
full_bio->bi_private = pe->full_bio_private;
+ atomic_inc(&full_bio->bi_remaining);
}
free_pending_exception(pe);
@@ -1619,11 +1620,10 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
struct bio *bio, chunk_t chunk)
{
bio->bi_bdev = s->cow->bdev;
- bio->bi_sector = chunk_to_sector(s->store,
- dm_chunk_number(e->new_chunk) +
- (chunk - e->old_chunk)) +
- (bio->bi_sector &
- s->store->chunk_mask);
+ bio->bi_iter.bi_sector =
+ chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
+ (chunk - e->old_chunk)) +
+ (bio->bi_iter.bi_sector & s->store->chunk_mask);
}
static int snapshot_map(struct dm_target *ti, struct bio *bio)
@@ -1641,7 +1641,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
- chunk = sector_to_chunk(s->store, bio->bi_sector);
+ chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
/* Full snapshots are not usable */
/* To get here the table must be live so s->active is always set. */
@@ -1702,7 +1702,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
r = DM_MAPIO_SUBMITTED;
if (!pe->started &&
- bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
+ bio->bi_iter.bi_size ==
+ (s->store->chunk_size << SECTOR_SHIFT)) {
pe->started = 1;
up_write(&s->lock);
start_full_bio(pe, bio);
@@ -1758,7 +1759,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
- chunk = sector_to_chunk(s->store, bio->bi_sector);
+ chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
down_write(&s->lock);
@@ -2095,7 +2096,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
if (o)
- r = __origin_write(&o->snapshots, bio->bi_sector, bio);
+ r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
up_read(&_origins_lock);
return r;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 73c1712dad96..d1600d2aa2e2 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -259,13 +259,15 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
{
sector_t begin, end;
- stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
+ stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
+ target_stripe, &begin);
stripe_map_range_sector(sc, bio_end_sector(bio),
target_stripe, &end);
if (begin < end) {
bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
- bio->bi_sector = begin + sc->stripe[target_stripe].physical_start;
- bio->bi_size = to_bytes(end - begin);
+ bio->bi_iter.bi_sector = begin +
+ sc->stripe[target_stripe].physical_start;
+ bio->bi_iter.bi_size = to_bytes(end - begin);
return DM_MAPIO_REMAPPED;
} else {
/* The range doesn't map to the target stripe */
@@ -293,9 +295,10 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
return stripe_map_range(sc, bio, target_bio_nr);
}
- stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
+ stripe_map_sector(sc, bio->bi_iter.bi_sector,
+ &stripe, &bio->bi_iter.bi_sector);
- bio->bi_sector += sc->stripe[stripe].physical_start;
+ bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
bio->bi_bdev = sc->stripe[stripe].dev->bdev;
return DM_MAPIO_REMAPPED;
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index ff9ac4be4721..09a688b3d48c 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -311,11 +311,11 @@ error:
static int switch_map(struct dm_target *ti, struct bio *bio)
{
struct switch_ctx *sctx = ti->private;
- sector_t offset = dm_target_offset(ti, bio->bi_sector);
+ sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
unsigned path_nr = switch_get_path_nr(sctx, offset);
bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev;
- bio->bi_sector = sctx->path_list[path_nr].start + offset;
+ bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
return DM_MAPIO_REMAPPED;
}
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 726228b33a01..faaf944597ab 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -414,7 +414,7 @@ static bool block_size_is_power_of_two(struct pool *pool)
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
- sector_t block_nr = bio->bi_sector;
+ sector_t block_nr = bio->bi_iter.bi_sector;
if (block_size_is_power_of_two(pool))
block_nr >>= pool->sectors_per_block_shift;
@@ -427,14 +427,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
struct pool *pool = tc->pool;
- sector_t bi_sector = bio->bi_sector;
+ sector_t bi_sector = bio->bi_iter.bi_sector;
bio->bi_bdev = tc->pool_dev->bdev;
if (block_size_is_power_of_two(pool))
- bio->bi_sector = (block << pool->sectors_per_block_shift) |
- (bi_sector & (pool->sectors_per_block - 1));
+ bio->bi_iter.bi_sector =
+ (block << pool->sectors_per_block_shift) |
+ (bi_sector & (pool->sectors_per_block - 1));
else
- bio->bi_sector = (block * pool->sectors_per_block) +
+ bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
sector_div(bi_sector, pool->sectors_per_block);
}
@@ -612,8 +613,10 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
- if (m->bio)
+ if (m->bio) {
m->bio->bi_end_io = m->saved_bi_end_io;
+ atomic_inc(&m->bio->bi_remaining);
+ }
cell_error(m->tc->pool, m->cell);
list_del(&m->list);
mempool_free(m, m->tc->pool->mapping_pool);
@@ -627,8 +630,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
int r;
bio = m->bio;
- if (bio)
+ if (bio) {
bio->bi_end_io = m->saved_bi_end_io;
+ atomic_inc(&bio->bi_remaining);
+ }
if (m->err) {
cell_error(pool, m->cell);
@@ -731,7 +736,8 @@ static void process_prepared(struct pool *pool, struct list_head *head,
*/
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
- return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
+ return bio->bi_iter.bi_size ==
+ (pool->sectors_per_block << SECTOR_SHIFT);
}
static int io_overwrites_block(struct pool *pool, struct bio *bio)
@@ -1136,7 +1142,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
if (bio_detain(pool, &key, bio, &cell))
return;
- if (bio_data_dir(bio) == WRITE && bio->bi_size)
+ if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
break_sharing(tc, bio, block, &key, lookup_result, cell);
else {
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -1159,7 +1165,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
/*
* Remap empty bios (flushes) immediately, without provisioning.
*/
- if (!bio->bi_size) {
+ if (!bio->bi_iter.bi_size) {
inc_all_io_entry(pool, bio);
cell_defer_no_holder(tc, cell);
@@ -1258,7 +1264,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
switch (r) {
case 0:
- if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
+ if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
handle_unserviceable_bio(tc->pool, bio);
else {
inc_all_io_entry(tc->pool, bio);
@@ -2939,7 +2945,7 @@ out_unlock:
static int thin_map(struct dm_target *ti, struct bio *bio)
{
- bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
+ bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
return thin_bio_map(ti, bio);
}
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 4b7941db3aff..796007a5e0e1 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -73,15 +73,10 @@ struct dm_verity_io {
sector_t block;
unsigned n_blocks;
- /* saved bio vector */
- struct bio_vec *io_vec;
- unsigned io_vec_size;
+ struct bvec_iter iter;
struct work_struct work;
- /* A space for short vectors; longer vectors are allocated separately. */
- struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE];
-
/*
* Three variably-size fields follow this struct:
*
@@ -284,9 +279,10 @@ release_ret_r:
static int verity_verify_io(struct dm_verity_io *io)
{
struct dm_verity *v = io->v;
+ struct bio *bio = dm_bio_from_per_bio_data(io,
+ v->ti->per_bio_data_size);
unsigned b;
int i;
- unsigned vector = 0, offset = 0;
for (b = 0; b < io->n_blocks; b++) {
struct shash_desc *desc;
@@ -336,31 +332,22 @@ test_block_hash:
}
todo = 1 << v->data_dev_block_bits;
- do {
- struct bio_vec *bv;
+ while (io->iter.bi_size) {
u8 *page;
- unsigned len;
-
- BUG_ON(vector >= io->io_vec_size);
- bv = &io->io_vec[vector];
- page = kmap_atomic(bv->bv_page);
- len = bv->bv_len - offset;
- if (likely(len >= todo))
- len = todo;
- r = crypto_shash_update(desc,
- page + bv->bv_offset + offset, len);
+ struct bio_vec bv = bio_iter_iovec(bio, io->iter);
+
+ page = kmap_atomic(bv.bv_page);
+ r = crypto_shash_update(desc, page + bv.bv_offset,
+ bv.bv_len);
kunmap_atomic(page);
+
if (r < 0) {
DMERR("crypto_shash_update failed: %d", r);
return r;
}
- offset += len;
- if (likely(offset == bv->bv_len)) {
- offset = 0;
- vector++;
- }
- todo -= len;
- } while (todo);
+
+ bio_advance_iter(bio, &io->iter, bv.bv_len);
+ }
if (!v->version) {
r = crypto_shash_update(desc, v->salt, v->salt_size);
@@ -383,8 +370,6 @@ test_block_hash:
return -EIO;
}
}
- BUG_ON(vector != io->io_vec_size);
- BUG_ON(offset);
return 0;
}
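
Note: the rewritten loop is the immutable-biovec idiom in full. verity saves a private copy of the bio's bvec_iter at submission (io->iter, set in verity_map() below), then walks that copy with bio_iter_iovec() and bio_advance_iter(); the manual vector/offset bookkeeping, the saved io_vec array, and the mempool behind it all become unnecessary because the bio's own bi_iter is never disturbed. A simplified userspace model of the walk; the helpers borrow the kernel names but are stand-ins, not the real implementations:

#include <stdio.h>

struct bio_vec { const char *base; unsigned len; };
struct bvec_iter { unsigned idx; unsigned done; unsigned size; };

static const struct bio_vec vecs[] = {
	{ "hello ", 6 }, { "immutable ", 10 }, { "biovecs", 7 },
};

/* Rest of the current bvec, from the iterator's position. */
static struct bio_vec bio_iter_iovec(struct bvec_iter it)
{
	struct bio_vec bv = {
		vecs[it.idx].base + it.done,
		vecs[it.idx].len - it.done,
	};
	return bv;
}

/* Consume bytes: shrink the residual count, step to the next bvec
 * when the current one is finished. */
static void bio_advance_iter(struct bvec_iter *it, unsigned bytes)
{
	it->size -= bytes;
	it->done += bytes;
	if (it->done == vecs[it->idx].len) {
		it->idx++;
		it->done = 0;
	}
}

int main(void)
{
	struct bvec_iter it = { 0, 0, 23 };	/* io->iter = bio->bi_iter */

	while (it.size) {			/* while (io->iter.bi_size) */
		struct bio_vec bv = bio_iter_iovec(it);

		fwrite(bv.base, 1, bv.len, stdout); /* the shash_update step */
		bio_advance_iter(&it, bv.len);
	}
	putchar('\n');
	return 0;
}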
@@ -400,10 +385,7 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_private = io->orig_bi_private;
- if (io->io_vec != io->io_vec_inline)
- mempool_free(io->io_vec, v->vec_mempool);
-
- bio_endio(bio, error);
+ bio_endio_nodec(bio, error);
}
static void verity_work(struct work_struct *w)
@@ -493,9 +475,9 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
struct dm_verity_io *io;
bio->bi_bdev = v->data_dev->bdev;
- bio->bi_sector = verity_map_sector(v, bio->bi_sector);
+ bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
- if (((unsigned)bio->bi_sector | bio_sectors(bio)) &
+ if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
DMERR_LIMIT("unaligned io");
return -EIO;
@@ -514,18 +496,12 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
io->v = v;
io->orig_bi_end_io = bio->bi_end_io;
io->orig_bi_private = bio->bi_private;
- io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
- io->n_blocks = bio->bi_size >> v->data_dev_block_bits;
+ io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
+ io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
bio->bi_end_io = verity_end_io;
bio->bi_private = io;
- io->io_vec_size = bio_segments(bio);
- if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
- io->io_vec = io->io_vec_inline;
- else
- io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO);
- memcpy(io->io_vec, bio_iovec(bio),
- io->io_vec_size * sizeof(struct bio_vec));
+ io->iter = bio->bi_iter;
verity_submit_prefetch(v, io);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b49c76284241..8c53b09b9a2c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -575,7 +575,7 @@ static void start_io_acct(struct dm_io *io)
atomic_inc_return(&md->pending[rw]));
if (unlikely(dm_stats_used(&md->stats)))
- dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+ dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
bio_sectors(bio), false, 0, &io->stats_aux);
}
@@ -593,7 +593,7 @@ static void end_io_acct(struct dm_io *io)
part_stat_unlock();
if (unlikely(dm_stats_used(&md->stats)))
- dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+ dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
bio_sectors(bio), true, duration, &io->stats_aux);
/*
@@ -742,7 +742,7 @@ static void dec_pending(struct dm_io *io, int error)
if (io_error == DM_ENDIO_REQUEUE)
return;
- if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
+ if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
/*
* Preflush done for flush with data, reissue
* without REQ_FLUSH.
@@ -797,7 +797,7 @@ static void end_clone_bio(struct bio *clone, int error)
struct dm_rq_clone_bio_info *info = clone->bi_private;
struct dm_rq_target_io *tio = info->tio;
struct bio *bio = info->orig;
- unsigned int nr_bytes = info->orig->bi_size;
+ unsigned int nr_bytes = info->orig->bi_iter.bi_size;
bio_put(clone);
@@ -1128,7 +1128,7 @@ static void __map_bio(struct dm_target_io *tio)
* this io.
*/
atomic_inc(&tio->io->io_count);
- sector = clone->bi_sector;
+ sector = clone->bi_iter.bi_sector;
r = ti->type->map(ti, clone);
if (r == DM_MAPIO_REMAPPED) {
/* the bio has been remapped so dispatch it */
@@ -1155,76 +1155,32 @@ struct clone_info {
struct dm_io *io;
sector_t sector;
sector_t sector_count;
- unsigned short idx;
};
static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
{
- bio->bi_sector = sector;
- bio->bi_size = to_bytes(len);
-}
-
-static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
-{
- bio->bi_idx = idx;
- bio->bi_vcnt = idx + bv_count;
- bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-}
-
-static void clone_bio_integrity(struct bio *bio, struct bio *clone,
- unsigned short idx, unsigned len, unsigned offset,
- unsigned trim)
-{
- if (!bio_integrity(bio))
- return;
-
- bio_integrity_clone(clone, bio, GFP_NOIO);
-
- if (trim)
- bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
-}
-
-/*
- * Creates a little bio that just does part of a bvec.
- */
-static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
- sector_t sector, unsigned short idx,
- unsigned offset, unsigned len)
-{
- struct bio *clone = &tio->clone;
- struct bio_vec *bv = bio->bi_io_vec + idx;
-
- *clone->bi_io_vec = *bv;
-
- bio_setup_sector(clone, sector, len);
-
- clone->bi_bdev = bio->bi_bdev;
- clone->bi_rw = bio->bi_rw;
- clone->bi_vcnt = 1;
- clone->bi_io_vec->bv_offset = offset;
- clone->bi_io_vec->bv_len = clone->bi_size;
- clone->bi_flags |= 1 << BIO_CLONED;
-
- clone_bio_integrity(bio, clone, idx, len, offset, 1);
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_iter.bi_size = to_bytes(len);
}
/*
* Creates a bio that consists of range of complete bvecs.
*/
static void clone_bio(struct dm_target_io *tio, struct bio *bio,
- sector_t sector, unsigned short idx,
- unsigned short bv_count, unsigned len)
+ sector_t sector, unsigned len)
{
struct bio *clone = &tio->clone;
- unsigned trim = 0;
- __bio_clone(clone, bio);
- bio_setup_sector(clone, sector, len);
- bio_setup_bv(clone, idx, bv_count);
+ __bio_clone_fast(clone, bio);
+
+ if (bio_integrity(bio))
+ bio_integrity_clone(clone, bio, GFP_NOIO);
+
+ bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+ clone->bi_iter.bi_size = to_bytes(len);
- if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
- trim = 1;
- clone_bio_integrity(bio, clone, idx, len, 0, trim);
+ if (bio_integrity(bio))
+ bio_integrity_trim(clone, 0, len);
}
static struct dm_target_io *alloc_tio(struct clone_info *ci,
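
Note: clone_bio() is where the series pays off for dm. __bio_clone_fast() shares the parent's biovec and copies its bi_iter; bio_advance() plus a bi_size clamp then select an arbitrary byte range, even one starting mid-bvec, which is why clone_split_bio() and the idx/bv_count/offset plumbing could be deleted outright. A minimal model of that select-a-range sequence (helper bodies simplified; the real __bio_clone_fast also shares bi_io_vec and copies flags):

#include <stdio.h>

typedef unsigned long long sector_t;

struct bvec_iter { sector_t bi_sector; unsigned bi_size; };
struct bio { struct bvec_iter bi_iter; };

/* Copy the parent's position; in the kernel the biovec is shared too. */
static void __bio_clone_fast(struct bio *clone, const struct bio *bio)
{
	clone->bi_iter = bio->bi_iter;
}

/* Move the start of the clone forward without touching the vector. */
static void bio_advance(struct bio *bio, unsigned bytes)
{
	bio->bi_iter.bi_sector += bytes >> 9;
	bio->bi_iter.bi_size -= bytes;
}

int main(void)
{
	struct bio parent = { { 1000, 64 << 10 } };	/* 64 KiB at 1000 */
	struct bio clone;
	sector_t sector = 1016;		/* requested start */
	unsigned len = 16;		/* requested length, sectors */

	__bio_clone_fast(&clone, &parent);
	bio_advance(&clone,
		    (unsigned)((sector - clone.bi_iter.bi_sector) << 9));
	clone.bi_iter.bi_size = len << 9;

	printf("clone: sector %llu, %u bytes\n",
	       clone.bi_iter.bi_sector, clone.bi_iter.bi_size);
	return 0;
}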
@@ -1257,7 +1213,7 @@ static void __clone_and_map_simple_bio(struct clone_info *ci,
* ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
* and discard, so no need for concern about wasted bvec allocations.
*/
- __bio_clone(clone, ci->bio);
+ __bio_clone_fast(clone, ci->bio);
if (len)
bio_setup_sector(clone, ci->sector, len);
@@ -1286,10 +1242,7 @@ static int __send_empty_flush(struct clone_info *ci)
}
static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
- sector_t sector, int nr_iovecs,
- unsigned short idx, unsigned short bv_count,
- unsigned offset, unsigned len,
- unsigned split_bvec)
+ sector_t sector, unsigned len)
{
struct bio *bio = ci->bio;
struct dm_target_io *tio;
@@ -1303,11 +1256,8 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti
num_target_bios = ti->num_write_bios(ti, bio);
for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
- tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr);
- if (split_bvec)
- clone_split_bio(tio, bio, sector, idx, offset, len);
- else
- clone_bio(tio, bio, sector, idx, bv_count, len);
+ tio = alloc_tio(ci, ti, 0, target_bio_nr);
+ clone_bio(tio, bio, sector, len);
__map_bio(tio);
}
}
@@ -1379,68 +1329,13 @@ static int __send_write_same(struct clone_info *ci)
}
/*
- * Find maximum number of sectors / bvecs we can process with a single bio.
- */
-static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
-{
- struct bio *bio = ci->bio;
- sector_t bv_len, total_len = 0;
-
- for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
- bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
-
- if (bv_len > max)
- break;
-
- max -= bv_len;
- total_len += bv_len;
- }
-
- return total_len;
-}
-
-static int __split_bvec_across_targets(struct clone_info *ci,
- struct dm_target *ti, sector_t max)
-{
- struct bio *bio = ci->bio;
- struct bio_vec *bv = bio->bi_io_vec + ci->idx;
- sector_t remaining = to_sector(bv->bv_len);
- unsigned offset = 0;
- sector_t len;
-
- do {
- if (offset) {
- ti = dm_table_find_target(ci->map, ci->sector);
- if (!dm_target_is_valid(ti))
- return -EIO;
-
- max = max_io_len(ci->sector, ti);
- }
-
- len = min(remaining, max);
-
- __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
- bv->bv_offset + offset, len, 1);
-
- ci->sector += len;
- ci->sector_count -= len;
- offset += to_bytes(len);
- } while (remaining -= len);
-
- ci->idx++;
-
- return 0;
-}
-
-/*
* Select the correct strategy for processing a non-flush bio.
*/
static int __split_and_process_non_flush(struct clone_info *ci)
{
struct bio *bio = ci->bio;
struct dm_target *ti;
- sector_t len, max;
- int idx;
+ unsigned len;
if (unlikely(bio->bi_rw & REQ_DISCARD))
return __send_discard(ci);
@@ -1451,41 +1346,14 @@ static int __split_and_process_non_flush(struct clone_info *ci)
if (!dm_target_is_valid(ti))
return -EIO;
- max = max_io_len(ci->sector, ti);
-
- /*
- * Optimise for the simple case where we can do all of
- * the remaining io with a single clone.
- */
- if (ci->sector_count <= max) {
- __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
- ci->idx, bio->bi_vcnt - ci->idx, 0,
- ci->sector_count, 0);
- ci->sector_count = 0;
- return 0;
- }
-
- /*
- * There are some bvecs that don't span targets.
- * Do as many of these as possible.
- */
- if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
- len = __len_within_target(ci, max, &idx);
-
- __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
- ci->idx, idx - ci->idx, 0, len, 0);
+ len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
- ci->sector += len;
- ci->sector_count -= len;
- ci->idx = idx;
+ __clone_and_map_data_bio(ci, ti, ci->sector, len);
- return 0;
- }
+ ci->sector += len;
+ ci->sector_count -= len;
- /*
- * Handle a bvec that must be split between two or more targets.
- */
- return __split_bvec_across_targets(ci, ti, max);
+ return 0;
}
/*
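
Note: with clones able to begin mid-bvec, the old three-way split strategy (whole-bio fast path, whole-bvec batching, single-bvec splitting across targets) collapses to one step per iteration: take min(target headroom, remaining sectors) and clone it. A sketch of the resulting loop shape; the 256-sector target boundary is made up for illustration:

#include <stdio.h>

typedef unsigned long long sector_t;

/* Headroom to the next (hypothetical) target boundary. */
static sector_t max_io_len(sector_t sector)
{
	const sector_t boundary = 256;

	return boundary - (sector & (boundary - 1));
}

int main(void)
{
	sector_t sector = 100, count = 1000;	/* a 1000-sector bio */

	while (count) {
		sector_t len = max_io_len(sector);

		if (len > count)
			len = count;	/* the min_t() in the hunk above */
		printf("clone: %llu sectors at %llu\n", len, sector);
		sector += len;
		count -= len;
	}
	return 0;
}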
@@ -1510,8 +1378,7 @@ static void __split_and_process_bio(struct mapped_device *md,
ci.io->bio = bio;
ci.io->md = md;
spin_lock_init(&ci.io->endio_lock);
- ci.sector = bio->bi_sector;
- ci.idx = bio->bi_idx;
+ ci.sector = bio->bi_iter.bi_sector;
start_io_acct(ci.io);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 3193aefe982b..e8b4574956c7 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -74,8 +74,8 @@ static void faulty_fail(struct bio *bio, int error)
{
struct bio *b = bio->bi_private;
- b->bi_size = bio->bi_size;
- b->bi_sector = bio->bi_sector;
+ b->bi_iter.bi_size = bio->bi_iter.bi_size;
+ b->bi_iter.bi_sector = bio->bi_iter.bi_sector;
bio_put(bio);
@@ -185,26 +185,31 @@ static void make_request(struct mddev *mddev, struct bio *bio)
return;
}
- if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE))
+ if (check_sector(conf, bio->bi_iter.bi_sector,
+ bio_end_sector(bio), WRITE))
failit = 1;
if (check_mode(conf, WritePersistent)) {
- add_sector(conf, bio->bi_sector, WritePersistent);
+ add_sector(conf, bio->bi_iter.bi_sector,
+ WritePersistent);
failit = 1;
}
if (check_mode(conf, WriteTransient))
failit = 1;
} else {
/* read request */
- if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ))
+ if (check_sector(conf, bio->bi_iter.bi_sector,
+ bio_end_sector(bio), READ))
failit = 1;
if (check_mode(conf, ReadTransient))
failit = 1;
if (check_mode(conf, ReadPersistent)) {
- add_sector(conf, bio->bi_sector, ReadPersistent);
+ add_sector(conf, bio->bi_iter.bi_sector,
+ ReadPersistent);
failit = 1;
}
if (check_mode(conf, ReadFixable)) {
- add_sector(conf, bio->bi_sector, ReadFixable);
+ add_sector(conf, bio->bi_iter.bi_sector,
+ ReadFixable);
failit = 1;
}
}
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index f03fabd2b37b..56f534b4a2d2 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -288,65 +288,65 @@ static int linear_stop (struct mddev *mddev)
static void linear_make_request(struct mddev *mddev, struct bio *bio)
{
+ char b[BDEVNAME_SIZE];
struct dev_info *tmp_dev;
- sector_t start_sector;
+ struct bio *split;
+ sector_t start_sector, end_sector, data_offset;
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
return;
}
- rcu_read_lock();
- tmp_dev = which_dev(mddev, bio->bi_sector);
- start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
-
-
- if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
- || (bio->bi_sector < start_sector))) {
- char b[BDEVNAME_SIZE];
-
- printk(KERN_ERR
- "md/linear:%s: make_request: Sector %llu out of bounds on "
- "dev %s: %llu sectors, offset %llu\n",
- mdname(mddev),
- (unsigned long long)bio->bi_sector,
- bdevname(tmp_dev->rdev->bdev, b),
- (unsigned long long)tmp_dev->rdev->sectors,
- (unsigned long long)start_sector);
- rcu_read_unlock();
- bio_io_error(bio);
- return;
- }
- if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) {
- /* This bio crosses a device boundary, so we have to
- * split it.
- */
- struct bio_pair *bp;
- sector_t end_sector = tmp_dev->end_sector;
+ do {
+ rcu_read_lock();
- rcu_read_unlock();
-
- bp = bio_split(bio, end_sector - bio->bi_sector);
+ tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
+ start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+ end_sector = tmp_dev->end_sector;
+ data_offset = tmp_dev->rdev->data_offset;
+ bio->bi_bdev = tmp_dev->rdev->bdev;
- linear_make_request(mddev, &bp->bio1);
- linear_make_request(mddev, &bp->bio2);
- bio_pair_release(bp);
- return;
- }
-
- bio->bi_bdev = tmp_dev->rdev->bdev;
- bio->bi_sector = bio->bi_sector - start_sector
- + tmp_dev->rdev->data_offset;
- rcu_read_unlock();
+ rcu_read_unlock();
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
- /* Just ignore it */
- bio_endio(bio, 0);
- return;
- }
+ if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
+ bio->bi_iter.bi_sector < start_sector))
+ goto out_of_bounds;
+
+ if (unlikely(bio_end_sector(bio) > end_sector)) {
+ /* This bio crosses a device boundary, so we have to
+ * split it.
+ */
+ split = bio_split(bio, end_sector -
+ bio->bi_iter.bi_sector,
+ GFP_NOIO, fs_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
- generic_make_request(bio);
+ split->bi_iter.bi_sector = split->bi_iter.bi_sector -
+ start_sector + data_offset;
+
+ if (unlikely((split->bi_rw & REQ_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+ /* Just ignore it */
+ bio_endio(split, 0);
+ } else
+ generic_make_request(split);
+ } while (split != bio);
+ return;
+
+out_of_bounds:
+ printk(KERN_ERR
+ "md/linear:%s: make_request: Sector %llu out of bounds on "
+ "dev %s: %llu sectors, offset %llu\n",
+ mdname(mddev),
+ (unsigned long long)bio->bi_iter.bi_sector,
+ bdevname(tmp_dev->rdev->bdev, b),
+ (unsigned long long)tmp_dev->rdev->sectors,
+ (unsigned long long)start_sector);
+ bio_io_error(bio);
}
static void linear_status (struct seq_file *seq, struct mddev *mddev)
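
Note: linear_make_request() now loops instead of recursing: bio_split() peels off the head of the bio up to the device boundary, bio_chain() ties each piece to the parent so the parent completes only after every piece does, and the final iteration submits the parent itself (split == bio ends the loop). An arithmetic-only model of how a bio spanning devices gets carved up; the device layout here is hypothetical:

#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
	sector_t sector = 900, sectors = 700;	/* bio spanning devices */
	const sector_t dev_end[] = { 1000, 1500, 2000 };
	int i = 0;

	while (sectors) {
		sector_t split;

		while (sector >= dev_end[i])
			i++;			/* the which_dev() lookup */
		split = dev_end[i] - sector;	/* room on this device */
		if (split > sectors)
			split = sectors;	/* last piece: no split */
		printf("dev %d: %llu sectors at %llu\n", i, split, sector);
		sector += split;
		sectors -= split;
	}
	return 0;
}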
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 40c531359a15..4ad5cc4e63e8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -393,7 +393,7 @@ static void md_submit_flush_data(struct work_struct *ws)
struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct bio *bio = mddev->flush_bio;
- if (bio->bi_size == 0)
+ if (bio->bi_iter.bi_size == 0)
/* an empty barrier - all done */
bio_endio(bio, 0);
else {
@@ -754,7 +754,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
@@ -782,18 +782,16 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
int ret;
- rw |= REQ_SYNC;
-
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
rdev->meta_bdev : rdev->bdev;
if (metadata_op)
- bio->bi_sector = sector + rdev->sb_start;
+ bio->bi_iter.bi_sector = sector + rdev->sb_start;
else if (rdev->mddev->reshape_position != MaxSector &&
(rdev->mddev->reshape_backwards ==
(sector >= rdev->mddev->reshape_position)))
- bio->bi_sector = sector + rdev->new_data_offset;
+ bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
else
- bio->bi_sector = sector + rdev->data_offset;
+ bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0);
submit_bio_wait(rw, bio);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 1642eae75a33..849ad39f547b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -100,7 +100,7 @@ static void multipath_end_request(struct bio *bio, int error)
md_error (mp_bh->mddev, rdev);
printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
bdevname(rdev->bdev,b),
- (unsigned long long)bio->bi_sector);
+ (unsigned long long)bio->bi_iter.bi_sector);
multipath_reschedule_retry(mp_bh);
} else
multipath_end_bh_io(mp_bh, error);
@@ -132,7 +132,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
multipath = conf->multipaths + mp_bh->path;
mp_bh->bio = *bio;
- mp_bh->bio.bi_sector += multipath->rdev->data_offset;
+ mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request;
@@ -355,21 +355,22 @@ static void multipathd(struct md_thread *thread)
spin_unlock_irqrestore(&conf->device_lock, flags);
bio = &mp_bh->bio;
- bio->bi_sector = mp_bh->master_bio->bi_sector;
+ bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
if ((mp_bh->path = multipath_map (conf))<0) {
printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
" error for block %llu\n",
bdevname(bio->bi_bdev,b),
- (unsigned long long)bio->bi_sector);
+ (unsigned long long)bio->bi_iter.bi_sector);
multipath_end_bh_io(mp_bh, -EIO);
} else {
printk(KERN_ERR "multipath: %s: redirecting sector %llu"
" to another IO path\n",
bdevname(bio->bi_bdev,b),
- (unsigned long long)bio->bi_sector);
+ (unsigned long long)bio->bi_iter.bi_sector);
*bio = *(mp_bh->master_bio);
- bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
+ bio->bi_iter.bi_sector +=
+ conf->multipaths[mp_bh->path].rdev->data_offset;
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
bio->bi_end_io = multipath_end_request;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c4d420b7d2f4..407a99e46f69 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -501,10 +501,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
unsigned int chunk_sects, struct bio *bio)
{
if (likely(is_power_of_2(chunk_sects))) {
- return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
+ return chunk_sects >=
+ ((bio->bi_iter.bi_sector & (chunk_sects-1))
+ bio_sectors(bio));
} else{
- sector_t sector = bio->bi_sector;
+ sector_t sector = bio->bi_iter.bi_sector;
return chunk_sects >= (sector_div(sector, chunk_sects)
+ bio_sectors(bio));
}
@@ -512,64 +513,44 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
- unsigned int chunk_sects;
- sector_t sector_offset;
struct strip_zone *zone;
struct md_rdev *tmp_dev;
+ struct bio *split;
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
return;
}
- chunk_sects = mddev->chunk_sectors;
- if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
- sector_t sector = bio->bi_sector;
- struct bio_pair *bp;
- /* Sanity check -- queue functions should prevent this happening */
- if (bio_segments(bio) > 1)
- goto bad_map;
- /* This is a one page bio that upper layers
- * refuse to split for us, so we need to split it.
- */
- if (likely(is_power_of_2(chunk_sects)))
- bp = bio_split(bio, chunk_sects - (sector &
- (chunk_sects-1)));
- else
- bp = bio_split(bio, chunk_sects -
- sector_div(sector, chunk_sects));
- raid0_make_request(mddev, &bp->bio1);
- raid0_make_request(mddev, &bp->bio2);
- bio_pair_release(bp);
- return;
- }
+ do {
+ sector_t sector = bio->bi_iter.bi_sector;
+ unsigned chunk_sects = mddev->chunk_sectors;
- sector_offset = bio->bi_sector;
- zone = find_zone(mddev->private, &sector_offset);
- tmp_dev = map_sector(mddev, zone, bio->bi_sector,
- &sector_offset);
- bio->bi_bdev = tmp_dev->bdev;
- bio->bi_sector = sector_offset + zone->dev_start +
- tmp_dev->data_offset;
-
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
- /* Just ignore it */
- bio_endio(bio, 0);
- return;
- }
+ unsigned sectors = chunk_sects -
+ (likely(is_power_of_2(chunk_sects))
+ ? (sector & (chunk_sects-1))
+ : sector_div(sector, chunk_sects));
- generic_make_request(bio);
- return;
-
-bad_map:
- printk("md/raid0:%s: make_request bug: can't convert block across chunks"
- " or bigger than %dk %llu %d\n",
- mdname(mddev), chunk_sects / 2,
- (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
+ if (sectors < bio_sectors(bio)) {
+ split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
- bio_io_error(bio);
- return;
+ zone = find_zone(mddev->private, &sector);
+ tmp_dev = map_sector(mddev, zone, sector, &sector);
+ split->bi_bdev = tmp_dev->bdev;
+ split->bi_iter.bi_sector = sector + zone->dev_start +
+ tmp_dev->data_offset;
+
+ if (unlikely((split->bi_rw & REQ_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+ /* Just ignore it */
+ bio_endio(split, 0);
+ } else
+ generic_make_request(split);
+ } while (split != bio);
}
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
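
Note: raid0 drops the old single-page restriction (the deleted "Sanity check" path) because bio_split() now handles arbitrarily large bios; each pass of the new loop just computes the distance to the next chunk boundary and splits there. That computation, checked standalone (the kernel's non-power-of-two path uses sector_div() rather than %):

#include <stdio.h>
#include <assert.h>

typedef unsigned long long sector_t;

/* Sectors remaining in the current chunk: mask for power-of-two chunk
 * sizes, division otherwise. */
static unsigned to_boundary(sector_t sector, unsigned chunk_sects, int pow2)
{
	if (pow2)
		return chunk_sects - (unsigned)(sector & (chunk_sects - 1));
	return chunk_sects - (unsigned)(sector % chunk_sects);
}

int main(void)
{
	/* Both forms agree whenever the chunk size is a power of two. */
	for (sector_t s = 0; s < 4096; s++)
		assert(to_boundary(s, 512, 1) == to_boundary(s, 512, 0));
	printf("sector 1000, 512-sector chunks: %u sectors to boundary\n",
	       to_boundary(1000, 512, 1));
	return 0;
}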
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a49cfcc7a343..fd3a2a14b587 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -229,7 +229,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
int done;
struct r1conf *conf = r1_bio->mddev->private;
sector_t start_next_window = r1_bio->start_next_window;
- sector_t bi_sector = bio->bi_sector;
+ sector_t bi_sector = bio->bi_iter.bi_sector;
if (bio->bi_phys_segments) {
unsigned long flags;
@@ -265,9 +265,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
(bio_data_dir(bio) == WRITE) ? "write" : "read",
- (unsigned long long) bio->bi_sector,
- (unsigned long long) bio->bi_sector +
- bio_sectors(bio) - 1);
+ (unsigned long long) bio->bi_iter.bi_sector,
+ (unsigned long long) bio_end_sector(bio) - 1);
call_bio_endio(r1_bio);
}
@@ -466,9 +465,8 @@ static void raid1_end_write_request(struct bio *bio, int error)
struct bio *mbio = r1_bio->master_bio;
pr_debug("raid1: behind end write sectors"
" %llu-%llu\n",
- (unsigned long long) mbio->bi_sector,
- (unsigned long long) mbio->bi_sector +
- bio_sectors(mbio) - 1);
+ (unsigned long long) mbio->bi_iter.bi_sector,
+ (unsigned long long) bio_end_sector(mbio) - 1);
call_bio_endio(r1_bio);
}
}
@@ -875,7 +873,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
>= bio_end_sector(bio)) ||
(conf->next_resync + NEXT_NORMALIO_DISTANCE
- <= bio->bi_sector))
+ <= bio->bi_iter.bi_sector))
wait = false;
else
wait = true;
@@ -913,14 +911,14 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
if (bio && bio_data_dir(bio) == WRITE) {
if (conf->next_resync + NEXT_NORMALIO_DISTANCE
- <= bio->bi_sector) {
+ <= bio->bi_iter.bi_sector) {
if (conf->start_next_window == MaxSector)
conf->start_next_window =
conf->next_resync +
NEXT_NORMALIO_DISTANCE;
if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
- <= bio->bi_sector)
+ <= bio->bi_iter.bi_sector)
conf->next_window_requests++;
else
conf->current_window_requests++;
@@ -1027,7 +1025,8 @@ do_sync_io:
if (bvecs[i].bv_page)
put_page(bvecs[i].bv_page);
kfree(bvecs);
- pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
+ pr_debug("%dB behind alloc failed, doing sync I/O\n",
+ bio->bi_iter.bi_size);
}
struct raid1_plug_cb {
@@ -1107,7 +1106,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
if (bio_data_dir(bio) == WRITE &&
bio_end_sector(bio) > mddev->suspend_lo &&
- bio->bi_sector < mddev->suspend_hi) {
+ bio->bi_iter.bi_sector < mddev->suspend_hi) {
/* As the suspend_* range is controlled by
* userspace, we want an interruptible
* wait.
@@ -1118,7 +1117,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
prepare_to_wait(&conf->wait_barrier,
&w, TASK_INTERRUPTIBLE);
if (bio_end_sector(bio) <= mddev->suspend_lo ||
- bio->bi_sector >= mddev->suspend_hi)
+ bio->bi_iter.bi_sector >= mddev->suspend_hi)
break;
schedule();
}
@@ -1140,7 +1139,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
r1_bio->sectors = bio_sectors(bio);
r1_bio->state = 0;
r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_sector;
+ r1_bio->sector = bio->bi_iter.bi_sector;
/* We might need to issue multiple reads to different
* devices if there are bad blocks around, so we keep
@@ -1180,12 +1179,13 @@ read_again:
r1_bio->read_disk = rdisk;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(read_bio, r1_bio->sector - bio->bi_sector,
+ bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
max_sectors);
r1_bio->bios[rdisk] = read_bio;
- read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
+ read_bio->bi_iter.bi_sector = r1_bio->sector +
+ mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid1_end_read_request;
read_bio->bi_rw = READ | do_sync;
@@ -1197,7 +1197,7 @@ read_again:
*/
sectors_handled = (r1_bio->sector + max_sectors
- - bio->bi_sector);
+ - bio->bi_iter.bi_sector);
r1_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
if (bio->bi_phys_segments == 0)
@@ -1218,7 +1218,8 @@ read_again:
r1_bio->sectors = bio_sectors(bio) - sectors_handled;
r1_bio->state = 0;
r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_sector + sectors_handled;
+ r1_bio->sector = bio->bi_iter.bi_sector +
+ sectors_handled;
goto read_again;
} else
generic_make_request(read_bio);
@@ -1321,7 +1322,7 @@ read_again:
if (r1_bio->bios[j])
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
r1_bio->state = 0;
- allow_barrier(conf, start_next_window, bio->bi_sector);
+ allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
start_next_window = wait_barrier(conf, bio);
/*
@@ -1348,7 +1349,7 @@ read_again:
bio->bi_phys_segments++;
spin_unlock_irq(&conf->device_lock);
}
- sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
+ sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
atomic_set(&r1_bio->remaining, 1);
atomic_set(&r1_bio->behind_remaining, 0);
@@ -1360,7 +1361,7 @@ read_again:
continue;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
+ bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
if (first_clone) {
/* do behind I/O ?
@@ -1394,7 +1395,7 @@ read_again:
r1_bio->bios[i] = mbio;
- mbio->bi_sector = (r1_bio->sector +
+ mbio->bi_iter.bi_sector = (r1_bio->sector +
conf->mirrors[i].rdev->data_offset);
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
@@ -1434,7 +1435,7 @@ read_again:
r1_bio->sectors = bio_sectors(bio) - sectors_handled;
r1_bio->state = 0;
r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_sector + sectors_handled;
+ r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
goto retry_write;
}
@@ -1958,14 +1959,14 @@ static int process_checks(struct r1bio *r1_bio)
/* fixup the bio for reuse */
bio_reset(b);
b->bi_vcnt = vcnt;
- b->bi_size = r1_bio->sectors << 9;
- b->bi_sector = r1_bio->sector +
+ b->bi_iter.bi_size = r1_bio->sectors << 9;
+ b->bi_iter.bi_sector = r1_bio->sector +
conf->mirrors[i].rdev->data_offset;
b->bi_bdev = conf->mirrors[i].rdev->bdev;
b->bi_end_io = end_sync_read;
b->bi_private = r1_bio;
- size = b->bi_size;
+ size = b->bi_iter.bi_size;
for (j = 0; j < vcnt ; j++) {
struct bio_vec *bi;
bi = &b->bi_io_vec[j];
@@ -2220,11 +2221,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
}
wbio->bi_rw = WRITE;
- wbio->bi_sector = r1_bio->sector;
- wbio->bi_size = r1_bio->sectors << 9;
+ wbio->bi_iter.bi_sector = r1_bio->sector;
+ wbio->bi_iter.bi_size = r1_bio->sectors << 9;
bio_trim(wbio, sector - r1_bio->sector, sectors);
- wbio->bi_sector += rdev->data_offset;
+ wbio->bi_iter.bi_sector += rdev->data_offset;
wbio->bi_bdev = rdev->bdev;
if (submit_bio_wait(WRITE, wbio) == 0)
/* failure! */
@@ -2338,7 +2339,8 @@ read_more:
}
r1_bio->read_disk = disk;
bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
- bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors);
+ bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
+ max_sectors);
r1_bio->bios[r1_bio->read_disk] = bio;
rdev = conf->mirrors[disk].rdev;
printk_ratelimited(KERN_ERR
@@ -2347,7 +2349,7 @@ read_more:
mdname(mddev),
(unsigned long long)r1_bio->sector,
bdevname(rdev->bdev, b));
- bio->bi_sector = r1_bio->sector + rdev->data_offset;
+ bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
bio->bi_rw = READ | do_sync;
@@ -2356,7 +2358,7 @@ read_more:
/* Drat - have to split this up more */
struct bio *mbio = r1_bio->master_bio;
int sectors_handled = (r1_bio->sector + max_sectors
- - mbio->bi_sector);
+ - mbio->bi_iter.bi_sector);
r1_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
if (mbio->bi_phys_segments == 0)
@@ -2374,7 +2376,8 @@ read_more:
r1_bio->state = 0;
set_bit(R1BIO_ReadError, &r1_bio->state);
r1_bio->mddev = mddev;
- r1_bio->sector = mbio->bi_sector + sectors_handled;
+ r1_bio->sector = mbio->bi_iter.bi_sector +
+ sectors_handled;
goto read_more;
} else
@@ -2598,7 +2601,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
}
if (bio->bi_end_io) {
atomic_inc(&rdev->nr_pending);
- bio->bi_sector = sector_nr + rdev->data_offset;
+ bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_private = r1_bio;
}
@@ -2698,7 +2701,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
continue;
/* remove last page from this bio */
bio->bi_vcnt--;
- bio->bi_size -= len;
+ bio->bi_iter.bi_size -= len;
bio->bi_flags &= ~(1<< BIO_SEG_VALID);
}
goto bio_full;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8d39d63281b9..33fc408e5eac 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1152,14 +1152,12 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
kfree(plug);
}
-static void make_request(struct mddev *mddev, struct bio * bio)
+static void __make_request(struct mddev *mddev, struct bio *bio)
{
struct r10conf *conf = mddev->private;
struct r10bio *r10_bio;
struct bio *read_bio;
int i;
- sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
- int chunk_sects = chunk_mask + 1;
const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
@@ -1174,88 +1172,27 @@ static void make_request(struct mddev *mddev, struct bio * bio)
int max_sectors;
int sectors;
- if (unlikely(bio->bi_rw & REQ_FLUSH)) {
- md_flush_request(mddev, bio);
- return;
- }
-
- /* If this request crosses a chunk boundary, we need to
- * split it. This will only happen for 1 PAGE (or less) requests.
- */
- if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
- > chunk_sects
- && (conf->geo.near_copies < conf->geo.raid_disks
- || conf->prev.near_copies < conf->prev.raid_disks))) {
- struct bio_pair *bp;
- /* Sanity check -- queue functions should prevent this happening */
- if (bio_segments(bio) > 1)
- goto bad_map;
- /* This is a one page bio that upper layers
- * refuse to split for us, so we need to split it.
- */
- bp = bio_split(bio,
- chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
-
- /* Each of these 'make_request' calls will call 'wait_barrier'.
- * If the first succeeds but the second blocks due to the resync
- * thread raising the barrier, we will deadlock because the
- * IO to the underlying device will be queued in generic_make_request
- * and will never complete, so will never reduce nr_pending.
- * So increment nr_waiting here so no new raise_barriers will
- * succeed, and so the second wait_barrier cannot block.
- */
- spin_lock_irq(&conf->resync_lock);
- conf->nr_waiting++;
- spin_unlock_irq(&conf->resync_lock);
-
- make_request(mddev, &bp->bio1);
- make_request(mddev, &bp->bio2);
-
- spin_lock_irq(&conf->resync_lock);
- conf->nr_waiting--;
- wake_up(&conf->wait_barrier);
- spin_unlock_irq(&conf->resync_lock);
-
- bio_pair_release(bp);
- return;
- bad_map:
- printk("md/raid10:%s: make_request bug: can't convert block across chunks"
- " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
- (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
-
- bio_io_error(bio);
- return;
- }
-
- md_write_start(mddev, bio);
-
- /*
- * Register the new request and wait if the reconstruction
- * thread has put up a bar for new requests.
- * Continue immediately if no resync is active currently.
- */
- wait_barrier(conf);
-
sectors = bio_sectors(bio);
while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
- bio->bi_sector < conf->reshape_progress &&
- bio->bi_sector + sectors > conf->reshape_progress) {
+ bio->bi_iter.bi_sector < conf->reshape_progress &&
+ bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
/* IO spans the reshape position. Need to wait for
* reshape to pass
*/
allow_barrier(conf);
wait_event(conf->wait_barrier,
- conf->reshape_progress <= bio->bi_sector ||
- conf->reshape_progress >= bio->bi_sector + sectors);
+ conf->reshape_progress <= bio->bi_iter.bi_sector ||
+ conf->reshape_progress >= bio->bi_iter.bi_sector +
+ sectors);
wait_barrier(conf);
}
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
bio_data_dir(bio) == WRITE &&
(mddev->reshape_backwards
- ? (bio->bi_sector < conf->reshape_safe &&
- bio->bi_sector + sectors > conf->reshape_progress)
- : (bio->bi_sector + sectors > conf->reshape_safe &&
- bio->bi_sector < conf->reshape_progress))) {
+ ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
+ bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
+ : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
+ bio->bi_iter.bi_sector < conf->reshape_progress))) {
/* Need to update reshape_position in metadata */
mddev->reshape_position = conf->reshape_progress;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1273,7 +1210,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
r10_bio->sectors = sectors;
r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_sector;
+ r10_bio->sector = bio->bi_iter.bi_sector;
r10_bio->state = 0;
/* We might need to issue multiple reads to different
@@ -1302,13 +1239,13 @@ read_again:
slot = r10_bio->read_slot;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(read_bio, r10_bio->sector - bio->bi_sector,
+ bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
max_sectors);
r10_bio->devs[slot].bio = read_bio;
r10_bio->devs[slot].rdev = rdev;
- read_bio->bi_sector = r10_bio->devs[slot].addr +
+ read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
choose_data_offset(r10_bio, rdev);
read_bio->bi_bdev = rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
@@ -1320,7 +1257,7 @@ read_again:
* need another r10_bio.
*/
sectors_handled = (r10_bio->sector + max_sectors
- - bio->bi_sector);
+ - bio->bi_iter.bi_sector);
r10_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
if (bio->bi_phys_segments == 0)
@@ -1341,7 +1278,8 @@ read_again:
r10_bio->sectors = bio_sectors(bio) - sectors_handled;
r10_bio->state = 0;
r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_sector + sectors_handled;
+ r10_bio->sector = bio->bi_iter.bi_sector +
+ sectors_handled;
goto read_again;
} else
generic_make_request(read_bio);
@@ -1499,7 +1437,8 @@ retry_write:
bio->bi_phys_segments++;
spin_unlock_irq(&conf->device_lock);
}
- sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
+ sectors_handled = r10_bio->sector + max_sectors -
+ bio->bi_iter.bi_sector;
atomic_set(&r10_bio->remaining, 1);
bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
@@ -1510,11 +1449,11 @@ retry_write:
if (r10_bio->devs[i].bio) {
struct md_rdev *rdev = conf->mirrors[d].rdev;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+ bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
max_sectors);
r10_bio->devs[i].bio = mbio;
- mbio->bi_sector = (r10_bio->devs[i].addr+
+ mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
choose_data_offset(r10_bio,
rdev));
mbio->bi_bdev = rdev->bdev;
@@ -1553,11 +1492,11 @@ retry_write:
rdev = conf->mirrors[d].rdev;
}
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+ bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
max_sectors);
r10_bio->devs[i].repl_bio = mbio;
- mbio->bi_sector = (r10_bio->devs[i].addr +
+ mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
choose_data_offset(
r10_bio, rdev));
mbio->bi_bdev = rdev->bdev;
@@ -1591,11 +1530,57 @@ retry_write:
r10_bio->sectors = bio_sectors(bio) - sectors_handled;
r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_sector + sectors_handled;
+ r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
r10_bio->state = 0;
goto retry_write;
}
one_write_done(r10_bio);
+}
+
+static void make_request(struct mddev *mddev, struct bio *bio)
+{
+ struct r10conf *conf = mddev->private;
+ sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
+ int chunk_sects = chunk_mask + 1;
+
+ struct bio *split;
+
+ if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+ md_flush_request(mddev, bio);
+ return;
+ }
+
+ md_write_start(mddev, bio);
+
+ /*
+ * Register the new request and wait if the reconstruction
+ * thread has put up a bar for new requests.
+ * Continue immediately if no resync is active currently.
+ */
+ wait_barrier(conf);
+
+ do {
+
+ /*
+ * If this request crosses a chunk boundary, we need to split
+ * it.
+ */
+ if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
+ bio_sectors(bio) > chunk_sects
+ && (conf->geo.near_copies < conf->geo.raid_disks
+ || conf->prev.near_copies <
+ conf->prev.raid_disks))) {
+ split = bio_split(bio, chunk_sects -
+ (bio->bi_iter.bi_sector &
+ (chunk_sects - 1)),
+ GFP_NOIO, fs_bio_set);
+ bio_chain(split, bio);
+ } else {
+ split = bio;
+ }
+
+ __make_request(mddev, split);
+ } while (split != bio);
/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
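
Note: the deleted comment block documents why the old recursive splitting needed the nr_waiting dance: the second recursive make_request() could block in wait_barrier() while the first piece sat unissued in generic_make_request()'s queue, deadlocking against the resync thread. The new shape calls wait_barrier() once, loops over bio_split() pieces, and lets bio_chain() keep the parent pending until every piece completes, so there is no recursion and no special-case counter. A toy model of the chained-completion accounting (plain ints for atomics; the kernel routes the parent through the child's bi_private):

#include <stdio.h>

struct bio {
	int remaining;
	struct bio *parent;
	const char *name;
};

static void bio_endio(struct bio *bio)
{
	if (--bio->remaining)
		return;
	printf("%s done\n", bio->name);
	if (bio->parent)
		bio_endio(bio->parent);	/* propagate to the chained parent */
}

/* Each chained child holds a reference on the parent's completion. */
static void bio_chain(struct bio *child, struct bio *parent)
{
	child->parent = parent;
	parent->remaining++;
}

int main(void)
{
	struct bio parent = { 1, 0, "parent" };
	struct bio c1 = { 1, 0, "split 1" }, c2 = { 1, 0, "split 2" };

	bio_chain(&c1, &parent);	/* parent.remaining == 2 */
	bio_chain(&c2, &parent);	/* parent.remaining == 3 */

	bio_endio(&c1);			/* piece done, parent still pending */
	bio_endio(&c2);			/* piece done, parent still pending */
	bio_endio(&parent);		/* the tail piece (split == bio) */
	return 0;
}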
@@ -2124,10 +2109,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
bio_reset(tbio);
tbio->bi_vcnt = vcnt;
- tbio->bi_size = r10_bio->sectors << 9;
+ tbio->bi_iter.bi_size = r10_bio->sectors << 9;
tbio->bi_rw = WRITE;
tbio->bi_private = r10_bio;
- tbio->bi_sector = r10_bio->devs[i].addr;
+ tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
for (j=0; j < vcnt ; j++) {
tbio->bi_io_vec[j].bv_offset = 0;
@@ -2144,7 +2129,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
atomic_inc(&r10_bio->remaining);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
- tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
+ tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
generic_make_request(tbio);
}
@@ -2614,8 +2599,8 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
sectors = sect_to_write;
/* Write at 'sector' for 'sectors' */
wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(wbio, sector - bio->bi_sector, sectors);
- wbio->bi_sector = (r10_bio->devs[i].addr+
+ bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
+ wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
choose_data_offset(r10_bio, rdev) +
(sector - r10_bio->sector));
wbio->bi_bdev = rdev->bdev;
@@ -2687,10 +2672,10 @@ read_more:
(unsigned long long)r10_bio->sector);
bio = bio_clone_mddev(r10_bio->master_bio,
GFP_NOIO, mddev);
- bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors);
+ bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
r10_bio->devs[slot].bio = bio;
r10_bio->devs[slot].rdev = rdev;
- bio->bi_sector = r10_bio->devs[slot].addr
+ bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
+ choose_data_offset(r10_bio, rdev);
bio->bi_bdev = rdev->bdev;
bio->bi_rw = READ | do_sync;
@@ -2701,7 +2686,7 @@ read_more:
struct bio *mbio = r10_bio->master_bio;
int sectors_handled =
r10_bio->sector + max_sectors
- - mbio->bi_sector;
+ - mbio->bi_iter.bi_sector;
r10_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
if (mbio->bi_phys_segments == 0)
@@ -2719,7 +2704,7 @@ read_more:
set_bit(R10BIO_ReadError,
&r10_bio->state);
r10_bio->mddev = mddev;
- r10_bio->sector = mbio->bi_sector
+ r10_bio->sector = mbio->bi_iter.bi_sector
+ sectors_handled;
goto read_more;
@@ -3157,7 +3142,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_end_io = end_sync_read;
bio->bi_rw = READ;
from_addr = r10_bio->devs[j].addr;
- bio->bi_sector = from_addr + rdev->data_offset;
+ bio->bi_iter.bi_sector = from_addr +
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
atomic_inc(&rdev->nr_pending);
/* and we write to 'i' (if not in_sync) */
@@ -3181,7 +3167,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio->bi_rw = WRITE;
- bio->bi_sector = to_addr
+ bio->bi_iter.bi_sector = to_addr
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
atomic_inc(&r10_bio->remaining);
@@ -3210,7 +3196,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio->bi_rw = WRITE;
- bio->bi_sector = to_addr + rdev->data_offset;
+ bio->bi_iter.bi_sector = to_addr +
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
atomic_inc(&r10_bio->remaining);
break;
@@ -3328,7 +3315,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
bio->bi_rw = READ;
- bio->bi_sector = sector +
+ bio->bi_iter.bi_sector = sector +
conf->mirrors[d].rdev->data_offset;
bio->bi_bdev = conf->mirrors[d].rdev->bdev;
count++;
@@ -3350,7 +3337,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio->bi_rw = WRITE;
- bio->bi_sector = sector +
+ bio->bi_iter.bi_sector = sector +
conf->mirrors[d].replacement->data_offset;
bio->bi_bdev = conf->mirrors[d].replacement->bdev;
count++;
@@ -3397,7 +3384,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio2 = bio2->bi_next) {
/* remove last page from this bio */
bio2->bi_vcnt--;
- bio2->bi_size -= len;
+ bio2->bi_iter.bi_size -= len;
bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
}
goto bio_full;
@@ -4418,7 +4405,7 @@ read_more:
read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
read_bio->bi_bdev = rdev->bdev;
- read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+ read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset);
read_bio->bi_private = r10_bio;
read_bio->bi_end_io = end_sync_read;
@@ -4426,7 +4413,7 @@ read_more:
read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
read_bio->bi_flags |= 1 << BIO_UPTODATE;
read_bio->bi_vcnt = 0;
- read_bio->bi_size = 0;
+ read_bio->bi_iter.bi_size = 0;
r10_bio->master_bio = read_bio;
r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
@@ -4452,7 +4439,8 @@ read_more:
bio_reset(b);
b->bi_bdev = rdev2->bdev;
- b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
+ b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
+ rdev2->new_data_offset;
b->bi_private = r10_bio;
b->bi_end_io = end_reshape_write;
b->bi_rw = WRITE;
@@ -4479,7 +4467,7 @@ read_more:
bio2 = bio2->bi_next) {
/* Remove last page from this bio */
bio2->bi_vcnt--;
- bio2->bi_size -= len;
+ bio2->bi_iter.bi_size -= len;
bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
}
goto bio_full;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 03f82ab87d9e..67ca9c3d2939 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -133,7 +133,7 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
int sectors = bio_sectors(bio);
- if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
+ if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
return bio->bi_next;
else
return NULL;
@@ -225,7 +225,7 @@ static void return_io(struct bio *return_bi)
return_bi = bi->bi_next;
bi->bi_next = NULL;
- bi->bi_size = 0;
+ bi->bi_iter.bi_size = 0;
trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
bi, 0);
bio_endio(bi, 0);
@@ -852,10 +852,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_rw, i);
atomic_inc(&sh->count);
if (use_new_offset(conf, sh))
- bi->bi_sector = (sh->sector
+ bi->bi_iter.bi_sector = (sh->sector
+ rdev->new_data_offset);
else
- bi->bi_sector = (sh->sector
+ bi->bi_iter.bi_sector = (sh->sector
+ rdev->data_offset);
if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
bi->bi_rw |= REQ_NOMERGE;
@@ -863,7 +863,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_vcnt = 1;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
bi->bi_io_vec[0].bv_offset = 0;
- bi->bi_size = STRIPE_SIZE;
+ bi->bi_iter.bi_size = STRIPE_SIZE;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
@@ -899,15 +899,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
rbi->bi_rw, i);
atomic_inc(&sh->count);
if (use_new_offset(conf, sh))
- rbi->bi_sector = (sh->sector
+ rbi->bi_iter.bi_sector = (sh->sector
+ rrdev->new_data_offset);
else
- rbi->bi_sector = (sh->sector
+ rbi->bi_iter.bi_sector = (sh->sector
+ rrdev->data_offset);
rbi->bi_vcnt = 1;
rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
rbi->bi_io_vec[0].bv_offset = 0;
- rbi->bi_size = STRIPE_SIZE;
+ rbi->bi_iter.bi_size = STRIPE_SIZE;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
@@ -935,24 +935,24 @@ static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
sector_t sector, struct dma_async_tx_descriptor *tx)
{
- struct bio_vec *bvl;
+ struct bio_vec bvl;
+ struct bvec_iter iter;
struct page *bio_page;
- int i;
int page_offset;
struct async_submit_ctl submit;
enum async_tx_flags flags = 0;
- if (bio->bi_sector >= sector)
- page_offset = (signed)(bio->bi_sector - sector) * 512;
+ if (bio->bi_iter.bi_sector >= sector)
+ page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
else
- page_offset = (signed)(sector - bio->bi_sector) * -512;
+ page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
if (frombio)
flags |= ASYNC_TX_FENCE;
init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
- bio_for_each_segment(bvl, bio, i) {
- int len = bvl->bv_len;
+ bio_for_each_segment(bvl, bio, iter) {
+ int len = bvl.bv_len;
int clen;
int b_offset = 0;
@@ -968,8 +968,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
clen = len;
if (clen > 0) {
- b_offset += bvl->bv_offset;
- bio_page = bvl->bv_page;
+ b_offset += bvl.bv_offset;
+ bio_page = bvl.bv_page;
if (frombio)
tx = async_memcpy(page, bio_page, page_offset,
b_offset, clen, &submit);
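
Note: bio_for_each_segment() changes shape here too: the loop variable is now a struct bio_vec value generated from a bvec_iter rather than a pointer into bi_io_vec, so iteration starts from the bio's current position and cannot scribble on the shared vector (hence bvl->bv_len becoming bvl.bv_len above). A heavily simplified userspace model of the macro; the real one also takes the bio as an argument and clamps partial bvecs via bio_iter_iovec():

#include <stdio.h>

struct bio_vec { const char *page; unsigned bv_len; unsigned bv_offset; };
struct bvec_iter { unsigned bi_idx; unsigned bi_size; };

static const struct bio_vec io_vec[] = {
	{ "abcd", 4, 0 }, { "efgh", 4, 0 }, { "ij", 2, 0 },
};

/* Toy version: yields each bvec by value until bi_size is exhausted. */
#define bio_for_each_segment(bvl, iter)				\
	for ((iter) = (struct bvec_iter){ 0, 10 };		\
	     (iter).bi_size &&					\
		     ((bvl) = io_vec[(iter).bi_idx], 1);	\
	     (iter).bi_size -= (bvl).bv_len, (iter).bi_idx++)

int main(void)
{
	struct bio_vec bvl;	/* by value: was "struct bio_vec *bvl" */
	struct bvec_iter iter;

	bio_for_each_segment(bvl, iter)
		printf("%u bytes at %.4s\n", bvl.bv_len, bvl.page);
	return 0;
}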
@@ -1012,7 +1012,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
BUG_ON(!dev->read);
rbi = dev->read;
dev->read = NULL;
- while (rbi && rbi->bi_sector <
+ while (rbi && rbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
rbi2 = r5_next_bio(rbi, dev->sector);
if (!raid5_dec_bi_active_stripes(rbi)) {
@@ -1048,7 +1048,7 @@ static void ops_run_biofill(struct stripe_head *sh)
dev->read = rbi = dev->toread;
dev->toread = NULL;
spin_unlock_irq(&sh->stripe_lock);
- while (rbi && rbi->bi_sector <
+ while (rbi && rbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
tx = async_copy_data(0, rbi, dev->page,
dev->sector, tx);
@@ -1390,7 +1390,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
wbi = dev->written = chosen;
spin_unlock_irq(&sh->stripe_lock);
- while (wbi && wbi->bi_sector <
+ while (wbi && wbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
if (wbi->bi_rw & REQ_FUA)
set_bit(R5_WantFUA, &dev->flags);
@@ -2615,7 +2615,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
int firstwrite=0;
pr_debug("adding bi b#%llu to stripe s#%llu\n",
- (unsigned long long)bi->bi_sector,
+ (unsigned long long)bi->bi_iter.bi_sector,
(unsigned long long)sh->sector);
/*
@@ -2633,12 +2633,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
firstwrite = 1;
} else
bip = &sh->dev[dd_idx].toread;
- while (*bip && (*bip)->bi_sector < bi->bi_sector) {
- if (bio_end_sector(*bip) > bi->bi_sector)
+ while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
+ if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
goto overlap;
bip = & (*bip)->bi_next;
}
- if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
+ if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
goto overlap;
BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2652,7 +2652,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
sector_t sector = sh->dev[dd_idx].sector;
for (bi=sh->dev[dd_idx].towrite;
sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
- bi && bi->bi_sector <= sector;
+ bi && bi->bi_iter.bi_sector <= sector;
bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
if (bio_end_sector(bi) >= sector)
sector = bio_end_sector(bi);
@@ -2662,7 +2662,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
}
pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
- (unsigned long long)(*bip)->bi_sector,
+ (unsigned long long)(*bip)->bi_iter.bi_sector,
(unsigned long long)sh->sector, dd_idx);
spin_unlock_irq(&sh->stripe_lock);
@@ -2737,7 +2737,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
- while (bi && bi->bi_sector <
+ while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2756,7 +2756,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
bi = sh->dev[i].written;
sh->dev[i].written = NULL;
if (bi) bitmap_end = 1;
- while (bi && bi->bi_sector <
+ while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2780,7 +2780,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
spin_unlock_irq(&sh->stripe_lock);
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
- while (bi && bi->bi_sector <
+ while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi =
r5_next_bio(bi, sh->dev[i].sector);
@@ -3004,7 +3004,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
clear_bit(R5_UPTODATE, &dev->flags);
wbi = dev->written;
dev->written = NULL;
- while (wbi && wbi->bi_sector <
+ while (wbi && wbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
wbi2 = r5_next_bio(wbi, dev->sector);
if (!raid5_dec_bi_active_stripes(wbi)) {
@@ -4096,7 +4096,7 @@ static int raid5_mergeable_bvec(struct request_queue *q,
static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
- sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+ sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bio_sectors(bio);
@@ -4233,9 +4233,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
/*
* compute position
*/
- align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
- 0,
- &dd_idx, NULL);
+ align_bi->bi_iter.bi_sector =
+ raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
+ 0, &dd_idx, NULL);
end_sector = bio_end_sector(align_bi);
rcu_read_lock();
@@ -4260,7 +4260,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
if (!bio_fits_rdev(align_bi) ||
- is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
+ is_badblock(rdev, align_bi->bi_iter.bi_sector,
+ bio_sectors(align_bi),
&first_bad, &bad_sectors)) {
/* too big in some way, or has a known bad block */
bio_put(align_bi);
@@ -4269,7 +4270,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
}
/* No reshape active, so we can trust rdev->data_offset */
- align_bi->bi_sector += rdev->data_offset;
+ align_bi->bi_iter.bi_sector += rdev->data_offset;
spin_lock_irq(&conf->device_lock);
wait_event_lock_irq(conf->wait_for_stripe,
@@ -4281,7 +4282,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
align_bi, disk_devt(mddev->gendisk),
- raid_bio->bi_sector);
+ raid_bio->bi_iter.bi_sector);
generic_make_request(align_bi);
return 1;
} else {
@@ -4464,8 +4465,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
/* Skip discard while reshape is happening */
return;
- logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
- last_sector = bi->bi_sector + (bi->bi_size>>9);
+ logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+ last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -4569,7 +4570,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
return;
}
- logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+ logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
last_sector = bio_end_sector(bi);
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -5053,7 +5054,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
int remaining;
int handled = 0;
- logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+ logical_sector = raid_bio->bi_iter.bi_sector &
+ ~((sector_t)STRIPE_SECTORS-1);
sector = raid5_compute_sector(conf, logical_sector,
0, &dd_idx, NULL);
last_sector = bio_end_sector(raid_bio);