author	Christoph Hellwig <hch@infradead.org>	2011-09-12 12:12:01 +0200
committer	Jens Axboe <jaxboe@fusionio.com>	2011-09-12 12:12:01 +0200
commit	5a7bbad27a410350e64a2d7f5ec18fc73836c14f (patch)
tree	3447cd62dbcbd77b4071e2eb7576f1d7632ef2d3 /block
parent	c20e8de27fef9f59869c81c288ad6cf28200e00c (diff)
block: remove support for bio remapping from ->make_request
There is very little benefit in allowing a ->make_request instance to update the bio's device and sector and loop around it in __generic_make_request, when we can achieve the same by calling generic_make_request from the driver and letting the loop in generic_make_request handle it.

Note that various drivers returned non-zero values from ->make_request to signal errors.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: NeilBrown <neilb@suse.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
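As an illustrative sketch only (not part of this commit), the snippet below shows how a stacking driver's ->make_request would look under the new convention, assuming the post-series void ->make_request signature: instead of updating the bio and returning non-zero so that __generic_make_request loops, the driver remaps the bio and resubmits it via generic_make_request() itself. The names my_stacked_dev, my_make_request and the simple offset-based remapping are assumptions for the example, not code from the commit or from any real driver.

	/*
	 * Hypothetical stacking driver, kernel style circa this commit.
	 * "my_stacked_dev" and the offset remapping are illustrative only.
	 */
	#include <linux/blkdev.h>
	#include <linux/bio.h>

	struct my_stacked_dev {
		struct block_device	*backing_bdev;	/* device we stack on */
		sector_t		 start;		/* offset of our region */
	};

	static void my_make_request(struct request_queue *q, struct bio *bio)
	{
		struct my_stacked_dev *dev = q->queuedata;

		/* Remap the bio onto the backing device ... */
		bio->bi_bdev = dev->backing_bdev;
		bio->bi_sector += dev->start;

		/*
		 * ... and resubmit it ourselves.  After this commit there is
		 * no return value asking __generic_make_request to loop; the
		 * iteration is handled inside generic_make_request.
		 */
		generic_make_request(bio);
	}

Before this change, such a driver could instead leave the remapped bio in place and return a non-zero value so that __generic_make_request would loop and resolve the next level of the stack, which is exactly the path removed below.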
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	153
1 file changed, 62 insertions, 91 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index ab673f0b8c30..f58e019be67b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1211,7 +1211,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
blk_rq_bio_prep(req->q, req, bio);
}
-int blk_queue_bio(struct request_queue *q, struct bio *bio)
+void blk_queue_bio(struct request_queue *q, struct bio *bio)
{
const bool sync = !!(bio->bi_rw & REQ_SYNC);
struct blk_plug *plug;
@@ -1236,7 +1236,7 @@ int blk_queue_bio(struct request_queue *q, struct bio *bio)
* any locks.
*/
if (attempt_plug_merge(current, q, bio))
- goto out;
+ return;
spin_lock_irq(q->queue_lock);
@@ -1312,8 +1312,6 @@ get_rq:
out_unlock:
spin_unlock_irq(q->queue_lock);
}
-out:
- return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */
@@ -1441,112 +1439,85 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
static inline void __generic_make_request(struct bio *bio)
{
struct request_queue *q;
- sector_t old_sector;
- int ret, nr_sectors = bio_sectors(bio);
- dev_t old_dev;
+ int nr_sectors = bio_sectors(bio);
int err = -EIO;
+ char b[BDEVNAME_SIZE];
+ struct hd_struct *part;
might_sleep();
if (bio_check_eod(bio, nr_sectors))
goto end_io;
- /*
- * Resolve the mapping until finished. (drivers are
- * still free to implement/resolve their own stacking
- * by explicitly returning 0)
- *
- * NOTE: we don't repeat the blk_size check for each new device.
- * Stacking drivers are expected to know what they are doing.
- */
- old_sector = -1;
- old_dev = 0;
- do {
- char b[BDEVNAME_SIZE];
- struct hd_struct *part;
-
- q = bdev_get_queue(bio->bi_bdev);
- if (unlikely(!q)) {
- printk(KERN_ERR
- "generic_make_request: Trying to access "
- "nonexistent block-device %s (%Lu)\n",
- bdevname(bio->bi_bdev, b),
- (long long) bio->bi_sector);
- goto end_io;
- }
-
- if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
- nr_sectors > queue_max_hw_sectors(q))) {
- printk(KERN_ERR "bio too big device %s (%u > %u)\n",
- bdevname(bio->bi_bdev, b),
- bio_sectors(bio),
- queue_max_hw_sectors(q));
- goto end_io;
- }
-
- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
- goto end_io;
-
- part = bio->bi_bdev->bd_part;
- if (should_fail_request(part, bio->bi_size) ||
- should_fail_request(&part_to_disk(part)->part0,
- bio->bi_size))
- goto end_io;
+ q = bdev_get_queue(bio->bi_bdev);
+ if (unlikely(!q)) {
+ printk(KERN_ERR
+ "generic_make_request: Trying to access "
+ "nonexistent block-device %s (%Lu)\n",
+ bdevname(bio->bi_bdev, b),
+ (long long) bio->bi_sector);
+ goto end_io;
+ }
- /*
- * If this device has partitions, remap block n
- * of partition p to block n+start(p) of the disk.
- */
- blk_partition_remap(bio);
+ if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
+ nr_sectors > queue_max_hw_sectors(q))) {
+ printk(KERN_ERR "bio too big device %s (%u > %u)\n",
+ bdevname(bio->bi_bdev, b),
+ bio_sectors(bio),
+ queue_max_hw_sectors(q));
+ goto end_io;
+ }
- if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
- goto end_io;
+ if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+ goto end_io;
- if (old_sector != -1)
- trace_block_bio_remap(q, bio, old_dev, old_sector);
+ part = bio->bi_bdev->bd_part;
+ if (should_fail_request(part, bio->bi_size) ||
+ should_fail_request(&part_to_disk(part)->part0,
+ bio->bi_size))
+ goto end_io;
- old_sector = bio->bi_sector;
- old_dev = bio->bi_bdev->bd_dev;
+ /*
+ * If this device has partitions, remap block n
+ * of partition p to block n+start(p) of the disk.
+ */
+ blk_partition_remap(bio);
- if (bio_check_eod(bio, nr_sectors))
- goto end_io;
+ if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
+ goto end_io;
- /*
- * Filter flush bio's early so that make_request based
- * drivers without flush support don't have to worry
- * about them.
- */
- if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
- bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
- if (!nr_sectors) {
- err = 0;
- goto end_io;
- }
- }
+ if (bio_check_eod(bio, nr_sectors))
+ goto end_io;
- if ((bio->bi_rw & REQ_DISCARD) &&
- (!blk_queue_discard(q) ||
- ((bio->bi_rw & REQ_SECURE) &&
- !blk_queue_secdiscard(q)))) {
- err = -EOPNOTSUPP;
+ /*
+ * Filter flush bio's early so that make_request based
+ * drivers without flush support don't have to worry
+ * about them.
+ */
+ if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+ bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+ if (!nr_sectors) {
+ err = 0;
goto end_io;
}
+ }
- if (blk_throtl_bio(q, &bio))
- goto end_io;
-
- /*
- * If bio = NULL, bio has been throttled and will be submitted
- * later.
- */
- if (!bio)
- break;
-
- trace_block_bio_queue(q, bio);
+ if ((bio->bi_rw & REQ_DISCARD) &&
+ (!blk_queue_discard(q) ||
+ ((bio->bi_rw & REQ_SECURE) &&
+ !blk_queue_secdiscard(q)))) {
+ err = -EOPNOTSUPP;
+ goto end_io;
+ }
- ret = q->make_request_fn(q, bio);
- } while (ret);
+ if (blk_throtl_bio(q, &bio))
+ goto end_io;
+ /* if bio = NULL, bio has been throttled and will be submitted later. */
+ if (!bio)
+ return;
+ trace_block_bio_queue(q, bio);
+ q->make_request_fn(q, bio);
return;
end_io: