author    Akinobu Mita <akinobu.mita@gmail.com>  2011-07-26 16:09:03 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-07-26 16:49:46 -0700
commit    b2c9cd3793e5878e301ec2219785a7b8ca402ef1 (patch)
tree      e0a8a447ea8c6d59c20a1e190e2c397bfcd46a49 /block
parent    b2588c4b4c3c075e9b45d61065d86c60de2b6441 (diff)
fail_make_request: cleanup should_fail_request
This changes should_fail_request() into a more usable wrapper around should_fail(), avoiding an #ifdef CONFIG_FAIL_MAKE_REQUEST in the middle of a function.

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
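For illustration, a minimal sketch of the calling pattern this cleanup enables (the helper name bio_check_fail is hypothetical and not part of the patch; its body mirrors the new code added to __generic_make_request() below). Because the !CONFIG_FAIL_MAKE_REQUEST stub simply returns false, a caller can check both the partition and the whole-disk part0 without any #ifdef:

	/* Hypothetical helper showing the post-patch calling convention;
	 * simplified from __generic_make_request(), not literal kernel source. */
	static bool bio_check_fail(struct bio *bio)
	{
		struct hd_struct *part = bio->bi_bdev->bd_part;

		/* Check the partition itself, then the whole disk (part0).
		 * With CONFIG_FAIL_MAKE_REQUEST disabled, both calls are inline
		 * stubs returning false, so no #ifdef is needed at the call site. */
		return should_fail_request(part, bio->bi_size) ||
		       should_fail_request(&part_to_disk(part)->part0, bio->bi_size);
	}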
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c  26
1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index f9255815a5bb..b850bedad229 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1361,14 +1361,9 @@ static int __init setup_fail_make_request(char *str)
}
__setup("fail_make_request=", setup_fail_make_request);
-static int should_fail_request(struct bio *bio)
+static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
- struct hd_struct *part = bio->bi_bdev->bd_part;
-
- if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail)
- return should_fail(&fail_make_request, bio->bi_size);
-
- return 0;
+ return part->make_it_fail && should_fail(&fail_make_request, bytes);
}
static int __init fail_make_request_debugfs(void)
@@ -1381,9 +1376,10 @@ late_initcall(fail_make_request_debugfs);
#else /* CONFIG_FAIL_MAKE_REQUEST */
-static inline int should_fail_request(struct bio *bio)
+static inline bool should_fail_request(struct hd_struct *part,
+ unsigned int bytes)
{
- return 0;
+ return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */
@@ -1466,6 +1462,7 @@ static inline void __generic_make_request(struct bio *bio)
old_dev = 0;
do {
char b[BDEVNAME_SIZE];
+ struct hd_struct *part;
q = bdev_get_queue(bio->bi_bdev);
if (unlikely(!q)) {
@@ -1489,7 +1486,10 @@ static inline void __generic_make_request(struct bio *bio)
if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
goto end_io;
- if (should_fail_request(bio))
+ part = bio->bi_bdev->bd_part;
+ if (should_fail_request(part, bio->bi_size) ||
+ should_fail_request(&part_to_disk(part)->part0,
+ bio->bi_size))
goto end_io;
/*
@@ -1704,11 +1704,9 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
if (blk_rq_check_limits(q, rq))
return -EIO;
-#ifdef CONFIG_FAIL_MAKE_REQUEST
- if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
- should_fail(&fail_make_request, blk_rq_bytes(rq)))
+ if (rq->rq_disk &&
+ should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
return -EIO;
-#endif
spin_lock_irqsave(q->queue_lock, flags);