author     Kent Overstreet <kmo@daterainc.com>    2013-09-10 14:27:42 -0700
committer  Kent Overstreet <kmo@daterainc.com>    2013-11-10 21:56:43 -0800
commit     5ceaaad7047745c1c02150c39d3fb623b7948d48 (patch)
tree       c03201ceda61314a0ec4b521e5d2d4d89bb1f0c2 /drivers/md/bcache/request.c
parent     098fb25498214069e6bbf908515f2952dd7654d0 (diff)
bcache: Bypass torture test
More testing ftw! Also, now verify mode doesn't break if you read dirty data.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--   drivers/md/bcache/request.c | 14
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 9f5a1386f77a..fbcc851ed5a5 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -528,6 +528,13 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 		goto skip;
 	}
 
+	if (bypass_torture_test(dc)) {
+		if ((get_random_int() & 3) == 3)
+			goto skip;
+		else
+			goto rescale;
+	}
+
 	if (!congested && !dc->sequential_cutoff)
 		goto rescale;
 
@@ -601,6 +608,7 @@ struct search {
 	unsigned		recoverable:1;
 	unsigned		unaligned_bvec:1;
 	unsigned		write:1;
+	unsigned		read_dirty_data:1;
 
 	unsigned long		start_time;
 
@@ -669,6 +677,9 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 
 	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
 
+	if (KEY_DIRTY(k))
+		s->read_dirty_data = true;
+
 	n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
 				     KEY_OFFSET(k) - bio->bi_sector),
 			  GFP_NOIO, s->d->bio_split);
@@ -894,7 +905,8 @@ static void cached_dev_read_done(struct closure *cl)
 		s->cache_miss = NULL;
 	}
 
-	if (verify(dc, &s->bio.bio) && s->recoverable && !s->unaligned_bvec)
+	if (verify(dc, &s->bio.bio) && s->recoverable &&
+	    !s->unaligned_bvec && !s->read_dirty_data)
 		bch_data_verify(dc, s->orig_bio);
 
 	bio_complete(s);
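
Note on the patch above: when bypass_torture_test(dc) is set, check_should_bypass() sends roughly one bio in four straight to skip (bypassing the cache) and lets the rest fall through to rescale, so cached and uncached paths are exercised on the same workload. Separately, cache_lookup_fn() records in the new read_dirty_data bit whether a lookup returned dirty data, and cached_dev_read_done() then skips bch_data_verify() for such reads, since dirty data in the cache has not yet been written back and is expected to differ from the backing device. Below is a minimal standalone userspace sketch of the random bypass decision only; it is not part of the kernel patch, rand() merely stands in for the kernel's get_random_int(), and torture_decide(), BYPASS and CACHE are illustrative names.

/*
 * Standalone sketch (not kernel code) of the torture-test bypass decision.
 * rand() stands in for get_random_int(); the enum and function names are
 * made up for illustration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

enum decision { CACHE, BYPASS };

/* (random & 3) == 3 holds for ~1 request in 4: those bypass the cache. */
static enum decision torture_decide(void)
{
	return ((rand() & 3) == 3) ? BYPASS : CACHE;
}

int main(void)
{
	unsigned long bypassed = 0, total = 1000000;
	unsigned long i;

	srand((unsigned)time(NULL));
	for (i = 0; i < total; i++)
		if (torture_decide() == BYPASS)
			bypassed++;

	/* Expect roughly 25% of requests to skip the cache. */
	printf("%lu of %lu requests bypassed (~%.1f%%)\n",
	       bypassed, total, 100.0 * bypassed / total);
	return 0;
}

Running the sketch prints a bypass rate close to 25%, which is the mix the patch presumably aims for: enough bypassed I/O to keep that path under regular test without starving the cache-hit path.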