author		Keith Busch <keith.busch@intel.com>	2015-03-11 23:56:39 -0400
committer	Jens Axboe <axboe@fb.com>	2015-03-13 08:30:55 -0600
commit		bfd343aa1718457d34b99ce6573085ac340da288 (patch)
tree		43e7156fc29628569a5a065e38e54719fa754325 /block
parent		b94ec296403e99d5ac9a8c48332cec4118d44b94 (diff)
blk-mq: don't wait in blk_mq_queue_enter() if __GFP_WAIT isn't set
Return -EBUSY if we're unable to enter a queue immediately when allocating
a blk-mq request without __GFP_WAIT.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
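For illustration only (not part of this commit): a minimal caller-side sketch of how the new behavior surfaces. With a gfp mask that lacks __GFP_WAIT (e.g. GFP_ATOMIC), blk_mq_alloc_request() now fails fast with ERR_PTR(-EBUSY) while the queue is frozen instead of sleeping on mq_freeze_wq. The helper name and flow below are hypothetical.

/*
 * Hypothetical caller (not from this patch): allocate a blk-mq request
 * without __GFP_WAIT.  If the queue is frozen, blk_mq_queue_enter() now
 * returns -EBUSY instead of blocking, so the allocation fails fast and
 * the caller can defer the work or fall back to a blocking path.
 */
static int example_alloc_rq_nowait(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, WRITE, GFP_ATOMIC, false);
	if (IS_ERR(rq)) {
		if (PTR_ERR(rq) == -EBUSY)
			return -EBUSY;	/* queue frozen; retry later */
		return PTR_ERR(rq);
	}

	/* ...set up and dispatch rq here... */

	blk_mq_free_request(rq);
	return 0;
}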
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq.c	9
1 file changed, 6 insertions, 3 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 06614ce0f475..59fa23935a0f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -77,7 +77,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }
 
-static int blk_mq_queue_enter(struct request_queue *q)
+static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
 {
 	while (true) {
 		int ret;
@@ -85,6 +85,9 @@ static int blk_mq_queue_enter(struct request_queue *q)
 		if (percpu_ref_tryget_live(&q->mq_usage_counter))
 			return 0;
 
+		if (!(gfp & __GFP_WAIT))
+			return -EBUSY;
+
 		ret = wait_event_interruptible(q->mq_freeze_wq,
 				!q->mq_freeze_depth || blk_queue_dying(q));
 		if (blk_queue_dying(q))
@@ -256,7 +259,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 	struct blk_mq_alloc_data alloc_data;
 	int ret;
 
-	ret = blk_mq_queue_enter(q);
+	ret = blk_mq_queue_enter(q, gfp);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -1186,7 +1189,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	int rw = bio_data_dir(bio);
 	struct blk_mq_alloc_data alloc_data;
 
-	if (unlikely(blk_mq_queue_enter(q))) {
+	if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
 		bio_endio(bio, -EIO);
 		return NULL;
 	}