author     Ingo Molnar <mingo@elte.hu>   2008-06-16 11:27:45 +0200
committer  Ingo Molnar <mingo@elte.hu>   2008-06-16 11:27:45 +0200
commit     d939d2851fd12568e2eabb2916769e8a57ba5c89 (patch)
tree       f3158a5ddd41541a61126f9e48de1ce89c632f64 /block/blk-core.c
parent     9f5314fb4d556d3132c784d0df47352b2830ca53 (diff)
parent     066519068ad2fbe98c7f45552b1f592903a9c8c8 (diff)
Merge branch 'linus' into x86/irq (tip-x86-irq-2008-06-16_09.27_Mon)
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--   block/blk-core.c | 42
1 file changed, 19 insertions(+), 23 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 2987fe47b5ee..1905aaba49fb 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -482,6 +482,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	kobject_init(&q->kobj, &blk_queue_ktype);
 
 	mutex_init(&q->sysfs_lock);
+	spin_lock_init(&q->__queue_lock);
 
 	return q;
 }
@@ -544,10 +545,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	 * if caller didn't supply a lock, they get per-queue locking with
 	 * our embedded lock
 	 */
-	if (!lock) {
-		spin_lock_init(&q->__queue_lock);
+	if (!lock)
 		lock = &q->__queue_lock;
-	}
 
 	q->request_fn = rfn;
 	q->prep_rq_fn = NULL;
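
Taken together, the two hunks above move initialization of the queue's embedded spinlock from blk_init_queue_node() into blk_alloc_queue_node(), so the lock is set up as soon as the queue object is allocated. A rough sketch of the resulting flow, reconstructed only from the added and context lines shown here (surrounding code omitted; the motivation stated in the comments is an inference, not part of the patch text):

	/* blk_alloc_queue_node(): the embedded lock is now initialized
	 * unconditionally when the queue is created. */
	kobject_init(&q->kobj, &blk_queue_ktype);
	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/* blk_init_queue_node(): with the lock already initialized, the
	 * fallback for callers that pass no lock is just a pointer switch. */
	if (!lock)
		lock = &q->__queue_lock;

One plausible consequence is that code running between allocation and blk_init_queue*() no longer risks touching an uninitialized q->queue_lock.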
@@ -807,35 +806,32 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
+		struct io_context *ioc;
 		struct request_list *rl = &q->rq;
 
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		rq = get_request(q, rw_flags, bio, GFP_NOIO);
-
-		if (!rq) {
-			struct io_context *ioc;
-
-			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
 
-			__generic_unplug_device(q);
-			spin_unlock_irq(q->queue_lock);
-			io_schedule();
+		__generic_unplug_device(q);
+		spin_unlock_irq(q->queue_lock);
+		io_schedule();
 
-			/*
-			 * After sleeping, we become a "batching" process and
-			 * will be able to allocate at least one request, and
-			 * up to a big batch of them for a small period time.
-			 * See ioc_batching, ioc_set_batching
-			 */
-			ioc = current_io_context(GFP_NOIO, q->node);
-			ioc_set_batching(q, ioc);
+		/*
+		 * After sleeping, we become a "batching" process and
+		 * will be able to allocate at least one request, and
+		 * up to a big batch of them for a small period time.
+		 * See ioc_batching, ioc_set_batching
+		 */
+		ioc = current_io_context(GFP_NOIO, q->node);
+		ioc_set_batching(q, ioc);
 
-			spin_lock_irq(q->queue_lock);
-		}
+		spin_lock_irq(q->queue_lock);
 		finish_wait(&rl->wait[rw], &wait);
-	}
+
+		rq = get_request(q, rw_flags, bio, GFP_NOIO);
+	};
 
 	return rq;
 }
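
For readability, this is roughly how get_request_wait()'s waiting loop reads after the change, reconstructed from the added and context lines of the hunk above (declarations and code outside the hunk, such as the rw and rq definitions, are omitted):

	rq = get_request(q, rw_flags, bio, GFP_NOIO);
	while (!rq) {
		DEFINE_WAIT(wait);
		struct io_context *ioc;
		struct request_list *rl = &q->rq;

		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
				TASK_UNINTERRUPTIBLE);

		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);

		/* Unplug the device and drop the queue lock before sleeping. */
		__generic_unplug_device(q);
		spin_unlock_irq(q->queue_lock);
		io_schedule();

		/*
		 * After sleeping, we become a "batching" process and
		 * will be able to allocate at least one request, and
		 * up to a big batch of them for a small period time.
		 * See ioc_batching, ioc_set_batching
		 */
		ioc = current_io_context(GFP_NOIO, q->node);
		ioc_set_batching(q, ioc);

		spin_lock_irq(q->queue_lock);
		finish_wait(&rl->wait[rw], &wait);

		rq = get_request(q, rw_flags, bio, GFP_NOIO);
	};

	return rq;

The retry now happens at the bottom of the loop rather than inside a nested if (!rq) block, which removes one indentation level; the stray ';' after the closing brace comes from the patch itself and is a harmless empty statement.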