author    Vasily Tarasov <vtaras@openvz.org>	2007-07-20 10:06:38 +0200
committer Jens Axboe <jens.axboe@oracle.com>	2007-07-20 10:06:38 +0200
commit    c2dea2d1fdbce86942dba0a968c523d8b7858bb5
tree      c876b93785ee3b25c341be0dd5080a5176f27736	/block/cfq-iosched.c
parent    9a79b2274186fade17134929d4f85b70d59a3840
cfq: async queue allocation per priority
If we have two processes with different ioprio_class values but the same
ioprio data, their async requests will fall into the same queue. That is
not the expected behavior: real-time requests and best-effort requests
should not be placed in the same queue. This patch fixes the problem by
introducing additional *cfqq fields on cfqd, pointing to per-(class,priority)
async queues.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
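For illustration, here is a minimal user-space sketch of the per-(class,priority) indexing scheme the patch introduces. The struct, helper, and constants below are simplified stand-ins for the kernel's types, not the kernel code itself; IOPRIO_BE_NR is 8 in kernels of this era, and IDLE gets a single slot because it has no per-priority levels.

	/*
	 * Toy model of the patch's lookup scheme (simplified stand-ins,
	 * not the kernel's types): RT and BE each get IOPRIO_BE_NR slots,
	 * IDLE gets one shared slot.
	 */
	#include <assert.h>
	#include <stdio.h>

	#define IOPRIO_BE_NR 8

	enum { IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };

	struct cfqq { int id; };

	struct cfqd {
		/* row 0 = RT, row 1 = BE, one slot per priority level */
		struct cfqq *async_cfqq[2][IOPRIO_BE_NR];
		/* IDLE has no priority levels, so one slot suffices */
		struct cfqq *async_idle_cfqq;
	};

	static struct cfqq **async_queue_prio(struct cfqd *d, int klass, int prio)
	{
		switch (klass) {
		case IOPRIO_CLASS_RT:
			return &d->async_cfqq[0][prio];
		case IOPRIO_CLASS_BE:
			return &d->async_cfqq[1][prio];
		default:
			return &d->async_idle_cfqq;
		}
	}

	int main(void)
	{
		struct cfqd d = { { { 0 } }, 0 };

		/* same priority value, different classes: distinct slots now */
		assert(async_queue_prio(&d, IOPRIO_CLASS_RT, 4) !=
		       async_queue_prio(&d, IOPRIO_CLASS_BE, 4));
		puts("RT and BE no longer share an async queue slot");
		return 0;
	}

Before the patch, both lookups would have indexed the same one-dimensional async_cfqq[4] slot, which is exactly the collision the commit message describes.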
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	56
1 file changed, 44 insertions(+), 12 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9755a3cfad26..bc7190eed10d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -92,7 +92,11 @@ struct cfq_data {
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
 
-	struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
+	/*
+	 * async queue for each priority case
+	 */
+	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
+	struct cfq_queue *async_idle_cfqq;
 
 	struct timer_list idle_class_timer;
 
@@ -1414,24 +1418,44 @@ out:
 	return cfqq;
 }
 
+static struct cfq_queue **
+cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
+{
+	switch (ioprio_class) {
+	case IOPRIO_CLASS_RT:
+		return &cfqd->async_cfqq[0][ioprio];
+	case IOPRIO_CLASS_BE:
+		return &cfqd->async_cfqq[1][ioprio];
+	case IOPRIO_CLASS_IDLE:
+		return &cfqd->async_idle_cfqq;
+	default:
+		BUG();
+	}
+}
+
 static struct cfq_queue *
 cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
 	      gfp_t gfp_mask)
 {
 	const int ioprio = task_ioprio(tsk);
+	const int ioprio_class = task_ioprio_class(tsk);
+	struct cfq_queue **async_cfqq = NULL;
 	struct cfq_queue *cfqq = NULL;
 
-	if (!is_sync)
-		cfqq = cfqd->async_cfqq[ioprio];
+	if (!is_sync) {
+		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
+		cfqq = *async_cfqq;
+	}
+
 	if (!cfqq)
 		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
 
 	/*
 	 * pin the queue now that it's allocated, scheduler exit will prune it
 	 */
-	if (!is_sync && !cfqd->async_cfqq[ioprio]) {
+	if (!is_sync && !(*async_cfqq)) {
 		atomic_inc(&cfqq->ref);
-		cfqd->async_cfqq[ioprio] = cfqq;
+		*async_cfqq = cfqq;
 	}
 
 	atomic_inc(&cfqq->ref);
@@ -2042,11 +2066,24 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 	blk_sync_queue(cfqd->queue);
 }
 
+static void cfq_put_async_queues(struct cfq_data *cfqd)
+{
+	int i;
+
+	for (i = 0; i < IOPRIO_BE_NR; i++) {
+		if (cfqd->async_cfqq[0][i])
+			cfq_put_queue(cfqd->async_cfqq[0][i]);
+		if (cfqd->async_cfqq[1][i])
+			cfq_put_queue(cfqd->async_cfqq[1][i]);
+	}
+	if (cfqd->async_idle_cfqq)
+		cfq_put_queue(cfqd->async_idle_cfqq);
+}
+
 static void cfq_exit_queue(elevator_t *e)
 {
 	struct cfq_data *cfqd = e->elevator_data;
 	request_queue_t *q = cfqd->queue;
-	int i;
 
 	cfq_shutdown_timer_wq(cfqd);
@@ -2063,12 +2100,7 @@ static void cfq_exit_queue(elevator_t *e)
 		__cfq_exit_single_io_context(cfqd, cic);
 	}
 
-	/*
-	 * Put the async queues
-	 */
-	for (i = 0; i < IOPRIO_BE_NR; i++)
-		if (cfqd->async_cfqq[i])
-			cfq_put_queue(cfqd->async_cfqq[i]);
+	cfq_put_async_queues(cfqd);
 
 	spin_unlock_irq(q->queue_lock);
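The pin taken in cfq_get_queue() and the puts in cfq_put_async_queues() pair up: the first async lookup of a slot takes one extra reference so the queue survives until elevator exit, where each pinned slot must be dropped exactly once. Below is a minimal sketch of that lifecycle; the names are illustrative and plain ints stand in for the kernel's atomic_t refcounts, so this is a model of the pattern, not kernel API.

	/*
	 * Sketch of the pin/unpin lifecycle (simplified stand-ins; the
	 * kernel uses atomic_inc() and cfq_put_queue() on atomic_t refs).
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct cfqq { int ref; };

	static struct cfqq *get_async_queue(struct cfqq **slot)
	{
		if (!*slot) {
			*slot = calloc(1, sizeof(**slot));
			(*slot)->ref++;	/* pin: keeps the queue alive until exit */
		}
		(*slot)->ref++;		/* reference for this caller */
		return *slot;
	}

	static void put_queue(struct cfqq *q)
	{
		if (--q->ref == 0)
			free(q);
	}

	int main(void)
	{
		struct cfqq *slot = NULL;
		struct cfqq *q = get_async_queue(&slot);	/* ref == 2 */

		put_queue(q);		/* caller done; the pin keeps it alive */
		put_queue(slot);	/* exit drops the pin, queue is freed */
		puts("queue freed exactly once");
		return 0;
	}

This exactly-once requirement is why cfq_put_async_queues() puts the single async_idle_cfqq outside the per-priority loop: putting it once per loop iteration would drop references the queue never took.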