author     Jens Axboe <axboe@suse.de>                   2006-07-13 11:55:04 +0200
committer  Jens Axboe <axboe@nelson.home.kernel.dk>     2006-09-30 20:26:57 +0200
commit     2e662b65f05d550b6799ed6bfa9963b82279e6b7 (patch)
tree       82911ec73a52d149d74a3d13c3c5eedb269a19cb /block/elevator.c
parent     10fd48f2376db52f08bf0420d2c4f580e39269e1 (diff)
[PATCH] elevator: abstract out the rbtree sort handling
The rbtree sort/lookup/reposition logic is mostly duplicated in cfq/deadline/as, so move it to the elevator core. The io schedulers still provide the actual rb root, as we don't want to impose any sort of specific handling on the schedulers.

Introduce the helpers and rb_node in struct request to help migrate the IO schedulers.

Signed-off-by: Jens Axboe <axboe@suse.de>
Diffstat (limited to 'block/elevator.c')
-rw-r--r--  block/elevator.c | 123
1 files changed, 109 insertions, 14 deletions
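
As a rough illustration of how a scheduler is expected to sit on top of the new helpers (this sketch is not part of the patch; the example_* names are hypothetical, and the real deadline/as/cfq conversions follow in separate patches), the scheduler keeps its own struct rb_root and delegates insertion, lookup and removal to elv_rb_add(), elv_rb_find() and elv_rb_del(). Note that elv_rb_add() returns an already-queued request at the same sector (an "alias") instead of inserting the new one:

/*
 * Minimal sketch, not part of this patch. example_sched_data and
 * example_move_to_dispatch() are hypothetical.
 */
#include <linux/blkdev.h>
#include <linux/elevator.h>

struct example_sched_data {
	struct rb_root sort_list;	/* the rb root stays in the scheduler */
};

static void example_move_to_dispatch(struct example_sched_data *ed,
				     struct request *rq)
{
	/*
	 * Get the aliased request out of the sort tree; a real scheduler
	 * would also hand it to the dispatch queue, e.g. via
	 * elv_dispatch_add_tail().
	 */
	elv_rb_del(&ed->sort_list, rq);
}

static void example_add_rq_rb(struct example_sched_data *ed,
			      struct request *rq)
{
	struct request *alias;

	/*
	 * elv_rb_add() refuses to insert a request whose sector collides
	 * with one already in the tree and returns that alias instead;
	 * move the alias out of the way and retry until the insert sticks.
	 */
	while ((alias = elv_rb_add(&ed->sort_list, rq)) != NULL)
		example_move_to_dispatch(ed, alias);
}

static void example_del_rq_rb(struct example_sched_data *ed,
			      struct request *rq)
{
	elv_rb_del(&ed->sort_list, rq);
}

static struct request *example_find_rq(struct example_sched_data *ed,
				       sector_t sector)
{
	return elv_rb_find(&ed->sort_list, sector);
}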
diff --git a/block/elevator.c b/block/elevator.c
index cff1102dac9d..cbbc36ba016a 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -239,6 +239,8 @@ int elevator_init(request_queue_t *q, char *name)
return ret;
}
+EXPORT_SYMBOL(elevator_init);
+
void elevator_exit(elevator_t *e)
{
mutex_lock(&e->sysfs_lock);
@@ -250,6 +252,8 @@ void elevator_exit(elevator_t *e)
kobject_put(&e->kobj);
}
+EXPORT_SYMBOL(elevator_exit);
+
static inline void __elv_rqhash_del(struct request *rq)
{
hlist_del_init(&rq->hash);
@@ -298,9 +302,68 @@ static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
}
/*
+ * RB-tree support functions for insertion/lookup/removal of requests
+ * in a sorted RB tree.
+ */
+struct request *elv_rb_add(struct rb_root *root, struct request *rq)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct request *__rq;
+
+ while (*p) {
+ parent = *p;
+ __rq = rb_entry(parent, struct request, rb_node);
+
+ if (rq->sector < __rq->sector)
+ p = &(*p)->rb_left;
+ else if (rq->sector > __rq->sector)
+ p = &(*p)->rb_right;
+ else
+ return __rq;
+ }
+
+ rb_link_node(&rq->rb_node, parent, p);
+ rb_insert_color(&rq->rb_node, root);
+ return NULL;
+}
+
+EXPORT_SYMBOL(elv_rb_add);
+
+void elv_rb_del(struct rb_root *root, struct request *rq)
+{
+ BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
+ rb_erase(&rq->rb_node, root);
+ RB_CLEAR_NODE(&rq->rb_node);
+}
+
+EXPORT_SYMBOL(elv_rb_del);
+
+struct request *elv_rb_find(struct rb_root *root, sector_t sector)
+{
+ struct rb_node *n = root->rb_node;
+ struct request *rq;
+
+ while (n) {
+ rq = rb_entry(n, struct request, rb_node);
+
+ if (sector < rq->sector)
+ n = n->rb_left;
+ else if (sector > rq->sector)
+ n = n->rb_right;
+ else
+ return rq;
+ }
+
+ return NULL;
+}
+
+EXPORT_SYMBOL(elv_rb_find);
+
+/*
* Insert rq into dispatch queue of q. Queue lock must be held on
- * entry. If sort != 0, rq is sort-inserted; otherwise, rq will be
- * appended to the dispatch queue. To be used by specific elevators.
+ * entry. rq is sort inserted into the dispatch queue. To be used by
+ * specific elevators.
*/
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
@@ -335,8 +398,12 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
list_add(&rq->queuelist, entry);
}
+EXPORT_SYMBOL(elv_dispatch_sort);
+
/*
- * This should be in elevator.h, but that requires pulling in rq and q
+ * Insert rq into dispatch queue of q. Queue lock must be held on
+ * entry. rq is added to the back of the dispatch queue. To be used by
+ * specific elevators.
*/
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
@@ -352,6 +419,8 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
list_add_tail(&rq->queuelist, &q->queue_head);
}
+EXPORT_SYMBOL(elv_dispatch_add_tail);
+
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
elevator_t *e = q->elevator;
@@ -384,14 +453,15 @@ int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
return ELEVATOR_NO_MERGE;
}
-void elv_merged_request(request_queue_t *q, struct request *rq)
+void elv_merged_request(request_queue_t *q, struct request *rq, int type)
{
elevator_t *e = q->elevator;
if (e->ops->elevator_merged_fn)
- e->ops->elevator_merged_fn(q, rq);
+ e->ops->elevator_merged_fn(q, rq, type);
- elv_rqhash_reposition(q, rq);
+ if (type == ELEVATOR_BACK_MERGE)
+ elv_rqhash_reposition(q, rq);
q->last_merge = rq;
}
@@ -577,6 +647,8 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
elv_insert(q, rq, where);
}
+EXPORT_SYMBOL(__elv_add_request);
+
void elv_add_request(request_queue_t *q, struct request *rq, int where,
int plug)
{
@@ -587,6 +659,8 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where,
spin_unlock_irqrestore(q->queue_lock, flags);
}
+EXPORT_SYMBOL(elv_add_request);
+
static inline struct request *__elv_next_request(request_queue_t *q)
{
struct request *rq;
@@ -670,6 +744,8 @@ struct request *elv_next_request(request_queue_t *q)
return rq;
}
+EXPORT_SYMBOL(elv_next_request);
+
void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
BUG_ON(list_empty(&rq->queuelist));
@@ -686,6 +762,8 @@ void elv_dequeue_request(request_queue_t *q, struct request *rq)
q->in_flight++;
}
+EXPORT_SYMBOL(elv_dequeue_request);
+
int elv_queue_empty(request_queue_t *q)
{
elevator_t *e = q->elevator;
@@ -699,6 +777,8 @@ int elv_queue_empty(request_queue_t *q)
return 1;
}
+EXPORT_SYMBOL(elv_queue_empty);
+
struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -1025,11 +1105,26 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
return len;
}
-EXPORT_SYMBOL(elv_dispatch_sort);
-EXPORT_SYMBOL(elv_add_request);
-EXPORT_SYMBOL(__elv_add_request);
-EXPORT_SYMBOL(elv_next_request);
-EXPORT_SYMBOL(elv_dequeue_request);
-EXPORT_SYMBOL(elv_queue_empty);
-EXPORT_SYMBOL(elevator_exit);
-EXPORT_SYMBOL(elevator_init);
+struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
+{
+ struct rb_node *rbprev = rb_prev(&rq->rb_node);
+
+ if (rbprev)
+ return rb_entry_rq(rbprev);
+
+ return NULL;
+}
+
+EXPORT_SYMBOL(elv_rb_former_request);
+
+struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
+{
+ struct rb_node *rbnext = rb_next(&rq->rb_node);
+
+ if (rbnext)
+ return rb_entry_rq(rbnext);
+
+ return NULL;
+}
+
+EXPORT_SYMBOL(elv_rb_latter_request);
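
A follow-up sketch, again not part of this patch: once a scheduler keys its rb tree by sector, its former/latter hooks can point straight at the two new helpers. The elevator_ops field names below are assumed from the 2.6.18-era interface and the scheduler itself is hypothetical.

/*
 * Illustrative only: elv_rb_former_request() and elv_rb_latter_request()
 * just walk to the neighbouring rb_node, so no per-scheduler lookup code
 * is needed any more.
 */
static struct elevator_type iosched_example = {
	.ops = {
		.elevator_former_req_fn	= elv_rb_former_request,
		.elevator_latter_req_fn	= elv_rb_latter_request,
	},
	.elevator_name	= "example",
	.elevator_owner	= THIS_MODULE,
};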