
[PATCH] as-iosched: migrate to using the elevator rb functions

This removes the private rbtree handling from AS, switching it to the shared elevator rb functions.

Signed-off-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Jens Axboe 2006-07-18 21:06:01 +02:00 committed by Jens Axboe
parent 2e662b65f0
commit e37f346e34
1 changed file with 29 additions and 151 deletions

block/as-iosched.c

@@ -149,12 +149,6 @@ enum arq_state {
 };
 
 struct as_rq {
-	/*
-	 * rbtree index, key is the starting offset
-	 */
-	struct rb_node rb_node;
-	sector_t rb_key;
-
 	struct request *request;
 
 	struct io_context *io_context;	/* The submitting task */
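Background for this hunk: after the parent commit (2e662b65f0), the elevator core embeds the sort-tree node in struct request itself and keys it off the start sector, so the per-scheduler copies of rb_node and rb_key become redundant. Roughly, the shared pieces look like this (a sketch; surrounding fields abbreviated):

struct request {
	/* ... queue/list members ... */
	struct rb_node rb_node;		/* node in the elevator sort tree */
	sector_t sector;		/* start sector, used as the tree key */
	/* ... */
};

#define rb_entry_rq(node)	rb_entry((node), struct request, rb_node)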
@@ -268,101 +262,22 @@ static void as_put_io_context(struct as_rq *arq)
 /*
  * rb tree support functions
  */
-#define rb_entry_arq(node)	rb_entry((node), struct as_rq, rb_node)
 #define ARQ_RB_ROOT(ad, arq)	(&(ad)->sort_list[(arq)->is_sync])
-#define rq_rb_key(rq)		(rq)->sector
-
-/*
- * as_find_first_arq finds the first (lowest sector numbered) request
- * for the specified data_dir. Used to sweep back to the start of the disk
- * (1-way elevator) after we process the last (highest sector) request.
- */
-static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
+static void as_add_arq_rb(struct as_data *ad, struct request *rq)
 {
-	struct rb_node *n = ad->sort_list[data_dir].rb_node;
+	struct as_rq *arq = RQ_DATA(rq);
+	struct request *alias;
 
-	if (n == NULL)
-		return NULL;
-
-	for (;;) {
-		if (n->rb_left == NULL)
-			return rb_entry_arq(n);
-
-		n = n->rb_left;
-	}
-}
-
-/*
- * Add the request to the rb tree if it is unique. If there is an alias (an
- * existing request against the same sector), which can happen when using
- * direct IO, then return the alias.
- */
-static struct as_rq *__as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
-{
-	struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
-	struct rb_node *parent = NULL;
-	struct as_rq *__arq;
-	struct request *rq = arq->request;
-
-	arq->rb_key = rq_rb_key(rq);
-
-	while (*p) {
-		parent = *p;
-		__arq = rb_entry_arq(parent);
-
-		if (arq->rb_key < __arq->rb_key)
-			p = &(*p)->rb_left;
-		else if (arq->rb_key > __arq->rb_key)
-			p = &(*p)->rb_right;
-		else
-			return __arq;
-	}
-
-	rb_link_node(&arq->rb_node, parent, p);
-	rb_insert_color(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
-
-	return NULL;
-}
-
-static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
-{
-	struct as_rq *alias;
-
-	while ((unlikely(alias = __as_add_arq_rb(ad, arq)))) {
-		as_move_to_dispatch(ad, alias);
+	while ((unlikely(alias = elv_rb_add(ARQ_RB_ROOT(ad, arq), rq)))) {
+		as_move_to_dispatch(ad, RQ_DATA(alias));
 		as_antic_stop(ad);
 	}
 }
 
-static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
+static inline void as_del_arq_rb(struct as_data *ad, struct request *rq)
 {
-	if (RB_EMPTY_NODE(&arq->rb_node)) {
-		WARN_ON(1);
-		return;
-	}
-
-	rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
-	RB_CLEAR_NODE(&arq->rb_node);
-}
-
-static struct request *
-as_find_arq_rb(struct as_data *ad, sector_t sector, int data_dir)
-{
-	struct rb_node *n = ad->sort_list[data_dir].rb_node;
-	struct as_rq *arq;
-
-	while (n) {
-		arq = rb_entry_arq(n);
-
-		if (sector < arq->rb_key)
-			n = n->rb_left;
-		else if (sector > arq->rb_key)
-			n = n->rb_right;
-		else
-			return arq->request;
-	}
-
-	return NULL;
+	elv_rb_del(ARQ_RB_ROOT(ad, RQ_DATA(rq)), rq);
 }
 
 /*
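For readers following the conversion: the open-coded insert above is replaced by the generic helper from the parent commit. In the elevator core of this era, elv_rb_add() looks roughly like the code it replaces, returning an existing alias (a queued request with the same start sector, as can happen with direct IO) instead of inserting a duplicate; elv_rb_del() wraps rb_erase() plus RB_CLEAR_NODE() much as the removed as_del_arq_rb() body did. A sketch:

struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (rq->sector < __rq->sector)
			p = &(*p)->rb_left;
		else if (rq->sector > __rq->sector)
			p = &(*p)->rb_right;
		else
			return __rq;	/* alias: same start sector already queued */
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}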
@@ -455,32 +370,29 @@ as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2)
  * this with as_choose_req form the basis for how the scheduler chooses
  * what request to process next. Anticipation works on top of this.
  */
-static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last)
+static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *arq)
 {
-	const int data_dir = last->is_sync;
-	struct as_rq *ret;
+	struct request *last = arq->request;
 	struct rb_node *rbnext = rb_next(&last->rb_node);
 	struct rb_node *rbprev = rb_prev(&last->rb_node);
-	struct as_rq *arq_next, *arq_prev;
+	struct as_rq *next = NULL, *prev = NULL;
 
-	BUG_ON(!RB_EMPTY_NODE(&last->rb_node));
+	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
 
 	if (rbprev)
-		arq_prev = rb_entry_arq(rbprev);
-	else
-		arq_prev = NULL;
+		prev = RQ_DATA(rb_entry_rq(rbprev));
 
 	if (rbnext)
-		arq_next = rb_entry_arq(rbnext);
+		next = RQ_DATA(rb_entry_rq(rbnext));
 	else {
-		arq_next = as_find_first_arq(ad, data_dir);
-		if (arq_next == last)
-			arq_next = NULL;
+		const int data_dir = arq->is_sync;
+
+		rbnext = rb_first(&ad->sort_list[data_dir]);
+		if (rbnext && rbnext != &last->rb_node)
+			next = RQ_DATA(rb_entry_rq(rbnext));
 	}
 
-	ret = as_choose_req(ad, arq_next, arq_prev);
-
-	return ret;
+	return as_choose_req(ad, next, prev);
 }
 
 /*
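Note how the wrap-around case keeps its one-way-elevator behaviour without the removed as_find_first_arq(): when no request lies above the last-dispatched one in sector order, rb_first() of the sort tree yields the lowest-sector request instead. In outline (names as in the hunk above):

	rbnext = rb_next(&last->rb_node);	/* next request above 'last'... */
	if (!rbnext) {
		/* ...or sweep back to the start of the disk */
		rbnext = rb_first(&ad->sort_list[data_dir]);
		if (rbnext == &last->rb_node)	/* tree holds only 'last' */
			rbnext = NULL;
	}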
@@ -982,7 +894,7 @@ static void as_remove_queued_request(request_queue_t *q, struct request *rq)
 		ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
 
 	list_del_init(&arq->fifo);
-	as_del_arq_rb(ad, arq);
+	as_del_arq_rb(ad, rq);
 }
 
 /*
@@ -1039,7 +951,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 	struct request *rq = arq->request;
 	const int data_dir = arq->is_sync;
 
-	BUG_ON(RB_EMPTY_NODE(&arq->rb_node));
+	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
 
 	as_antic_stop(ad);
 	ad->antic_status = ANTIC_OFF;
@@ -1269,7 +1181,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 		atomic_inc(&arq->io_context->aic->nr_queued);
 	}
 
-	as_add_arq_rb(ad, arq);
+	as_add_arq_rb(ad, rq);
 
 	/*
 	 * set expire time (only used for reads) and add to fifo list
@@ -1315,32 +1227,6 @@ static int as_queue_empty(request_queue_t *q)
 		&& list_empty(&ad->fifo_list[REQ_SYNC]);
 }
 
-static struct request *as_former_request(request_queue_t *q,
-					struct request *rq)
-{
-	struct as_rq *arq = RQ_DATA(rq);
-	struct rb_node *rbprev = rb_prev(&arq->rb_node);
-	struct request *ret = NULL;
-
-	if (rbprev)
-		ret = rb_entry_arq(rbprev)->request;
-
-	return ret;
-}
-
-static struct request *as_latter_request(request_queue_t *q,
-					struct request *rq)
-{
-	struct as_rq *arq = RQ_DATA(rq);
-	struct rb_node *rbnext = rb_next(&arq->rb_node);
-	struct request *ret = NULL;
-
-	if (rbnext)
-		ret = rb_entry_arq(rbnext)->request;
-
-	return ret;
-}
-
 static int
 as_merge(request_queue_t *q, struct request **req, struct bio *bio)
 {
@@ -1351,7 +1237,7 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
 	/*
 	 * check for front merge
 	 */
-	__rq = as_find_arq_rb(ad, rb_key, bio_data_dir(bio));
+	__rq = elv_rb_find(&ad->sort_list[bio_data_dir(bio)], rb_key);
 	if (__rq && elv_rq_merge_ok(__rq, bio)) {
 		*req = __rq;
 		return ELEVATOR_FRONT_MERGE;
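elv_rb_find() is the generic replacement for the removed as_find_arq_rb(): the same sector-keyed descent, but over the rb_node embedded in struct request. Roughly, per the elevator core of this era:

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < rq->sector)
			n = n->rb_left;
		else if (sector > rq->sector)
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}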
@@ -1360,17 +1246,16 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
 	return ELEVATOR_NO_MERGE;
 }
 
-static void as_merged_request(request_queue_t *q, struct request *req)
+static void as_merged_request(request_queue_t *q, struct request *req, int type)
 {
 	struct as_data *ad = q->elevator->elevator_data;
-	struct as_rq *arq = RQ_DATA(req);
 
 	/*
 	 * if the merge was a front merge, we need to reposition request
 	 */
-	if (rq_rb_key(req) != arq->rb_key) {
-		as_del_arq_rb(ad, arq);
-		as_add_arq_rb(ad, arq);
+	if (type == ELEVATOR_FRONT_MERGE) {
+		as_del_arq_rb(ad, req);
+		as_add_arq_rb(ad, req);
 		/*
 		 * Note! At this stage of this and the next function, our next
 		 * request may not be optimal - eg the request may have "grown"
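The new int argument is supplied by the elevator core: elv_merge() now reports whether a front or back merge took place, so the scheduler no longer needs the cached rb_key to detect that a front merge moved the request's start sector. Approximately, on the caller side (a sketch of the core of this era):

void elv_merged_request(request_queue_t *q, struct request *rq, int type)
{
	elevator_t *e = q->elevator;

	/* hand the merge type straight through to the scheduler */
	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	q->last_merge = rq;
}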
@@ -1382,18 +1267,12 @@ static void as_merged_request(request_queue_t *q, struct request *req)
 static void as_merged_requests(request_queue_t *q, struct request *req,
 				struct request *next)
 {
-	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq = RQ_DATA(req);
 	struct as_rq *anext = RQ_DATA(next);
 
 	BUG_ON(!arq);
 	BUG_ON(!anext);
 
-	if (rq_rb_key(req) != arq->rb_key) {
-		as_del_arq_rb(ad, arq);
-		as_add_arq_rb(ad, arq);
-	}
-
 	/*
 	 * if anext expires before arq, assign its expire time to arq
 	 * and move into anext position (anext will be deleted) in fifo
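What survives in as_merged_requests() is only the FIFO bookkeeping described by the comment above; the rb-tree reposition could be dropped because a front merge on req is already reported to as_merged_request() with ELEVATOR_FRONT_MERGE before the two requests are coalesced. The surviving logic, abbreviated from the function body the hunk does not show in full:

	if (!list_empty(&arq->fifo) && !list_empty(&anext->fifo)) {
		if (time_before(anext->expires, arq->expires)) {
			list_move(&arq->fifo, &anext->fifo);
			arq->expires = anext->expires;
		}
	}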
@@ -1468,7 +1347,6 @@ static int as_set_request(request_queue_t *q, struct request *rq,
 
 	if (arq) {
 		memset(arq, 0, sizeof(*arq));
-		RB_CLEAR_NODE(&arq->rb_node);
 		arq->request = rq;
 		arq->state = AS_RQ_PRESCHED;
 		arq->io_context = NULL;
@@ -1654,8 +1532,8 @@ static struct elevator_type iosched_as = {
 		.elevator_deactivate_req_fn =	as_deactivate_request,
 		.elevator_queue_empty_fn =	as_queue_empty,
 		.elevator_completed_req_fn =	as_completed_request,
-		.elevator_former_req_fn =	as_former_request,
-		.elevator_latter_req_fn =	as_latter_request,
+		.elevator_former_req_fn =	elv_rb_former_request,
+		.elevator_latter_req_fn =	elv_rb_latter_request,
 		.elevator_set_req_fn =		as_set_request,
 		.elevator_put_req_fn =		as_put_request,
 		.elevator_may_queue_fn =	as_may_queue,
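The two callbacks wired in here are shared by any scheduler whose sort tree uses the rb_node embedded in struct request; in the elevator core of this era they are essentially:

struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}

struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}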