
block/cfq: cache rightmost rb_node

For the same reasons we already cache the leftmost pointer, apply the same
optimization to rb_last() calls: cache the rightmost node.  Users must do
this explicitly, as rb_root_cached only tracks the smallest (leftmost) node.

[dave@stgolabs.net: brain fart #1]
  Link: http://lkml.kernel.org/r/20170731155955.GD21328@linux-80c1.suse
Link: http://lkml.kernel.org/r/20170719014603.19029-18-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Jens Axboe <axboe@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Davidlohr Bueso 2017-09-08 16:15:25 -07:00 committed by Linus Torvalds
parent fa90b2fd30
commit f0f1a45f95
1 changed file with 14 additions and 5 deletions
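For readers outside the cfq code, here is a minimal sketch of the pattern this patch applies, using the kernel rbtree API (the example_* names are illustrative, not part of the patch): rb_root_cached caches only the leftmost node, so a user that also wants cheap access to the largest element must carry and maintain its own rightmost pointer, updating it on insert and erase. On erase, the predecessor of the departing rightmost node becomes the new rightmost.

#include <linux/rbtree.h>

struct example_root {
	struct rb_root_cached rb;	/* caches leftmost internally */
	struct rb_node *rb_rightmost;	/* cached by hand, as this patch does */
};

struct example_node {
	struct rb_node node;
	u64 key;
};

static void example_insert(struct example_root *root, struct example_node *en)
{
	struct rb_node **link = &root->rb.rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true, rightmost = true;

	while (*link) {
		parent = *link;
		if (en->key < rb_entry(parent, struct example_node, node)->key) {
			link = &parent->rb_left;
			rightmost = false;	/* parent sits to our right */
		} else {
			link = &parent->rb_right;
			leftmost = false;	/* parent sits to our left */
		}
	}

	if (rightmost)
		root->rb_rightmost = &en->node;

	rb_link_node(&en->node, parent, link);
	rb_insert_color_cached(&en->node, &root->rb, leftmost);
}

static void example_erase(struct example_root *root, struct example_node *en)
{
	/* The predecessor becomes the new rightmost node. */
	if (root->rb_rightmost == &en->node)
		root->rb_rightmost = rb_prev(&en->node);

	rb_erase_cached(&en->node, &root->rb);
}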

View File

@@ -94,11 +94,13 @@ struct cfq_ttime {
  */
 struct cfq_rb_root {
 	struct rb_root_cached rb;
+	struct rb_node *rb_rightmost;
 	unsigned count;
 	u64 min_vdisktime;
 	struct cfq_ttime ttime;
 };
 #define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT_CACHED, \
+			.rb_rightmost = NULL,			     \
 			.ttime = {.last_end_request = ktime_get_ns(),},}
 
 /*
@@ -1180,6 +1182,9 @@ static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
 
 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
 {
+	if (root->rb_rightmost == n)
+		root->rb_rightmost = rb_prev(n);
+
 	rb_erase_cached(n, &root->rb);
 	RB_CLEAR_NODE(n);
 
@@ -1236,20 +1241,24 @@ __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 	struct rb_node *parent = NULL;
 	struct cfq_group *__cfqg;
 	s64 key = cfqg_key(st, cfqg);
-	bool leftmost = true;
+	bool leftmost = true, rightmost = true;
 
 	while (*node != NULL) {
 		parent = *node;
 		__cfqg = rb_entry_cfqg(parent);
 
-		if (key < cfqg_key(st, __cfqg))
+		if (key < cfqg_key(st, __cfqg)) {
 			node = &parent->rb_left;
-		else {
+			rightmost = false;
+		} else {
 			node = &parent->rb_right;
 			leftmost = false;
 		}
 	}
 
+	if (rightmost)
+		st->rb_rightmost = &cfqg->rb_node;
+
 	rb_link_node(&cfqg->rb_node, parent, node);
 	rb_insert_color_cached(&cfqg->rb_node, &st->rb, leftmost);
 }
@@ -1352,7 +1361,7 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	 * so that groups get lesser vtime based on their weights, so that
 	 * if group does not loose all if it was not continuously backlogged.
 	 */
-	n = rb_last(&st->rb.rb_root);
+	n = st->rb_rightmost;
 	if (n) {
 		__cfqg = rb_entry_cfqg(n);
 		cfqg->vdisktime = __cfqg->vdisktime +
@@ -2201,7 +2210,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
 	if (cfq_class_idle(cfqq)) {
 		rb_key = CFQ_IDLE_DELAY;
-		parent = rb_last(&st->rb.rb_root);
+		parent = st->rb_rightmost;
 		if (parent && parent != &cfqq->rb_node) {
 			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
 			rb_key += __cfqq->rb_key;
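What the two converted call sites gain: rb_last() walks the right spine of the tree on every call (O(log n)), while the cached pointer is a single load. A hedged sketch of the difference; cfq_rb_last() is a hypothetical helper, not part of the patch:

static inline struct rb_node *cfq_rb_last(struct cfq_rb_root *root)
{
	/* was: rb_last(&root->rb.rb_root), an O(log n) right-spine walk */
	return root->rb_rightmost;
}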