
cfq-iosched: fold cfq_find_alloc_queue() into cfq_get_queue()

cfq_find_alloc_queue() checks whether a queue actually needs to be
allocated, which is unnecessary as its sole caller, cfq_get_queue(),
only calls it if so.  Also, the oom queue fallback logic is scattered
between cfq_get_queue() and cfq_find_alloc_queue().  There really
isn't much going on in the latter and things can be made simpler by
folding it into cfq_get_queue().
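
To make the shape of the change concrete, here is a simplified sketch with
hypothetical names (struct ctx, lookup(), try_alloc()); the real before/after
is in the diff below:

	#include <stddef.h>

	struct queue { int ref; };
	struct ctx { struct queue oom_q; struct queue *cur; };

	static struct queue *lookup(struct ctx *c)    { return c->cur; }
	static struct queue *try_alloc(struct ctx *c) { (void)c; return NULL; }

	/* before: the helper re-checks what its sole caller has already
	 * established, and the oom fallback is split across both functions */
	static struct queue *find_alloc_queue(struct ctx *c)
	{
		struct queue *q = lookup(c);

		if (!q || q == &c->oom_q) {	/* redundant re-check */
			q = try_alloc(c);
			if (!q)
				q = &c->oom_q;	/* half of the fallback */
		}
		return q;
	}

	/* after the fold: the caller allocates directly and the oom
	 * fallback lives in a single place */
	static struct queue *get_queue(struct ctx *c)
	{
		struct queue *q = lookup(c);

		if (q && q != &c->oom_q)
			return q;

		q = try_alloc(c);
		return q ? q : &c->oom_q;
	}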

This patch collapses cfq_find_alloc_queue() into cfq_get_queue().  The
change is fairly straightforward, with one exception: async_cfqq is
now initialized to NULL and the "!is_sync" test in the last if
conditional is replaced with an "async_cfqq" test.  This is because gcc
(5.1.1) gets confused for some reason and otherwise warns that
async_cfqq may be used uninitialized.  Oh well, the code isn't
necessarily worse this way.
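
For reference, a minimal standalone sketch of the warning pattern, using
hypothetical names rather than the cfq ones; whether the diagnostic actually
fires depends on the gcc version and optimization level:

	struct queue { int ref; };

	struct queue *get_queue(struct queue **slot, int is_sync,
				struct queue *newq)
	{
		struct queue **async_p;	/* no initializer */

		if (!is_sync)
			async_p = slot;	/* only path that assigns it */

		/* ... allocation and setup elided ... */

		if (!is_sync)		/* same condition, but gcc cannot
					 * prove the two tests are linked,
					 * so async_p looks possibly
					 * uninitialized at this use */
			*async_p = newq;

		return newq;
	}

Initializing the pointer to NULL and testing "if (async_p)" at the use site,
as this patch does with async_cfqq, makes the flow explicit to the compiler
and reads about the same.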

This patch doesn't cause any functional difference.

v2: Updated to reflect GFP_ATOMIC -> GFP_NOWAIT.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Arianna Avanzini <avanzini.arianna@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Tejun Heo 2015-08-18 14:55:04 -07:00 committed by Jens Axboe
parent 322731ed0d
commit d4aad7ff04
1 changed file with 15 additions and 34 deletions


@@ -3640,33 +3640,6 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
 static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
 #endif	/* CONFIG_CFQ_GROUP_IOSCHED */
 
-static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, struct cfq_group *cfqg, bool is_sync,
-		     struct cfq_io_cq *cic, struct bio *bio)
-{
-	struct cfq_queue *cfqq;
-
-	cfqq = cic_to_cfqq(cic, is_sync);
-
-	/*
-	 * Always try a new alloc if we fell back to the OOM cfqq
-	 * originally, since it should just be a temporary situation.
-	 */
-	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
-		cfqq = kmem_cache_alloc_node(cfq_pool,
-					     GFP_NOWAIT | __GFP_ZERO,
-					     cfqd->queue->node);
-		if (cfqq) {
-			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
-			cfq_init_prio_data(cfqq, cic);
-			cfq_link_cfqq_cfqg(cfqq, cfqg);
-			cfq_log_cfqq(cfqd, cfqq, "alloced");
-		} else
-			cfqq = &cfqd->oom_cfqq;
-	}
-
-	return cfqq;
-}
-
 static struct cfq_queue **
 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 {
@@ -3691,7 +3664,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 {
 	int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
 	int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
-	struct cfq_queue **async_cfqq;
+	struct cfq_queue **async_cfqq = NULL;
 	struct cfq_queue *cfqq;
 	struct cfq_group *cfqg;
 
@@ -3714,12 +3687,20 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 		goto out;
 	}
 
-	cfqq = cfq_find_alloc_queue(cfqd, cfqg, is_sync, cic, bio);
+	cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
+				     cfqd->queue->node);
+	if (!cfqq) {
+		cfqq = &cfqd->oom_cfqq;
+		goto out;
+	}
 
-	/*
-	 * pin the queue now that it's allocated, scheduler exit will prune it
-	 */
-	if (!is_sync && cfqq != &cfqd->oom_cfqq) {
+	cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
+	cfq_init_prio_data(cfqq, cic);
+	cfq_link_cfqq_cfqg(cfqq, cfqg);
+	cfq_log_cfqq(cfqd, cfqq, "alloced");
+
+	if (async_cfqq) {
+		/* a new async queue is created, pin and remember */
 		cfqq->ref++;
 		*async_cfqq = cfqq;
 	}
@@ -4469,7 +4450,7 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
 		cfqd->prio_trees[i] = RB_ROOT;
 
 	/*
-	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
+	 * Our fallback cfqq if cfq_get_queue() runs into OOM issues.
 	 * Grab a permanent reference to it, so that the normal code flow
 	 * will not attempt to free it.  oom_cfqq is linked to root_group
 	 * but shouldn't hold a reference as it'll never be unlinked.  Lose