
blkcg: restructure blkg_policy_data allocation in blkcg_activate_policy()

When a policy gets activated, it needs to allocate and install its
policy data on all existing blkg's (blkcg_gq's).  Because blkg
iteration is protected by a spinlock, it currently counts the total
number of blkg's in the system, allocates the matching number of
policy data on a list and installs them during a single iteration.

This can be simplified by using speculative GFP_NOWAIT allocations
while iterating and falling back to a preallocated policy data on
failure.  If the preallocated one has already been consumed, it
releases the lock, preallocates with GFP_KERNEL and then restarts the
iteration.  This can be a bit more expensive than before but policy
activation is a very cold path and shouldn't matter.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
Tejun Heo 2015-08-18 14:55:09 -07:00 committed by Jens Axboe
parent bc915e61cd
commit 4c55f4f9ad
2 changed files with 21 additions and 37 deletions
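The allocation pattern the commit message describes can be sketched outside the kernel: preallocate one buffer with a blocking allocation before taking the lock, try cheap non-blocking allocations while iterating under the lock, fall back to the preallocated buffer, and if that spare is already gone, drop the lock, refill it and restart the walk. The sketch below is a userspace analogue only, not the kernel code: calloc stands in for kzalloc_node, a pthread spinlock stands in for q->queue_lock, and the item/policy_data types and the activate_policy/try_alloc_nowait names are made up for illustration.

#include <pthread.h>
#include <stdlib.h>

struct policy_data { int initialized; };

struct item {
	struct item *next;
	struct policy_data *pd;
};

/* Stand-in for kzalloc_node(..., GFP_NOWAIT, ...): may legitimately fail. */
static struct policy_data *try_alloc_nowait(void)
{
	return calloc(1, sizeof(struct policy_data));
}

/* Install policy data on every list item, mirroring the retry logic above. */
int activate_policy(struct item *head, pthread_spinlock_t *lock)
{
	struct policy_data *prealloc = NULL;
	struct item *it;

retry:
	if (!prealloc) {
		/* Blocking allocation done outside the lock (GFP_KERNEL analogue). */
		prealloc = calloc(1, sizeof(*prealloc));
		if (!prealloc)
			return -1;
	}

	pthread_spin_lock(lock);
	for (it = head; it; it = it->next) {
		struct policy_data *pd;

		if (it->pd)			/* installed on an earlier pass */
			continue;

		pd = try_alloc_nowait();	/* speculative, cheap attempt */
		if (!pd) {			/* fall back to the spare */
			pd = prealloc;
			prealloc = NULL;
		}
		if (!pd) {			/* spare already used: refill, restart */
			pthread_spin_unlock(lock);
			goto retry;
		}

		it->pd = pd;
		pd->initialized = 1;
	}
	pthread_spin_unlock(lock);

	free(prealloc);				/* drop the unused spare, if any */
	return 0;
}

Restarting the iteration is safe because entries that already carry policy data are skipped on the next pass; that is the role of the blkg->pd[pol->plid] check in the patch below.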


@@ -1047,65 +1047,52 @@ EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);
 int blkcg_activate_policy(struct request_queue *q,
 			  const struct blkcg_policy *pol)
 {
-	LIST_HEAD(pds);
+	struct blkg_policy_data *pd_prealloc = NULL;
 	struct blkcg_gq *blkg;
-	struct blkg_policy_data *pd, *nd;
-	int cnt = 0, ret;
+	int ret;
 
 	if (blkcg_policy_enabled(q, pol))
 		return 0;
 
-	/* count and allocate policy_data for all existing blkgs */
 	blk_queue_bypass_start(q);
-	spin_lock_irq(q->queue_lock);
-	list_for_each_entry(blkg, &q->blkg_list, q_node)
-		cnt++;
-	spin_unlock_irq(q->queue_lock);
-
-	/* allocate per-blkg policy data for all existing blkgs */
-	while (cnt--) {
-		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
-		if (!pd) {
+pd_prealloc:
+	if (!pd_prealloc) {
+		pd_prealloc = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
+		if (!pd_prealloc) {
 			ret = -ENOMEM;
-			goto out_free;
+			goto out_bypass_end;
 		}
-		list_add_tail(&pd->alloc_node, &pds);
 	}
 
-	/*
-	 * Install the allocated pds and cpds. With @q bypassing, no new blkg
-	 * should have been created while the queue lock was dropped.
-	 */
 	spin_lock_irq(q->queue_lock);
 
 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
-		if (WARN_ON(list_empty(&pds))) {
-			/* umm... this shouldn't happen, just abort */
-			ret = -ENOMEM;
-			goto out_unlock;
-		}
-		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
-		list_del_init(&pd->alloc_node);
+		struct blkg_policy_data *pd;
 
-		/* grab blkcg lock too while installing @pd on @blkg */
-		spin_lock(&blkg->blkcg->lock);
+		if (blkg->pd[pol->plid])
+			continue;
+
+		pd = kzalloc_node(pol->pd_size, GFP_NOWAIT, q->node);
+		if (!pd)
+			swap(pd, pd_prealloc);
+		if (!pd) {
+			spin_unlock_irq(q->queue_lock);
+			goto pd_prealloc;
+		}
 
 		blkg->pd[pol->plid] = pd;
 		pd->blkg = blkg;
 		pd->plid = pol->plid;
 		pol->pd_init_fn(blkg);
-
-		spin_unlock(&blkg->blkcg->lock);
 	}
 
 	__set_bit(pol->plid, q->blkcg_pols);
 	ret = 0;
-out_unlock:
+
 	spin_unlock_irq(q->queue_lock);
-out_free:
+out_bypass_end:
 	blk_queue_bypass_end(q);
-	list_for_each_entry_safe(pd, nd, &pds, alloc_node)
-		kfree(pd);
+	kfree(pd_prealloc);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(blkcg_activate_policy);


@@ -80,9 +80,6 @@ struct blkg_policy_data {
 	/* the blkg and policy id this per-policy data belongs to */
 	struct blkcg_gq			*blkg;
 	int				plid;
-
-	/* used during policy activation */
-	struct list_head		alloc_node;
 };
 
 /*