
Revert "blkcg: allocate struct blkcg_gq outside request queue spinlock"

I inadvertently applied the v5 version of this patch, whereas
a different version had been agreed upon. Revert this one so we
can apply the right one.

This reverts commit 7fc6b87a9f.
Jens Axboe 2017-03-29 11:25:48 -06:00
parent 48b99c9d65
commit d708f0d502
2 changed files with 54 additions and 92 deletions

block/blk-cgroup.c

@@ -165,18 +165,16 @@ struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
 
 /*
- * If gfp mask allows blocking, this function temporarily drops rcu and queue
- * locks to allocate memory.
+ * If @new_blkg is %NULL, this function tries to allocate a new one as
+ * necessary using %GFP_NOWAIT. @new_blkg is always consumed on return.
  */
 static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
-				    struct request_queue *q, gfp_t gfp,
-				    const struct blkcg_policy *pol)
+				    struct request_queue *q,
+				    struct blkcg_gq *new_blkg)
 {
-	struct blkcg_gq *blkg = NULL;
+	struct blkcg_gq *blkg;
 	struct bdi_writeback_congested *wb_congested;
 	int i, ret;
-	const bool drop_locks = gfpflags_allow_blocking(gfp);
-	bool preloaded = false;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
 	lockdep_assert_held(q->queue_lock);
@@ -187,53 +185,31 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 		goto err_free_blkg;
 	}
 
-	if (drop_locks) {
-		spin_unlock_irq(q->queue_lock);
-		rcu_read_unlock();
-	}
-
 	wb_congested = wb_congested_get_create(q->backing_dev_info,
-					       blkcg->css.id, gfp);
-	blkg = blkg_alloc(blkcg, q, gfp);
-
-	if (drop_locks) {
-		preloaded = !radix_tree_preload(gfp);
-		rcu_read_lock();
-		spin_lock_irq(q->queue_lock);
-	}
-
-	if (unlikely(!wb_congested || !blkg)) {
+					       blkcg->css.id,
+					       GFP_NOWAIT | __GFP_NOWARN);
+	if (!wb_congested) {
 		ret = -ENOMEM;
-		goto err_put;
+		goto err_put_css;
 	}
 
+	/* allocate */
+	if (!new_blkg) {
+		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
+		if (unlikely(!new_blkg)) {
+			ret = -ENOMEM;
+			goto err_put_congested;
+		}
+	}
+	blkg = new_blkg;
 	blkg->wb_congested = wb_congested;
 
-	if (pol) {
-		WARN_ON(!drop_locks);
-
-		if (!blkcg_policy_enabled(q, pol)) {
-			ret = -EOPNOTSUPP;
-			goto err_put;
-		}
-
-		/*
-		 * This could be the first entry point of blkcg implementation
-		 * and we shouldn't allow anything to go through for a bypassing
-		 * queue.
-		 */
-		if (unlikely(blk_queue_bypass(q))) {
-			ret = blk_queue_dying(q) ? -ENODEV : -EBUSY;
-			goto err_put;
-		}
-	}
-
 	/* link parent */
 	if (blkcg_parent(blkcg)) {
 		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
 		if (WARN_ON_ONCE(!blkg->parent)) {
 			ret = -ENODEV;
-			goto err_put;
+			goto err_put_congested;
 		}
 		blkg_get(blkg->parent);
 	}
@@ -260,9 +236,6 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 			pol->pd_online_fn(blkg->pd[i]);
 		}
 	}
 
-	if (preloaded)
-		radix_tree_preload_end();
-
 	blkg->online = true;
 	spin_unlock(&blkcg->lock);
@@ -273,45 +246,44 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 	blkg_put(blkg);
 	return ERR_PTR(ret);
 
-err_put:
-	if (preloaded)
-		radix_tree_preload_end();
-	if (wb_congested)
-		wb_congested_put(wb_congested);
+err_put_congested:
+	wb_congested_put(wb_congested);
+err_put_css:
 	css_put(&blkcg->css);
 err_free_blkg:
-	blkg_free(blkg);
+	blkg_free(new_blkg);
 	return ERR_PTR(ret);
 }
 
 /**
- * __blkg_lookup_create - lookup blkg, try to create one if not there
+ * blkg_lookup_create - lookup blkg, try to create one if not there
  * @blkcg: blkcg of interest
  * @q: request_queue of interest
- * @gfp: gfp mask
- * @pol: blkcg policy (optional)
  *
  * Lookup blkg for the @blkcg - @q pair. If it doesn't exist, try to
  * create one. blkg creation is performed recursively from blkcg_root such
  * that all non-root blkg's have access to the parent blkg. This function
  * should be called under RCU read lock and @q->queue_lock.
  *
- * When gfp mask allows blocking, rcu and queue locks may be dropped for
- * allocating memory. In this case, the locks will be reacquired on return.
- *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
-struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
-				      struct request_queue *q, gfp_t gfp,
-				      const struct blkcg_policy *pol)
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+				    struct request_queue *q)
 {
 	struct blkcg_gq *blkg;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
 	lockdep_assert_held(q->queue_lock);
 
+	/*
+	 * This could be the first entry point of blkcg implementation and
+	 * we shouldn't allow anything to go through for a bypassing queue.
+	 */
+	if (unlikely(blk_queue_bypass(q)))
+		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
+
 	blkg = __blkg_lookup(blkcg, q, true);
 	if (blkg)
 		return blkg;
@@ -329,35 +301,12 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 			parent = blkcg_parent(parent);
 		}
 
-		blkg = blkg_create(pos, q, gfp, pol);
+		blkg = blkg_create(pos, q, NULL);
 		if (pos == blkcg || IS_ERR(blkg))
 			return blkg;
 	}
 }
 
-/**
- * blkg_lookup_create - lookup blkg, try to create one if not there
- *
- * Performs an initial queue bypass check and then passes control to
- * __blkg_lookup_create().
- */
-struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
-				    struct request_queue *q, gfp_t gfp,
-				    const struct blkcg_policy *pol)
-{
-	WARN_ON_ONCE(!rcu_read_lock_held());
-	lockdep_assert_held(q->queue_lock);
-
-	/*
-	 * This could be the first entry point of blkcg implementation and
-	 * we shouldn't allow anything to go through for a bypassing queue.
-	 */
-	if (unlikely(blk_queue_bypass(q)))
-		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
-
-	return __blkg_lookup_create(blkcg, q, gfp, pol);
-}
-
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
 	struct blkcg *blkcg = blkg->blkcg;
@@ -868,7 +817,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	spin_lock_irq(disk->queue->queue_lock);
 
 	if (blkcg_policy_enabled(disk->queue, pol))
-		blkg = blkg_lookup_create(blkcg, disk->queue, GFP_KERNEL, pol);
+		blkg = blkg_lookup_create(blkcg, disk->queue);
 	else
 		blkg = ERR_PTR(-EOPNOTSUPP);
 
@@ -1107,15 +1056,30 @@ free_blkcg:
  */
 int blkcg_init_queue(struct request_queue *q)
 {
-	struct blkcg_gq *blkg;
+	struct blkcg_gq *new_blkg, *blkg;
+	bool preloaded;
 	int ret;
 
+	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
+	if (!new_blkg)
+		return -ENOMEM;
+
+	preloaded = !radix_tree_preload(GFP_KERNEL);
+
+	/*
+	 * Make sure the root blkg exists and count the existing blkgs. As
+	 * @q is bypassing at this point, blkg_lookup_create() can't be
+	 * used. Open code insertion.
+	 */
 	rcu_read_lock();
 	spin_lock_irq(q->queue_lock);
-	blkg = __blkg_lookup_create(&blkcg_root, q, GFP_KERNEL, NULL);
+	blkg = blkg_create(&blkcg_root, q, new_blkg);
 	spin_unlock_irq(q->queue_lock);
 	rcu_read_unlock();
 
+	if (preloaded)
+		radix_tree_preload_end();
+
 	if (IS_ERR(blkg))
 		return PTR_ERR(blkg);
 
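
For readers tracking the API change: after this revert, blkg_lookup_create() again takes only the blkcg and the queue, never blocks (it allocates with GFP_NOWAIT internally), and must be called with both the RCU read lock and q->queue_lock held. A minimal caller sketch follows; it is not part of this commit, and the wrapper name my_get_blkg is hypothetical:

/* Hypothetical wrapper illustrating the restored calling convention:
 * take rcu_read_lock() and q->queue_lock, then call the non-blocking
 * blkg_lookup_create(), which returns an ERR_PTR() value on failure.
 */
static struct blkcg_gq *my_get_blkg(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_lookup_create(blkcg, q);
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	return IS_ERR(blkg) ? NULL : blkg;
}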

include/linux/blk-cgroup.h

@@ -172,8 +172,7 @@ extern struct cgroup_subsys_state * const blkcg_root_css;
 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 				      struct request_queue *q, bool update_hint);
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
-				    struct request_queue *q, gfp_t gfp,
-				    const struct blkcg_policy *pol);
+				    struct request_queue *q);
 int blkcg_init_queue(struct request_queue *q);
 void blkcg_drain_queue(struct request_queue *q);
 void blkcg_exit_queue(struct request_queue *q);
@@ -695,8 +694,7 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 	blkg = blkg_lookup(blkcg, q);
 	if (unlikely(!blkg)) {
 		spin_lock_irq(q->queue_lock);
-		blkg = blkg_lookup_create(blkcg, q, GFP_NOWAIT | __GFP_NOWARN,
-					  NULL);
+		blkg = blkg_lookup_create(blkcg, q);
 		if (IS_ERR(blkg))
 			blkg = NULL;
 		spin_unlock_irq(q->queue_lock);
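
A closing note on the design this revert restores: since blkg_create() can no longer drop the locks to perform blocking allocations, a sleepable path preallocates everything before taking the locks, as blkcg_init_queue() above does. A rough sketch of that pattern, reusing the names from the diff, with error handling elided:

	/* Blocking work happens first, outside the locks ... */
	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);	/* may sleep */
	preloaded = !radix_tree_preload(GFP_KERNEL);		/* may sleep */

	/* ... then the insertion itself runs non-blocking under them. */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();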