From 49f4c2dc2b5066e9211101c59cc0828e81d41614 Mon Sep 17 00:00:00 2001
From: "Dennis Zhou (Facebook)"
Date: Tue, 11 Sep 2018 14:41:27 -0400
Subject: [PATCH] blkcg: update blkg_lookup_create to do locking

To know when to create a blkg, the general pattern is to do a
blkg_lookup and if that fails, lock and then do a lookup again and if
that fails finally create. It doesn't make much sense for everyone who
wants to do creation to write this themselves. This changes
blkg_lookup_create to do locking and implement this pattern. The old
blkg_lookup_create is renamed to __blkg_lookup_create. If a call site
wants to do its own error handling or already owns the queue lock,
they can use __blkg_lookup_create. This will be used in upcoming
patches.

Signed-off-by: Dennis Zhou
Reviewed-by: Josef Bacik
Acked-by: Tejun Heo
Reviewed-by: Liu Bo
Signed-off-by: Jens Axboe
---
 block/blk-cgroup.c         | 31 ++++++++++++++++++++++++++++---
 block/blk-iolatency.c      |  2 +-
 include/linux/blk-cgroup.h |  4 +++-
 3 files changed, 32 insertions(+), 5 deletions(-)

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index c19f9078da1e..cd0d97bed83d 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -259,7 +259,7 @@ err_free_blkg:
 }
 
 /**
- * blkg_lookup_create - lookup blkg, try to create one if not there
+ * __blkg_lookup_create - lookup blkg, try to create one if not there
  * @blkcg: blkcg of interest
  * @q: request_queue of interest
  *
@@ -272,8 +272,8 @@ err_free_blkg:
  * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not
  * dead and bypassing, returns ERR_PTR(-EBUSY).
  */
-struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
-				    struct request_queue *q)
+struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+				      struct request_queue *q)
 {
 	struct blkcg_gq *blkg;
 
@@ -310,6 +310,31 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	}
 }
 
+/**
+ * blkg_lookup_create - find or create a blkg
+ * @blkcg: target block cgroup
+ * @q: target request_queue
+ *
+ * This looks up or creates the blkg representing the unique pair
+ * of the blkcg and the request_queue.
+ */
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+				    struct request_queue *q)
+{
+	struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
+	unsigned long flags;
+
+	if (unlikely(!blkg)) {
+		spin_lock_irqsave(q->queue_lock, flags);
+
+		blkg = __blkg_lookup_create(blkcg, q);
+
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
+
+	return blkg;
+}
+
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
 	struct blkcg *blkcg = blkg->blkcg;
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 82450c37f2aa..ffde3ab9f84c 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -407,7 +407,7 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
 	if (unlikely(!blkg)) {
 		if (!lock)
 			spin_lock_irq(q->queue_lock);
-		blkg = blkg_lookup_create(blkcg, q);
+		blkg = __blkg_lookup_create(blkcg, q);
 		if (IS_ERR(blkg))
 			blkg = NULL;
 		if (!lock)
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 24067a1f8b36..cc0f238530f6 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -184,6 +184,8 @@ extern struct cgroup_subsys_state * const blkcg_root_css;
 
 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 				      struct request_queue *q, bool update_hint);
+struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+				      struct request_queue *q);
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 				    struct request_queue *q);
 int blkcg_init_queue(struct request_queue *q);
@@ -897,7 +899,7 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 	blkg = blkg_lookup(blkcg, q);
 	if (unlikely(!blkg)) {
 		spin_lock_irq(q->queue_lock);
-		blkg = blkg_lookup_create(blkcg, q);
+		blkg = __blkg_lookup_create(blkcg, q);
 		if (IS_ERR(blkg))
 			blkg = NULL;
 		spin_unlock_irq(q->queue_lock);
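For reference, a minimal caller sketch (not part of the diff above;
example_get_blkg() is a made-up name and bio_blkcg() is the existing
helper that returns the blkcg a bio is associated with): with the new
blkg_lookup_create() the open-coded lookup -> lock -> lookup-again ->
create sequence and the explicit q->queue_lock handling drop out of the
caller, which only needs to hold rcu_read_lock() around the call, as
existing blkg_lookup() users already do.

/*
 * Hypothetical caller, for illustration only.  Assumes the caller
 * holds rcu_read_lock().  blkg_lookup_create() now takes and releases
 * q->queue_lock internally if it has to create the blkg.
 */
static struct blkcg_gq *example_get_blkg(struct bio *bio,
					 struct request_queue *q)
{
	struct blkcg *blkcg = bio_blkcg(bio);	/* blkcg the bio is charged to */
	struct blkcg_gq *blkg;

	blkg = blkg_lookup_create(blkcg, q);	/* lookup, create if missing */
	if (IS_ERR(blkg))
		blkg = NULL;	/* callers treat NULL as "use q->root_blkg" */

	return blkg;
}

Callers that already hold the queue lock, such as blkcg_bio_issue_check()
above, keep calling __blkg_lookup_create() directly.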