diff --git a/block/blk-core.c b/block/blk-core.c
index ee1b35fe8572..8340f69670d8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -320,6 +320,19 @@ void blk_cleanup_queue(struct request_queue *q)
 	if (queue_is_mq(q))
 		blk_mq_exit_queue(q);
 
+	/*
+	 * In theory, the request pool of sched_tags belongs to the request
+	 * queue. However, the current implementation requires the tag_set
+	 * for freeing requests, so free the pool now.
+	 *
+	 * The queue has been frozen, so there can't be any in-queue
+	 * requests and it is safe to free them now.
+	 */
+	mutex_lock(&q->sysfs_lock);
+	if (q->elevator)
+		blk_mq_sched_free_requests(q);
+	mutex_unlock(&q->sysfs_lock);
+
 	percpu_ref_exit(&q->q_usage_counter);
 
 	/* @q is and will stay empty, shutdown and put */
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 74c6bb871f7e..500cb04901cc 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -475,14 +475,18 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q,
 	return ret;
 }
 
+/* called in the queue's release handler, the tag_set has gone away */
 static void blk_mq_sched_tags_teardown(struct request_queue *q)
 {
-	struct blk_mq_tag_set *set = q->tag_set;
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
-	queue_for_each_hw_ctx(q, hctx, i)
-		blk_mq_sched_free_tags(set, hctx, i);
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (hctx->sched_tags) {
+			blk_mq_free_rq_map(hctx->sched_tags);
+			hctx->sched_tags = NULL;
+		}
+	}
 }
 
 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
@@ -523,6 +527,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 			ret = e->ops.init_hctx(hctx, i);
 			if (ret) {
 				eq = q->elevator;
+				blk_mq_sched_free_requests(q);
 				blk_mq_exit_sched(q, eq);
 				kobject_put(&eq->kobj);
 				return ret;
@@ -534,11 +539,30 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 	return 0;
 
 err:
+	blk_mq_sched_free_requests(q);
 	blk_mq_sched_tags_teardown(q);
 	q->elevator = NULL;
 	return ret;
 }
 
+/*
+ * called in either blk_cleanup_queue or elevator_switch; the tag_set
+ * is required for freeing requests
+ */
+void blk_mq_sched_free_requests(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	int i;
+
+	lockdep_assert_held(&q->sysfs_lock);
+	WARN_ON(!q->elevator);
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (hctx->sched_tags)
+			blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
+	}
+}
+
 void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
 {
 	struct blk_mq_hw_ctx *hctx;
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index c7bdb52367ac..3cf92cbbd8ac 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -28,6 +28,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
 
 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
 void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
+void blk_mq_sched_free_requests(struct request_queue *q);
 
 static inline bool
 blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 75b5281cc577..977c659dcd18 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -850,7 +850,7 @@ static void blk_exit_queue(struct request_queue *q)
 	 */
 	if (q->elevator) {
 		ioc_clear_queue(q);
-		elevator_exit(q, q->elevator);
+		__elevator_exit(q, q->elevator);
 		q->elevator = NULL;
 	}
 
diff --git a/block/blk.h b/block/blk.h
index 91b3581b7c7a..7814aa207153 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -6,6 +6,7 @@
 #include <linux/blk-mq.h>
 #include <xen/xen.h>
 #include "blk-mq.h"
+#include "blk-mq-sched.h"
 
 /* Max future timer expiry for timeouts */
 #define BLK_MAX_TIMEOUT		(5 * HZ)
@@ -176,10 +177,17 @@ void blk_insert_flush(struct request *rq);
 int elevator_init_mq(struct request_queue *q);
 int elevator_switch_mq(struct request_queue *q,
 			      struct elevator_type *new_e);
-void elevator_exit(struct request_queue *, struct elevator_queue *);
+void __elevator_exit(struct request_queue *, struct elevator_queue *);
 int elv_register_queue(struct request_queue *q);
 void elv_unregister_queue(struct request_queue *q);
 
+static inline void elevator_exit(struct request_queue *q,
+		struct elevator_queue *e)
+{
+	blk_mq_sched_free_requests(q);
+	__elevator_exit(q, e);
+}
+
 struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
 
 #ifdef CONFIG_FAIL_IO_TIMEOUT
diff --git a/block/elevator.c b/block/elevator.c
index ec55d5fc0b3e..2f17d66d0e61 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -178,7 +178,7 @@ static void elevator_release(struct kobject *kobj)
 	kfree(e);
 }
 
-void elevator_exit(struct request_queue *q, struct elevator_queue *e)
+void __elevator_exit(struct request_queue *q, struct elevator_queue *e)
 {
 	mutex_lock(&e->sysfs_lock);
 	if (e->type->ops.exit_sched)