1
0
Fork 0

blk-mq: only allocate a single mq_map per tag_set

The mapping is identical for all queues in a tag_set, so stop wasting
memory for building multiple.  Note that for now I've kept the mq_map
pointer in the request_queue, but we'll need to investigate if we can
remove it without suffering too much from the additional pointer chasing.
The same would apply to the mq_ops pointer as well.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
hifive-unleashed-5.1
Christoph Hellwig 2016-09-14 16:18:53 +02:00 committed by Jens Axboe
parent 4e68a01142
commit bdd17e75cd
2 changed files with 15 additions and 8 deletions

File: block/blk-mq.c

@@ -1973,7 +1973,6 @@ void blk_mq_release(struct request_queue *q)
 		kfree(hctx);
 	}
-	kfree(q->mq_map);
 	q->mq_map = NULL;
 	kfree(q->queue_hw_ctx);
@@ -2072,9 +2071,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	if (!q->queue_hw_ctx)
 		goto err_percpu;
-	q->mq_map = blk_mq_make_queue_map(set);
-	if (!q->mq_map)
-		goto err_map;
+	q->mq_map = set->mq_map;
 	blk_mq_realloc_hw_ctxs(set, q);
 	if (!q->nr_hw_queues)
@@ -2124,8 +2121,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	return q;
 err_hctxs:
-	kfree(q->mq_map);
-err_map:
 	kfree(q->queue_hw_ctx);
 err_percpu:
 	free_percpu(q->queue_ctx);
@@ -2347,14 +2342,22 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (!set->tags)
 		return -ENOMEM;
+	set->mq_map = blk_mq_make_queue_map(set);
+	if (!set->mq_map)
+		goto out_free_tags;
 	if (blk_mq_alloc_rq_maps(set))
-		goto enomem;
+		goto out_free_mq_map;
 	mutex_init(&set->tag_list_lock);
 	INIT_LIST_HEAD(&set->tag_list);
 	return 0;
-enomem:
+out_free_mq_map:
+	kfree(set->mq_map);
+	set->mq_map = NULL;
+out_free_tags:
 	kfree(set->tags);
 	set->tags = NULL;
 	return -ENOMEM;
@@ -2370,6 +2373,9 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 		blk_mq_free_rq_map(set, set->tags[i], i);
 	}
+	kfree(set->mq_map);
+	set->mq_map = NULL;
 	kfree(set->tags);
 	set->tags = NULL;
 }

File: include/linux/blk-mq.h

@@ -67,6 +67,7 @@ struct blk_mq_hw_ctx {
 };
 
 struct blk_mq_tag_set {
+	unsigned int		*mq_map;
 	struct blk_mq_ops	*ops;
 	unsigned int		nr_hw_queues;
 	unsigned int		queue_depth;	/* max hw supported */