blk-mq: fix hctx/ctx kobject use-after-free

The kobject memory shouldn't be freed before the kobject is released,
because the driver core can access it freely until its release.

This patch frees hctx in its kobject's release callback. The ctx
objects share a single per-cpu variable that is associated with the
request queue, so they are freed in q->mq_kobj's release handler.

Signed-off-by: Sasha Levin <sasha.levin@oracle.com> (fix ctx kobjects)
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Ming Lei 2015-01-20 11:00:56 +08:00 committed by Jens Axboe
parent 6222d1721d
commit 76d697d107
2 changed files with 24 additions and 7 deletions
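
The fix relies on the standard kobject lifetime contract: a structure that embeds a struct kobject must only be freed from its kobj_type ->release() callback, because the driver core may still dereference the kobject until the last reference is dropped. Below is a minimal sketch of that pattern; the "foo" object and its helpers are hypothetical and only illustrate the contract, they are not part of this patch.

/*
 * Hypothetical example of the kobject release pattern: the memory that
 * backs the embedded kobject is freed only in ->release().
 */
#include <linux/kobject.h>
#include <linux/slab.h>

struct foo {
	struct kobject kobj;
	int data;
};

static void foo_release(struct kobject *kobj)
{
	/* Last reference dropped: now it is safe to free the memory. */
	kfree(container_of(kobj, struct foo, kobj));
}

static struct kobj_type foo_ktype = {
	.release = foo_release,
};

static struct foo *foo_create(struct kobject *parent)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	kobject_init(&f->kobj, &foo_ktype);
	if (kobject_add(&f->kobj, parent, "foo")) {
		kobject_put(&f->kobj);	/* frees f via foo_release() */
		return NULL;
	}
	return f;
}

static void foo_destroy(struct foo *f)
{
	kobject_del(&f->kobj);	/* remove from sysfs */
	kobject_put(&f->kobj);	/* kfree() happens in foo_release() */
}

The blk_mq_hctx_release() and blk_mq_ctx_release() handlers added below follow the same shape for hctx and ctx.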

--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -15,6 +15,26 @@
 
 static void blk_mq_sysfs_release(struct kobject *kobj)
 {
+	struct request_queue *q;
+
+	q = container_of(kobj, struct request_queue, mq_kobj);
+	free_percpu(q->queue_ctx);
+}
+
+static void blk_mq_ctx_release(struct kobject *kobj)
+{
+	struct blk_mq_ctx *ctx;
+
+	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
+	kobject_put(&ctx->queue->mq_kobj);
+}
+
+static void blk_mq_hctx_release(struct kobject *kobj)
+{
+	struct blk_mq_hw_ctx *hctx;
+
+	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
+	kfree(hctx);
 }
 
 struct blk_mq_ctx_sysfs_entry {
@@ -318,13 +338,13 @@ static struct kobj_type blk_mq_ktype = {
 static struct kobj_type blk_mq_ctx_ktype = {
 	.sysfs_ops	= &blk_mq_sysfs_ops,
 	.default_attrs	= default_ctx_attrs,
-	.release	= blk_mq_sysfs_release,
+	.release	= blk_mq_ctx_release,
 };
 
 static struct kobj_type blk_mq_hw_ktype = {
 	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
 	.default_attrs	= default_hw_ctx_attrs,
-	.release	= blk_mq_sysfs_release,
+	.release	= blk_mq_hctx_release,
 };
 
 static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -355,6 +375,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
 		return ret;
 
 	hctx_for_each_ctx(hctx, ctx, i) {
+		kobject_get(&q->mq_kobj);
 		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
 		if (ret)
 			break;

--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1641,10 +1641,8 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int i;
 
-	queue_for_each_hw_ctx(q, hctx, i) {
+	queue_for_each_hw_ctx(q, hctx, i)
 		free_cpumask_var(hctx->cpumask);
-		kfree(hctx);
-	}
 }
 
 static int blk_mq_init_hctx(struct request_queue *q,
@@ -2002,11 +2000,9 @@ void blk_mq_free_queue(struct request_queue *q)
 
 	percpu_ref_exit(&q->mq_usage_counter);
 
-	free_percpu(q->queue_ctx);
 	kfree(q->queue_hw_ctx);
 	kfree(q->mq_map);
 
-	q->queue_ctx = NULL;
 	q->queue_hw_ctx = NULL;
 	q->mq_map = NULL;
 
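
Taken together, the two files shift the freeing into the release handlers: blk_mq_free_hw_queues() and blk_mq_free_queue() no longer call kfree(hctx) or free_percpu(q->queue_ctx) directly. The sketch below is illustrative only; example_teardown() is not a function in this patch, and it simply shows where the memory now ends up being freed, assuming the embedded hctx->kobj and q->mq_kobj members visible in the hunks above.

/*
 * Illustrative only: dropping the last references is what now triggers
 * the actual freeing, via the release handlers added in blk-mq-sysfs.c.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/kobject.h>

static void example_teardown(struct request_queue *q,
			     struct blk_mq_hw_ctx *hctx)
{
	/* hctx memory is freed in blk_mq_hctx_release(). */
	kobject_put(&hctx->kobj);

	/*
	 * Each ctx kobject took a reference on q->mq_kobj in
	 * blk_mq_register_hctx() and drops it in blk_mq_ctx_release(),
	 * so the per-cpu q->queue_ctx area is freed in
	 * blk_mq_sysfs_release() only after the last ctx is gone.
	 */
	kobject_put(&q->mq_kobj);
}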