blk-mq: pass along blk_mq_alloc_tag_set return values

Two of the blk-mq based drivers do not pass back the return value
from blk_mq_alloc_tag_set, instead just returning -ENOMEM.

blk_mq_alloc_tag_set returns -EINVAL if the number of queues or
queue depth is bad.  -ENOMEM implies that retrying after freeing some
memory might be more successful, but that won't ever change
in the -EINVAL cases.

Change the null_blk and mtip32xx drivers to pass along
the return value.

Signed-off-by: Robert Elliott <elliott@hp.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Robert Elliott 2014-09-02 11:38:49 -05:00 committed by Jens Axboe
parent 0738854939
commit dc501dc0d9
2 changed files with 21 additions and 9 deletions
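To make the behavioural difference concrete, here is a minimal, self-contained userspace sketch of the pattern the patch applies. probe_old(), probe_new() and alloc_tag_set() are hypothetical stand-ins for the driver init paths and blk_mq_alloc_tag_set(); they are not the real mtip32xx/null_blk code.

/*
 * Minimal sketch of the error-propagation pattern this commit applies.
 * probe_old(), probe_new() and alloc_tag_set() are hypothetical stand-ins,
 * not the actual driver code; alloc_tag_set() mimics blk_mq_alloc_tag_set(),
 * which can fail with -EINVAL (bad configuration) as well as -ENOMEM.
 */
#include <stdio.h>
#include <errno.h>

static int alloc_tag_set(int nr_queues)
{
	if (nr_queues <= 0)
		return -EINVAL;	/* bad configuration: retrying will not help */
	return 0;		/* pretend the allocation succeeded */
}

/* Old behaviour: the real cause (-EINVAL) is masked as -ENOMEM. */
static int probe_old(int nr_queues)
{
	if (alloc_tag_set(nr_queues))
		return -ENOMEM;
	return 0;
}

/* New behaviour: whatever alloc_tag_set() returned is passed back. */
static int probe_new(int nr_queues)
{
	int rv = alloc_tag_set(nr_queues);

	if (rv)
		return rv;
	return 0;
}

int main(void)
{
	/* With a bad queue count, old reports -12 (-ENOMEM), new -22 (-EINVAL). */
	printf("old: %d, new: %d\n", probe_old(0), probe_new(0));
	return 0;
}

A caller (or someone reading the probe failure in the logs) can then tell a configuration error apart from genuine memory pressure, which is the point of the patch.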

drivers/block/mtip32xx/mtip32xx.c

@@ -3918,7 +3918,6 @@ skip_create_disk:
 	if (rv) {
 		dev_err(&dd->pdev->dev,
 			"Unable to allocate request queue\n");
-		rv = -ENOMEM;
 		goto block_queue_alloc_init_error;
 	}

drivers/block/null_blk.c

@@ -462,17 +462,21 @@ static int null_add_dev(void)
 	struct gendisk *disk;
 	struct nullb *nullb;
 	sector_t size;
+	int rv;
 
 	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
-	if (!nullb)
+	if (!nullb) {
+		rv = -ENOMEM;
 		goto out;
+	}
 
 	spin_lock_init(&nullb->lock);
 
 	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
 		submit_queues = nr_online_nodes;
 
-	if (setup_queues(nullb))
+	rv = setup_queues(nullb);
+	if (rv)
 		goto out_free_nullb;
 
 	if (queue_mode == NULL_Q_MQ) {
@@ -484,22 +488,29 @@ static int null_add_dev(void)
 		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 		nullb->tag_set.driver_data = nullb;
 
-		if (blk_mq_alloc_tag_set(&nullb->tag_set))
+		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
+		if (rv)
 			goto out_cleanup_queues;
 
 		nullb->q = blk_mq_init_queue(&nullb->tag_set);
-		if (!nullb->q)
+		if (!nullb->q) {
+			rv = -ENOMEM;
 			goto out_cleanup_tags;
+		}
 	} else if (queue_mode == NULL_Q_BIO) {
 		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
-		if (!nullb->q)
+		if (!nullb->q) {
+			rv = -ENOMEM;
 			goto out_cleanup_queues;
+		}
 		blk_queue_make_request(nullb->q, null_queue_bio);
 		init_driver_queues(nullb);
 	} else {
 		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
-		if (!nullb->q)
+		if (!nullb->q) {
+			rv = -ENOMEM;
 			goto out_cleanup_queues;
+		}
 		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
 		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
 		init_driver_queues(nullb);
@@ -509,8 +520,10 @@ static int null_add_dev(void)
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
 
 	disk = nullb->disk = alloc_disk_node(1, home_node);
-	if (!disk)
+	if (!disk) {
+		rv = -ENOMEM;
 		goto out_cleanup_blk_queue;
+	}
 
 	mutex_lock(&lock);
 	list_add_tail(&nullb->list, &nullb_list);
@@ -544,7 +557,7 @@ out_cleanup_queues:
 out_free_nullb:
 	kfree(nullb);
 out:
-	return -ENOMEM;
+	return rv;
 }
 
 static int __init null_init(void)