
dm: various cleanups to md->queue initialization code

Also, add dm_sysfs_init() error handling to dm_create().

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Mike Snitzer 2018-01-12 09:32:21 -05:00
parent ac514ffc96
commit c12c9a3c38
3 changed files with 12 additions and 22 deletions
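For quick reference, this is dm_create() as it reads with the patch applied, stitched together from the dm.c hunk below (no new code beyond what the diff shows):

    int dm_create(int minor, struct mapped_device **result)
    {
    	int r;
    	struct mapped_device *md;

    	md = alloc_dev(minor);
    	if (!md)
    		return -ENXIO;

    	r = dm_sysfs_init(md);
    	if (r) {
    		free_dev(md);
    		return r;
    	}

    	*result = md;
    	return 0;
    }

On sysfs registration failure the device is now torn down with free_dev() and the error is propagated to the caller, instead of being silently ignored.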

drivers/md/dm-core.h

@@ -129,8 +129,6 @@ struct mapped_device {
 	struct srcu_struct io_barrier;
 };
 
-void dm_init_md_queue(struct mapped_device *md);
-void dm_init_normal_md_queue(struct mapped_device *md);
 int md_in_flight(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);

drivers/md/dm-rq.c

@@ -704,7 +704,6 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
 	/* disable dm_old_request_fn's merge heuristic by default */
 	md->seq_rq_merge_deadline_usecs = 0;
 
-	dm_init_normal_md_queue(md);
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
 
 	/* Initialize the request-based DM worker thread */
@@ -814,7 +813,6 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
 		err = PTR_ERR(q);
 		goto out_tag_set;
 	}
-	dm_init_md_queue(md);
 
 	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
 	err = blk_mq_register_dev(disk_to_dev(md->disk), q);

drivers/md/dm.c

@@ -1733,20 +1733,9 @@ static const struct dax_operations dm_dax_ops;
 
 static void dm_wq_work(struct work_struct *work);
 
-void dm_init_md_queue(struct mapped_device *md)
-{
-	/*
-	 * Initialize data that will only be used by a non-blk-mq DM queue
-	 * - must do so here (in alloc_dev callchain) before queue is used
-	 */
-	md->queue->queuedata = md;
-	md->queue->backing_dev_info->congested_data = md;
-}
-
-void dm_init_normal_md_queue(struct mapped_device *md)
+static void dm_init_normal_md_queue(struct mapped_device *md)
 {
 	md->use_blk_mq = false;
-	dm_init_md_queue(md);
 
 	/*
 	 * Initialize aspects of queue that aren't relevant for blk-mq
@@ -1846,10 +1835,10 @@ static struct mapped_device *alloc_dev(int minor)
 	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
 	if (!md->queue)
 		goto bad;
+	md->queue->queuedata = md;
+	md->queue->backing_dev_info->congested_data = md;
 
-	dm_init_md_queue(md);
-
-	md->disk = alloc_disk_node(1, numa_node_id);
+	md->disk = alloc_disk_node(1, md->numa_node_id);
 	if (!md->disk)
 		goto bad;
@@ -2082,13 +2071,18 @@ static struct dm_table *__unbind(struct mapped_device *md)
  */
 int dm_create(int minor, struct mapped_device **result)
 {
+	int r;
 	struct mapped_device *md;
 
 	md = alloc_dev(minor);
 	if (!md)
 		return -ENXIO;
 
-	dm_sysfs_init(md);
+	r = dm_sysfs_init(md);
+	if (r) {
+		free_dev(md);
+		return r;
+	}
 
 	*result = md;
 	return 0;
@@ -2145,6 +2139,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 
 	switch (type) {
 	case DM_TYPE_REQUEST_BASED:
+		dm_init_normal_md_queue(md);
 		r = dm_old_init_request_queue(md, t);
 		if (r) {
 			DMERR("Cannot initialize queue for request-based mapped device");
@@ -2236,7 +2231,6 @@ EXPORT_SYMBOL_GPL(dm_device_name);
 
 static void __dm_destroy(struct mapped_device *md, bool wait)
 {
-	struct request_queue *q = dm_get_md_queue(md);
 	struct dm_table *map;
 	int srcu_idx;
 
@@ -2247,7 +2241,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 	set_bit(DMF_FREEING, &md->flags);
 	spin_unlock(&_minor_lock);
 
-	blk_set_queue_dying(q);
+	blk_set_queue_dying(md->queue);
 
 	if (dm_request_based(md) && md->kworker_task)
 		kthread_flush_worker(&md->kworker);