
dm ioctl: refactor dm_table_complete

This change unifies the various checks and finalization steps that occur on a
table prior to use.  By doing so, it allows a table to be constructed and
completed without traversing the dm-ioctl interface.

Signed-off-by: Will Drewry <wad@chromium.org>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Will Drewry 2010-08-12 04:14:03 +01:00 committed by Alasdair G Kergon
parent b1d5552838
commit 26803b9f06
3 changed files with 52 additions and 38 deletions
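
The commit message's second point deserves a concrete illustration: once
dm_table_complete() performs all finalization, in-kernel code can build a
table with the existing public helpers and finish it with a single call.
The sketch below is not part of the patch; example_build_table(), the
"linear" parameters and the device path are illustrative assumptions, while
dm_table_create(), dm_table_add_target(), dm_table_complete() and
dm_table_destroy() are the real device-mapper API from
include/linux/device-mapper.h.

#include <linux/fs.h>
#include <linux/device-mapper.h>

/* Hypothetical caller: build a one-target linear table and finalize
 * it with a single dm_table_complete() call, no dm-ioctl involved. */
static int example_build_table(struct mapped_device *md)
{
	struct dm_table *t;
	char params[] = "/dev/sda 0";	/* <dev> <offset>; must be writable */
	int r;

	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
	if (r)
		return r;

	/* One "linear" target covering sectors 0..1023. */
	r = dm_table_add_target(t, "linear", 0, 1024, params);
	if (r)
		goto err;

	/* Sets the type, builds the btree index, registers integrity
	 * support and allocates mempools, all in one place now. */
	r = dm_table_complete(t);
	if (r)
		goto err;

	return 0;

err:
	dm_table_destroy(t);
	return r;
}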

drivers/md/dm-ioctl.c

@@ -1131,28 +1131,9 @@ static int populate_table(struct dm_table *table,
 		next = spec->next;
 	}
 
-	r = dm_table_set_type(table);
-	if (r) {
-		DMWARN("unable to set table type");
-		return r;
-	}
-
 	return dm_table_complete(table);
 }
 
-static int table_prealloc_integrity(struct dm_table *t,
-				    struct mapped_device *md)
-{
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_dev_internal *dd;
-
-	list_for_each_entry(dd, devices, list)
-		if (bdev_get_integrity(dd->dm_dev.bdev))
-			return blk_integrity_register(dm_disk(md), NULL);
-
-	return 0;
-}
-
 static int table_load(struct dm_ioctl *param, size_t param_size)
 {
 	int r;
@@ -1174,21 +1155,6 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 		goto out;
 	}
 
-	r = table_prealloc_integrity(t, md);
-	if (r) {
-		DMERR("%s: could not register integrity profile.",
-		      dm_device_name(md));
-		dm_table_destroy(t);
-		goto out;
-	}
-
-	r = dm_table_alloc_md_mempools(t);
-	if (r) {
-		DMWARN("unable to allocate mempools for this table");
-		dm_table_destroy(t);
-		goto out;
-	}
-
 	/* Protect md->type and md->queue against concurrent table loads. */
 	dm_lock_md_type(md);
 	if (dm_get_md_type(md) == DM_TYPE_NONE)
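
One behavior worth noting about the hunk above: dm_table_complete() reports
the first failure but does not free the table, so cleanup remains with the
caller (in the unchanged code surrounding this hunk, table_load() still
destroys the table when populate_table() returns an error).  A caller-side
sketch, not part of the patch, with a hypothetical finish_table() wrapper:

static int finish_table(struct dm_table *t)
{
	int r = dm_table_complete(t);

	/* dm_table_complete() does not destroy t on failure;
	 * the caller owns the cleanup. */
	if (r)
		dm_table_destroy(t);

	return r;
}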

drivers/md/dm-table.c

@@ -245,7 +245,7 @@ void dm_table_destroy(struct dm_table *t)
 	msleep(1);
 	smp_mb();
 
-	/* free the indexes (see dm_table_complete) */
+	/* free the indexes */
 	if (t->depth >= 2)
 		vfree(t->index[t->depth - 2]);
 
@@ -778,7 +778,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 	return r;
 }
 
-int dm_table_set_type(struct dm_table *t)
+static int dm_table_set_type(struct dm_table *t)
 {
 	unsigned i;
 	unsigned bio_based = 0, request_based = 0;
@@ -900,7 +900,7 @@ static int setup_indexes(struct dm_table *t)
 /*
  * Builds the btree to index the map.
  */
-int dm_table_complete(struct dm_table *t)
+static int dm_table_build_index(struct dm_table *t)
 {
 	int r = 0;
 	unsigned int leaf_nodes;
@@ -919,6 +919,55 @@ int dm_table_complete(struct dm_table *t)
 	return r;
 }
 
+/*
+ * Register the mapped device for blk_integrity support if
+ * the underlying devices support it.
+ */
+static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
+{
+	struct list_head *devices = dm_table_get_devices(t);
+	struct dm_dev_internal *dd;
+
+	list_for_each_entry(dd, devices, list)
+		if (bdev_get_integrity(dd->dm_dev.bdev))
+			return blk_integrity_register(dm_disk(md), NULL);
+
+	return 0;
+}
+
+/*
+ * Prepares the table for use by building the indices,
+ * setting the type, and allocating mempools.
+ */
+int dm_table_complete(struct dm_table *t)
+{
+	int r;
+
+	r = dm_table_set_type(t);
+	if (r) {
+		DMERR("unable to set table type");
+		return r;
+	}
+
+	r = dm_table_build_index(t);
+	if (r) {
+		DMERR("unable to build btrees");
+		return r;
+	}
+
+	r = dm_table_prealloc_integrity(t, t->md);
+	if (r) {
+		DMERR("could not register integrity profile.");
+		return r;
+	}
+
+	r = dm_table_alloc_md_mempools(t);
+	if (r)
+		DMERR("unable to allocate mempools");
+
+	return r;
+}
+
 static DEFINE_MUTEX(_event_lock);
 void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)

drivers/md/dm.h

@@ -59,7 +59,6 @@ void dm_table_postsuspend_targets(struct dm_table *t);
 int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 int dm_table_any_busy_target(struct dm_table *t);
-int dm_table_set_type(struct dm_table *t);
 unsigned dm_table_get_type(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
 int dm_table_alloc_md_mempools(struct dm_table *t);