1
0
Fork 0

dm: export struct dm_dev

Split struct dm_dev in two and publish the part that other targets need in
include/linux/device-mapper.h.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
hifive-unleashed-5.1
Mikulas Patocka 2008-10-10 13:37:09 +01:00 committed by Alasdair G Kergon
parent 933f01d433
commit 82b1519b34
4 changed files with 49 additions and 40 deletions

View File

@@ -1131,7 +1131,7 @@ static void retrieve_deps(struct dm_table *table,
unsigned int count = 0;
struct list_head *tmp;
size_t len, needed;
struct dm_dev *dd;
struct dm_dev_internal *dd;
struct dm_target_deps *deps;
deps = get_result_buffer(param, param_size, &len);
@@ -1157,7 +1157,7 @@ static void retrieve_deps(struct dm_table *table,
deps->count = count;
count = 0;
list_for_each_entry (dd, dm_table_get_devices(table), list)
deps->dev[count++] = huge_encode_dev(dd->bdev->bd_dev);
deps->dev[count++] = huge_encode_dev(dd->dm_dev.bdev->bd_dev);
param->data_size = param->data_start + needed;
}

View File

@@ -250,7 +250,8 @@ static void free_devices(struct list_head *devices)
struct list_head *tmp, *next;
list_for_each_safe(tmp, next, devices) {
struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
struct dm_dev_internal *dd =
list_entry(tmp, struct dm_dev_internal, list);
kfree(dd);
}
}
@@ -327,12 +328,12 @@ static int lookup_device(const char *path, dev_t *dev)
/*
* See if we've already got a device in the list.
*/
static struct dm_dev *find_device(struct list_head *l, dev_t dev)
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
struct dm_dev *dd;
struct dm_dev_internal *dd;
list_for_each_entry (dd, l, list)
if (dd->bdev->bd_dev == dev)
if (dd->dm_dev.bdev->bd_dev == dev)
return dd;
return NULL;
@@ -341,45 +342,47 @@ static struct dm_dev *find_device(struct list_head *l, dev_t dev)
/*
* Open a device so we can use it as a map destination.
*/
static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
static int open_dev(struct dm_dev_internal *d, dev_t dev,
struct mapped_device *md)
{
static char *_claim_ptr = "I belong to device-mapper";
struct block_device *bdev;
int r;
BUG_ON(d->bdev);
BUG_ON(d->dm_dev.bdev);
bdev = open_by_devnum(dev, d->mode);
bdev = open_by_devnum(dev, d->dm_dev.mode);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
if (r)
blkdev_put(bdev);
else
d->bdev = bdev;
d->dm_dev.bdev = bdev;
return r;
}
/*
* Close a device that we've been using.
*/
static void close_dev(struct dm_dev *d, struct mapped_device *md)
static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
if (!d->bdev)
if (!d->dm_dev.bdev)
return;
bd_release_from_disk(d->bdev, dm_disk(md));
blkdev_put(d->bdev);
d->bdev = NULL;
bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
blkdev_put(d->dm_dev.bdev);
d->dm_dev.bdev = NULL;
}
/*
* If possible, this checks an area of a destination device is valid.
*/
static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
static int check_device_area(struct dm_dev_internal *dd, sector_t start,
sector_t len)
{
sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;
if (!dev_size)
return 1;
@@ -392,16 +395,17 @@ static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
* careful to leave things as they were if we fail to reopen the
* device.
*/
static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
static int upgrade_mode(struct dm_dev_internal *dd, int new_mode,
struct mapped_device *md)
{
int r;
struct dm_dev dd_copy;
dev_t dev = dd->bdev->bd_dev;
struct dm_dev_internal dd_copy;
dev_t dev = dd->dm_dev.bdev->bd_dev;
dd_copy = *dd;
dd->mode |= new_mode;
dd->bdev = NULL;
dd->dm_dev.mode |= new_mode;
dd->dm_dev.bdev = NULL;
r = open_dev(dd, dev, md);
if (!r)
close_dev(&dd_copy, md);
@@ -421,7 +425,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
{
int r;
dev_t uninitialized_var(dev);
struct dm_dev *dd;
struct dm_dev_internal *dd;
unsigned int major, minor;
BUG_ON(!t);
@@ -443,20 +447,20 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
if (!dd)
return -ENOMEM;
dd->mode = mode;
dd->bdev = NULL;
dd->dm_dev.mode = mode;
dd->dm_dev.bdev = NULL;
if ((r = open_dev(dd, dev, t->md))) {
kfree(dd);
return r;
}
format_dev_t(dd->name, dev);
format_dev_t(dd->dm_dev.name, dev);
atomic_set(&dd->count, 0);
list_add(&dd->list, &t->devices);
} else if (dd->mode != (mode | dd->mode)) {
} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
r = upgrade_mode(dd, mode, t->md);
if (r)
return r;
@@ -465,11 +469,11 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
if (!check_device_area(dd, start, len)) {
DMWARN("device %s too small for target", path);
dm_put_device(ti, dd);
dm_put_device(ti, &dd->dm_dev);
return -EINVAL;
}
*result = dd;
*result = &dd->dm_dev;
return 0;
}
@@ -540,8 +544,11 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
/*
* Decrement a devices use count and remove it if necessary.
*/
void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
dm_dev);
if (atomic_dec_and_test(&dd->count)) {
close_dev(dd, ti->table->md);
list_del(&dd->list);
@@ -937,12 +944,12 @@ int dm_table_resume_targets(struct dm_table *t)
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
struct dm_dev *dd;
struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
int r = 0;
list_for_each_entry(dd, devices, list) {
struct request_queue *q = bdev_get_queue(dd->bdev);
struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
r |= bdi_congested(&q->backing_dev_info, bdi_bits);
}
@@ -951,11 +958,11 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
void dm_table_unplug_all(struct dm_table *t)
{
struct dm_dev *dd;
struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
list_for_each_entry(dd, devices, list) {
struct request_queue *q = bdev_get_queue(dd->bdev);
struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
blk_unplug(q);
}

View File

@@ -25,13 +25,10 @@
/*
* List of devices that a metadevice uses and should open/close.
*/
struct dm_dev {
struct dm_dev_internal {
struct list_head list;
atomic_t count;
int mode;
struct block_device *bdev;
char name[16];
struct dm_dev dm_dev;
};
struct dm_table;

View File

@@ -13,7 +13,6 @@
struct dm_target;
struct dm_table;
struct dm_dev;
struct mapped_device;
struct bio_vec;
@@ -84,6 +83,12 @@ void dm_error(const char *message);
*/
void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev);
struct dm_dev {
struct block_device *bdev;
int mode;
char name[16];
};
/*
* Constructors should call these functions to ensure destination devices
* are opened/closed correctly.