Star 1 · Watch 0 · Fork 0  (repository page counters — presumably scraped from the web UI header; verify against the original page)

block: remove the discard_zeroes_data flag

Now that we use the proper REQ_OP_WRITE_ZEROES operation everywhere we can
kill this hack.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Branch: hifive-unleashed-5.1
Authored by Christoph Hellwig on 2017-04-05 19:21:23 +02:00; committed by Jens Axboe
parent 45c21793a6
commit 48920ff2a5
23 changed files with 27 additions and 124 deletions

View File

@ -213,14 +213,8 @@ What: /sys/block/<disk>/queue/discard_zeroes_data
Date: May 2011 Date: May 2011
Contact: Martin K. Petersen <martin.petersen@oracle.com> Contact: Martin K. Petersen <martin.petersen@oracle.com>
Description: Description:
Devices that support discard functionality may return Will always return 0. Don't rely on any specific behavior
stale or random data when a previously discarded block for discards, and don't read this file.
is read back. This can cause problems if the filesystem
expects discarded blocks to be explicitly cleared. If a
device reports that it deterministically returns zeroes
when a discarded area is read the discard_zeroes_data
parameter will be set to one. Otherwise it will be 0 and
the result of reading a discarded area is undefined.
What: /sys/block/<disk>/queue/write_same_max_bytes What: /sys/block/<disk>/queue/write_same_max_bytes
Date: January 2012 Date: January 2012

View File

@ -43,11 +43,6 @@ large discards are issued, setting this value lower will make Linux issue
smaller discards and potentially help reduce latencies induced by large smaller discards and potentially help reduce latencies induced by large
discard operations. discard operations.
discard_zeroes_data (RO)
------------------------
When read, this file will show if the discarded block are zeroed by the
device or not. If its value is '1' the blocks are zeroed otherwise not.
hw_sector_size (RO) hw_sector_size (RO)
------------------- -------------------
This is the hardware sector size of the device, in bytes. This is the hardware sector size of the device, in bytes.

View File

@ -37,17 +37,12 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
return -ENXIO; return -ENXIO;
if (flags & BLKDEV_DISCARD_SECURE) { if (flags & BLKDEV_DISCARD_SECURE) {
if (flags & BLKDEV_DISCARD_ZERO)
return -EOPNOTSUPP;
if (!blk_queue_secure_erase(q)) if (!blk_queue_secure_erase(q))
return -EOPNOTSUPP; return -EOPNOTSUPP;
op = REQ_OP_SECURE_ERASE; op = REQ_OP_SECURE_ERASE;
} else { } else {
if (!blk_queue_discard(q)) if (!blk_queue_discard(q))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if ((flags & BLKDEV_DISCARD_ZERO) &&
!q->limits.discard_zeroes_data)
return -EOPNOTSUPP;
op = REQ_OP_DISCARD; op = REQ_OP_DISCARD;
} }
@ -126,7 +121,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
&bio); &bio);
if (!ret && bio) { if (!ret && bio) {
ret = submit_bio_wait(bio); ret = submit_bio_wait(bio);
if (ret == -EOPNOTSUPP && !(flags & BLKDEV_DISCARD_ZERO)) if (ret == -EOPNOTSUPP)
ret = 0; ret = 0;
bio_put(bio); bio_put(bio);
} }

View File

@ -103,7 +103,6 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->discard_granularity = 0; lim->discard_granularity = 0;
lim->discard_alignment = 0; lim->discard_alignment = 0;
lim->discard_misaligned = 0; lim->discard_misaligned = 0;
lim->discard_zeroes_data = 0;
lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
lim->alignment_offset = 0; lim->alignment_offset = 0;
@ -127,7 +126,6 @@ void blk_set_stacking_limits(struct queue_limits *lim)
blk_set_default_limits(lim); blk_set_default_limits(lim);
/* Inherit limits from component devices */ /* Inherit limits from component devices */
lim->discard_zeroes_data = 1;
lim->max_segments = USHRT_MAX; lim->max_segments = USHRT_MAX;
lim->max_discard_segments = 1; lim->max_discard_segments = 1;
lim->max_hw_sectors = UINT_MAX; lim->max_hw_sectors = UINT_MAX;
@ -609,7 +607,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->io_opt = lcm_not_zero(t->io_opt, b->io_opt); t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
t->cluster &= b->cluster; t->cluster &= b->cluster;
t->discard_zeroes_data &= b->discard_zeroes_data;
/* Physical block size a multiple of the logical block size? */ /* Physical block size a multiple of the logical block size? */
if (t->physical_block_size & (t->logical_block_size - 1)) { if (t->physical_block_size & (t->logical_block_size - 1)) {

View File

@ -208,7 +208,7 @@ static ssize_t queue_discard_max_store(struct request_queue *q,
static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{ {
return queue_var_show(queue_discard_zeroes_data(q), page); return queue_var_show(0, page);
} }
static ssize_t queue_write_same_max_show(struct request_queue *q, char *page) static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)

View File

@ -685,7 +685,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKALIGNOFF: case BLKALIGNOFF:
return compat_put_int(arg, bdev_alignment_offset(bdev)); return compat_put_int(arg, bdev_alignment_offset(bdev));
case BLKDISCARDZEROES: case BLKDISCARDZEROES:
return compat_put_uint(arg, bdev_discard_zeroes_data(bdev)); return compat_put_uint(arg, 0);
case BLKFLSBUF: case BLKFLSBUF:
case BLKROSET: case BLKROSET:
case BLKDISCARD: case BLKDISCARD:

View File

@ -547,7 +547,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case BLKALIGNOFF: case BLKALIGNOFF:
return put_int(arg, bdev_alignment_offset(bdev)); return put_int(arg, bdev_alignment_offset(bdev));
case BLKDISCARDZEROES: case BLKDISCARDZEROES:
return put_uint(arg, bdev_discard_zeroes_data(bdev)); return put_uint(arg, 0);
case BLKSECTGET: case BLKSECTGET:
max_sectors = min_t(unsigned int, USHRT_MAX, max_sectors = min_t(unsigned int, USHRT_MAX,
queue_max_sectors(bdev_get_queue(bdev))); queue_max_sectors(bdev_get_queue(bdev)));

View File

@ -931,7 +931,6 @@ void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct r
p->qlim->io_min = cpu_to_be32(queue_io_min(q)); p->qlim->io_min = cpu_to_be32(queue_io_min(q));
p->qlim->io_opt = cpu_to_be32(queue_io_opt(q)); p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
p->qlim->discard_enabled = blk_queue_discard(q); p->qlim->discard_enabled = blk_queue_discard(q);
p->qlim->discard_zeroes_data = queue_discard_zeroes_data(q);
p->qlim->write_same_capable = !!q->limits.max_write_same_sectors; p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
} else { } else {
q = device->rq_queue; q = device->rq_queue;
@ -941,7 +940,6 @@ void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct r
p->qlim->io_min = cpu_to_be32(queue_io_min(q)); p->qlim->io_min = cpu_to_be32(queue_io_min(q));
p->qlim->io_opt = cpu_to_be32(queue_io_opt(q)); p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
p->qlim->discard_enabled = 0; p->qlim->discard_enabled = 0;
p->qlim->discard_zeroes_data = 0;
p->qlim->write_same_capable = 0; p->qlim->write_same_capable = 0;
} }
} }

View File

@ -1199,10 +1199,6 @@ static void decide_on_discard_support(struct drbd_device *device,
struct drbd_connection *connection = first_peer_device(device)->connection; struct drbd_connection *connection = first_peer_device(device)->connection;
bool can_do = b ? blk_queue_discard(b) : true; bool can_do = b ? blk_queue_discard(b) : true;
if (can_do && b && !b->limits.discard_zeroes_data && !discard_zeroes_if_aligned) {
can_do = false;
drbd_info(device, "discard_zeroes_data=0 and discard_zeroes_if_aligned=no: disabling discards\n");
}
if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) { if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) {
can_do = false; can_do = false;
drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n"); drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n");
@ -1484,8 +1480,7 @@ static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *dis
if (disk_conf->al_extents > drbd_al_extents_max(nbc)) if (disk_conf->al_extents > drbd_al_extents_max(nbc))
disk_conf->al_extents = drbd_al_extents_max(nbc); disk_conf->al_extents = drbd_al_extents_max(nbc);
if (!blk_queue_discard(q) if (!blk_queue_discard(q)) {
|| (!q->limits.discard_zeroes_data && !disk_conf->discard_zeroes_if_aligned)) {
if (disk_conf->rs_discard_granularity) { if (disk_conf->rs_discard_granularity) {
disk_conf->rs_discard_granularity = 0; /* disable feature */ disk_conf->rs_discard_granularity = 0; /* disable feature */
drbd_info(device, "rs_discard_granularity feature disabled\n"); drbd_info(device, "rs_discard_granularity feature disabled\n");

View File

@ -828,7 +828,6 @@ static void loop_config_discard(struct loop_device *lo)
q->limits.discard_alignment = 0; q->limits.discard_alignment = 0;
blk_queue_max_discard_sectors(q, 0); blk_queue_max_discard_sectors(q, 0);
blk_queue_max_write_zeroes_sectors(q, 0); blk_queue_max_write_zeroes_sectors(q, 0);
q->limits.discard_zeroes_data = 0;
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
return; return;
} }
@ -837,7 +836,6 @@ static void loop_config_discard(struct loop_device *lo)
q->limits.discard_alignment = 0; q->limits.discard_alignment = 0;
blk_queue_max_discard_sectors(q, UINT_MAX >> 9); blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
q->limits.discard_zeroes_data = 1;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
} }

View File

@ -4025,7 +4025,6 @@ skip_create_disk:
dd->queue->limits.discard_granularity = 4096; dd->queue->limits.discard_granularity = 4096;
blk_queue_max_discard_sectors(dd->queue, blk_queue_max_discard_sectors(dd->queue,
MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES); MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES);
dd->queue->limits.discard_zeroes_data = 0;
} }
/* Set the capacity of the device in 512 byte sectors. */ /* Set the capacity of the device in 512 byte sectors. */

View File

@ -1110,7 +1110,6 @@ static int nbd_dev_add(int index)
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue); queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
disk->queue->limits.discard_granularity = 512; disk->queue->limits.discard_granularity = 512;
blk_queue_max_discard_sectors(disk->queue, UINT_MAX); blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
disk->queue->limits.discard_zeroes_data = 0;
blk_queue_max_hw_sectors(disk->queue, 65536); blk_queue_max_hw_sectors(disk->queue, 65536);
disk->queue->limits.max_sectors = 256; disk->queue->limits.max_sectors = 256;

View File

@ -2773,7 +2773,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ti->num_discard_bios = 1; ti->num_discard_bios = 1;
ti->discards_supported = true; ti->discards_supported = true;
ti->discard_zeroes_data_unsupported = true;
ti->split_discard_bios = false; ti->split_discard_bios = false;
cache->features = ca->features; cache->features = ca->features;

View File

@ -2030,7 +2030,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
wake_up_process(cc->write_thread); wake_up_process(cc->write_thread);
ti->num_flush_bios = 1; ti->num_flush_bios = 1;
ti->discard_zeroes_data_unsupported = true;
return 0; return 0;

View File

@ -2813,7 +2813,9 @@ static void configure_discard_support(struct raid_set *rs)
/* Assume discards not supported until after checks below. */ /* Assume discards not supported until after checks below. */
ti->discards_supported = false; ti->discards_supported = false;
/* RAID level 4,5,6 require discard_zeroes_data for data integrity! */ /*
* XXX: RAID level 4,5,6 require zeroing for safety.
*/
raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6); raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
for (i = 0; i < rs->raid_disks; i++) { for (i = 0; i < rs->raid_disks; i++) {
@ -2827,8 +2829,6 @@ static void configure_discard_support(struct raid_set *rs)
return; return;
if (raid456) { if (raid456) {
if (!q->limits.discard_zeroes_data)
return;
if (!devices_handle_discard_safely) { if (!devices_handle_discard_safely) {
DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty."); DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
DMERR("Set dm-raid.devices_handle_discard_safely=Y to override."); DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");

View File

@ -1124,7 +1124,6 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_bios = 1; ti->num_flush_bios = 1;
ti->num_discard_bios = 1; ti->num_discard_bios = 1;
ti->per_io_data_size = sizeof(struct dm_raid1_bio_record); ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);
ti->discard_zeroes_data_unsupported = true;
ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0); ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
if (!ms->kmirrord_wq) { if (!ms->kmirrord_wq) {

View File

@ -1449,22 +1449,6 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
return false; return false;
} }
static bool dm_table_discard_zeroes_data(struct dm_table *t)
{
struct dm_target *ti;
unsigned i = 0;
/* Ensure that all targets supports discard_zeroes_data. */
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
if (ti->discard_zeroes_data_unsupported)
return false;
}
return true;
}
static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data) sector_t start, sector_t len, void *data)
{ {
@ -1620,9 +1604,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
} }
blk_queue_write_cache(q, wc, fua); blk_queue_write_cache(q, wc, fua);
if (!dm_table_discard_zeroes_data(t))
q->limits.discard_zeroes_data = 0;
/* Ensure that all underlying devices are non-rotational. */ /* Ensure that all underlying devices are non-rotational. */
if (dm_table_all_devices_attribute(t, device_is_nonrot)) if (dm_table_all_devices_attribute(t, device_is_nonrot))
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);

View File

@ -3263,7 +3263,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
* them down to the data device. The thin device's discard * them down to the data device. The thin device's discard
* processing will cause mappings to be removed from the btree. * processing will cause mappings to be removed from the btree.
*/ */
ti->discard_zeroes_data_unsupported = true;
if (pf.discard_enabled && pf.discard_passdown) { if (pf.discard_enabled && pf.discard_passdown) {
ti->num_discard_bios = 1; ti->num_discard_bios = 1;
@ -4119,7 +4118,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->per_io_data_size = sizeof(struct dm_thin_endio_hook); ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
/* In case the pool supports discards, pass them on. */ /* In case the pool supports discards, pass them on. */
ti->discard_zeroes_data_unsupported = true;
if (tc->pool->pf.discard_enabled) { if (tc->pool->pf.discard_enabled) {
ti->discards_supported = true; ti->discards_supported = true;
ti->num_discard_bios = 1; ti->num_discard_bios = 1;

View File

@ -7227,7 +7227,6 @@ static int raid5_run(struct mddev *mddev)
if (mddev->queue) { if (mddev->queue) {
int chunk_size; int chunk_size;
bool discard_supported = true;
/* read-ahead size must cover two whole stripes, which /* read-ahead size must cover two whole stripes, which
* is 2 * (datadisks) * chunksize where 'n' is the * is 2 * (datadisks) * chunksize where 'n' is the
* number of raid devices * number of raid devices
@ -7263,12 +7262,6 @@ static int raid5_run(struct mddev *mddev)
blk_queue_max_discard_sectors(mddev->queue, blk_queue_max_discard_sectors(mddev->queue,
0xfffe * STRIPE_SECTORS); 0xfffe * STRIPE_SECTORS);
/*
* unaligned part of discard request will be ignored, so can't
* guarantee discard_zeroes_data
*/
mddev->queue->limits.discard_zeroes_data = 0;
blk_queue_max_write_same_sectors(mddev->queue, 0); blk_queue_max_write_same_sectors(mddev->queue, 0);
blk_queue_max_write_zeroes_sectors(mddev->queue, 0); blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
@ -7277,35 +7270,24 @@ static int raid5_run(struct mddev *mddev)
rdev->data_offset << 9); rdev->data_offset << 9);
disk_stack_limits(mddev->gendisk, rdev->bdev, disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->new_data_offset << 9); rdev->new_data_offset << 9);
/*
* discard_zeroes_data is required, otherwise data
* could be lost. Consider a scenario: discard a stripe
* (the stripe could be inconsistent if
* discard_zeroes_data is 0); write one disk of the
* stripe (the stripe could be inconsistent again
* depending on which disks are used to calculate
* parity); the disk is broken; The stripe data of this
* disk is lost.
*/
if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) ||
!bdev_get_queue(rdev->bdev)->
limits.discard_zeroes_data)
discard_supported = false;
/* Unfortunately, discard_zeroes_data is not currently
* a guarantee - just a hint. So we only allow DISCARD
* if the sysadmin has confirmed that only safe devices
* are in use by setting a module parameter.
*/
if (!devices_handle_discard_safely) {
if (discard_supported) {
pr_info("md/raid456: discard support disabled due to uncertainty.\n");
pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
}
discard_supported = false;
}
} }
if (discard_supported && /*
* zeroing is required, otherwise data
* could be lost. Consider a scenario: discard a stripe
* (the stripe could be inconsistent if
* discard_zeroes_data is 0); write one disk of the
* stripe (the stripe could be inconsistent again
* depending on which disks are used to calculate
* parity); the disk is broken; The stripe data of this
* disk is lost.
*
* We only allow DISCARD if the sysadmin has confirmed that
* only safe devices are in use by setting a module parameter.
* A better idea might be to turn DISCARD into WRITE_ZEROES
* requests, as that is required to be safe.
*/
if (devices_handle_discard_safely &&
mddev->queue->limits.max_discard_sectors >= (stripe >> 9) && mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
mddev->queue->limits.discard_granularity >= stripe) mddev->queue->limits.discard_granularity >= stripe)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,

View File

@ -644,8 +644,6 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
unsigned int logical_block_size = sdkp->device->sector_size; unsigned int logical_block_size = sdkp->device->sector_size;
unsigned int max_blocks = 0; unsigned int max_blocks = 0;
q->limits.discard_zeroes_data = 0;
/* /*
* When LBPRZ is reported, discard alignment and granularity * When LBPRZ is reported, discard alignment and granularity
* must be fixed to the logical block size. Otherwise the block * must be fixed to the logical block size. Otherwise the block
@ -681,19 +679,16 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
case SD_LBP_WS16: case SD_LBP_WS16:
max_blocks = min_not_zero(sdkp->max_ws_blocks, max_blocks = min_not_zero(sdkp->max_ws_blocks,
(u32)SD_MAX_WS16_BLOCKS); (u32)SD_MAX_WS16_BLOCKS);
q->limits.discard_zeroes_data = sdkp->lbprz;
break; break;
case SD_LBP_WS10: case SD_LBP_WS10:
max_blocks = min_not_zero(sdkp->max_ws_blocks, max_blocks = min_not_zero(sdkp->max_ws_blocks,
(u32)SD_MAX_WS10_BLOCKS); (u32)SD_MAX_WS10_BLOCKS);
q->limits.discard_zeroes_data = sdkp->lbprz;
break; break;
case SD_LBP_ZERO: case SD_LBP_ZERO:
max_blocks = min_not_zero(sdkp->max_ws_blocks, max_blocks = min_not_zero(sdkp->max_ws_blocks,
(u32)SD_MAX_WS10_BLOCKS); (u32)SD_MAX_WS10_BLOCKS);
q->limits.discard_zeroes_data = 1;
break; break;
} }

View File

@ -851,7 +851,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
attrib->unmap_granularity = q->limits.discard_granularity / block_size; attrib->unmap_granularity = q->limits.discard_granularity / block_size;
attrib->unmap_granularity_alignment = q->limits.discard_alignment / attrib->unmap_granularity_alignment = q->limits.discard_alignment /
block_size; block_size;
attrib->unmap_zeroes_data = q->limits.discard_zeroes_data; attrib->unmap_zeroes_data = 0;
return true; return true;
} }
EXPORT_SYMBOL(target_configure_unmap_from_queue); EXPORT_SYMBOL(target_configure_unmap_from_queue);

View File

@ -339,7 +339,6 @@ struct queue_limits {
unsigned char misaligned; unsigned char misaligned;
unsigned char discard_misaligned; unsigned char discard_misaligned;
unsigned char cluster; unsigned char cluster;
unsigned char discard_zeroes_data;
unsigned char raid_partial_stripes_expensive; unsigned char raid_partial_stripes_expensive;
enum blk_zoned_model zoned; enum blk_zoned_model zoned;
}; };
@ -1341,7 +1340,6 @@ extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page); sector_t nr_sects, gfp_t gfp_mask, struct page *page);
#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */ #define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
#define BLKDEV_DISCARD_ZERO (1 << 1) /* must reliably zero data */
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
@ -1541,19 +1539,6 @@ static inline int bdev_discard_alignment(struct block_device *bdev)
return q->limits.discard_alignment; return q->limits.discard_alignment;
} }
static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
return 1;
return 0;
}
static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
return queue_discard_zeroes_data(bdev_get_queue(bdev));
}
static inline unsigned int bdev_write_same(struct block_device *bdev) static inline unsigned int bdev_write_same(struct block_device *bdev)
{ {
struct request_queue *q = bdev_get_queue(bdev); struct request_queue *q = bdev_get_queue(bdev);

View File

@ -296,11 +296,6 @@ struct dm_target {
* on max_io_len boundary. * on max_io_len boundary.
*/ */
bool split_discard_bios:1; bool split_discard_bios:1;
/*
* Set if this target does not return zeroes on discarded blocks.
*/
bool discard_zeroes_data_unsupported:1;
}; };
/* Each target can link one of these into the table */ /* Each target can link one of these into the table */