dm integrity: fix integrity recalculation that is improperly skipped
commit 5df96f2b9f (branch 5.4-rM2-2.2.x-imx-squashed)
upstream. Commit adc0daad36
("dm: report suspended device during destroy") broke integrity recalculation. The problem is dm_suspended() returns true not only during suspend, but also during resume. So this race condition could occur: 1. dm_integrity_resume calls queue_work(ic->recalc_wq, &ic->recalc_work) 2. integrity_recalc (&ic->recalc_work) preempts the current thread 3. integrity_recalc calls if (unlikely(dm_suspended(ic->ti))) goto unlock_ret; 4. integrity_recalc exits and no recalculating is done. To fix this race condition, add a function dm_post_suspending that is only true during the postsuspend phase and use it instead of dm_suspended(). Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> Fixes: adc0daad36
("dm: report suspended device during destroy") Cc: stable@vger.kernel.org # v4.18+ Signed-off-by: Mike Snitzer <snitzer@redhat.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent
2ca71b8073
commit
6d4448ca54
|
@ -2298,7 +2298,7 @@ static void integrity_writer(struct work_struct *w)
|
||||||
unsigned prev_free_sectors;
|
unsigned prev_free_sectors;
|
||||||
|
|
||||||
/* the following test is not needed, but it tests the replay code */
|
/* the following test is not needed, but it tests the replay code */
|
||||||
if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
|
if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
spin_lock_irq(&ic->endio_wait.lock);
|
spin_lock_irq(&ic->endio_wait.lock);
|
||||||
|
@ -2359,7 +2359,7 @@ static void integrity_recalc(struct work_struct *w)
|
||||||
|
|
||||||
next_chunk:
|
next_chunk:
|
||||||
|
|
||||||
if (unlikely(dm_suspended(ic->ti)))
|
if (unlikely(dm_post_suspending(ic->ti)))
|
||||||
goto unlock_ret;
|
goto unlock_ret;
|
||||||
|
|
||||||
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
|
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
|
||||||
|
|
|
@ -141,6 +141,7 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
|
||||||
#define DMF_NOFLUSH_SUSPENDING 5
|
#define DMF_NOFLUSH_SUSPENDING 5
|
||||||
#define DMF_DEFERRED_REMOVE 6
|
#define DMF_DEFERRED_REMOVE 6
|
||||||
#define DMF_SUSPENDED_INTERNALLY 7
|
#define DMF_SUSPENDED_INTERNALLY 7
|
||||||
|
#define DMF_POST_SUSPENDING 8
|
||||||
|
|
||||||
#define DM_NUMA_NODE NUMA_NO_NODE
|
#define DM_NUMA_NODE NUMA_NO_NODE
|
||||||
static int dm_numa_node = DM_NUMA_NODE;
|
static int dm_numa_node = DM_NUMA_NODE;
|
||||||
|
@ -2390,6 +2391,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
|
||||||
if (!dm_suspended_md(md)) {
|
if (!dm_suspended_md(md)) {
|
||||||
dm_table_presuspend_targets(map);
|
dm_table_presuspend_targets(map);
|
||||||
set_bit(DMF_SUSPENDED, &md->flags);
|
set_bit(DMF_SUSPENDED, &md->flags);
|
||||||
|
set_bit(DMF_POST_SUSPENDING, &md->flags);
|
||||||
dm_table_postsuspend_targets(map);
|
dm_table_postsuspend_targets(map);
|
||||||
}
|
}
|
||||||
/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
|
/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
|
||||||
|
@ -2712,7 +2714,9 @@ retry:
|
||||||
if (r)
|
if (r)
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
|
|
||||||
|
set_bit(DMF_POST_SUSPENDING, &md->flags);
|
||||||
dm_table_postsuspend_targets(map);
|
dm_table_postsuspend_targets(map);
|
||||||
|
clear_bit(DMF_POST_SUSPENDING, &md->flags);
|
||||||
|
|
||||||
out_unlock:
|
out_unlock:
|
||||||
mutex_unlock(&md->suspend_lock);
|
mutex_unlock(&md->suspend_lock);
|
||||||
|
@ -2809,7 +2813,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
|
||||||
(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
|
(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
|
||||||
DMF_SUSPENDED_INTERNALLY);
|
DMF_SUSPENDED_INTERNALLY);
|
||||||
|
|
||||||
|
set_bit(DMF_POST_SUSPENDING, &md->flags);
|
||||||
dm_table_postsuspend_targets(map);
|
dm_table_postsuspend_targets(map);
|
||||||
|
clear_bit(DMF_POST_SUSPENDING, &md->flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __dm_internal_resume(struct mapped_device *md)
|
static void __dm_internal_resume(struct mapped_device *md)
|
||||||
|
@ -2970,6 +2976,11 @@ int dm_suspended_md(struct mapped_device *md)
|
||||||
return test_bit(DMF_SUSPENDED, &md->flags);
|
return test_bit(DMF_SUSPENDED, &md->flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int dm_post_suspending_md(struct mapped_device *md)
|
||||||
|
{
|
||||||
|
return test_bit(DMF_POST_SUSPENDING, &md->flags);
|
||||||
|
}
|
||||||
|
|
||||||
int dm_suspended_internally_md(struct mapped_device *md)
|
int dm_suspended_internally_md(struct mapped_device *md)
|
||||||
{
|
{
|
||||||
return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
|
return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
|
||||||
|
@ -2986,6 +2997,12 @@ int dm_suspended(struct dm_target *ti)
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(dm_suspended);
|
EXPORT_SYMBOL_GPL(dm_suspended);
|
||||||
|
|
||||||
|
int dm_post_suspending(struct dm_target *ti)
|
||||||
|
{
|
||||||
|
return dm_post_suspending_md(dm_table_get_md(ti->table));
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(dm_post_suspending);
|
||||||
|
|
||||||
int dm_noflush_suspending(struct dm_target *ti)
|
int dm_noflush_suspending(struct dm_target *ti)
|
||||||
{
|
{
|
||||||
return __noflush_suspending(dm_table_get_md(ti->table));
|
return __noflush_suspending(dm_table_get_md(ti->table));
|
||||||
|
|
|
@ -420,6 +420,7 @@ const char *dm_device_name(struct mapped_device *md);
|
||||||
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
|
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
|
||||||
struct gendisk *dm_disk(struct mapped_device *md);
|
struct gendisk *dm_disk(struct mapped_device *md);
|
||||||
int dm_suspended(struct dm_target *ti);
|
int dm_suspended(struct dm_target *ti);
|
||||||
|
int dm_post_suspending(struct dm_target *ti);
|
||||||
int dm_noflush_suspending(struct dm_target *ti);
|
int dm_noflush_suspending(struct dm_target *ti);
|
||||||
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
|
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
|
||||||
void dm_remap_zone_report(struct dm_target *ti, sector_t start,
|
void dm_remap_zone_report(struct dm_target *ti, sector_t start,
|
||||||
|
|
Loading…
Reference in New Issue