1
0
Fork 0

dm clone: replace spin_lock_irqsave with spin_lock_irq

[ Upstream commit 6ca43ed837 ]

If we are in a place where it is known that interrupts are enabled,
functions spin_lock_irq/spin_unlock_irq should be used instead of
spin_lock_irqsave/spin_unlock_irqrestore.

spin_lock_irq and spin_unlock_irq are faster because they don't need to
push and pop the flags register.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
5.4-rM2-2.2.x-imx-squashed
Mikulas Patocka 2019-10-04 10:17:37 -04:00 committed by Greg Kroah-Hartman
parent fddfa591da
commit dcf2f00b08
3 changed files with 27 additions and 34 deletions

View File

@@ -748,7 +748,7 @@ static int __metadata_commit(struct dm_clone_metadata *cmd)
 static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
 {
 	int r;
-	unsigned long word, flags;
+	unsigned long word;
 
 	word = 0;
 	do {
@@ -772,9 +772,9 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
 		return r;
 
 	/* Update the changed flag */
-	spin_lock_irqsave(&cmd->bitmap_lock, flags);
+	spin_lock_irq(&cmd->bitmap_lock);
 	dmap->changed = 0;
-	spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+	spin_unlock_irq(&cmd->bitmap_lock);
 
 	return 0;
 }
@@ -782,7 +782,6 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
 int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd)
 {
 	int r = 0;
-	unsigned long flags;
 	struct dirty_map *dmap, *next_dmap;
 
 	down_write(&cmd->lock);
@@ -808,9 +807,9 @@ int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd)
 	}
 
 	/* Swap dirty bitmaps */
-	spin_lock_irqsave(&cmd->bitmap_lock, flags);
+	spin_lock_irq(&cmd->bitmap_lock);
 	cmd->current_dmap = next_dmap;
-	spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+	spin_unlock_irq(&cmd->bitmap_lock);
 
 	/* Set old dirty bitmap as currently committing */
 	cmd->committing_dmap = dmap;
@@ -878,9 +877,9 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
 {
 	int r = 0;
 	struct dirty_map *dmap;
-	unsigned long word, region_nr, flags;
+	unsigned long word, region_nr;
 
-	spin_lock_irqsave(&cmd->bitmap_lock, flags);
+	spin_lock_irq(&cmd->bitmap_lock);
 
 	if (cmd->read_only) {
 		r = -EPERM;
@@ -898,7 +897,7 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
 		}
 	}
 out:
-	spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+	spin_unlock_irq(&cmd->bitmap_lock);
 
 	return r;
 }
@@ -965,13 +964,11 @@ out:
 
 void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd)
 {
-	unsigned long flags;
-
 	down_write(&cmd->lock);
 
-	spin_lock_irqsave(&cmd->bitmap_lock, flags);
+	spin_lock_irq(&cmd->bitmap_lock);
 	cmd->read_only = 1;
-	spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+	spin_unlock_irq(&cmd->bitmap_lock);
 
 	if (!cmd->fail_io)
 		dm_bm_set_read_only(cmd->bm);
@@ -981,13 +978,11 @@ void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd)
 
 void dm_clone_metadata_set_read_write(struct dm_clone_metadata *cmd)
 {
-	unsigned long flags;
-
 	down_write(&cmd->lock);
 
-	spin_lock_irqsave(&cmd->bitmap_lock, flags);
+	spin_lock_irq(&cmd->bitmap_lock);
 	cmd->read_only = 0;
-	spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
+	spin_unlock_irq(&cmd->bitmap_lock);
 
 	if (!cmd->fail_io)
 		dm_bm_set_read_write(cmd->bm);

View File

@@ -44,7 +44,9 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re
  * @start: Starting region number
  * @nr_regions: Number of regions in the range
  *
- * This function doesn't block, so it's safe to call it from interrupt context.
+ * This function doesn't block, but since it uses spin_lock_irq()/spin_unlock_irq()
+ * it's NOT safe to call it from any context where interrupts are disabled, e.g.,
+ * from interrupt context.
  */
 int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
 			    unsigned long nr_regions);

View File

@@ -338,8 +338,6 @@ static void submit_bios(struct bio_list *bios)
  */
 static void issue_bio(struct clone *clone, struct bio *bio)
 {
-	unsigned long flags;
-
 	if (!bio_triggers_commit(clone, bio)) {
 		generic_make_request(bio);
 		return;
@@ -358,9 +356,9 @@ static void issue_bio(struct clone *clone, struct bio *bio)
 	 * Batch together any bios that trigger commits and then issue a single
 	 * commit for them in process_deferred_flush_bios().
 	 */
-	spin_lock_irqsave(&clone->lock, flags);
+	spin_lock_irq(&clone->lock);
 	bio_list_add(&clone->deferred_flush_bios, bio);
-	spin_unlock_irqrestore(&clone->lock, flags);
+	spin_unlock_irq(&clone->lock);
 
 	wake_worker(clone);
 }
@@ -475,7 +473,7 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
 
 static void process_discard_bio(struct clone *clone, struct bio *bio)
 {
-	unsigned long rs, re, flags;
+	unsigned long rs, re;
 
 	bio_region_range(clone, bio, &rs, &re);
 	BUG_ON(re > clone->nr_regions);
@@ -507,9 +505,9 @@ static void process_discard_bio(struct clone *clone, struct bio *bio)
 	/*
 	 * Defer discard processing.
 	 */
-	spin_lock_irqsave(&clone->lock, flags);
+	spin_lock_irq(&clone->lock);
 	bio_list_add(&clone->deferred_discard_bios, bio);
-	spin_unlock_irqrestore(&clone->lock, flags);
+	spin_unlock_irq(&clone->lock);
 
 	wake_worker(clone);
 }
@@ -1167,13 +1165,13 @@ static void process_deferred_discards(struct clone *clone)
 	int r = -EPERM;
 	struct bio *bio;
 	struct blk_plug plug;
-	unsigned long rs, re, flags;
+	unsigned long rs, re;
 	struct bio_list discards = BIO_EMPTY_LIST;
 
-	spin_lock_irqsave(&clone->lock, flags);
+	spin_lock_irq(&clone->lock);
 	bio_list_merge(&discards, &clone->deferred_discard_bios);
 	bio_list_init(&clone->deferred_discard_bios);
-	spin_unlock_irqrestore(&clone->lock, flags);
+	spin_unlock_irq(&clone->lock);
 
 	if (bio_list_empty(&discards))
 		return;
@@ -1203,13 +1201,12 @@ out:
 static void process_deferred_bios(struct clone *clone)
 {
-	unsigned long flags;
 	struct bio_list bios = BIO_EMPTY_LIST;
 
-	spin_lock_irqsave(&clone->lock, flags);
+	spin_lock_irq(&clone->lock);
 	bio_list_merge(&bios, &clone->deferred_bios);
 	bio_list_init(&clone->deferred_bios);
-	spin_unlock_irqrestore(&clone->lock, flags);
+	spin_unlock_irq(&clone->lock);
 
 	if (bio_list_empty(&bios))
 		return;
@@ -1220,7 +1217,6 @@ static void process_deferred_bios(struct clone *clone)
 static void process_deferred_flush_bios(struct clone *clone)
 {
 	struct bio *bio;
-	unsigned long flags;
 	bool dest_dev_flushed;
 	struct bio_list bios = BIO_EMPTY_LIST;
 	struct bio_list bio_completions = BIO_EMPTY_LIST;
@@ -1229,13 +1225,13 @@ static void process_deferred_flush_bios(struct clone *clone)
 	 * If there are any deferred flush bios, we must commit the metadata
 	 * before issuing them or signaling their completion.
 	 */
-	spin_lock_irqsave(&clone->lock, flags);
+	spin_lock_irq(&clone->lock);
 	bio_list_merge(&bios, &clone->deferred_flush_bios);
 	bio_list_init(&clone->deferred_flush_bios);
 	bio_list_merge(&bio_completions, &clone->deferred_flush_completions);
 	bio_list_init(&clone->deferred_flush_completions);
-	spin_unlock_irqrestore(&clone->lock, flags);
+	spin_unlock_irq(&clone->lock);
 
 	if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
 	    !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))