md: avoid races when stopping resync.
There has been a race in raid10 and raid1 for a long time which has only recently started showing up due to a scheduler change. When a sync_read request finishes, as soon as reschedule_retry is called, another thread can mark the resync request as having completed, so md_do_sync can finish, ->stop can be called, and ->conf can be freed. So using conf after reschedule_retry is not safe. Similarly, when finishing a sync_write, calling md_done_sync must be the last thing we do, as it allows a chain of events which will free conf and other data structures. The first of these requires action in raid10.c. The second requires action in raid1.c and raid10.c. Cc: stable@kernel.org Signed-off-by: NeilBrown <neilb@suse.de>
parent
78200d45cd
commit
73d5c38a95
|
@ -1237,8 +1237,9 @@ static void end_sync_write(struct bio *bio, int error)
|
||||||
update_head_pos(mirror, r1_bio);
|
update_head_pos(mirror, r1_bio);
|
||||||
|
|
||||||
if (atomic_dec_and_test(&r1_bio->remaining)) {
|
if (atomic_dec_and_test(&r1_bio->remaining)) {
|
||||||
md_done_sync(mddev, r1_bio->sectors, uptodate);
|
sector_t s = r1_bio->sectors;
|
||||||
put_buf(r1_bio);
|
put_buf(r1_bio);
|
||||||
|
md_done_sync(mddev, s, uptodate);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1236,6 +1236,7 @@ static void end_sync_read(struct bio *bio, int error)
|
||||||
/* for reconstruct, we always reschedule after a read.
|
/* for reconstruct, we always reschedule after a read.
|
||||||
* for resync, only after all reads
|
* for resync, only after all reads
|
||||||
*/
|
*/
|
||||||
|
rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
|
||||||
if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
|
if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
|
||||||
atomic_dec_and_test(&r10_bio->remaining)) {
|
atomic_dec_and_test(&r10_bio->remaining)) {
|
||||||
/* we have read all the blocks,
|
/* we have read all the blocks,
|
||||||
|
@ -1243,7 +1244,6 @@ static void end_sync_read(struct bio *bio, int error)
|
||||||
*/
|
*/
|
||||||
reschedule_retry(r10_bio);
|
reschedule_retry(r10_bio);
|
||||||
}
|
}
|
||||||
rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void end_sync_write(struct bio *bio, int error)
|
static void end_sync_write(struct bio *bio, int error)
|
||||||
|
@ -1264,11 +1264,13 @@ static void end_sync_write(struct bio *bio, int error)
|
||||||
|
|
||||||
update_head_pos(i, r10_bio);
|
update_head_pos(i, r10_bio);
|
||||||
|
|
||||||
|
rdev_dec_pending(conf->mirrors[d].rdev, mddev);
|
||||||
while (atomic_dec_and_test(&r10_bio->remaining)) {
|
while (atomic_dec_and_test(&r10_bio->remaining)) {
|
||||||
if (r10_bio->master_bio == NULL) {
|
if (r10_bio->master_bio == NULL) {
|
||||||
/* the primary of several recovery bios */
|
/* the primary of several recovery bios */
|
||||||
md_done_sync(mddev, r10_bio->sectors, 1);
|
sector_t s = r10_bio->sectors;
|
||||||
put_buf(r10_bio);
|
put_buf(r10_bio);
|
||||||
|
md_done_sync(mddev, s, 1);
|
||||||
break;
|
break;
|
||||||
} else {
|
} else {
|
||||||
r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
|
r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
|
||||||
|
@ -1276,7 +1278,6 @@ static void end_sync_write(struct bio *bio, int error)
|
||||||
r10_bio = r10_bio2;
|
r10_bio = r10_bio2;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
rdev_dec_pending(conf->mirrors[d].rdev, mddev);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
Loading…
Reference in New Issue