
md/raid10: avoid reading from known bad blocks - part 2

When redirecting a read error to a different device, we must
again avoid bad blocks and possibly split the request.

Spin_lock typo fixed thanks to Dan Carpenter <error27@gmail.com>

Signed-off-by: NeilBrown <neilb@suse.de>
NeilBrown 2011-07-28 11:39:23 +10:00
parent 856e08e237
commit 7399c31bc9
1 changed file with 40 additions and 5 deletions
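
The split-and-retry loop this patch builds around the "read_more:" label can be hard to follow in diff form. Below is a self-contained user-space sketch of the same control flow. The helper max_good_sectors(), the toy bad-block layout, and all the numbers are invented for illustration; only the sectors_handled arithmetic and the bi_phys_segments accounting (0 means never split, the first split jumps straight to 2, each further split adds 1) mirror the patch.

#include <stdio.h>

/* Hypothetical stand-in for what read_balance() reports via
 * &max_sectors: how many sectors the chosen mirror can serve
 * before hitting a known bad block.  Toy layout: a bad block
 * every 8 sectors. */
static int max_good_sectors(unsigned long long sector, int want)
{
	int good = 8 - (int)(sector % 8);
	return good < want ? good : want;
}

int main(void)
{
	unsigned long long master_sector = 100;	/* mbio->bi_sector */
	int master_sectors = 20;		/* mbio->bi_size >> 9 */

	unsigned long long sector = master_sector;	/* r10_bio->sector */
	int sectors = master_sectors;			/* r10_bio->sectors */
	int phys_segments = 0;		/* mbio->bi_phys_segments */
	int max_sectors, sectors_handled;

read_more:
	max_sectors = max_good_sectors(sector, sectors);
	if (max_sectors < sectors) {
		/* Only part of the request fits before a bad block:
		 * submit this piece, count the split, loop for the rest. */
		sectors_handled = (int)(sector + max_sectors - master_sector);
		if (phys_segments == 0)
			phys_segments = 2;	/* first split: two pieces exist */
		else
			phys_segments++;	/* later splits: one more piece */
		printf("read sectors %llu..%llu, %d pieces outstanding\n",
		       sector, sector + max_sectors - 1, phys_segments);
		sectors = master_sectors - sectors_handled;
		sector = master_sector + sectors_handled;
		goto read_more;
	}
	printf("read sectors %llu..%llu, final piece\n",
	       sector, sector + max_sectors - 1);
	return 0;
}

Run against this toy layout, a 20-sector read at sector 100 comes out as three pieces (100..103, 104..111, 112..119) and phys_segments ends at 3, one count per piece. That per-piece count is what lets the completion path recognize when the whole master bio has finished.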

drivers/md/raid10.c

@@ -1746,14 +1746,15 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
 	rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
 
 	bio = r10_bio->devs[slot].bio;
+	bdevname(bio->bi_bdev, b);
 	r10_bio->devs[slot].bio =
 		mddev->ro ? IO_BLOCKED : NULL;
+read_more:
 	mirror = read_balance(conf, r10_bio, &max_sectors);
-	if (mirror == -1) {
+	if (mirror == -1 || max_sectors < r10_bio->sectors) {
 		printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
 		       " read error for block %llu\n",
-		       mdname(mddev),
-		       bdevname(bio->bi_bdev, b),
+		       mdname(mddev), b,
 		       (unsigned long long)r10_bio->sector);
 		raid_end_bio_io(r10_bio);
 		bio_put(bio);
@@ -1761,7 +1762,8 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
 	}
 
 	do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
-	bio_put(bio);
+	if (bio)
+		bio_put(bio);
 	slot = r10_bio->read_slot;
 	rdev = conf->mirrors[mirror].rdev;
 	printk_ratelimited(
@@ -1773,6 +1775,9 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
 		 (unsigned long long)r10_bio->sector);
 	bio = bio_clone_mddev(r10_bio->master_bio,
 			      GFP_NOIO, mddev);
+	md_trim_bio(bio,
+		    r10_bio->sector - bio->bi_sector,
+		    max_sectors);
 	r10_bio->devs[slot].bio = bio;
 	bio->bi_sector = r10_bio->devs[slot].addr
 		+ rdev->data_offset;
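
The md_trim_bio() call added here narrows the cloned master bio down to just the piece this mirror can serve: the second argument is how far this r10_bio starts into the clone, the third is the readable span. With the toy numbers from the sketch above, the second piece has r10_bio->sector = 104 against a clone starting at bi_sector = 100, so the call is effectively md_trim_bio(bio, 4, 8): skip 4 sectors, keep 8.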
@@ -1780,7 +1785,37 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
 	bio->bi_rw = READ | do_sync;
 	bio->bi_private = r10_bio;
 	bio->bi_end_io = raid10_end_read_request;
-	generic_make_request(bio);
+	if (max_sectors < r10_bio->sectors) {
+		/* Drat - have to split this up more */
+		struct bio *mbio = r10_bio->master_bio;
+		int sectors_handled =
+			r10_bio->sector + max_sectors
+			- mbio->bi_sector;
+		r10_bio->sectors = max_sectors;
+		spin_lock_irq(&conf->device_lock);
+		if (mbio->bi_phys_segments == 0)
+			mbio->bi_phys_segments = 2;
+		else
+			mbio->bi_phys_segments++;
+		spin_unlock_irq(&conf->device_lock);
+		generic_make_request(bio);
+		bio = NULL;
+
+		r10_bio = mempool_alloc(conf->r10bio_pool,
+					GFP_NOIO);
+		r10_bio->master_bio = mbio;
+		r10_bio->sectors = (mbio->bi_size >> 9)
+			- sectors_handled;
+		r10_bio->state = 0;
+		set_bit(R10BIO_ReadError,
+			&r10_bio->state);
+		r10_bio->mddev = mddev;
+		r10_bio->sector = mbio->bi_sector
+			+ sectors_handled;
+
+		goto read_more;
+	} else
+		generic_make_request(bio);
 }
 
 static void raid10d(mddev_t *mddev)
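
The decrement that balances this counter is not in the diff: it lives in raid_end_bio_io(), which this series taught to complete the master bio only when the last outstanding piece finishes. A minimal sketch of that accounting follows; struct master_req and end_one_piece() are illustrative names, not the kernel's, and the kernel protects the counter with conf->device_lock rather than running single-threaded as here.

#include <stdio.h>

struct master_req {
	int phys_segments;	/* 0 = request was never split */
	int uptodate;		/* cleared if any piece failed */
};

/* Called once per completed piece, mirroring the accounting
 * raid_end_bio_io() performs on the master bio: an unsplit
 * request completes immediately; after a split, only the last
 * piece to finish completes the master request. */
static void end_one_piece(struct master_req *m, int piece_ok)
{
	int done;

	if (m->phys_segments) {
		m->phys_segments--;
		done = (m->phys_segments == 0);
	} else
		done = 1;

	if (!piece_ok)
		m->uptodate = 0;
	if (done)
		printf("master request done, %s\n",
		       m->uptodate ? "success" : "with errors");
}

int main(void)
{
	/* Two pieces outstanding, as after the first goto read_more. */
	struct master_req m = { 2, 1 };

	end_one_piece(&m, 1);	/* first piece back: stay quiet */
	end_one_piece(&m, 1);	/* last piece back: complete the master */
	return 0;
}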