md/raid1, raid10: move rXbio accounting closer to allocation.
When raid1 or raid10 find they will need to allocate a new r1bio/r10bio, in order to work around a known bad block, they account for the allocation well before the allocation is made. This separation makes the correctness less obvious and requires comments.

The accounting only needs to happen a little earlier: before the first rXbio is submitted, but that is all.

So move the accounting down to where it makes more sense.

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
parent 97d5343808
commit 6b6c8110e1
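Note on the mechanism being moved: md reuses bio->bi_phys_segments as a count of how many r1_bios/r10_bios a single write bio has been split into. Zero means the bio is not split, so the first split sets it straight to 2, and each further split increments it. The patch only changes where that increment happens, moving it from split-detection time down to just before the extra rXbio is allocated, which is what the hunks below show. The following is a minimal userspace sketch of the counting convention; the struct, helper name, and the pthread mutex standing in for conf->device_lock are illustrative, not the kernel's.

/* Minimal userspace model of the bi_phys_segments accounting discussed in
 * the commit message.  A pthread mutex stands in for conf->device_lock;
 * the types and helper are illustrative, not the kernel's.
 */
#include <pthread.h>
#include <stdio.h>

struct bio_model {
        int bi_phys_segments;   /* 0 => not split, N >= 2 => N rXbios cover this bio */
};

static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;

/* Account for one more rXbio covering part of this bio.  After the patch,
 * this runs just before the extra rXbio is allocated. */
static void account_extra_rbio(struct bio_model *bio)
{
        pthread_mutex_lock(&device_lock);
        if (bio->bi_phys_segments == 0)
                bio->bi_phys_segments = 2;      /* first split: original + new */
        else
                bio->bi_phys_segments++;        /* further splits: one more */
        pthread_mutex_unlock(&device_lock);
}

int main(void)
{
        struct bio_model bio = { .bi_phys_segments = 0 };

        /* A write that has to be split twice, e.g. around two bad blocks. */
        account_extra_rbio(&bio);       /* 0 -> 2 */
        account_extra_rbio(&bio);       /* 2 -> 3 */

        printf("rXbios accounted for this bio: %d\n", bio.bi_phys_segments);
        return 0;
}

With the counting done right before the allocation, the old "prepare for allocating another rXbio" blocks in the split-detection path become unnecessary, so the hunks below delete them.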
@@ -1436,18 +1436,9 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio)
 		goto retry_write;
 	}
 
-	if (max_sectors < r1_bio->sectors) {
-		/* We are splitting this write into multiple parts, so
-		 * we need to prepare for allocating another r1_bio.
-		 */
+	if (max_sectors < r1_bio->sectors)
 		r1_bio->sectors = max_sectors;
-		spin_lock_irq(&conf->device_lock);
-		if (bio->bi_phys_segments == 0)
-			bio->bi_phys_segments = 2;
-		else
-			bio->bi_phys_segments++;
-		spin_unlock_irq(&conf->device_lock);
-	}
+
 	sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
 
 	atomic_set(&r1_bio->remaining, 1);
@@ -1553,10 +1544,17 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio)
 	 * as it could result in the bio being freed.
 	 */
 	if (sectors_handled < bio_sectors(bio)) {
-		r1_bio_write_done(r1_bio);
-		/* We need another r1_bio. It has already been counted
+		/* We need another r1_bio, which must be accounted
 		 * in bio->bi_phys_segments
 		 */
+		spin_lock_irq(&conf->device_lock);
+		if (bio->bi_phys_segments == 0)
+			bio->bi_phys_segments = 2;
+		else
+			bio->bi_phys_segments++;
+		spin_unlock_irq(&conf->device_lock);
+
+		r1_bio_write_done(r1_bio);
 		r1_bio = alloc_r1bio(mddev, bio, sectors_handled);
 		goto retry_write;
 	}
@@ -1384,18 +1384,8 @@ retry_write:
 		goto retry_write;
 	}
 
-	if (max_sectors < r10_bio->sectors) {
-		/* We are splitting this into multiple parts, so
-		 * we need to prepare for allocating another r10_bio.
-		 */
+	if (max_sectors < r10_bio->sectors)
 		r10_bio->sectors = max_sectors;
-		spin_lock_irq(&conf->device_lock);
-		if (bio->bi_phys_segments == 0)
-			bio->bi_phys_segments = 2;
-		else
-			bio->bi_phys_segments++;
-		spin_unlock_irq(&conf->device_lock);
-	}
 	sectors_handled = r10_bio->sector + max_sectors -
 		bio->bi_iter.bi_sector;
 
@@ -1505,10 +1495,16 @@ retry_write:
 	 */
 
 	if (sectors_handled < bio_sectors(bio)) {
-		one_write_done(r10_bio);
-		/* We need another r10_bio. It has already been counted
+		/* We need another r10_bio and it needs to be counted
 		 * in bio->bi_phys_segments.
 		 */
+		spin_lock_irq(&conf->device_lock);
+		if (bio->bi_phys_segments == 0)
+			bio->bi_phys_segments = 2;
+		else
+			bio->bi_phys_segments++;
+		spin_unlock_irq(&conf->device_lock);
+		one_write_done(r10_bio);
 		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
 		r10_bio->master_bio = bio;
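For context on the split loop these hunks sit in: sectors_handled = rX_bio->sector + max_sectors - bio->bi_iter.bi_sector is the running count of original-bio sectors covered so far, and the retry_write loop allocates another rXbio at that offset until sectors_handled reaches bio_sectors(bio). Below is a small worked example with made-up sector numbers; the variable names mirror the raid1 hunks and the arithmetic is the only point being illustrated.

/* Worked example (made-up numbers) of the sectors_handled computation kept
 * unchanged by the patch:
 *     sectors_handled = rX_bio->sector + max_sectors - bio->bi_iter.bi_sector;
 */
#include <stdio.h>

typedef unsigned long long sector_t;   /* stand-in for the kernel type */

int main(void)
{
        sector_t bi_sector = 1000;      /* start of the original write bio */
        sector_t bio_sectors = 64;      /* total length of the bio */

        /* First r1_bio starts at the bio's start, but a bad block limits it. */
        sector_t r1_sector = 1000;
        sector_t max_sectors = 24;

        sector_t sectors_handled = r1_sector + max_sectors - bi_sector;
        printf("after first r1_bio: %llu of %llu sectors handled\n",
               sectors_handled, bio_sectors);   /* 24 of 64 */

        /* The next r1_bio is allocated at offset sectors_handled and the
         * loop retries until sectors_handled reaches bio_sectors. */
        r1_sector = bi_sector + sectors_handled;        /* 1024 */
        max_sectors = 40;                               /* rest is clean */
        sectors_handled = r1_sector + max_sectors - bi_sector;
        printf("after second r1_bio: %llu of %llu sectors handled\n",
               sectors_handled, bio_sectors);   /* 64 of 64 -> done */
        return 0;
}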