md: give userspace control over removing failed devices when external metadata in use

When a device fails, we must not allow any further writes to the array until
the device failure has been recorded in array metadata.  When metadata is
managed externally, this requires some synchronisation...

Allow/require userspace to explicitly remove failed devices from active
service in the array by writing 'none' to the 'slot' attribute.  If this
reduces the number of failed devices to 0, the write block will automatically
be lowered.

Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
NeilBrown 2008-02-06 01:39:51 -08:00 committed by Linus Torvalds
parent e691063a61
commit c303da6d71

View File

@ -1891,20 +1891,44 @@ static ssize_t
slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{ {
char *e; char *e;
int err;
char nm[20];
int slot = simple_strtoul(buf, &e, 10); int slot = simple_strtoul(buf, &e, 10);
if (strncmp(buf, "none", 4)==0) if (strncmp(buf, "none", 4)==0)
slot = -1; slot = -1;
else if (e==buf || (*e && *e!= '\n')) else if (e==buf || (*e && *e!= '\n'))
return -EINVAL; return -EINVAL;
if (rdev->mddev->pers) if (rdev->mddev->pers) {
/* Cannot set slot in active array (yet) */ /* Setting 'slot' on an active array requires also
* updating the 'rd%d' link, and communicating
* with the personality with ->hot_*_disk.
* For now we only support removing
* failed/spare devices. This normally happens automatically,
* but not when the metadata is externally managed.
*/
if (slot != -1)
return -EBUSY; return -EBUSY;
if (rdev->raid_disk == -1)
return -EEXIST;
/* personality does all needed checks */
if (rdev->mddev->pers->hot_add_disk == NULL)
return -EINVAL;
err = rdev->mddev->pers->
hot_remove_disk(rdev->mddev, rdev->raid_disk);
if (err)
return err;
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_remove_link(&rdev->mddev->kobj, nm);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
} else {
if (slot >= rdev->mddev->raid_disks) if (slot >= rdev->mddev->raid_disks)
return -ENOSPC; return -ENOSPC;
rdev->raid_disk = slot; rdev->raid_disk = slot;
/* assume it is working */ /* assume it is working */
rdev->flags = 0; rdev->flags = 0;
set_bit(In_sync, &rdev->flags); set_bit(In_sync, &rdev->flags);
}
return len; return len;
} }
@ -5549,6 +5573,7 @@ static int remove_and_add_spares(mddev_t *mddev)
ITERATE_RDEV(mddev,rdev,rtmp) ITERATE_RDEV(mddev,rdev,rtmp)
if (rdev->raid_disk >= 0 && if (rdev->raid_disk >= 0 &&
!mddev->external &&
(test_bit(Faulty, &rdev->flags) || (test_bit(Faulty, &rdev->flags) ||
! test_bit(In_sync, &rdev->flags)) && ! test_bit(In_sync, &rdev->flags)) &&
atomic_read(&rdev->nr_pending)==0) { atomic_read(&rdev->nr_pending)==0) {