Merge tag 'md/4.2-rc5-fixes' of git://neil.brown.name/md

Pull md fixes from Neil Brown:
 "Three more fixes for md in 4.2

  Mostly corner-case stuff.

  One of these patches is for a CVE: CVE-2015-5697

  I'm not convinced it is serious (data leak from CAP_SYS_ADMIN ioctl)
  but as people seem to want to back-port it, I've included a minimal
  version here. The remainder of that patch from Benjamin is
  code-cleanup and will arrive in the 4.3 merge window"

* tag 'md/4.2-rc5-fixes' of git://neil.brown.name/md:
  md/raid5: don't let shrink_slab shrink too far.
  md: use kzalloc() when bitmap is disabled
  md/raid1: extend spinlock to protect raid1_end_read_request against inconsistencies
commit 4e6b6ee253
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5759,7 +5759,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
 	char *ptr;
 	int err;
 
-	file = kmalloc(sizeof(*file), GFP_NOIO);
+	file = kzalloc(sizeof(*file), GFP_NOIO);
 	if (!file)
 		return -ENOMEM;
 
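For context on why kmalloc() becomes kzalloc() here (the CVE-2015-5697 item from the pull message): the ioctl copies the whole structure back to user space, so any bytes the kernel never fills in would otherwise expose stale heap contents. A minimal user-space sketch of the same pattern, with made-up struct and function names, not the kernel code itself:

/* Hypothetical user-space analogue of the kmalloc-vs-kzalloc issue:
 * a partially filled struct copied out verbatim leaks whatever bytes
 * the allocator left behind unless the buffer is zeroed first. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct bitmap_file_reply {         /* stand-in for the real reply struct */
	char pathname[64];         /* only this field may get filled in   */
	char unused[192];          /* never written when bitmap is off    */
};

static struct bitmap_file_reply *build_reply(int bitmap_enabled)
{
	/* calloc() plays the role of kzalloc(): every byte starts at 0. */
	struct bitmap_file_reply *r = calloc(1, sizeof(*r));

	if (!r)
		return NULL;
	if (bitmap_enabled)
		strcpy(r->pathname, "/var/lib/md/bitmap");
	/* With plain malloc() (kmalloc) the untouched bytes would hold
	 * whatever the heap happened to contain - that is the leak. */
	return r;
}

int main(void)
{
	struct bitmap_file_reply *r = build_reply(0);

	if (!r)
		return 1;
	printf("pathname: \"%s\"\n", r->pathname);  /* empty, not garbage */
	free(r);
	return 0;
}

Zeroing at allocation time is easier to reason about than proving that every field is written on every path, which is why the minimal, back-portable fix is just the allocator swap.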
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1476,6 +1476,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 {
 	char b[BDEVNAME_SIZE];
 	struct r1conf *conf = mddev->private;
+	unsigned long flags;
 
 	/*
 	 * If it is not operational, then we have already marked it as dead
@@ -1495,14 +1496,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 		return;
 	}
 	set_bit(Blocked, &rdev->flags);
+	spin_lock_irqsave(&conf->device_lock, flags);
 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
-		unsigned long flags;
-		spin_lock_irqsave(&conf->device_lock, flags);
 		mddev->degraded++;
 		set_bit(Faulty, &rdev->flags);
-		spin_unlock_irqrestore(&conf->device_lock, flags);
 	} else
 		set_bit(Faulty, &rdev->flags);
+	spin_unlock_irqrestore(&conf->device_lock, flags);
 	/*
 	 * if recovery is running, make sure it aborts.
 	 */
@@ -1568,7 +1568,10 @@ static int raid1_spare_active(struct mddev *mddev)
 	 * Find all failed disks within the RAID1 configuration
 	 * and mark them readable.
 	 * Called under mddev lock, so rcu protection not needed.
+	 * device_lock used to avoid races with raid1_end_read_request
+	 * which expects 'In_sync' flags and ->degraded to be consistent.
 	 */
+	spin_lock_irqsave(&conf->device_lock, flags);
 	for (i = 0; i < conf->raid_disks; i++) {
 		struct md_rdev *rdev = conf->mirrors[i].rdev;
 		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1599,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev)
 			sysfs_notify_dirent_safe(rdev->sysfs_state);
 		}
 	}
-	spin_lock_irqsave(&conf->device_lock, flags);
 	mddev->degraded -= count;
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 
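The raid1 hunks all serve one idea: hold conf->device_lock around the whole In_sync test-and-update plus the ->degraded adjustment, so raid1_end_read_request, which looks at both, can never observe one changed without the other. A rough user-space sketch of that pattern, with invented names and a pthread mutex standing in for the spinlock:

/* Illustrative sketch (not kernel code) of the locking pattern in the
 * raid1 fix: update the per-device "in_sync" state and the array-wide
 * "degraded" counter under one lock, and read them under the same lock,
 * so a reader never sees one changed without the other. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
static int in_sync = 1;    /* stand-in for the In_sync rdev flag */
static int degraded = 0;   /* stand-in for mddev->degraded       */

static void mark_device_faulty(void)
{
	pthread_mutex_lock(&device_lock);   /* lock taken before the test   */
	if (in_sync) {
		in_sync = 0;
		degraded++;
	}
	pthread_mutex_unlock(&device_lock); /* released after both updates  */
}

static void end_read_request(void)
{
	pthread_mutex_lock(&device_lock);
	/* Both values are observed under the lock: either the old pair
	 * (in_sync=1, degraded=0) or the new pair (in_sync=0, degraded=1),
	 * never a mix of the two. */
	printf("in_sync=%d degraded=%d\n", in_sync, degraded);
	pthread_mutex_unlock(&device_lock);
}

int main(void)
{
	end_read_request();
	mark_device_faulty();
	end_read_request();
	return 0;
}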
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2256,7 +2256,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 static int drop_one_stripe(struct r5conf *conf)
 {
 	struct stripe_head *sh;
-	int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
+	int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
 
 	spin_lock_irq(conf->hash_locks + hash);
 	sh = get_free_stripe(conf, hash);
@@ -6388,7 +6388,8 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
 
 	if (mutex_trylock(&conf->cache_size_mutex)) {
 		ret= 0;
-		while (ret < sc->nr_to_scan) {
+		while (ret < sc->nr_to_scan &&
+		       conf->max_nr_stripes > conf->min_nr_stripes) {
 			if (drop_one_stripe(conf) == 0) {
 				ret = SHRINK_STOP;
 				break;
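The raid5 change bounds the shrinker: raid5_cache_scan keeps dropping stripes only while max_nr_stripes is still above min_nr_stripes, so memory pressure cannot shrink the stripe cache below its floor (and drop_one_stripe now picks its hash bucket with the proper mask). A small stand-alone sketch of that bounded-scan idea, using invented names rather than the r5conf fields:

/* Illustrative sketch (assumed names, not the raid5 code) of the
 * "don't shrink too far" fix: a cache-scan loop that stops once the
 * cache has been reduced to its configured minimum size. */
#include <stdio.h>

#define MIN_NR_ENTRIES	16		/* floor the cache must not drop below */

static int max_nr_entries = 64;		/* current cache size */

/* Pretend to free one cached entry; returns the amount "reclaimed". */
static int drop_one_entry(void)
{
	max_nr_entries--;
	return 1;
}

static long cache_scan(long nr_to_scan)
{
	long ret = 0;

	/* The bound on max_nr_entries is the essence of the fix: without
	 * it, sustained memory pressure could empty the cache entirely. */
	while (ret < nr_to_scan && max_nr_entries > MIN_NR_ENTRIES)
		ret += drop_one_entry();
	return ret;
}

int main(void)
{
	printf("reclaimed %ld, cache now %d\n", cache_scan(1000), max_nr_entries);
	return 0;
}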