md/raid5: play nice with PREEMPT_RT

raid_run_ops() relies on implicitly disabled preemption for its
percpu ops, although what it really needs is CPU locality. This
breaks RT semantics: with preemption disabled, the code can still
take regular spinlocks, such as stripe_lock, which on RT are
sleeping locks.

Add a local_lock such that non-RT behavior does not change,
continuing to map to plain preempt_disable/enable, while keeping
RT happy: there the region takes a per-CPU spinlock and is thus
preemptible, yet still guarantees CPU locality.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Song Liu <songliubraving@fb.com>
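
For context: a local_lock is a named per-CPU critical section. On non-RT
kernels, local_lock()/local_unlock() map to preempt_disable()/preempt_enable();
on PREEMPT_RT they instead take a per-CPU spinlock, so the section stays
preemptible and CPU-local, and sleeping locks may safely be acquired inside
it. Below is a minimal sketch of the pattern using a statically allocated
per-CPU variable; the names frob_percpu/frob_pcpu/frob are hypothetical,
not from this patch:

#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU state, analogous to struct raid5_percpu. */
struct frob_percpu {
	local_lock_t lock;	/* protects the fields below */
	int scratch;
};

static DEFINE_PER_CPU(struct frob_percpu, frob_pcpu) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void frob(void)
{
	struct frob_percpu *p;

	/* non-RT: preempt_disable(); RT: acquire this CPU's spinlock */
	local_lock(&frob_pcpu.lock);
	p = this_cpu_ptr(&frob_pcpu);
	p->scratch++;		/* CPU-local while the lock is held */
	local_unlock(&frob_pcpu.lock);
}

The patch below applies the same pattern to dynamically allocated per-CPU
data: the lock is embedded in struct raid5_percpu, initialized with
local_lock_init() in alloc_scratch_buffer(), and the local_lock()/
local_unlock() pair replaces get_cpu()/put_cpu() in raid_run_ops().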

--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2215,10 +2215,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 	struct r5conf *conf = sh->raid_conf;
 	int level = conf->level;
 	struct raid5_percpu *percpu;
-	unsigned long cpu;
 
-	cpu = get_cpu();
-	percpu = per_cpu_ptr(conf->percpu, cpu);
+	local_lock(&conf->percpu->lock);
+	percpu = this_cpu_ptr(conf->percpu);
 	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
 		ops_run_biofill(sh);
 		overlap_clear++;
@@ -2271,13 +2270,14 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 			BUG();
 	}
 
-	if (overlap_clear && !sh->batch_head)
+	if (overlap_clear && !sh->batch_head) {
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
 			if (test_and_clear_bit(R5_Overlap, &dev->flags))
 				wake_up(&sh->raid_conf->wait_for_overlap);
 		}
-	put_cpu();
+	}
+	local_unlock(&conf->percpu->lock);
 }
 
 static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
@@ -7052,6 +7052,7 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
 		return -ENOMEM;
 	}
 
+	local_lock_init(&percpu->lock);
 	return 0;
 }

--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -4,6 +4,7 @@
 #include <linux/raid/xor.h>
 #include <linux/dmaengine.h>
+#include <linux/local_lock.h>
 
 /*
  *
@@ -640,7 +641,8 @@ struct r5conf {
					     * lists and performing address
					     * conversions
					     */
-		int scribble_obj_size;
+		int		scribble_obj_size;
+		local_lock_t	lock;
 	} __percpu *percpu;
 	int scribble_disks;
 	int scribble_sectors;