bcache: optimize barrier usage for atomic operations

The idea for this patch comes from Davidlohr Bueso, who posted a patch
for bcache to optimize barrier usage for read-modify-write atomic
bitops. The same optimization can also be applied to other locations
where smp_mb() is used before or after an atomic operation.

This patch replaces smp_mb() with smp_mb__before_atomic() or
smp_mb__after_atomic() in btree.c and writeback.c, where the barrier
is only needed to make a memory update visible to other cores a
little earlier. Although these locations are not on hot code paths,
it never hurts to make things a little better.
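
As an illustration of the pattern (a minimal sketch with made-up
names, not code from this patch), the lightweight helpers pair with
a non-value-returning atomic RMW operation such as atomic_inc():

    #include <linux/atomic.h>

    static atomic_t refs = ATOMIC_INIT(0);

    static void get_ref(void)
    {
            atomic_inc(&refs);
            /*
             * Order the increment before later accesses;
             * replaces a full smp_mb() after the atomic op.
             */
            smp_mb__after_atomic();
    }

    static void put_ref(void)
    {
            /*
             * Make earlier stores visible before the decrement;
             * replaces a full smp_mb() before the atomic op.
             */
            smp_mb__before_atomic();
            atomic_dec(&refs);
    }

On architectures where atomic RMW operations already imply full
ordering (e.g. x86), both helpers reduce to a plain compiler
barrier, which is where the saving comes from.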

Signed-off-by: Coly Li <colyli@suse.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

drivers/md/bcache/btree.c

@@ -1947,7 +1947,7 @@ static int bch_btree_check_thread(void *arg)
 			 */
 			atomic_set(&check_state->enough, 1);
 			/* Update check_state->enough earlier */
-			smp_mb();
+			smp_mb__after_atomic();
 			goto out;
 		}
 		skip_nr--;
@@ -1972,7 +1972,7 @@ static int bch_btree_check_thread(void *arg)
 out:
 	info->result = ret;
 	/* update check_state->started among all CPUs */
-	smp_mb();
+	smp_mb__before_atomic();
 	if (atomic_dec_and_test(&check_state->started))
 		wake_up(&check_state->wait);
@@ -2031,7 +2031,7 @@ int bch_btree_check(struct cache_set *c)
 	 */
 	for (i = 0; i < check_state->total_threads; i++) {
 		/* fetch latest check_state->enough earlier */
-		smp_mb();
+		smp_mb__before_atomic();
 		if (atomic_read(&check_state->enough))
 			break;

drivers/md/bcache/writeback.c

@@ -854,7 +854,7 @@ static int bch_dirty_init_thread(void *arg)
 		else {
 			atomic_set(&state->enough, 1);
 			/* Update state->enough earlier */
-			smp_mb();
+			smp_mb__after_atomic();
 			goto out;
 		}
 		skip_nr--;
@@ -873,7 +873,7 @@ static int bch_dirty_init_thread(void *arg)
 out:
 	/* In order to wake up state->wait in time */
-	smp_mb();
+	smp_mb__before_atomic();
 	if (atomic_dec_and_test(&state->started))
 		wake_up(&state->wait);
@@ -932,7 +932,7 @@ void bch_sectors_dirty_init(struct bcache_device *d)
 	for (i = 0; i < state->total_threads; i++) {
 		/* Fetch latest state->enough earlier */
-		smp_mb();
+		smp_mb__before_atomic();
 		if (atomic_read(&state->enough))
 			break;