percpu_ref: reorganize __percpu_ref_switch_to_atomic() and relocate percpu_ref_switch_to_atomic()
Reorganize __percpu_ref_switch_to_atomic() so that it looks structurally similar to __percpu_ref_switch_to_percpu(), and relocate percpu_ref_switch_to_atomic() so that the two internal functions are co-located. This patch doesn't introduce any functional differences.

Signed-off-by: Tejun Heo <tj@kernel.org>
commit b2302c7fdc
parent a2f5630cb7
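The patch below is a pure control-flow inversion: the old code nested the common percpu-to-atomic path inside if (!(... & __PERCPU_REF_ATOMIC)) with an else-if for the already-atomic case, while the new code tests the already-atomic case first and returns early, leaving the main path at function scope the way __percpu_ref_switch_to_percpu() is written. A minimal user-space sketch of the same inversion, not kernel code (toy names such as toy_ref and TOY_ATOMIC are illustrative):

#include <stdio.h>

#define TOY_ATOMIC (1UL << 0)

struct toy_ref {
	unsigned long flags;
};

typedef void (toy_confirm_fn)(struct toy_ref *ref);

/* Before: the main path is buried in the "not yet atomic" branch. */
static void toy_switch_before(struct toy_ref *ref, toy_confirm_fn *confirm)
{
	if (!(ref->flags & TOY_ATOMIC)) {
		ref->flags |= TOY_ATOMIC;
		printf("switched to atomic\n");
	} else if (confirm) {
		confirm(ref);		/* already atomic, confirm directly */
	}
}

/* After: handle the already-atomic case first and return early. */
static void toy_switch_after(struct toy_ref *ref, toy_confirm_fn *confirm)
{
	if (ref->flags & TOY_ATOMIC) {
		if (confirm)
			confirm(ref);	/* already atomic, confirm directly */
		return;
	}

	ref->flags |= TOY_ATOMIC;
	printf("switched to atomic\n");
}

static void toy_confirm(struct toy_ref *ref)
{
	(void)ref;
	printf("confirmed\n");
}

int main(void)
{
	struct toy_ref ref = { 0 };

	toy_switch_after(&ref, toy_confirm);	/* prints "switched to atomic" */
	toy_switch_after(&ref, toy_confirm);	/* prints "confirmed" */
	toy_switch_before(&ref, NULL);		/* no-op: already atomic */
	return 0;
}

Both forms behave identically; the early-return shape just keeps the two internal mode-switch helpers structurally parallel, which is the whole point of the patch.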
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -161,21 +161,8 @@ static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
 static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 					  percpu_ref_func_t *confirm_switch)
 {
-	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) {
-		/* switching from percpu to atomic */
-		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
-
-		/*
-		 * Non-NULL ->confirm_switch is used to indicate that
-		 * switching is in progress.  Use noop one if unspecified.
-		 */
-		WARN_ON_ONCE(ref->confirm_switch);
-		ref->confirm_switch =
-			confirm_switch ?: percpu_ref_noop_confirm_switch;
-
-		percpu_ref_get(ref);	/* put after confirmation */
-		call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
-	} else if (confirm_switch) {
+	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
+		if (confirm_switch) {
 			/*
 			 * Somebody else already set ATOMIC.  Wait for its
 			 * completion and invoke @confirm_switch() directly.
@@ -183,6 +170,48 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 			wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
 			confirm_switch(ref);
 		}
+		return;
+	}
+
+	/* switching from percpu to atomic */
+	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+
+	/*
+	 * Non-NULL ->confirm_switch is used to indicate that switching is
+	 * in progress.  Use noop one if unspecified.
+	 */
+	WARN_ON_ONCE(ref->confirm_switch);
+	ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
+
+	percpu_ref_get(ref);	/* put after confirmation */
+	call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
+}
+
+static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
+{
+	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+	int cpu;
+
+	BUG_ON(!percpu_count);
+
+	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
+		return;
+
+	wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+
+	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
+
+	/*
+	 * Restore per-cpu operation.  smp_store_release() is paired with
+	 * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
+	 * that the zeroing is visible to all percpu accesses which can see
+	 * the following __PERCPU_REF_ATOMIC clearing.
+	 */
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(percpu_count, cpu) = 0;
+
+	smp_store_release(&ref->percpu_count_ptr,
+			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
 }
 
 /**
@@ -213,33 +242,6 @@ void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 	__percpu_ref_switch_to_atomic(ref, confirm_switch);
 }
 
-static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
-{
-	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
-	int cpu;
-
-	BUG_ON(!percpu_count);
-
-	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
-		return;
-
-	wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
-
-	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
-
-	/*
-	 * Restore per-cpu operation.  smp_store_release() is paired with
-	 * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
-	 * that the zeroing is visible to all percpu accesses which can see
-	 * the following __PERCPU_REF_ATOMIC clearing.
-	 */
-	for_each_possible_cpu(cpu)
-		*per_cpu_ptr(percpu_count, cpu) = 0;
-
-	smp_store_release(&ref->percpu_count_ptr,
-			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
-}
-
 /**
  * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
  * @ref: percpu_ref to switch to percpu mode
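The comment carried along in the relocated __percpu_ref_switch_to_percpu() describes a publication pattern: zero every per-cpu slot first, then release-store the flag word, so any reader that observes the cleared __PERCPU_REF_ATOMIC bit is guaranteed to also observe the zeroed counters. A rough user-space analogue using C11 atomics, assuming plain acquire as the portable stand-in for the kernel's dependent-load ordering (the array and names are illustrative, not the kernel's per-cpu machinery):

#include <stdatomic.h>
#include <stdio.h>

#define REF_ATOMIC 1UL
#define NR_CPUS 4

static unsigned long counters[NR_CPUS];		/* stand-in for the per-cpu slots */
static _Atomic unsigned long count_ptr = REF_ATOMIC;

static void switch_to_percpu(void)
{
	/* zero every slot first, like the for_each_possible_cpu() loop */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		counters[cpu] = 0;

	/* release-store: pairs with the acquire load in is_percpu() */
	unsigned long old = atomic_load_explicit(&count_ptr, memory_order_relaxed);
	atomic_store_explicit(&count_ptr, old & ~REF_ATOMIC, memory_order_release);
}

static int is_percpu(void)
{
	/*
	 * The kernel pairs smp_store_release() with a dependent load plus
	 * smp_read_barrier_depends(); acquire is the portable analogue.
	 * A reader that sees the ATOMIC bit clear also sees the zeroed
	 * counters above.
	 */
	unsigned long v = atomic_load_explicit(&count_ptr, memory_order_acquire);
	return !(v & REF_ATOMIC);
}

int main(void)
{
	switch_to_percpu();
	printf("percpu mode: %d\n", is_percpu());	/* prints 1 */
	return 0;
}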