pcpcntr: remove percpu_counter_sum_all()
percpu_counter_sum_all() is now redundant: the race condition it was
invented to handle is now dealt with by percpu_counter_sum() directly,
and all users of percpu_counter_sum_all() have been removed.

Remove it.

This effectively reverts the changes made in f689054aac
("percpu_counter: add percpu_counter_sum_all interface"), except for
the cpumask-iteration fix to percpu_counter_sum() made earlier in this
series.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
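As an illustration of what the change means for callers, here is a
minimal hedged sketch (the helper and counter names are hypothetical,
not taken from this commit): code that previously reached for
percpu_counter_sum_all() to avoid missing counts left on CPUs going
offline can now rely on percpu_counter_sum() alone, since the latter
iterates cpu_online_mask ORed with cpu_dying_mask under fbc->lock.

#include <linux/percpu_counter.h>

/*
 * Hypothetical caller, for illustration only: the "accurate" variant
 * is gone, and the plain sum now also visits dying CPUs whose per-CPU
 * delta has not yet been folded back into fbc->count by the hotplug
 * callback.
 */
static s64 nr_objects_in_use(struct percpu_counter *nr_objects)
{
        /*
         * Before this series a caller racing with CPU hotplug might
         * have needed percpu_counter_sum_all(); that distinction no
         * longer exists.
         */
        return percpu_counter_sum(nr_objects);
}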
commit e9b60c7f97
parent 7ba85fba47
include/linux/percpu_counter.h
@@ -45,7 +45,6 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
                               s32 batch);
 s64 __percpu_counter_sum(struct percpu_counter *fbc);
-s64 percpu_counter_sum_all(struct percpu_counter *fbc);
 int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
 void percpu_counter_sync(struct percpu_counter *fbc);
@@ -196,11 +195,6 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
        return percpu_counter_read(fbc);
 }
 
-static inline s64 percpu_counter_sum_all(struct percpu_counter *fbc)
-{
-       return percpu_counter_read(fbc);
-}
-
 static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
 {
        return true;
lib/percpu_counter.c
@@ -122,23 +122,6 @@ void percpu_counter_sync(struct percpu_counter *fbc)
 }
 EXPORT_SYMBOL(percpu_counter_sync);
 
-static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
-                                    const struct cpumask *cpu_mask)
-{
-       s64 ret;
-       int cpu;
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&fbc->lock, flags);
-       ret = fbc->count;
-       for_each_cpu_or(cpu, cpu_online_mask, cpu_mask) {
-               s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
-               ret += *pcount;
-       }
-       raw_spin_unlock_irqrestore(&fbc->lock, flags);
-       return ret;
-}
-
 /*
  * Add up all the per-cpu counts, return the result.  This is a more accurate
  * but much slower version of percpu_counter_read_positive().
@@ -153,22 +136,21 @@ static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
  */
 s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
-       return __percpu_counter_sum_mask(fbc, cpu_dying_mask);
+       s64 ret;
+       int cpu;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&fbc->lock, flags);
+       ret = fbc->count;
+       for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
+               s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
+               ret += *pcount;
+       }
+       raw_spin_unlock_irqrestore(&fbc->lock, flags);
+       return ret;
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
 
-/*
- * This is slower version of percpu_counter_sum as it traverses all possible
- * cpus. Use this only in the cases where accurate data is needed in the
- * presense of CPUs getting offlined.
- */
-s64 percpu_counter_sum_all(struct percpu_counter *fbc)
-{
-       return __percpu_counter_sum_mask(fbc, cpu_possible_mask);
-}
-EXPORT_SYMBOL(percpu_counter_sum_all);
-
 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
                           struct lock_class_key *key)
 {
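Why the surviving cpumask iteration is sufficient: when a CPU goes
offline it is cleared from cpu_online_mask before the percpu_counter
hotplug callback folds its per-CPU count into fbc->count, and
cpu_dying_mask covers exactly that window. The debug helper below is a
hedged sketch (hypothetical, not part of this commit) showing the
union semantics of for_each_cpu_or():

#include <linux/cpumask.h>
#include <linux/printk.h>

/*
 * Hypothetical helper: print every CPU the new __percpu_counter_sum()
 * would visit. for_each_cpu_or() iterates the union of the two masks,
 * so a CPU already cleared from cpu_online_mask but still present in
 * cpu_dying_mask is still summed.
 */
static void dump_summed_cpus(void)
{
        int cpu;

        for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask)
                pr_info("percpu_counter sum would visit CPU %d\n", cpu);
}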