percpu_counter: Rename __percpu_counter_add to percpu_counter_add_batch
Currently, percpu_counter_add() is a wrapper around __percpu_counter_add(),
which is preempt-safe due to explicit calls to preempt_disable(). Given how
the __ prefix is used elsewhere in percpu-related interfaces, the naming
unfortunately creates the false sense that __percpu_counter_add() is less
safe than percpu_counter_add(). In terms of context safety, they're
equivalent; the only difference is that the __ version takes a batch
parameter.

Make this more explicit by renaming __percpu_counter_add() to
percpu_counter_add_batch().

This patch doesn't cause any functional changes.

tj: Minor updates to patch description for clarity. Cosmetic indentation
    updates.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <jbacik@fb.com>
Cc: David Sterba <dsterba@suse.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Jan Kara <jack@suse.com>
Cc: Jens Axboe <axboe@fb.com>
Cc: linux-mm@kvack.org
Cc: "David S. Miller" <davem@davemloft.net>
commit 104b4e5139
parent df95e795a7
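[ Editor's illustration, not part of the patch: a minimal sketch of the
  renamed interface in a throwaway module. The counter name, batch size,
  and init function below are made up for illustration; the API calls
  (percpu_counter_init/read/sum/destroy) are the real ones. It shows that
  percpu_counter_add() is simply the default-batch wrapper around
  percpu_counter_add_batch(). ]

#include <linux/module.h>
#include <linux/percpu_counter.h>

static struct percpu_counter demo_bytes;	/* hypothetical counter */
#define DEMO_BATCH	64			/* hypothetical batch size */

static int __init demo_init(void)
{
	int ret = percpu_counter_init(&demo_bytes, 0, GFP_KERNEL);

	if (ret)
		return ret;

	/* Both calls are equally preempt-safe; only the batch differs. */
	percpu_counter_add(&demo_bytes, 4096);	/* uses percpu_counter_batch */
	percpu_counter_add_batch(&demo_bytes, 4096, DEMO_BATCH);

	/* read() is the fast approximation; sum() folds in all percpu slots */
	pr_info("approx=%lld exact=%lld\n",
		percpu_counter_read(&demo_bytes),
		percpu_counter_sum(&demo_bytes));

	percpu_counter_destroy(&demo_bytes);
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");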
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1255,9 +1255,9 @@ void clean_tree_block(struct btrfs_fs_info *fs_info,
 	btrfs_assert_tree_locked(buf);
 
 	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
-		__percpu_counter_add(&fs_info->dirty_metadata_bytes,
-				     -buf->len,
-				     fs_info->dirty_metadata_batch);
+		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
+					 -buf->len,
+					 fs_info->dirty_metadata_batch);
 		/* ugh, clear_extent_buffer_dirty needs to lock the page */
 		btrfs_set_lock_blocking(buf);
 		clear_extent_buffer_dirty(buf);
@@ -4046,9 +4046,9 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 			buf->start, transid, fs_info->generation);
 	was_dirty = set_extent_buffer_dirty(buf);
 	if (!was_dirty)
-		__percpu_counter_add(&fs_info->dirty_metadata_bytes,
-				     buf->len,
-				     fs_info->dirty_metadata_batch);
+		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
+					 buf->len,
+					 fs_info->dirty_metadata_batch);
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 	if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
 		btrfs_print_leaf(fs_info, buf);
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3584,9 +3584,9 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
 		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
 		spin_unlock(&eb->refs_lock);
 		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
-		__percpu_counter_add(&fs_info->dirty_metadata_bytes,
-				     -eb->len,
-				     fs_info->dirty_metadata_batch);
+		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
+					 -eb->len,
+					 fs_info->dirty_metadata_batch);
 		ret = 1;
 	} else {
 		spin_unlock(&eb->refs_lock);
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1682,8 +1682,8 @@ static void btrfs_set_bit_hook(struct inode *inode,
 		if (btrfs_is_testing(fs_info))
 			return;
 
-		__percpu_counter_add(&fs_info->delalloc_bytes, len,
-				     fs_info->delalloc_batch);
+		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
+					 fs_info->delalloc_batch);
 		spin_lock(&BTRFS_I(inode)->lock);
 		BTRFS_I(inode)->delalloc_bytes += len;
 		if (*bits & EXTENT_DEFRAG)
@@ -1749,8 +1749,8 @@ static void btrfs_clear_bit_hook(struct btrfs_inode *inode,
 					&inode->vfs_inode,
 					state->start, len);
 
-		__percpu_counter_add(&fs_info->delalloc_bytes, -len,
-				     fs_info->delalloc_batch);
+		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
+					 fs_info->delalloc_batch);
 		spin_lock(&inode->lock);
 		inode->delalloc_bytes -= len;
 		if (do_list && inode->delalloc_bytes == 0 &&
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1209,7 +1209,7 @@ xfs_mod_icount(
 	struct xfs_mount	*mp,
 	int64_t			delta)
 {
-	__percpu_counter_add(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
+	percpu_counter_add_batch(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
 	if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) {
 		ASSERT(0);
 		percpu_counter_add(&mp->m_icount, -delta);
@@ -1288,7 +1288,7 @@ xfs_mod_fdblocks(
 	else
 		batch = XFS_FDBLOCKS_BATCH;
 
-	__percpu_counter_add(&mp->m_fdblocks, delta, batch);
+	percpu_counter_add_batch(&mp->m_fdblocks, delta, batch);
 	if (__percpu_counter_compare(&mp->m_fdblocks, mp->m_alloc_set_aside,
 				     XFS_FDBLOCKS_BATCH) >= 0) {
 		/* we had space! */
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -66,7 +66,7 @@ static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
 static inline void __add_wb_stat(struct bdi_writeback *wb,
 				 enum wb_stat_item item, s64 amount)
 {
-	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
+	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
 }
 
 static inline void __inc_wb_stat(struct bdi_writeback *wb,
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -518,7 +518,7 @@ static inline void blkg_stat_exit(struct blkg_stat *stat)
  */
 static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
 {
-	__percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
+	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
 }
 
 /**
@@ -597,14 +597,14 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
 	else
 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
 
-	__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
+	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
 
 	if (op_is_sync(op))
 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
 	else
 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
 
-	__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
+	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
 }
 
 /**
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -22,7 +22,7 @@ unsigned long vm_memory_committed(void);
 
 static inline void vm_acct_memory(long pages)
 {
-	__percpu_counter_add(&vm_committed_as, pages, vm_committed_as_batch);
+	percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch);
 }
 
 static inline void vm_unacct_memory(long pages)
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -39,7 +39,8 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
 
 void percpu_counter_destroy(struct percpu_counter *fbc);
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
-void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
+void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
+			      s32 batch);
 s64 __percpu_counter_sum(struct percpu_counter *fbc);
 int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
 
@@ -50,7 +51,7 @@ static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
 
 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
-	__percpu_counter_add(fbc, amount, percpu_counter_batch);
+	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
 }
 
 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
@@ -136,7 +137,7 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 }
 
 static inline void
-__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
+percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	percpu_counter_add(fbc, amount);
 }
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -154,12 +154,12 @@ static inline int frag_mem_limit(struct netns_frags *nf)
 
 static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
 {
-	__percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
+	percpu_counter_add_batch(&nf->mem, -i, frag_percpu_counter_batch);
 }
 
 static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
 {
-	__percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
+	percpu_counter_add_batch(&nf->mem, i, frag_percpu_counter_batch);
 }
 
 static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -207,7 +207,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
 		if (val < (nr_cpu_ids * PROP_BATCH))
 			val = percpu_counter_sum(&pl->events);
 
-		__percpu_counter_add(&pl->events,
+		percpu_counter_add_batch(&pl->events,
 			-val + (val >> (period-pl->period)), PROP_BATCH);
 	} else
 		percpu_counter_set(&pl->events, 0);
@@ -219,7 +219,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
 void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
 {
 	fprop_reflect_period_percpu(p, pl);
-	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
+	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
 	percpu_counter_add(&p->events, 1);
 }
 
@@ -267,6 +267,6 @@ void __fprop_inc_percpu_max(struct fprop_global *p,
 		return;
 	} else
 		fprop_reflect_period_percpu(p, pl);
-	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
+	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
 	percpu_counter_add(&p->events, 1);
 }
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -72,7 +72,7 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 }
 EXPORT_SYMBOL(percpu_counter_set);
 
-void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
+void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	s64 count;
 
@@ -89,7 +89,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	}
 	preempt_enable();
 }
-EXPORT_SYMBOL(__percpu_counter_add);
+EXPORT_SYMBOL(percpu_counter_add_batch);
 
 /*
  * Add up all the per-cpu counts, return the result. This is a more accurate