mm: memcg: convert vmstat slab counters to bytes

In order to prepare for per-object slab memory accounting, convert
NR_SLAB_RECLAIMABLE and NR_SLAB_UNRECLAIMABLE vmstat items to bytes.

To make it obvious, rename them to NR_SLAB_RECLAIMABLE_B and
NR_SLAB_UNRECLAIMABLE_B (similar to NR_KERNEL_STACK_KB).

Internally, global and per-node counters are stored in pages, while
memcg and lruvec counters are stored in bytes.  This scheme may look
odd, but only for now.  Once slab pages can be shared between multiple
cgroups, global and node counters will still reflect the total number
of slab pages, while memcg and lruvec counters will be used for
per-memcg slab memory tracking, which accounts individual kernel
objects.  Keeping global and node counters in pages avoids additional
overhead.
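
To make the split concrete, here is a minimal standalone sketch (not
kernel code; all names are the sketch's own, and a 4K page size is
assumed) of the two units side by side:

    #include <stdio.h>

    #define SK_PAGE_SHIFT 12                 /* assume 4K pages */
    #define SK_PAGE_SIZE  (1L << SK_PAGE_SHIFT)

    int main(void)
    {
            long node_slab_pages = 0;        /* global/node counter: pages */
            long memcg_slab_bytes = 0;       /* memcg/lruvec counter: bytes */
            long charge = SK_PAGE_SIZE << 1; /* one order-1 slab page, in bytes */

            node_slab_pages += charge >> SK_PAGE_SHIFT; /* whole pages only */
            memcg_slab_bytes += charge;      /* byte-precise; later in the
                                              * series this becomes a
                                              * per-object delta */

            printf("node: %ld pages, memcg: %ld bytes\n",
                   node_slab_pages, memcg_slab_bytes);
            return 0;
    }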

The size of slab memory shouldn't exceed 4GB on 32-bit machines, so it
fits into the atomic_long_t we use for vmstats.

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20200623174037.3951353-4-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit d42f3245c7 (parent ea426c2a7d)
Roman Gushchin, 2020-08-06 23:20:39 -07:00; committed by Linus Torvalds
13 changed files with 53 additions and 42 deletions

drivers/base/node.c
@@ -368,8 +368,8 @@ static ssize_t node_read_meminfo(struct device *dev,
 	unsigned long sreclaimable, sunreclaimable;
 
 	si_meminfo_node(&i, nid);
-	sreclaimable = node_page_state(pgdat, NR_SLAB_RECLAIMABLE);
-	sunreclaimable = node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE);
+	sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
+	sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
 	n = sprintf(buf,
 		    "Node %d MemTotal:       %8lu kB\n"
 		    "Node %d MemFree:        %8lu kB\n"

fs/proc/meminfo.c
@@ -52,8 +52,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
 
 	available = si_mem_available();
-	sreclaimable = global_node_page_state(NR_SLAB_RECLAIMABLE);
-	sunreclaim = global_node_page_state(NR_SLAB_UNRECLAIMABLE);
+	sreclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B);
+	sunreclaim = global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B);
 
 	show_val_kb(m, "MemTotal:       ", i.totalram);
 	show_val_kb(m, "MemFree:        ", i.freeram);

include/linux/mmzone.h
@@ -174,8 +174,8 @@ enum node_stat_item {
 	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
 	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
 	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
-	NR_SLAB_RECLAIMABLE,
-	NR_SLAB_UNRECLAIMABLE,
+	NR_SLAB_RECLAIMABLE_B,
+	NR_SLAB_UNRECLAIMABLE_B,
 	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
 	WORKINGSET_NODES,
@@ -213,7 +213,17 @@ enum node_stat_item {
  */
 static __always_inline bool vmstat_item_in_bytes(int idx)
 {
-	return false;
+	/*
+	 * Global and per-node slab counters track slab pages.
+	 * It's expected that changes are multiples of PAGE_SIZE.
+	 * Internally values are stored in pages.
+	 *
+	 * Per-memcg and per-lruvec counters track memory, consumed
+	 * by individual slab objects. These counters are actually
+	 * byte-precise.
+	 */
+	return (idx == NR_SLAB_RECLAIMABLE_B ||
+		idx == NR_SLAB_UNRECLAIMABLE_B);
 }
 
 /*
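
The predicate itself only classifies items; the fold to pages happens
in the vmstat update helpers prepared earlier in this series.  A
compilable standalone mock of that rule (all sk_-prefixed names belong
to this sketch, not the kernel):

    #include <assert.h>

    #define SK_PAGE_SHIFT 12
    #define SK_PAGE_SIZE  (1L << SK_PAGE_SHIFT)

    enum sk_item { SK_NR_FILE_PAGES, SK_NR_SLAB_RECLAIMABLE_B, SK_NR_ITEMS };

    static long sk_stat[SK_NR_ITEMS];   /* stored in pages, as above */

    static int sk_item_in_bytes(enum sk_item idx)
    {
            return idx == SK_NR_SLAB_RECLAIMABLE_B;
    }

    static void sk_mod_state(enum sk_item idx, long delta)
    {
            if (sk_item_in_bytes(idx)) {
                    /* byte items: delta must be a whole number of pages */
                    assert((delta & (SK_PAGE_SIZE - 1)) == 0);
                    delta >>= SK_PAGE_SHIFT;  /* fold to pages for storage */
            }
            sk_stat[idx] += delta;
    }

    int main(void)
    {
            sk_mod_state(SK_NR_FILE_PAGES, 1);                        /* +1 page  */
            sk_mod_state(SK_NR_SLAB_RECLAIMABLE_B, 2 * SK_PAGE_SIZE); /* +2 pages */
            assert(sk_stat[SK_NR_SLAB_RECLAIMABLE_B] == 2);
            return 0;
    }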

kernel/power/snapshot.c
@@ -1663,7 +1663,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
 {
 	unsigned long size;
 
-	size = global_node_page_state(NR_SLAB_RECLAIMABLE)
+	size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B)
 		+ global_node_page_state(NR_ACTIVE_ANON)
 		+ global_node_page_state(NR_INACTIVE_ANON)
 		+ global_node_page_state(NR_ACTIVE_FILE)

mm/memcontrol.c
@@ -1391,9 +1391,8 @@ static char *memory_stat_format(struct mem_cgroup *memcg)
 		       (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) *
 		       1024);
 	seq_buf_printf(&s, "slab %llu\n",
-		       (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) +
-			     memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) *
-		       PAGE_SIZE);
+		       (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) +
+			     memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B)));
 	seq_buf_printf(&s, "sock %llu\n",
 		       (u64)memcg_page_state(memcg, MEMCG_SOCK) *
 		       PAGE_SIZE);
@@ -1423,11 +1422,9 @@ static char *memory_stat_format(struct mem_cgroup *memcg)
 		       PAGE_SIZE);
 
 	seq_buf_printf(&s, "slab_reclaimable %llu\n",
-		       (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) *
-		       PAGE_SIZE);
+		       (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B));
 	seq_buf_printf(&s, "slab_unreclaimable %llu\n",
-		       (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) *
-		       PAGE_SIZE);
+		       (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B));
 
 	/* Accumulated memory events */

mm/oom_kill.c
@@ -184,7 +184,7 @@ static bool is_dump_unreclaim_slabs(void)
 		 global_node_page_state(NR_ISOLATED_FILE) +
 		 global_node_page_state(NR_UNEVICTABLE);
 
-	return (global_node_page_state(NR_SLAB_UNRECLAIMABLE) > nr_lru);
+	return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
 }
 
 /**

mm/page_alloc.c
@@ -5220,7 +5220,7 @@ long si_mem_available(void)
 	 * items that are in use, and cannot be freed. Cap this estimate at the
 	 * low watermark.
 	 */
-	reclaimable = global_node_page_state(NR_SLAB_RECLAIMABLE) +
+	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
 		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
 	available += reclaimable - min(reclaimable / 2, wmark_low);
@@ -5364,8 +5364,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 		global_node_page_state(NR_UNEVICTABLE),
 		global_node_page_state(NR_FILE_DIRTY),
 		global_node_page_state(NR_WRITEBACK),
-		global_node_page_state(NR_SLAB_RECLAIMABLE),
-		global_node_page_state(NR_SLAB_UNRECLAIMABLE),
+		global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
+		global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
 		global_node_page_state(NR_FILE_MAPPED),
 		global_node_page_state(NR_SHMEM),
 		global_zone_page_state(NR_PAGETABLE),

mm/slab.h
@@ -273,7 +273,7 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 static inline int cache_vmstat_idx(struct kmem_cache *s)
 {
 	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
+		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
@@ -390,7 +390,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
 	if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
 		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
-				    nr_pages);
+				    nr_pages << PAGE_SHIFT);
 		percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
 		return 0;
 	}
@@ -400,7 +400,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
 		goto out;
 
 	lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
-	mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages);
+	mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages << PAGE_SHIFT);
 
 	/* transer try_charge() page references to kmem_cache */
 	percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
@@ -425,11 +425,12 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
 	memcg = READ_ONCE(s->memcg_params.memcg);
 	if (likely(!mem_cgroup_is_root(memcg))) {
 		lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
-		mod_lruvec_state(lruvec, cache_vmstat_idx(s), -nr_pages);
+		mod_lruvec_state(lruvec, cache_vmstat_idx(s),
+				 -(nr_pages << PAGE_SHIFT));
 		memcg_kmem_uncharge(memcg, nr_pages);
 	} else {
 		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
-				    -nr_pages);
+				    -(nr_pages << PAGE_SHIFT));
 	}
 	rcu_read_unlock();
@@ -513,7 +514,7 @@ static __always_inline int charge_slab_page(struct page *page,
 {
 	if (is_root_cache(s)) {
 		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
-				    1 << order);
+				    PAGE_SIZE << order);
 		return 0;
 	}
@@ -525,7 +526,7 @@ static __always_inline void uncharge_slab_page(struct page *page, int order,
 {
 	if (is_root_cache(s)) {
 		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
-				    -(1 << order));
+				    -(PAGE_SIZE << order));
 		return;
 	}
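
Two spellings of the same byte delta appear in these hunks: the memcg
paths use nr_pages << PAGE_SHIFT while the root-cache paths use
PAGE_SIZE << order.  Since nr_pages is 1 << order for a slab page of a
given order, the two are identical; a quick standalone check (4K pages
assumed):

    #include <assert.h>

    #define SK_PAGE_SHIFT 12
    #define SK_PAGE_SIZE  (1UL << SK_PAGE_SHIFT)

    int main(void)
    {
            for (unsigned int order = 0; order < 11; order++) {
                    unsigned long nr_pages = 1UL << order;

                    /* byte delta, written both ways as in the patch */
                    assert((nr_pages << SK_PAGE_SHIFT) ==
                           (SK_PAGE_SIZE << order));
            }
            return 0;
    }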

mm/slab_common.c
@@ -1363,8 +1363,8 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	page = alloc_pages(flags, order);
 	if (likely(page)) {
 		ret = page_address(page);
-		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
-				    1 << order);
+		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
+				    PAGE_SIZE << order);
 	}
 	ret = kasan_kmalloc_large(ret, size, flags);
 	/* As ret might get tagged, call kmemleak hook after KASAN. */

mm/slob.c
@@ -202,8 +202,8 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 	if (!page)
 		return NULL;
 
-	mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
-			    1 << order);
+	mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
+			    PAGE_SIZE << order);
 	return page_address(page);
 }
@@ -214,8 +214,8 @@
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;
 
-	mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
-			    -(1 << order));
+	mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B,
+			    -(PAGE_SIZE << order));
 	__free_pages(sp, order);
 }
@@ -552,8 +552,8 @@ void kfree(const void *block)
 		slob_free(m, *m + align);
 	} else {
 		unsigned int order = compound_order(sp);
-		mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
-				    -(1 << order));
+		mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B,
+				    -(PAGE_SIZE << order));
 		__free_pages(sp, order);
 	}

mm/slub.c
@@ -3991,8 +3991,8 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	page = alloc_pages_node(node, flags, order);
 	if (page) {
 		ptr = page_address(page);
-		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
-				    1 << order);
+		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
+				    PAGE_SIZE << order);
 	}
 
 	return kmalloc_large_node_hook(ptr, size, flags);
@@ -4123,8 +4123,8 @@ void kfree(const void *x)
 		BUG_ON(!PageCompound(page));
 		kfree_hook(object);
-		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
-				    -(1 << order));
+		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
+				    -(PAGE_SIZE << order));
 		__free_pages(page, order);
 		return;
 	}

mm/vmscan.c
@@ -4222,7 +4222,8 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
 	 * unmapped file backed pages.
 	 */
 	if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
-	    node_page_state(pgdat, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
+	    node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
+	    pgdat->min_slab_pages)
 		return NODE_RECLAIM_FULL;
 
 	/*

mm/workingset.c
@@ -486,8 +486,10 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
 		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
 			pages += lruvec_page_state_local(lruvec,
 							 NR_LRU_BASE + i);
-		pages += lruvec_page_state_local(lruvec, NR_SLAB_RECLAIMABLE);
-		pages += lruvec_page_state_local(lruvec, NR_SLAB_UNRECLAIMABLE);
+		pages += lruvec_page_state_local(
+			lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT;
+		pages += lruvec_page_state_local(
+			lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT;
 	} else
 #endif
 		pages = node_present_pages(sc->nid);
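
On the read side the conversion runs the other way: the lruvec counters
now hold bytes, so this page-based heuristic shifts them back down.  In
this commit the deltas are still page multiples; once per-object
accounting lands, any partial page is simply rounded away.  A minimal
standalone illustration (4K pages assumed):

    #include <stdio.h>

    #define SK_PAGE_SHIFT 12                 /* assume 4K pages */

    int main(void)
    {
            unsigned long slab_bytes = 6 * 4096UL + 123; /* byte-precise count */

            /* page-based consumers round down, as count_shadow_nodes() does */
            printf("%lu pages\n", slab_bytes >> SK_PAGE_SHIFT); /* prints 6 */
            return 0;
    }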