[PATCH] zoned vm counters: create vmstat.c/.h from page_alloc.c/.h

NOTE: ZVCs are *not* the lightweight event counters.  ZVCs are reliable whereas
event counters need not be.

Zone based VM statistics are necessary to determine the state of memory in a
single zone.  In a NUMA system this can be helpful for local reclaim and other
memory optimizations that may be able to shift VM load in order to get more
balanced memory use.

It is also useful to know how the computing load affects the memory
allocations on various zones.  This patchset allows the retrieval of that data
from userspace.

The patchset introduces a framework for counters that is a cross between the
existing page_state --which is simply a set of global counters split per cpu--
and the approach of deferred incremental updates implemented for nr_pagecache.

Small per cpu 8 bit counters are added to struct zone.  When a counter exceeds
a certain threshold, it is folded into an array of atomic_long counters in the
zone and into a global array that sums up all zone values.  The small 8 bit
counters sit next to the per cpu page pointers, so they will be hot in the cpu
cache when pages are allocated and freed.
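
To illustrate the scheme, here is a minimal user-space sketch of the update
path (the names zvc_zone, zvc_add and ZVC_THRESHOLD are invented for
illustration; the kernel version uses genuine per cpu storage and
atomic_long_t):

#include <stdatomic.h>

enum { NR_ITEMS = 4 };                  /* number of counter types (example) */
#define ZVC_THRESHOLD 32                /* fold-over threshold, fits in 8 bits */

struct zvc_zone {
        signed char pcp_diff[NR_ITEMS]; /* small per cpu delta (one cpu shown) */
        _Atomic long vm_stat[NR_ITEMS]; /* zone wide totals */
};

static _Atomic long vm_stat_global[NR_ITEMS];   /* machine wide totals */

/* Add delta to the cheap local counter; fold it into the shared atomics
 * once the accumulated delta exceeds the threshold. */
static void zvc_add(struct zvc_zone *zone, int item, int delta)
{
        int x = zone->pcp_diff[item] + delta;

        if (x > ZVC_THRESHOLD || x < -ZVC_THRESHOLD) {
                atomic_fetch_add(&zone->vm_stat[item], (long)x);
                atomic_fetch_add(&vm_stat_global[item], (long)x);
                x = 0;
        }
        zone->pcp_diff[item] = x;
}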

Access to VM counter information for a zone and for the whole machine is then
possible by simply indexing an array (thanks to Nick Piggin for pointing out
that approach).  Access to the total number of pages of various types no
longer requires summing up all per cpu counters.
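
Continuing the sketch above, the read side reduces to an atomic array load:

/* Reading a zone or machine wide counter is a single array access; no
 * loop over all cpus is required, and the result is off by at most the
 * sum of the per cpu thresholds. */
static long zvc_zone_state(struct zvc_zone *zone, int item)
{
        return atomic_load(&zone->vm_stat[item]);
}

static long zvc_global_state(int item)
{
        return atomic_load(&vm_stat_global[item]);
}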

Benefits of this patchset right now:

- Ability for UP and SMP configurations to determine how memory
  is balanced between the DMA, NORMAL and HIGHMEM zones.

- Loops over all processors are avoided in the writeback and
  reclaim paths.  We can avoid caching the writeback information
  because the needed information is directly accessible.

- Special handling for nr_pagecache removed.

- zone_reclaim_interval vanishes since the VM stats can now determine
  when local reclaim is worthwhile.

- Fast inline per node page state determination.

- Accurate counters in /sys/devices/system/node/node*/meminfo.  The current
  counters simply count which processor allocated a page somewhere and
  guesstimate based on that, so they were not useful for showing the actual
  distribution of page use in a specific zone.

- The swap_prefetch patch requires per node statistics in order to
  figure out when processors of a node can prefetch. This patch provides
  some of the needed numbers.

- Detailed VM counters available in more /proc and /sys status files.

References to earlier discussions:
V1 http://marc.theaimsgroup.com/?l=linux-kernel&m=113511649910826&w=2
V2 http://marc.theaimsgroup.com/?l=linux-kernel&m=114980851924230&w=2
V3 http://marc.theaimsgroup.com/?l=linux-kernel&m=115014697910351&w=2
V4 http://marc.theaimsgroup.com/?l=linux-kernel&m=115024767318740&w=2

Performance tests with AIM7 did not show any regressions.  Seems to be a tad
faster even.  Tested on ia64/NUMA.  Builds fine on i386, SMP / UP.  Includes
fixes for s390/arm/uml arch code.

This patch:

Move counter code from page_alloc.c/page-flags.h to vmstat.c/h.

Create vmstat.c/vmstat.h by separating the counter code and the proc
functions.

Move the vm_stat_text array before zoneinfo_show.

[akpm@osdl.org: s390 build fix]
[akpm@osdl.org: HOTPLUG_CPU build fix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit f6ac2354d7 (parent 672b2714ae)
Author: Christoph Lameter, 2006-06-30 01:55:32 -07:00
Committed by: Linus Torvalds
7 changed files with 569 additions and 549 deletions

include/asm-s390/pgtable.h

@@ -657,13 +657,6 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
__pte; \
})
#define SetPageUptodate(_page) \
do { \
struct page *__page = (_page); \
if (!test_and_set_bit(PG_uptodate, &__page->flags)) \
page_test_and_clear_dirty(_page); \
} while (0)
#ifdef __s390x__
#define pfn_pmd(pfn, pgprot) \

include/linux/mm.h

@@ -36,7 +36,6 @@ extern int sysctl_legacy_va_layout;
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
@@ -515,6 +514,11 @@ static inline void set_page_links(struct page *page, unsigned long zone,
set_page_section(page, pfn_to_section_nr(pfn));
}
/*
* Some inline functions in vmstat.h depend on page_zone()
*/
#include <linux/vmstat.h>
#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;

include/linux/page-flags.h

@@ -5,12 +5,8 @@
#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H
#include <linux/percpu.h>
#include <linux/cache.h>
#include <linux/types.h>
#include <asm/pgtable.h>
/*
 * Various page->flags bits:
 *
@@ -102,134 +98,6 @@
#define PG_uncached 31 /* Page has been mapped as uncached */
#endif
/*
* Global page accounting. One instance per CPU. Only unsigned longs are
* allowed.
*
* - Fields can be modified with xxx_page_state and xxx_page_state_zone at
* any time safely (which protects the instance from modification by
* interrupt.
* - The __xxx_page_state variants can be used safely when interrupts are
* disabled.
* - The __xxx_page_state variants can be used if the field is only
* modified from process context and protected from preemption, or only
* modified from interrupt context. In this case, the field should be
* commented here.
*/
struct page_state {
unsigned long nr_dirty; /* Dirty writeable pages */
unsigned long nr_writeback; /* Pages under writeback */
unsigned long nr_unstable; /* NFS unstable pages */
unsigned long nr_page_table_pages;/* Pages used for pagetables */
unsigned long nr_mapped; /* mapped into pagetables.
* only modified from process context */
unsigned long nr_slab; /* In slab */
#define GET_PAGE_STATE_LAST nr_slab
/*
* The below are zeroed by get_page_state(). Use get_full_page_state()
* to add up all these.
*/
unsigned long pgpgin; /* Disk reads */
unsigned long pgpgout; /* Disk writes */
unsigned long pswpin; /* swap reads */
unsigned long pswpout; /* swap writes */
unsigned long pgalloc_high; /* page allocations */
unsigned long pgalloc_normal;
unsigned long pgalloc_dma32;
unsigned long pgalloc_dma;
unsigned long pgfree; /* page freeings */
unsigned long pgactivate; /* pages moved inactive->active */
unsigned long pgdeactivate; /* pages moved active->inactive */
unsigned long pgfault; /* faults (major+minor) */
unsigned long pgmajfault; /* faults (major only) */
unsigned long pgrefill_high; /* inspected in refill_inactive_zone */
unsigned long pgrefill_normal;
unsigned long pgrefill_dma32;
unsigned long pgrefill_dma;
unsigned long pgsteal_high; /* total highmem pages reclaimed */
unsigned long pgsteal_normal;
unsigned long pgsteal_dma32;
unsigned long pgsteal_dma;
unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
unsigned long pgscan_kswapd_normal;
unsigned long pgscan_kswapd_dma32;
unsigned long pgscan_kswapd_dma;
unsigned long pgscan_direct_high;/* total highmem pages scanned */
unsigned long pgscan_direct_normal;
unsigned long pgscan_direct_dma32;
unsigned long pgscan_direct_dma;
unsigned long pginodesteal; /* pages reclaimed via inode freeing */
unsigned long slabs_scanned; /* slab objects scanned */
unsigned long kswapd_steal; /* pages reclaimed by kswapd */
unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
unsigned long pageoutrun; /* kswapd's calls to page reclaim */
unsigned long allocstall; /* direct reclaim calls */
unsigned long pgrotated; /* pages rotated to tail of the LRU */
unsigned long nr_bounce; /* pages for bounce buffers */
};
extern void get_page_state(struct page_state *ret);
extern void get_page_state_node(struct page_state *ret, int node);
extern void get_full_page_state(struct page_state *ret);
extern unsigned long read_page_state_offset(unsigned long offset);
extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
#define read_page_state(member) \
read_page_state_offset(offsetof(struct page_state, member))
#define mod_page_state(member, delta) \
mod_page_state_offset(offsetof(struct page_state, member), (delta))
#define __mod_page_state(member, delta) \
__mod_page_state_offset(offsetof(struct page_state, member), (delta))
#define inc_page_state(member) mod_page_state(member, 1UL)
#define dec_page_state(member) mod_page_state(member, 0UL - 1)
#define add_page_state(member,delta) mod_page_state(member, (delta))
#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))
#define __inc_page_state(member) __mod_page_state(member, 1UL)
#define __dec_page_state(member) __mod_page_state(member, 0UL - 1)
#define __add_page_state(member,delta) __mod_page_state(member, (delta))
#define __sub_page_state(member,delta) __mod_page_state(member, 0UL - (delta))
#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
#define state_zone_offset(zone, member) \
({ \
unsigned offset; \
if (is_highmem(zone)) \
offset = offsetof(struct page_state, member##_high); \
else if (is_normal(zone)) \
offset = offsetof(struct page_state, member##_normal); \
else if (is_dma32(zone)) \
offset = offsetof(struct page_state, member##_dma32); \
else \
offset = offsetof(struct page_state, member##_dma); \
offset; \
})
#define __mod_page_state_zone(zone, member, delta) \
do { \
__mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)
#define mod_page_state_zone(zone, member, delta) \
do { \
mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)
/*
 * Manipulation of page state flags
 */
@@ -254,7 +122,14 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
#define PageUptodate(page) test_bit(PG_uptodate, &(page)->flags)
#ifndef SetPageUptodate
#ifdef CONFIG_S390
#define SetPageUptodate(_page) \
do { \
struct page *__page = (_page); \
if (!test_and_set_bit(PG_uptodate, &__page->flags)) \
page_test_and_clear_dirty(_page); \
} while (0)
#else
#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
#endif
#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)

include/linux/vmstat.h (new file)

@@ -0,0 +1,138 @@
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H
#include <linux/types.h>
#include <linux/percpu.h>
/*
* Global page accounting. One instance per CPU. Only unsigned longs are
* allowed.
*
* - Fields can be modified with xxx_page_state and xxx_page_state_zone at
* any time safely (which protects the instance from modification by
* interrupt.
* - The __xxx_page_state variants can be used safely when interrupts are
* disabled.
* - The __xxx_page_state variants can be used if the field is only
* modified from process context and protected from preemption, or only
* modified from interrupt context. In this case, the field should be
* commented here.
*/
struct page_state {
unsigned long nr_dirty; /* Dirty writeable pages */
unsigned long nr_writeback; /* Pages under writeback */
unsigned long nr_unstable; /* NFS unstable pages */
unsigned long nr_page_table_pages;/* Pages used for pagetables */
unsigned long nr_mapped; /* mapped into pagetables.
* only modified from process context */
unsigned long nr_slab; /* In slab */
#define GET_PAGE_STATE_LAST nr_slab
/*
* The below are zeroed by get_page_state(). Use get_full_page_state()
* to add up all these.
*/
unsigned long pgpgin; /* Disk reads */
unsigned long pgpgout; /* Disk writes */
unsigned long pswpin; /* swap reads */
unsigned long pswpout; /* swap writes */
unsigned long pgalloc_high; /* page allocations */
unsigned long pgalloc_normal;
unsigned long pgalloc_dma32;
unsigned long pgalloc_dma;
unsigned long pgfree; /* page freeings */
unsigned long pgactivate; /* pages moved inactive->active */
unsigned long pgdeactivate; /* pages moved active->inactive */
unsigned long pgfault; /* faults (major+minor) */
unsigned long pgmajfault; /* faults (major only) */
unsigned long pgrefill_high; /* inspected in refill_inactive_zone */
unsigned long pgrefill_normal;
unsigned long pgrefill_dma32;
unsigned long pgrefill_dma;
unsigned long pgsteal_high; /* total highmem pages reclaimed */
unsigned long pgsteal_normal;
unsigned long pgsteal_dma32;
unsigned long pgsteal_dma;
unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
unsigned long pgscan_kswapd_normal;
unsigned long pgscan_kswapd_dma32;
unsigned long pgscan_kswapd_dma;
unsigned long pgscan_direct_high;/* total highmem pages scanned */
unsigned long pgscan_direct_normal;
unsigned long pgscan_direct_dma32;
unsigned long pgscan_direct_dma;
unsigned long pginodesteal; /* pages reclaimed via inode freeing */
unsigned long slabs_scanned; /* slab objects scanned */
unsigned long kswapd_steal; /* pages reclaimed by kswapd */
unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
unsigned long pageoutrun; /* kswapd's calls to page reclaim */
unsigned long allocstall; /* direct reclaim calls */
unsigned long pgrotated; /* pages rotated to tail of the LRU */
unsigned long nr_bounce; /* pages for bounce buffers */
};
extern void get_page_state(struct page_state *ret);
extern void get_page_state_node(struct page_state *ret, int node);
extern void get_full_page_state(struct page_state *ret);
extern unsigned long read_page_state_offset(unsigned long offset);
extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
#define read_page_state(member) \
read_page_state_offset(offsetof(struct page_state, member))
#define mod_page_state(member, delta) \
mod_page_state_offset(offsetof(struct page_state, member), (delta))
#define __mod_page_state(member, delta) \
__mod_page_state_offset(offsetof(struct page_state, member), (delta))
#define inc_page_state(member) mod_page_state(member, 1UL)
#define dec_page_state(member) mod_page_state(member, 0UL - 1)
#define add_page_state(member,delta) mod_page_state(member, (delta))
#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))
#define __inc_page_state(member) __mod_page_state(member, 1UL)
#define __dec_page_state(member) __mod_page_state(member, 0UL - 1)
#define __add_page_state(member,delta) __mod_page_state(member, (delta))
#define __sub_page_state(member,delta) __mod_page_state(member, 0UL - (delta))
#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
#define state_zone_offset(zone, member) \
({ \
unsigned offset; \
if (is_highmem(zone)) \
offset = offsetof(struct page_state, member##_high); \
else if (is_normal(zone)) \
offset = offsetof(struct page_state, member##_normal); \
else if (is_dma32(zone)) \
offset = offsetof(struct page_state, member##_dma32); \
else \
offset = offsetof(struct page_state, member##_dma); \
offset; \
})
#define __mod_page_state_zone(zone, member, delta) \
do { \
__mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)
#define mod_page_state_zone(zone, member, delta) \
do { \
mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)
DECLARE_PER_CPU(struct page_state, page_states);
#endif /* _LINUX_VMSTAT_H */
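
For orientation, the interface above is used roughly as follows (the function
below is an invented sketch, but calls such as inc_page_state(pgfault) match
real call sites of this era):

/* Sketch of typical fault path accounting using the macros above. */
static void fault_accounting_sketch(int major)
{
        inc_page_state(pgfault);        /* irq safe: disables interrupts */
        if (major)
                inc_page_state(pgmajfault);
}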

mm/Makefile

@@ -10,7 +10,7 @@ mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
page_alloc.o page-writeback.o pdflush.o \
readahead.o swap.o truncate.o vmscan.o \
prio_tree.o util.o mmzone.o $(mmu-y)
prio_tree.o util.o mmzone.o vmstat.o $(mmu-y)
obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
obj-$(CONFIG_HUGETLBFS) += hugetlb.o

mm/page_alloc.c

@@ -1231,141 +1231,6 @@ static void show_node(struct zone *zone)
#define show_node(zone) do { } while (0)
#endif
/*
* Accumulate the page_state information across all CPUs.
* The result is unavoidably approximate - it can change
* during and after execution of this function.
*/
static DEFINE_PER_CPU(struct page_state, page_states) = {0};
atomic_t nr_pagecache = ATOMIC_INIT(0);
EXPORT_SYMBOL(nr_pagecache);
#ifdef CONFIG_SMP
DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
#endif
static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
{
unsigned cpu;
memset(ret, 0, nr * sizeof(unsigned long));
cpus_and(*cpumask, *cpumask, cpu_online_map);
for_each_cpu_mask(cpu, *cpumask) {
unsigned long *in;
unsigned long *out;
unsigned off;
unsigned next_cpu;
in = (unsigned long *)&per_cpu(page_states, cpu);
next_cpu = next_cpu(cpu, *cpumask);
if (likely(next_cpu < NR_CPUS))
prefetch(&per_cpu(page_states, next_cpu));
out = (unsigned long *)ret;
for (off = 0; off < nr; off++)
*out++ += *in++;
}
}
void get_page_state_node(struct page_state *ret, int node)
{
int nr;
cpumask_t mask = node_to_cpumask(node);
nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
nr /= sizeof(unsigned long);
__get_page_state(ret, nr+1, &mask);
}
void get_page_state(struct page_state *ret)
{
int nr;
cpumask_t mask = CPU_MASK_ALL;
nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
nr /= sizeof(unsigned long);
__get_page_state(ret, nr + 1, &mask);
}
void get_full_page_state(struct page_state *ret)
{
cpumask_t mask = CPU_MASK_ALL;
__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
}
unsigned long read_page_state_offset(unsigned long offset)
{
unsigned long ret = 0;
int cpu;
for_each_online_cpu(cpu) {
unsigned long in;
in = (unsigned long)&per_cpu(page_states, cpu) + offset;
ret += *((unsigned long *)in);
}
return ret;
}
void __mod_page_state_offset(unsigned long offset, unsigned long delta)
{
void *ptr;
ptr = &__get_cpu_var(page_states);
*(unsigned long *)(ptr + offset) += delta;
}
EXPORT_SYMBOL(__mod_page_state_offset);
void mod_page_state_offset(unsigned long offset, unsigned long delta)
{
unsigned long flags;
void *ptr;
local_irq_save(flags);
ptr = &__get_cpu_var(page_states);
*(unsigned long *)(ptr + offset) += delta;
local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_page_state_offset);
void __get_zone_counts(unsigned long *active, unsigned long *inactive,
unsigned long *free, struct pglist_data *pgdat)
{
struct zone *zones = pgdat->node_zones;
int i;
*active = 0;
*inactive = 0;
*free = 0;
for (i = 0; i < MAX_NR_ZONES; i++) {
*active += zones[i].nr_active;
*inactive += zones[i].nr_inactive;
*free += zones[i].free_pages;
}
}
void get_zone_counts(unsigned long *active,
unsigned long *inactive, unsigned long *free)
{
struct pglist_data *pgdat;
*active = 0;
*inactive = 0;
*free = 0;
for_each_online_pgdat(pgdat) {
unsigned long l, m, n;
__get_zone_counts(&l, &m, &n, pgdat);
*active += l;
*inactive += m;
*free += n;
}
}
void si_meminfo(struct sysinfo *val)
{
val->totalram = totalram_pages;
@@ -2253,278 +2118,6 @@ void __init free_area_init(unsigned long *zones_size)
__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
}
#ifdef CONFIG_PROC_FS
#include <linux/seq_file.h>
static void *frag_start(struct seq_file *m, loff_t *pos)
{
pg_data_t *pgdat;
loff_t node = *pos;
for (pgdat = first_online_pgdat();
pgdat && node;
pgdat = next_online_pgdat(pgdat))
--node;
return pgdat;
}
static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
pg_data_t *pgdat = (pg_data_t *)arg;
(*pos)++;
return next_online_pgdat(pgdat);
}
static void frag_stop(struct seq_file *m, void *arg)
{
}
/*
* This walks the free areas for each zone.
*/
static int frag_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = (pg_data_t *)arg;
struct zone *zone;
struct zone *node_zones = pgdat->node_zones;
unsigned long flags;
int order;
for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
if (!populated_zone(zone))
continue;
spin_lock_irqsave(&zone->lock, flags);
seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
for (order = 0; order < MAX_ORDER; ++order)
seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
spin_unlock_irqrestore(&zone->lock, flags);
seq_putc(m, '\n');
}
return 0;
}
struct seq_operations fragmentation_op = {
.start = frag_start,
.next = frag_next,
.stop = frag_stop,
.show = frag_show,
};
/*
* Output information about zones in @pgdat.
*/
static int zoneinfo_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = arg;
struct zone *zone;
struct zone *node_zones = pgdat->node_zones;
unsigned long flags;
for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
int i;
if (!populated_zone(zone))
continue;
spin_lock_irqsave(&zone->lock, flags);
seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
seq_printf(m,
"\n pages free %lu"
"\n min %lu"
"\n low %lu"
"\n high %lu"
"\n active %lu"
"\n inactive %lu"
"\n scanned %lu (a: %lu i: %lu)"
"\n spanned %lu"
"\n present %lu",
zone->free_pages,
zone->pages_min,
zone->pages_low,
zone->pages_high,
zone->nr_active,
zone->nr_inactive,
zone->pages_scanned,
zone->nr_scan_active, zone->nr_scan_inactive,
zone->spanned_pages,
zone->present_pages);
seq_printf(m,
"\n protection: (%lu",
zone->lowmem_reserve[0]);
for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
seq_printf(m,
")"
"\n pagesets");
for_each_online_cpu(i) {
struct per_cpu_pageset *pageset;
int j;
pageset = zone_pcp(zone, i);
for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
if (pageset->pcp[j].count)
break;
}
if (j == ARRAY_SIZE(pageset->pcp))
continue;
for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
seq_printf(m,
"\n cpu: %i pcp: %i"
"\n count: %i"
"\n high: %i"
"\n batch: %i",
i, j,
pageset->pcp[j].count,
pageset->pcp[j].high,
pageset->pcp[j].batch);
}
#ifdef CONFIG_NUMA
seq_printf(m,
"\n numa_hit: %lu"
"\n numa_miss: %lu"
"\n numa_foreign: %lu"
"\n interleave_hit: %lu"
"\n local_node: %lu"
"\n other_node: %lu",
pageset->numa_hit,
pageset->numa_miss,
pageset->numa_foreign,
pageset->interleave_hit,
pageset->local_node,
pageset->other_node);
#endif
}
seq_printf(m,
"\n all_unreclaimable: %u"
"\n prev_priority: %i"
"\n temp_priority: %i"
"\n start_pfn: %lu",
zone->all_unreclaimable,
zone->prev_priority,
zone->temp_priority,
zone->zone_start_pfn);
spin_unlock_irqrestore(&zone->lock, flags);
seq_putc(m, '\n');
}
return 0;
}
struct seq_operations zoneinfo_op = {
.start = frag_start, /* iterate over all zones. The same as in
* fragmentation. */
.next = frag_next,
.stop = frag_stop,
.show = zoneinfo_show,
};
static char *vmstat_text[] = {
"nr_dirty",
"nr_writeback",
"nr_unstable",
"nr_page_table_pages",
"nr_mapped",
"nr_slab",
"pgpgin",
"pgpgout",
"pswpin",
"pswpout",
"pgalloc_high",
"pgalloc_normal",
"pgalloc_dma32",
"pgalloc_dma",
"pgfree",
"pgactivate",
"pgdeactivate",
"pgfault",
"pgmajfault",
"pgrefill_high",
"pgrefill_normal",
"pgrefill_dma32",
"pgrefill_dma",
"pgsteal_high",
"pgsteal_normal",
"pgsteal_dma32",
"pgsteal_dma",
"pgscan_kswapd_high",
"pgscan_kswapd_normal",
"pgscan_kswapd_dma32",
"pgscan_kswapd_dma",
"pgscan_direct_high",
"pgscan_direct_normal",
"pgscan_direct_dma32",
"pgscan_direct_dma",
"pginodesteal",
"slabs_scanned",
"kswapd_steal",
"kswapd_inodesteal",
"pageoutrun",
"allocstall",
"pgrotated",
"nr_bounce",
};
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
struct page_state *ps;
if (*pos >= ARRAY_SIZE(vmstat_text))
return NULL;
ps = kmalloc(sizeof(*ps), GFP_KERNEL);
m->private = ps;
if (!ps)
return ERR_PTR(-ENOMEM);
get_full_page_state(ps);
ps->pgpgin /= 2; /* sectors -> kbytes */
ps->pgpgout /= 2;
return (unsigned long *)ps + *pos;
}
static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
(*pos)++;
if (*pos >= ARRAY_SIZE(vmstat_text))
return NULL;
return (unsigned long *)m->private + *pos;
}
static int vmstat_show(struct seq_file *m, void *arg)
{
unsigned long *l = arg;
unsigned long off = l - (unsigned long *)m->private;
seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
return 0;
}
static void vmstat_stop(struct seq_file *m, void *arg)
{
kfree(m->private);
m->private = NULL;
}
struct seq_operations vmstat_op = {
.start = vmstat_start,
.next = vmstat_next,
.stop = vmstat_stop,
.show = vmstat_show,
};
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_HOTPLUG_CPU
static int page_alloc_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)

mm/vmstat.c (new file)

@@ -0,0 +1,417 @@
/*
* linux/mm/vmstat.c
*
* Manages VM statistics
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*/
#include <linux/config.h>
#include <linux/mm.h>
/*
* Accumulate the page_state information across all CPUs.
* The result is unavoidably approximate - it can change
* during and after execution of this function.
*/
DEFINE_PER_CPU(struct page_state, page_states) = {0};
atomic_t nr_pagecache = ATOMIC_INIT(0);
EXPORT_SYMBOL(nr_pagecache);
#ifdef CONFIG_SMP
DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
#endif
static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
{
unsigned cpu;
memset(ret, 0, nr * sizeof(unsigned long));
cpus_and(*cpumask, *cpumask, cpu_online_map);
for_each_cpu_mask(cpu, *cpumask) {
unsigned long *in;
unsigned long *out;
unsigned off;
unsigned next_cpu;
in = (unsigned long *)&per_cpu(page_states, cpu);
next_cpu = next_cpu(cpu, *cpumask);
if (likely(next_cpu < NR_CPUS))
prefetch(&per_cpu(page_states, next_cpu));
out = (unsigned long *)ret;
for (off = 0; off < nr; off++)
*out++ += *in++;
}
}
void get_page_state_node(struct page_state *ret, int node)
{
int nr;
cpumask_t mask = node_to_cpumask(node);
nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
nr /= sizeof(unsigned long);
__get_page_state(ret, nr+1, &mask);
}
void get_page_state(struct page_state *ret)
{
int nr;
cpumask_t mask = CPU_MASK_ALL;
nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
nr /= sizeof(unsigned long);
__get_page_state(ret, nr + 1, &mask);
}
void get_full_page_state(struct page_state *ret)
{
cpumask_t mask = CPU_MASK_ALL;
__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
}
unsigned long read_page_state_offset(unsigned long offset)
{
unsigned long ret = 0;
int cpu;
for_each_online_cpu(cpu) {
unsigned long in;
in = (unsigned long)&per_cpu(page_states, cpu) + offset;
ret += *((unsigned long *)in);
}
return ret;
}
void __mod_page_state_offset(unsigned long offset, unsigned long delta)
{
void *ptr;
ptr = &__get_cpu_var(page_states);
*(unsigned long *)(ptr + offset) += delta;
}
EXPORT_SYMBOL(__mod_page_state_offset);
void mod_page_state_offset(unsigned long offset, unsigned long delta)
{
unsigned long flags;
void *ptr;
local_irq_save(flags);
ptr = &__get_cpu_var(page_states);
*(unsigned long *)(ptr + offset) += delta;
local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_page_state_offset);
void __get_zone_counts(unsigned long *active, unsigned long *inactive,
unsigned long *free, struct pglist_data *pgdat)
{
struct zone *zones = pgdat->node_zones;
int i;
*active = 0;
*inactive = 0;
*free = 0;
for (i = 0; i < MAX_NR_ZONES; i++) {
*active += zones[i].nr_active;
*inactive += zones[i].nr_inactive;
*free += zones[i].free_pages;
}
}
void get_zone_counts(unsigned long *active,
unsigned long *inactive, unsigned long *free)
{
struct pglist_data *pgdat;
*active = 0;
*inactive = 0;
*free = 0;
for_each_online_pgdat(pgdat) {
unsigned long l, m, n;
__get_zone_counts(&l, &m, &n, pgdat);
*active += l;
*inactive += m;
*free += n;
}
}
#ifdef CONFIG_PROC_FS
#include <linux/seq_file.h>
static void *frag_start(struct seq_file *m, loff_t *pos)
{
pg_data_t *pgdat;
loff_t node = *pos;
for (pgdat = first_online_pgdat();
pgdat && node;
pgdat = next_online_pgdat(pgdat))
--node;
return pgdat;
}
static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
pg_data_t *pgdat = (pg_data_t *)arg;
(*pos)++;
return next_online_pgdat(pgdat);
}
static void frag_stop(struct seq_file *m, void *arg)
{
}
/*
* This walks the free areas for each zone.
*/
static int frag_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = (pg_data_t *)arg;
struct zone *zone;
struct zone *node_zones = pgdat->node_zones;
unsigned long flags;
int order;
for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
if (!populated_zone(zone))
continue;
spin_lock_irqsave(&zone->lock, flags);
seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
for (order = 0; order < MAX_ORDER; ++order)
seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
spin_unlock_irqrestore(&zone->lock, flags);
seq_putc(m, '\n');
}
return 0;
}
struct seq_operations fragmentation_op = {
.start = frag_start,
.next = frag_next,
.stop = frag_stop,
.show = frag_show,
};
static char *vmstat_text[] = {
"nr_dirty",
"nr_writeback",
"nr_unstable",
"nr_page_table_pages",
"nr_mapped",
"nr_slab",
"pgpgin",
"pgpgout",
"pswpin",
"pswpout",
"pgalloc_high",
"pgalloc_normal",
"pgalloc_dma32",
"pgalloc_dma",
"pgfree",
"pgactivate",
"pgdeactivate",
"pgfault",
"pgmajfault",
"pgrefill_high",
"pgrefill_normal",
"pgrefill_dma32",
"pgrefill_dma",
"pgsteal_high",
"pgsteal_normal",
"pgsteal_dma32",
"pgsteal_dma",
"pgscan_kswapd_high",
"pgscan_kswapd_normal",
"pgscan_kswapd_dma32",
"pgscan_kswapd_dma",
"pgscan_direct_high",
"pgscan_direct_normal",
"pgscan_direct_dma32",
"pgscan_direct_dma",
"pginodesteal",
"slabs_scanned",
"kswapd_steal",
"kswapd_inodesteal",
"pageoutrun",
"allocstall",
"pgrotated",
"nr_bounce",
};
/*
* Output information about zones in @pgdat.
*/
static int zoneinfo_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = arg;
struct zone *zone;
struct zone *node_zones = pgdat->node_zones;
unsigned long flags;
for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
int i;
if (!populated_zone(zone))
continue;
spin_lock_irqsave(&zone->lock, flags);
seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
seq_printf(m,
"\n pages free %lu"
"\n min %lu"
"\n low %lu"
"\n high %lu"
"\n active %lu"
"\n inactive %lu"
"\n scanned %lu (a: %lu i: %lu)"
"\n spanned %lu"
"\n present %lu",
zone->free_pages,
zone->pages_min,
zone->pages_low,
zone->pages_high,
zone->nr_active,
zone->nr_inactive,
zone->pages_scanned,
zone->nr_scan_active, zone->nr_scan_inactive,
zone->spanned_pages,
zone->present_pages);
seq_printf(m,
"\n protection: (%lu",
zone->lowmem_reserve[0]);
for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
seq_printf(m,
")"
"\n pagesets");
for_each_online_cpu(i) {
struct per_cpu_pageset *pageset;
int j;
pageset = zone_pcp(zone, i);
for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
if (pageset->pcp[j].count)
break;
}
if (j == ARRAY_SIZE(pageset->pcp))
continue;
for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
seq_printf(m,
"\n cpu: %i pcp: %i"
"\n count: %i"
"\n high: %i"
"\n batch: %i",
i, j,
pageset->pcp[j].count,
pageset->pcp[j].high,
pageset->pcp[j].batch);
}
#ifdef CONFIG_NUMA
seq_printf(m,
"\n numa_hit: %lu"
"\n numa_miss: %lu"
"\n numa_foreign: %lu"
"\n interleave_hit: %lu"
"\n local_node: %lu"
"\n other_node: %lu",
pageset->numa_hit,
pageset->numa_miss,
pageset->numa_foreign,
pageset->interleave_hit,
pageset->local_node,
pageset->other_node);
#endif
}
seq_printf(m,
"\n all_unreclaimable: %u"
"\n prev_priority: %i"
"\n temp_priority: %i"
"\n start_pfn: %lu",
zone->all_unreclaimable,
zone->prev_priority,
zone->temp_priority,
zone->zone_start_pfn);
spin_unlock_irqrestore(&zone->lock, flags);
seq_putc(m, '\n');
}
return 0;
}
struct seq_operations zoneinfo_op = {
.start = frag_start, /* iterate over all zones. The same as in
* fragmentation. */
.next = frag_next,
.stop = frag_stop,
.show = zoneinfo_show,
};
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
struct page_state *ps;
if (*pos >= ARRAY_SIZE(vmstat_text))
return NULL;
ps = kmalloc(sizeof(*ps), GFP_KERNEL);
m->private = ps;
if (!ps)
return ERR_PTR(-ENOMEM);
get_full_page_state(ps);
ps->pgpgin /= 2; /* sectors -> kbytes */
ps->pgpgout /= 2;
return (unsigned long *)ps + *pos;
}
static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
(*pos)++;
if (*pos >= ARRAY_SIZE(vmstat_text))
return NULL;
return (unsigned long *)m->private + *pos;
}
static int vmstat_show(struct seq_file *m, void *arg)
{
unsigned long *l = arg;
unsigned long off = l - (unsigned long *)m->private;
seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
return 0;
}
static void vmstat_stop(struct seq_file *m, void *arg)
{
kfree(m->private);
m->private = NULL;
}
struct seq_operations vmstat_op = {
.start = vmstat_start,
.next = vmstat_next,
.stop = vmstat_stop,
.show = vmstat_show,
};
#endif /* CONFIG_PROC_FS */