mirror of https://github.com/torvalds/linux.git
456f998ec8
Two new stats in per-memcg memory.stat which track the number of page faults and the number of major page faults:

  "pgfault"
  "pgmajfault"

They are different from the "pgpgin"/"pgpgout" stats, which count the number of pages charged/discharged to the cgroup and say nothing about reading/writing pages to disk.

It is valuable to track the two stats both for measuring an application's performance and for gauging the efficiency of the kernel page reclaim path. Counting page faults per process is useful, but we also need the aggregated value, since processes are monitored and controlled on a cgroup basis in memcg.

Functional test: check the total number of pgfault/pgmajfault of all memcgs and compare with the global vmstat value:

  $ cat /proc/vmstat | grep fault
  pgfault 1070751
  pgmajfault 553

  $ cat /dev/cgroup/memory.stat | grep fault
  pgfault 1071138
  pgmajfault 553
  total_pgfault 1071142
  total_pgmajfault 553

  $ cat /dev/cgroup/A/memory.stat | grep fault
  pgfault 199
  pgmajfault 0
  total_pgfault 199
  total_pgmajfault 0

Performance test: run the page fault test (pft) with 16 threads, faulting in 15G of anon pages in a 16G container. No regression was noticed in "flt/cpu/s".

Sample output from pft:

  TAG pft:anon-sys-default:
    Gb  Thr  CLine  User    System    Wall    flt/cpu/s  fault/wsec
    15  16   1      0.67s   233.41s   14.76s  16798.546  266356.260

  +-------------------------------------------------------------------------+
      N        Min        Max        Median     Avg        Stddev
  x  10  16682.962  17344.027  16913.524  16928.812  166.5362
  +  10  16695.568  16923.896  16820.604  16824.652  84.816568
  No difference proven at 95.0% confidence

[akpm@linux-foundation.org: fix build]
[hughd@google.com: shmem fix]
Signed-off-by: Ying Han <yinghan@google.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
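The change itself is a small hook at each fault-accounting site; the ncpfs handler in this file shows the real thing. In essence the pattern is the following (a minimal sketch for illustration only: example_fault_tail is a hypothetical name, while count_vm_event() and the mem_cgroup_count_vm_event() hook this commit introduces are the actual calls used below):

  #include <linux/mm.h>
  #include <linux/vmstat.h>
  #include <linux/memcontrol.h>

  /* Sketch: the tail of a filesystem .fault handler that had to go to
   * backing store (disk, or the network in ncpfs's case) for the page. */
  static int example_fault_tail(struct vm_area_struct *vma)
  {
  	count_vm_event(PGMAJFAULT);                        /* global /proc/vmstat */
  	mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); /* per-memcg memory.stat */
  	return VM_FAULT_MAJOR;
  }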
129 lines
2.9 KiB
C
/*
 *  mmap.c
 *
 *  Copyright (C) 1995, 1996 by Volker Lendecke
 *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
 *
 */

#include <linux/stat.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include "ncp_fs.h"

/*
 * Fill in the supplied page for mmap
 * XXX: how are we excluding truncate/invalidate here? Maybe we need to
 * lock the page?
 */
static int ncp_file_mmap_fault(struct vm_area_struct *area,
					struct vm_fault *vmf)
{
	struct file *file = area->vm_file;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	char *pg_addr;
	unsigned int already_read;
	unsigned int count;
	int bufsize;
	int pos; /* XXX: loff_t ? */

	/*
	 * ncpfs has nothing against high pages as long
	 * as recvmsg and memset work on them
	 */
	vmf->page = alloc_page(GFP_HIGHUSER);
	if (!vmf->page)
		return VM_FAULT_OOM;
	pg_addr = kmap(vmf->page);
	pos = vmf->pgoff << PAGE_SHIFT;

	count = PAGE_SIZE;
	/* what we can read in one go */
	bufsize = NCP_SERVER(inode)->buffer_size;

	already_read = 0;
	if (ncp_make_open(inode, O_RDONLY) >= 0) {
		while (already_read < count) {
			int read_this_time;
			int to_read;

			to_read = bufsize - (pos % bufsize);

			to_read = min_t(unsigned int, to_read, count - already_read);

			if (ncp_read_kernel(NCP_SERVER(inode),
				     NCP_FINFO(inode)->file_handle,
				     pos, to_read,
				     pg_addr + already_read,
				     &read_this_time) != 0) {
				read_this_time = 0;
			}
			pos += read_this_time;
			already_read += read_this_time;

			if (read_this_time < to_read) {
				break;
			}
		}
		ncp_inode_close(inode);
	}

	if (already_read < PAGE_SIZE)
		memset(pg_addr + already_read, 0, PAGE_SIZE - already_read);
	flush_dcache_page(vmf->page);
	kunmap(vmf->page);

	/*
	 * If I understand ncp_read_kernel() properly, the above always
	 * fetches from the network, here the analogue of disk.
	 * -- wli
	 */
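	/*
	 * Account the network read above as a major fault, both in the
	 * global vmstat counters and in the faulting task's memcg, where
	 * it appears as "pgmajfault" in memory.stat.
	 */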
	count_vm_event(PGMAJFAULT);
	mem_cgroup_count_vm_event(area->vm_mm, PGMAJFAULT);
	return VM_FAULT_MAJOR;
}

static const struct vm_operations_struct ncp_file_mmap =
{
	.fault = ncp_file_mmap_fault,
};


/* This is used for a general mmap of an ncp file */
int ncp_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_path.dentry->d_inode;

	DPRINTK("ncp_mmap: called\n");

	if (!ncp_conn_valid(NCP_SERVER(inode)))
		return -EIO;

	/* only PAGE_COW or read-only supported now */
	if (vma->vm_flags & VM_SHARED)
		return -EINVAL;
	/* we do not support files bigger than 4GB; only the first
	   4GB of a file can be mapped */
	if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff
	   > (1U << (32 - PAGE_SHIFT)))
		return -EFBIG;

	vma->vm_ops = &ncp_file_mmap;
	file_accessed(file);
	return 0;
}
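For reference, the fault path above is driven from user space simply by mmap()ing a file on an ncpfs mount and touching the pages. A minimal sketch (the mount path is hypothetical; the mapping must be MAP_PRIVATE, since ncp_mmap() rejects VM_SHARED):

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
  	/* Hypothetical file on an ncpfs mount. */
  	int fd = open("/mnt/ncp/example.txt", O_RDONLY);
  	if (fd < 0) { perror("open"); return 1; }

  	/* ncp_mmap() returns -EINVAL for shared mappings, so map privately. */
  	char *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
  	if (p == MAP_FAILED) { perror("mmap"); return 1; }

  	/* First touch of the page invokes ncp_file_mmap_fault(), which
  	 * reads the data over the network and counts a major fault. */
  	printf("first byte: %c\n", p[0]);

  	munmap(p, 4096);
  	close(fd);
  	return 0;
  }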