mirror of https://github.com/torvalds/linux.git
8c7c6e34a1
This patch implements a per-cgroup limit on the usage of memory + swap. For pages in the swap cache, double counting of the swap-cache page and its swap entry is avoided.

The mem+swap controller works as follows.
  - memory usage is limited by memory.limit_in_bytes.
  - memory + swap usage is limited by memory.memsw_limit_in_bytes.

This has the following benefit.
  - A user can limit the total resource usage of mem+swap.

    Without this, because the memory resource controller does not track swap usage, a process can exhaust all of the swap (by a memory leak, for example). That case can now be avoided.

    Swap is also a shared resource, but it cannot be reclaimed (brought back to memory) until it is used. This characteristic can cause trouble when memory is divided into parts by cpuset or memcg. Assume groups A and B; after some applications have run, the system can end up as:

      Group A -- very large free memory space but occupying 99% of swap.
      Group B -- under memory shortage but unable to use swap... it is nearly full.

    The ability to set an appropriate swap limit for each group is therefore required.

One may wonder, "why mem+swap rather than just swap?"
  - The global LRU (kswapd) can swap out arbitrary pages. Swapping out only moves the account from memory to swap, so there is no change in mem+swap usage. In other words, when we want to limit swap usage without affecting the global LRU, a mem+swap limit is better than a plain swap limit.

The accounting target information is stored in swap_cgroup, a per-swap-entry record. Charging is done as follows.

  map
    - charge page and memsw.
  unmap
    - uncharge page/memsw if not SwapCache.
  swap-out (__delete_from_swap_cache)
    - uncharge page.
    - record the mem_cgroup information in swap_cgroup.
  swap-in (do_swap_page)
    - charged as page and memsw; the record in swap_cgroup is cleared and the memsw accounting is decremented.
  swap-free (swap_free())
    - if the swap entry is freed, memsw is uncharged by PAGE_SIZE.

Some people work in never-swap environments and consider swap to be something bad. For them, this mem+swap controller extension is just overhead, which can be avoided by a config or boot option. (See Kconfig; the details are not in this patch.)

TODO:
  - More optimization may be possible in the swap-in path (but it is not very safe), so we just do simple accounting at this stage.

[nishimura@mxp.nes.nec.co.jp: make resize limit hold mutex]
[hugh@veritas.com: memswap controller core swapcache fixes]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
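The charge/uncharge protocol above can be read as two counters that sometimes move together and sometimes do not. The standalone C sketch below is illustrative only (the counter names and the build command are assumptions, not the kernel's res_counter code); it traces how the memory usage and the mem+swap usage of a single page change across map, swap-out, swap-in and unmap, and shows the key property that swap-out leaves mem+swap usage unchanged:

/*
 * memsw_model.c - illustrative model of the accounting transitions
 * described in the commit message above.  Not kernel code.
 * Build with: cc -o memsw_model memsw_model.c
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Hypothetical per-cgroup counters standing in for the real res_counters. */
static unsigned long mem_usage;         /* checked against memory.limit_in_bytes */
static unsigned long memsw_usage;       /* checked against the mem+swap limit    */

static void show(const char *what)
{
        printf("%-10s mem=%5lu  memsw=%5lu\n", what, mem_usage, memsw_usage);
}

int main(void)
{
        /* map: charge page and memsw */
        mem_usage += PAGE_SIZE;
        memsw_usage += PAGE_SIZE;
        show("map");

        /*
         * swap-out (__delete_from_swap_cache): uncharge the page; the memsw
         * charge stays behind, remembered by the per-entry swap_cgroup record.
         */
        mem_usage -= PAGE_SIZE;
        show("swap-out");

        /*
         * swap-in (do_swap_page): the page is charged as page and memsw,
         * the swap_cgroup record is cleared, and the memsw charge that the
         * swap entry held is decremented - so memsw is unchanged overall.
         */
        mem_usage += PAGE_SIZE;
        memsw_usage += PAGE_SIZE;
        memsw_usage -= PAGE_SIZE;
        show("swap-in");

        /* unmap of a page that is not SwapCache: uncharge page and memsw */
        mem_usage -= PAGE_SIZE;
        memsw_usage -= PAGE_SIZE;
        show("unmap");

        return 0;
}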
376 lines · 10 KiB · C
/*
 * linux/mm/swap_state.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 *
 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .sync_page      = block_sync_page,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .migratepage    = migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
        .unplug_io_fn   = swap_unplug_io_fn,
};

struct address_space swapper_space = {
        .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
        .tree_lock      = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
        .a_ops          = &swap_aops,
        .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
        .backing_dev_info = &swap_backing_dev_info,
};

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages);
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(PageSwapCache(page));
        VM_BUG_ON(!PageSwapBacked(page));

        error = radix_tree_preload(gfp_mask);
        if (!error) {
                page_cache_get(page);
                SetPageSwapCache(page);
                set_page_private(page, entry.val);

                spin_lock_irq(&swapper_space.tree_lock);
                error = radix_tree_insert(&swapper_space.page_tree,
                                                entry.val, page);
                if (likely(!error)) {
                        total_swapcache_pages++;
                        __inc_zone_page_state(page, NR_FILE_PAGES);
                        INC_CACHE_INFO(add_total);
                }
                spin_unlock_irq(&swapper_space.tree_lock);
                radix_tree_preload_end();

                if (unlikely(error)) {
                        set_page_private(page, 0UL);
                        ClearPageSwapCache(page);
                        page_cache_release(page);
                }
        }
        return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        swp_entry_t ent = {.val = page_private(page)};

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageSwapCache(page));
        VM_BUG_ON(PageWriteback(page));

        radix_tree_delete(&swapper_space.page_tree, page_private(page));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        total_swapcache_pages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
        mem_cgroup_uncharge_swapcache(page, ent);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageUptodate(page));

        for (;;) {
                entry = get_swap_page();
                if (!entry.val)
                        return 0;

                /*
                 * Radix-tree node allocations from PF_MEMALLOC contexts could
                 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
                 * stops emergency reserves from being allocated.
                 *
                 * TODO: this could cause a theoretical memory reclaim
                 * deadlock in the swap out path.
                 */
                /*
                 * Add it to the swap cache and mark it dirty
                 */
                err = add_to_swap_cache(page, entry,
                                __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

                switch (err) {
                case 0:                         /* Success */
                        SetPageDirty(page);
                        return 1;
                case -EEXIST:
                        /* Raced with "speculative" read_swap_cache_async */
                        swap_free(entry);
                        continue;
                default:
                        /* -ENOMEM radix-tree allocation failure */
                        swap_free(entry);
                        return 0;
                }
        }
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;

        entry.val = page_private(page);

        spin_lock_irq(&swapper_space.tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&swapper_space.tree_lock);

        swap_free(entry);
        page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;

        lru_add_drain();
        while (nr) {
                int todo = min(nr, PAGEVEC_SIZE);
                int i;

                for (i = 0; i < todo; i++)
                        free_swap_cache(pagep[i]);
                release_pages(pagep, todo, 0);
                pagep += todo;
                nr -= todo;
        }
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page * lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(&swapper_space, entry.val);

        if (page)
                INC_CACHE_INFO(find_success);

        INC_CACHE_INFO(find_total);
        return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *found_page, *new_page = NULL;
        int err;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(&swapper_space, entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                if (!swap_duplicate(entry))
                        break;

                /*
                 * Associate the page with swap entry in the swap cache.
                 * May fail (-EEXIST) if there is already a page associated
                 * with this entry in the swap cache: added by a racing
                 * read_swap_cache_async, or add_to_swap or shmem_writepage
                 * re-using the just freed swap entry for an existing page.
                 * May fail (-ENOMEM) if radix-tree node allocation failed.
                 */
                __set_page_locked(new_page);
                SetPageSwapBacked(new_page);
                err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
                if (likely(!err)) {
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
                        swap_readpage(NULL, new_page);
                        return new_page;
                }
                ClearPageSwapBacked(new_page);
                __clear_page_locked(new_page);
                swap_free(entry);
        } while (err != -ENOMEM);

        if (new_page)
                page_cache_release(new_page);
        return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        int nr_pages;
        struct page *page;
        unsigned long offset;
        unsigned long end_offset;

        /*
         * Get starting offset for readaround, and number of pages to read.
         * Adjust starting address by readbehind (for NUMA interleave case)?
         * No, it's very unlikely that swap layout would follow vma layout,
         * more likely that neighbouring swap pages came from the same node:
         * so use the same "addr" to choose the same node for each swap read.
         */
        nr_pages = valid_swaphandles(entry, &offset);
        for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                                gfp_mask, vma, addr);
                if (!page)
                        break;
                page_cache_release(page);
        }
        lru_add_drain();        /* Push any new pages onto the LRU now */
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
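As the swapin_readahead() comment notes, the window it walks is simply the naturally aligned block of (1 << page_cluster) swap slots containing the faulting entry, as computed for it by valid_swaphandles(). The sketch below is a userspace illustration of that alignment step only, with a hypothetical helper name; it ignores the per-slot usage checks and locking that the real valid_swaphandles() in mm/swapfile.c performs:

/* readahead_window.c - illustrative only; not the kernel's valid_swaphandles(). */
#include <stdio.h>

/* Stands in for /proc/sys/vm/page_cluster: read 1 << page_cluster slots. */
static int page_cluster = 3;

/*
 * Return the first slot of the aligned readahead window around 'target'
 * and store the number of slots to read in *nr.  'max_offset' is the
 * highest valid slot on the (hypothetical) swap device.
 */
static unsigned long readahead_window(unsigned long target,
                                      unsigned long max_offset,
                                      unsigned long *nr)
{
        unsigned long pages = 1UL << page_cluster;
        unsigned long start = target & ~(pages - 1);    /* align down */
        unsigned long end = start + pages;

        if (start == 0)
                start = 1;              /* slot 0 holds the swap header */
        if (end > max_offset + 1)
                end = max_offset + 1;   /* don't run past the device */

        *nr = end - start;
        return start;
}

int main(void)
{
        unsigned long nr;
        unsigned long start = readahead_window(42, 1024, &nr);

        printf("fault at slot 42 -> read slots [%lu, %lu)\n", start, start + nr);
        return 0;
}

Each slot in that window is handed to read_swap_cache_async() in turn, and the 'original' entry is read last so its page is the one returned to the fault handler.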