commit 4f98a2fee8

    Split the LRU lists in two, one set for pages that are backed by real file
    systems ("file") and one for pages that are backed by memory and swap
    ("anon"). The latter includes tmpfs.

    The advantage of doing this is that the VM will not have to scan over lots
    of anonymous pages (which we generally do not want to swap out), just to
    find the page cache pages that it should evict.

    This patch has the infrastructure and a basic policy to balance how much
    we scan the anon lists and how much we scan the file lists. The big policy
    changes are in separate patches.

    [lee.schermerhorn@hp.com: collect lru meminfo statistics from correct offset]
    [kosaki.motohiro@jp.fujitsu.com: prevent incorrect oom under split_lru]
    [kosaki.motohiro@jp.fujitsu.com: fix pagevec_move_tail() doesn't treat unevictable page]
    [hugh@veritas.com: memcg swapbacked pages active]
    [hugh@veritas.com: splitlru: BDI_CAP_SWAP_BACKED]
    [akpm@linux-foundation.org: fix /proc/vmstat units]
    [nishimura@mxp.nes.nec.co.jp: memcg: fix handling of shmem migration]
    [kosaki.motohiro@jp.fujitsu.com: adjust Quicklists field of /proc/meminfo]
    [kosaki.motohiro@jp.fujitsu.com: fix style issue of get_scan_ratio()]

    Signed-off-by: Rik van Riel <riel@redhat.com>
    Signed-off-by: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
    Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
    Signed-off-by: Hugh Dickins <hugh@veritas.com>
    Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002    Andrew Morton
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>

/*
 * Default unplug function: a no-op, for backing devices that have no
 * request queue to kick.
 */
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
        .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
        .state          = 0,
        .capabilities   = BDI_CAP_MAP_COPY,
        .unplug_io_fn   = default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
        ra->ra_pages = mapping->backing_dev_info->ra_pages;
        ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

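#if 0
/*
 * Illustrative sketch (not part of the original file): how an open path
 * might set up readahead state for a freshly allocated struct file whose
 * memory was already zeroed.  "example_setup_ra" is a hypothetical helper.
 */
static void example_setup_ra(struct file *filp)
{
        /* struct file was zeroed at allocation, satisfying the memset rule */
        file_ra_state_init(&filp->f_ra, filp->f_mapping);
}
#endif
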
#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc. from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                        int (*filler)(void *, struct page *), void *data)
{
        struct page *page;
        int ret = 0;

        while (!list_empty(pages)) {
                page = list_to_page(pages);
                list_del(&page->lru);
                if (add_to_page_cache_lru(page, mapping,
                                        page->index, GFP_KERNEL)) {
                        page_cache_release(page);
                        continue;
                }
                /* drop the caller's reference; the page cache holds its own */
                page_cache_release(page);

                ret = filler(data, page);
                if (unlikely(ret)) {
                        put_pages_list(pages);
                        break;
                }
                task_io_account_read(PAGE_CACHE_SIZE);
        }
        return ret;
}

EXPORT_SYMBOL(read_cache_pages);

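#if 0
/*
 * Illustrative sketch (not part of the original file): how a filesystem's
 * ->readpages() implementation might hand its page list to
 * read_cache_pages().  "example_fill_page" and "example_readpages" are
 * hypothetical names; real filesystems supply their own filler.
 */
static int example_fill_page(void *data, struct page *page)
{
        struct file *filp = data;

        /* read the backing data for this locked page, then unlock it */
        return filp->f_mapping->a_ops->readpage(filp, page);
}

static int example_readpages(struct file *filp, struct address_space *mapping,
                        struct list_head *pages, unsigned nr_pages)
{
        /* let the VM add each page to the cache and call us back per page */
        return read_cache_pages(mapping, pages, example_fill_page, filp);
}
#endif
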
static int read_pages(struct address_space *mapping, struct file *filp,
                struct list_head *pages, unsigned nr_pages)
{
        unsigned page_idx;
        int ret;

        if (mapping->a_ops->readpages) {
                ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
                /* Clean up the remaining pages */
                put_pages_list(pages);
                goto out;
        }

        /* No ->readpages(): fall back to reading the pages one at a time */
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = list_to_page(pages);
                list_del(&page->lru);
                if (!add_to_page_cache_lru(page, mapping,
                                        page->index, GFP_KERNEL)) {
                        mapping->a_ops->readpage(filp, page);
                }
                page_cache_release(page);
        }
        ret = 0;
out:
        return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * all the pages first, then submits them all for I/O.  This avoids the very
 * bad behaviour which would occur if page allocations are causing VM
 * writeback.  We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        pgoff_t offset, unsigned long nr_to_read,
                        unsigned long lookahead_size)
{
        struct inode *inode = mapping->host;
        struct page *page;
        unsigned long end_index;        /* The last page we want to read */
        LIST_HEAD(page_pool);
        int page_idx;
        int ret = 0;
        loff_t isize = i_size_read(inode);

        if (isize == 0)
                goto out;

        end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

        /*
         * Preallocate as many pages as we will need.
         */
        for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
                pgoff_t page_offset = offset + page_idx;

                if (page_offset > end_index)
                        break;

                rcu_read_lock();
                page = radix_tree_lookup(&mapping->page_tree, page_offset);
                rcu_read_unlock();
                if (page)
                        continue;

                page = page_cache_alloc_cold(mapping);
                if (!page)
                        break;
                page->index = page_offset;
                list_add(&page->lru, &page_pool);
                if (page_idx == nr_to_read - lookahead_size)
                        SetPageReadahead(page);
                ret++;
        }

        /*
         * Now start the IO.  We ignore I/O errors - if the page is not
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
        if (ret)
                read_pages(mapping, filp, &page_pool, ret);
        BUG_ON(!list_empty(&page_pool));
out:
        return ret;
}

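/*
 * Worked example (editorial note, assuming 4K pages): a call with
 * nr_to_read = 16 and lookahead_size = 8 allocates up to 16 pages starting
 * at @offset, skips any that are already cached, and sets PG_readahead on
 * the page at index offset + 8 (page_idx == 16 - 8).  When the reader later
 * touches that marked page, page_cache_async_readahead() gets the chance to
 * pipeline the next window before the current one is exhausted.
 */
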
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                pgoff_t offset, unsigned long nr_to_read)
{
        int ret = 0;

        if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
                return -EINVAL;

        while (nr_to_read) {
                int err;

                unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
                err = __do_page_cache_readahead(mapping, filp,
                                                offset, this_chunk, 0);
                if (err < 0) {
                        ret = err;
                        break;
                }
                ret += err;
                offset += this_chunk;
                nr_to_read -= this_chunk;
        }
        return ret;
}

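/*
 * Worked example (editorial note): with 4K pages each chunk is
 * (2 * 1024 * 1024) / 4096 = 512 pages, so a request for nr_to_read = 1300
 * pages is issued as three __do_page_cache_readahead() calls of 512, 512
 * and 276 pages.
 */
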
/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        pgoff_t offset, unsigned long nr_to_read)
{
        if (bdi_read_congested(mapping->backing_dev_info))
                return -1;

        return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
}

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
        return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
                + node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}

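/*
 * Worked example (editorial note): on a node with 10000 inactive file pages
 * and 6000 free pages, readahead is capped at (10000 + 6000) / 2 = 8000
 * pages, so max_sane_readahead(20000) returns 8000 while
 * max_sane_readahead(1000) returns 1000 unchanged.
 */
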
static int __init readahead_init(void)
{
        int err;

        err = bdi_init(&default_backing_dev_info);
        if (!err)
                bdi_register(&default_backing_dev_info, NULL, "default");

        return err;
}
subsys_initcall(readahead_init);

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
static unsigned long ra_submit(struct file_ra_state *ra,
                struct address_space *mapping, struct file *filp)
{
        int actual;

        actual = __do_page_cache_readahead(mapping, filp,
                                        ra->start, ra->size, ra->async_size);

        return actual;
}

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then scale it up - x4 if the result is at most max/32, x2 if at most
 * max/4, otherwise clamp it to max.  For a 128k (32 page) max readahead,
 * small reads start with a modest window while larger reads jump straight
 * to the full 128k.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
        unsigned long newsize = roundup_pow_of_two(size);

        if (newsize <= max / 32)
                newsize = newsize * 4;
        else if (newsize <= max / 4)
                newsize = newsize * 2;
        else
                newsize = max;

        return newsize;
}

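/*
 * Worked example (editorial note): with max = 32 pages (128k of 4K pages),
 * a 1 page request rounds to 1 and quadruples to a 4 page (16k) window; a
 * 3 page request rounds to 4 and doubles to 8 pages (32k); an 8 page
 * request rounds to 8 and doubles to 16 pages (64k); anything above 8
 * pages rounds to 16 or more and is clamped to the full 32 pages (128k).
 */
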
/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
                                                unsigned long max)
{
        unsigned long cur = ra->size;
        unsigned long newsize;

        if (cur < max / 16)
                newsize = 4 * cur;
        else
                newsize = 2 * cur;

        return min(newsize, max);
}

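/*
 * Worked example (editorial note): with max = 32 pages, a 1 page window
 * (1 < 32/16) quadruples to 4, then doubles on each further sequential
 * hit: 4 -> 8 -> 16 -> 32, where it stays pinned at max.
 */
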
/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application has consumed all
 * readahead pages and stalls on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as the
 * readahead indicator. The flag won't be set on already cached pages, to
 * avoid the readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */

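/*
 * Walkthrough (editorial note, assuming a cold cache, 4K pages and
 * max = 32 pages): a first 1 page read at offset 0 takes the bottom path of
 * ondemand_readahead(): start = 0, size = get_init_ra_size(1, 32) = 4,
 * async_size = 3, and PG_readahead lands on page 1.  When the application
 * reaches page 1, the async path extends the window: start = 4,
 * size = get_next_ra_size() = 8, async_size = 8, with PG_readahead on
 * page 4.  Each marked page thus triggers the next, larger window before
 * the reader can stall, until the window saturates at max.
 */
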
/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
                   struct file_ra_state *ra, struct file *filp,
                   bool hit_readahead_marker, pgoff_t offset,
                   unsigned long req_size)
{
        int max = ra->ra_pages; /* max readahead pages */
        pgoff_t prev_offset;
        int sequential;

        /*
         * It's the expected callback offset, assume sequential access.
         * Ramp up sizes, and push forward the readahead window.
         */
        if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
                        offset == (ra->start + ra->size))) {
                ra->start += ra->size;
                ra->size = get_next_ra_size(ra, max);
                ra->async_size = ra->size;
                goto readit;
        }

        prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
        sequential = offset - prev_offset <= 1UL || req_size > max;

        /*
         * Standalone, small read.
         * Read as is, and do not pollute the readahead state.
         */
        if (!hit_readahead_marker && !sequential) {
                return __do_page_cache_readahead(mapping, filp,
                                                offset, req_size, 0);
        }

        /*
         * Hit a marked page without valid readahead state.
         * E.g. interleaved reads.
         * Query the pagecache for async_size, which normally equals the
         * readahead size.  Ramp it up and use it as the new readahead size.
         */
        if (hit_readahead_marker) {
                pgoff_t start;

                rcu_read_lock();
                start = radix_tree_next_hole(&mapping->page_tree, offset,
                                                max + 1);
                rcu_read_unlock();

                if (!start || start - offset > max)
                        return 0;

                ra->start = start;
                ra->size = start - offset;      /* old async_size */
                ra->size = get_next_ra_size(ra, max);
                ra->async_size = ra->size;
                goto readit;
        }

        /*
         * It may be one of
         *      - first read on start of file
         *      - sequential cache miss
         *      - oversize random read
         * Start readahead for it.
         */
        ra->start = offset;
        ra->size = get_init_ra_size(req_size, max);
        ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
        return ra_submit(ra, mapping, filp);
}

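/*
 * Branch summary (editorial note):
 *  - offset at the expected next position -> extend the window, async I/O
 *  - small random read, no marker hit     -> read req_size pages, leave the
 *                                            readahead state untouched
 *  - marker hit with stale/foreign state  -> rebuild the window from the
 *                                            pagecache hole that follows
 *  - everything else (start of file, sequential miss, oversize random
 *    read)                                -> fresh initial window
 */
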
/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happens:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
                               struct file_ra_state *ra, struct file *filp,
                               pgoff_t offset, unsigned long req_size)
{
        /* no read-ahead */
        if (!ra->ra_pages)
                return;

        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
                           struct file_ra_state *ra, struct file *filp,
                           struct page *page, pgoff_t offset,
                           unsigned long req_size)
{
        /* no read-ahead */
        if (!ra->ra_pages)
                return;

        /*
         * Same bit is used for PG_readahead and PG_reclaim.  A page under
         * writeback may carry it as PG_reclaim, so don't mistake that for
         * a readahead marker.
         */
        if (PageWriteback(page))
                return;

        ClearPageReadahead(page);

        /*
         * Defer asynchronous read-ahead on IO congestion.
         */
        if (bdi_read_congested(mapping->backing_dev_info))
                return;

        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
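
#if 0
/*
 * Illustrative sketch (not part of the original file): the shape of a
 * generic buffered-read loop driving both entry points, loosely modelled
 * on how a file read path could use this API.  "example_read_page" is a
 * hypothetical helper; error handling and locking are elided.
 */
static struct page *example_read_page(struct file *filp, pgoff_t index,
                                      unsigned long remaining_pages)
{
        struct address_space *mapping = filp->f_mapping;
        struct page *page;

        page = find_get_page(mapping, index);
        if (!page) {
                /* cache miss: start synchronous readahead, then retry */
                page_cache_sync_readahead(mapping, &filp->f_ra, filp,
                                          index, remaining_pages);
                page = find_get_page(mapping, index);
        } else if (PageReadahead(page)) {
                /* hit the marker: pipeline the next window */
                page_cache_async_readahead(mapping, &filp->f_ra, filp,
                                           page, index, remaining_pages);
        }
        return page;    /* may still be NULL or !uptodate; caller handles */
}
#endif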