mirror of https://github.com/torvalds/linux.git
10be0b372c
Introduce page cache context based readahead algorithm.
This is to better support concurrent read streams in general.

RATIONALE
---------
The current readahead algorithm detects interleaved reads in a _passive_ way.
Given a sequence of interleaved streams 1,1001,2,1002,3,4,1003,5,1004,1005,6,...
and by checking for (offset == prev_offset + 1), it will only discover the
sequentialness between 3,4 and between 1004,1005, and start doing sequential
readahead for the individual streams from page 4 and page 1005.

The context readahead algorithm guarantees to discover the sequentialness no
matter how the streams are interleaved. For the above example, it will start
sequential readahead from page 2 and 1002.

The trick is to poke for page @offset-1 in the page cache when it has no other
clues on the sequentialness of request @offset: if the current request belongs
to a sequential stream, that stream must have accessed page @offset-1 recently,
and the page will still be cached now. So if page @offset-1 is there, we can
take request @offset as a sequential access.

BENEFICIARIES
-------------
- strictly interleaved reads, i.e. 1,1001,2,1002,3,1003,...
  the current readahead will take them as silly random reads;
  the context readahead will take them as two sequential streams.

- cooperative IO processes, i.e. NFS and SCST
  They create a thread pool, farming off (sequential) IO requests to different
  threads which will be performing interleaved IO.

  It was not easy (or possible) to reliably tell from file->f_ra all those
  cooperative processes working on the same sequential stream, since they will
  have different file->f_ra instances. And NFSD's file->f_ra is particularly
  unusable, since their file objects are dynamically created for each request.
  The nfsd does have code trying to restore the f_ra bits, but it is not
  satisfactory.

  The new scheme is to detect the sequential pattern by looking up the page
  cache, which provides one single and consistent view of the pages recently
  accessed. That makes sequential detection for cooperative processes possible.

USER REPORT
-----------
Vladislav recommends the addition of context readahead as a result of his SCST
benchmarks. It leads to 6%~40% performance gains in various cases and achieves
equal performance in others.  http://lkml.org/lkml/2009/3/19/239

OVERHEADS
---------
In theory, it introduces one extra page cache lookup per random read. However,
the benchmark below shows context readahead to be slightly faster, somewhat
surprisingly.

Randomly reading 200MB of data from a sparse file, repeated 20 times for each
block size. The average throughputs are:

                    original ra    context ra    gain
 4K random reads:    65.561MB/s    65.648MB/s   +0.1%
16K random reads:   124.767MB/s   124.951MB/s   +0.1%
64K random reads:   162.123MB/s   162.278MB/s   +0.1%

Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Tested-by: Vladislav Bolkhovitin <vst@vlnb.net>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
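To make the page-cache probe concrete, here is a minimal userspace sketch (an editorial illustration, not part of the commit): the page cache is modelled as a plain bitmap and classify() is a made-up helper; a read at @offset is treated as sequential whenever page @offset-1 is already cached. Fed the interleaved trace 1,1001,2,1002,3,1003 it reports the first access of each stream as random and everything from page 2 and 1002 onwards as sequential, matching the example above.

#include <stdbool.h>
#include <stdio.h>

#define NR_PAGES 2048

static bool cache[NR_PAGES];    /* models "is this page in the page cache?" */

/* Sequential if the immediately preceding page was recently read. */
static const char *classify(unsigned long offset)
{
        if (offset && cache[offset - 1])
                return "sequential (context)";
        return "random";
}

int main(void)
{
        /* two interleaved sequential streams */
        unsigned long trace[] = { 1, 1001, 2, 1002, 3, 1003 };
        unsigned long i;

        for (i = 0; i < sizeof(trace) / sizeof(trace[0]); i++) {
                printf("read %4lu -> %s\n", trace[i], classify(trace[i]));
                cache[trace[i]] = true; /* the read populates the cache */
        }
        return 0;
}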
552 lines
15 KiB
C
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002    Andrew Morton
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
        ra->ra_pages = mapping->backing_dev_info->ra_pages;
        ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
                                             struct page *page)
{
        if (page_has_private(page)) {
                if (!trylock_page(page))
                        BUG();
                page->mapping = mapping;
                do_invalidatepage(page, 0);
                page->mapping = NULL;
                unlock_page(page);
        }
        page_cache_release(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
                                              struct list_head *pages)
{
        struct page *victim;

        while (!list_empty(pages)) {
                victim = list_to_page(pages);
                list_del(&victim->lru);
                read_cache_pages_invalidate_page(mapping, victim);
        }
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                     int (*filler)(void *, struct page *), void *data)
{
        struct page *page;
        int ret = 0;

        while (!list_empty(pages)) {
                page = list_to_page(pages);
                list_del(&page->lru);
                if (add_to_page_cache_lru(page, mapping,
                                          page->index, GFP_KERNEL)) {
                        read_cache_pages_invalidate_page(mapping, page);
                        continue;
                }
                page_cache_release(page);

                ret = filler(data, page);
                if (unlikely(ret)) {
                        read_cache_pages_invalidate_pages(mapping, pages);
                        break;
                }
                task_io_account_read(PAGE_CACHE_SIZE);
        }
        return ret;
}

EXPORT_SYMBOL(read_cache_pages);

static int read_pages(struct address_space *mapping, struct file *filp,
                      struct list_head *pages, unsigned nr_pages)
{
        unsigned page_idx;
        int ret;

        if (mapping->a_ops->readpages) {
                ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
                /* Clean up the remaining pages */
                put_pages_list(pages);
                goto out;
        }

        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = list_to_page(pages);
                list_del(&page->lru);
                if (!add_to_page_cache_lru(page, mapping,
                                           page->index, GFP_KERNEL)) {
                        mapping->a_ops->readpage(filp, page);
                }
                page_cache_release(page);
        }
        ret = 0;
out:
        return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
 * the pages first, then submits them all for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                          pgoff_t offset, unsigned long nr_to_read,
                          unsigned long lookahead_size)
{
        struct inode *inode = mapping->host;
        struct page *page;
        unsigned long end_index;        /* The last page we want to read */
        LIST_HEAD(page_pool);
        int page_idx;
        int ret = 0;
        loff_t isize = i_size_read(inode);

        if (isize == 0)
                goto out;

        end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

        /*
         * Preallocate as many pages as we will need.
         */
        for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
                pgoff_t page_offset = offset + page_idx;

                if (page_offset > end_index)
                        break;

                rcu_read_lock();
                page = radix_tree_lookup(&mapping->page_tree, page_offset);
                rcu_read_unlock();
                if (page)
                        continue;

                page = page_cache_alloc_cold(mapping);
                if (!page)
                        break;
                page->index = page_offset;
                list_add(&page->lru, &page_pool);
                if (page_idx == nr_to_read - lookahead_size)
                        SetPageReadahead(page);
                ret++;
        }

        /*
         * Now start the IO.  We ignore I/O errors - if the page is not
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
        if (ret)
                read_pages(mapping, filp, &page_pool, ret);
        BUG_ON(!list_empty(&page_pool));
out:
        return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                               pgoff_t offset, unsigned long nr_to_read)
{
        int ret = 0;

        if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
                return -EINVAL;

        nr_to_read = max_sane_readahead(nr_to_read);
        while (nr_to_read) {
                int err;

                unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
                err = __do_page_cache_readahead(mapping, filp,
                                                offset, this_chunk, 0);
                if (err < 0) {
                        ret = err;
                        break;
                }
                ret += err;
                offset += this_chunk;
                nr_to_read -= this_chunk;
        }
        return ret;
}

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
        return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
                + node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
unsigned long ra_submit(struct file_ra_state *ra,
                        struct address_space *mapping, struct file *filp)
{
        int actual;

        actual = __do_page_cache_readahead(mapping, filp,
                                           ra->start, ra->size, ra->async_size);

        return actual;
}

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then scale it up - x 4 for small requests (<= max/32 pages), x 2 for
 * medium ones (<= max/4 pages), and clamp to max for anything larger.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
        unsigned long newsize = roundup_pow_of_two(size);

        if (newsize <= max / 32)
                newsize = newsize * 4;
        else if (newsize <= max / 4)
                newsize = newsize * 2;
        else
                newsize = max;

        return newsize;
}

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
                                      unsigned long max)
{
        unsigned long cur = ra->size;
        unsigned long newsize;

        if (cur < max / 16)
                newsize = 4 * cur;
        else
                newsize = 2 * cur;

        return min(newsize, max);
}

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application has consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window.  Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * invalidate each other's readahead state.  So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as a readahead
 * indicator.  The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads.  Note that the readahead algorithm checks loosely
 * for sequential patterns.  Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */

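/*
 * Worked example (an editorial illustration, not part of the original
 * source): with ra_pages = 32 (128k with 4k pages), a reader issuing
 * 1-page sequential requests from page 0 gets an initial window of
 * get_init_ra_size(1, 32) = 4 pages, and each later hit on the
 * PG_readahead marker ramps the window up via get_next_ra_size(), so
 * the windows grow 4 -> 8 -> 16 -> 32 pages and then stay clamped at max.
 */
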
/*
 * Count contiguously cached pages from @offset-1 to @offset-@max,
 * this count is a conservative estimation of
 *      - length of the sequential read sequence, or
 *      - thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
                                   struct file_ra_state *ra,
                                   pgoff_t offset, unsigned long max)
{
        pgoff_t head;

        rcu_read_lock();
        head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
        rcu_read_unlock();

        return offset - 1 - head;
}

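/*
 * Editorial example: if pages @offset-1 through @offset-5 are cached but
 * page @offset-6 is not, the backward scan above stops at the hole at
 * @offset-6, and count_history_pages() returns 5 - five pages of
 * sequential history behind this request.
 */
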
/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
                                 struct file_ra_state *ra,
                                 pgoff_t offset,
                                 unsigned long req_size,
                                 unsigned long max)
{
        pgoff_t size;

        size = count_history_pages(mapping, ra, offset, max);

        /*
         * no history pages:
         * it could be a random read
         */
        if (!size)
                return 0;

        /*
         * starts from beginning of file:
         * it is a strong indication of long-run stream (or whole-file-read)
         */
        if (size >= offset)
                size *= 2;

        ra->start = offset;
        ra->size = get_init_ra_size(size + req_size, max);
        ra->async_size = ra->size;

        return 1;
}

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
                   struct file_ra_state *ra, struct file *filp,
                   bool hit_readahead_marker, pgoff_t offset,
                   unsigned long req_size)
{
        unsigned long max = max_sane_readahead(ra->ra_pages);

        /*
         * start of file
         */
        if (!offset)
                goto initial_readahead;

        /*
         * It's the expected callback offset, assume sequential access.
         * Ramp up sizes, and push forward the readahead window.
         */
        if ((offset == (ra->start + ra->size - ra->async_size) ||
             offset == (ra->start + ra->size))) {
                ra->start += ra->size;
                ra->size = get_next_ra_size(ra, max);
                ra->async_size = ra->size;
                goto readit;
        }

        /*
         * Hit a marked page without valid readahead state.
         * E.g. interleaved reads.
         * Query the pagecache for async_size, which normally equals the
         * readahead size.  Ramp it up and use it as the new readahead size.
         */
        if (hit_readahead_marker) {
                pgoff_t start;

                rcu_read_lock();
                start = radix_tree_next_hole(&mapping->page_tree, offset + 1, max);
                rcu_read_unlock();

                if (!start || start - offset > max)
                        return 0;

                ra->start = start;
                ra->size = start - offset;      /* old async_size */
                ra->size += req_size;
                ra->size = get_next_ra_size(ra, max);
                ra->async_size = ra->size;
                goto readit;
        }

        /*
         * oversize read
         */
        if (req_size > max)
                goto initial_readahead;

        /*
         * sequential cache miss
         */
        if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
                goto initial_readahead;

        /*
         * Query the page cache and look for the traces (cached history pages)
         * that a sequential stream would leave behind.
         */
        if (try_context_readahead(mapping, ra, offset, req_size, max))
                goto readit;

        /*
         * standalone, small random read
         * Read as is, and do not pollute the readahead state.
         */
        return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);

initial_readahead:
        ra->start = offset;
        ra->size = get_init_ra_size(req_size, max);
        ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
        /*
         * Will this read hit the readahead marker made by itself?
         * If so, trigger the readahead marker hit now, and merge
         * the resulting next readahead window into the current one.
         */
        if (offset == ra->start && ra->size == ra->async_size) {
                ra->async_size = get_next_ra_size(ra, max);
                ra->size += ra->async_size;
        }

        return ra_submit(ra, mapping, filp);
}

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
                               struct file_ra_state *ra, struct file *filp,
                               pgoff_t offset, unsigned long req_size)
{
        /* no read-ahead */
        if (!ra->ra_pages)
                return;

        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
                           struct file_ra_state *ra, struct file *filp,
                           struct page *page, pgoff_t offset,
                           unsigned long req_size)
{
        /* no read-ahead */
        if (!ra->ra_pages)
                return;

        /*
         * Same bit is used for PG_readahead and PG_reclaim.
         */
        if (PageWriteback(page))
                return;

        ClearPageReadahead(page);

        /*
         * Defer asynchronous read-ahead on IO congestion.
         */
        if (bdi_read_congested(mapping->backing_dev_info))
                return;

        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
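For orientation, here is a condensed sketch of how a caller typically drives the two entry points above, loosely modelled on the kernel's generic file read loop. It is an editorial illustration only: read_one_page() is a made-up helper name, and error handling and page locking are omitted.

static void read_one_page(struct address_space *mapping, struct file *filp,
                          struct file_ra_state *ra, pgoff_t index,
                          unsigned long nr_pages_wanted)
{
        struct page *page;

        page = find_get_page(mapping, index);
        if (!page) {
                /* cache miss: let readahead submit this page and more */
                page_cache_sync_readahead(mapping, ra, filp,
                                          index, nr_pages_wanted);
                page = find_get_page(mapping, index);
                if (!page)
                        return;         /* fall back to a single-page read */
        }
        if (PageReadahead(page))
                /* hit the PG_readahead marker: pipeline the next window */
                page_cache_async_readahead(mapping, ra, filp, page,
                                           index, nr_pages_wanted);
        page_cache_release(page);
}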