mm: change invalidatepage prototype to accept length
Currently there is no way to truncate a partial page where the end truncation point is not at the end of the page. This is because it was not needed and the existing functionality was enough for the file system truncate operation to work properly. However, more file systems now support the punch hole feature, and they can benefit from mm supporting truncation of a page only up to a certain point.

Specifically, with this functionality truncate_inode_pages_range() can be changed so that it supports truncating a partial page at the end of the range (currently it will BUG_ON() if 'end' is not at the end of the page).

This commit changes the invalidatepage() address space operation prototype to accept the range to be invalidated and updates all of its instances. We also change block_invalidatepage() in the same way and actually make use of the new length argument, implementing range invalidation.

Actual file system implementations will follow, except for the file systems where the changes are really simple and should not change the behaviour in any way.

An implementation for truncate_page_range(), which will be able to accept page-unaligned ranges, will follow as well.

Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Hugh Dickins <hughd@google.com>
commit d47992f86b
parent c7788792a5
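The change itself is mechanical, and the sketch below shows its shape before diving into the diff. It is an illustration only, not code from the patch: example_invalidatepage() and example_release_private() are made-up names, and the full-page check mirrors the pattern the per-filesystem hunks below adopt.

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical helper standing in for a filesystem's private-data teardown. */
static void example_release_private(struct page *page)
{
}

/* Old prototype: only the start of the invalidated range was passed. */
static void example_invalidatepage_old(struct page *page, unsigned long offset)
{
        if (offset == 0)        /* offset 0 used to imply "the whole page" */
                example_release_private(page);
}

/* New prototype: an explicit (offset, length) byte range within the page. */
static void example_invalidatepage(struct page *page, unsigned int offset,
                                   unsigned int length)
{
        if (offset == 0 && length == PAGE_CACHE_SIZE)   /* whole page */
                example_release_private(page);
}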
@@ -189,7 +189,7 @@ prototypes:
 loff_t pos, unsigned len, unsigned copied,
 struct page *page, void *fsdata);
 sector_t (*bmap)(struct address_space *, sector_t);
-int (*invalidatepage) (struct page *, unsigned long);
+void (*invalidatepage) (struct page *, unsigned int, unsigned int);
 int (*releasepage) (struct page *, int);
 void (*freepage)(struct page *);
 int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
@@ -310,8 +310,8 @@ filesystems and by the swapper. The latter will eventually go away. Please,
 keep it that way and don't breed new callers.
 
 ->invalidatepage() is called when the filesystem must attempt to drop
 some or all of the buffers from the page when it is being truncated. It
 returns zero on success. If ->invalidatepage is zero, the kernel uses
 block_invalidatepage() instead.
 
 ->releasepage() is called when the kernel is about to try to drop the
@@ -549,7 +549,7 @@ struct address_space_operations
 -------------------------------
 
 This describes how the VFS can manipulate mapping of a file to page cache in
-your filesystem. As of kernel 2.6.22, the following members are defined:
+your filesystem. The following members are defined:
 
 struct address_space_operations {
 int (*writepage)(struct page *page, struct writeback_control *wbc);
@@ -566,7 +566,7 @@ struct address_space_operations {
 loff_t pos, unsigned len, unsigned copied,
 struct page *page, void *fsdata);
 sector_t (*bmap)(struct address_space *, sector_t);
-int (*invalidatepage) (struct page *, unsigned long);
+void (*invalidatepage) (struct page *, unsigned int, unsigned int);
 int (*releasepage) (struct page *, int);
 void (*freepage)(struct page *);
 ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
@@ -685,14 +685,14 @@ struct address_space_operations {
 invalidatepage: If a page has PagePrivate set, then invalidatepage
 will be called when part or all of the page is to be removed
 from the address space. This generally corresponds to either a
-truncation or a complete invalidation of the address space
-(in the latter case 'offset' will always be 0).
-Any private data associated with the page should be updated
-to reflect this truncation. If offset is 0, then
-the private data should be released, because the page
-must be able to be completely discarded. This may be done by
-calling the ->releasepage function, but in this case the
+truncation, punch hole or a complete invalidation of the address
+space (in the latter case 'offset' will always be 0 and 'length'
+will be PAGE_CACHE_SIZE). Any private data associated with the page
+should be updated to reflect this truncation. If offset is 0 and
+length is PAGE_CACHE_SIZE, then the private data should be released,
+because the page must be able to be completely discarded. This may
+be done by calling the ->releasepage function, but in this case the
 release MUST succeed.
 
 releasepage: releasepage is called on PagePrivate pages to indicate
 that the page should be freed if possible. ->releasepage
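The documentation text above defines the contract in prose; the following sketch restates it in code. It is illustrative only (sketch_drop_private() and sketch_trim_private() are hypothetical helpers, not symbols from this patch): a full-page call must leave the page completely discardable, while a partial range only needs the private data brought up to date.

#include <linux/fs.h>
#include <linux/pagemap.h>

static void sketch_drop_private(struct page *page)              /* hypothetical */
{
}

static void sketch_trim_private(struct page *page, unsigned int offset,
                                unsigned int length)            /* hypothetical */
{
}

static void sketch_invalidatepage(struct page *page, unsigned int offset,
                                  unsigned int length)
{
        if (offset == 0 && length == PAGE_CACHE_SIZE) {
                /*
                 * Complete invalidation: the private data must be released
                 * so the page can be discarded, and the release must succeed.
                 */
                sketch_drop_private(page);
        } else {
                /* Partial range: just update the private data to match. */
                sketch_trim_private(page, offset, length);
        }
}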
@@ -148,13 +148,14 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
 * @offset: offset in the page
 */
 
-static void v9fs_invalidate_page(struct page *page, unsigned long offset)
+static void v9fs_invalidate_page(struct page *page, unsigned int offset,
+unsigned int length)
 {
 /*
 * If called with zero offset, we should release
 * the private state assocated with the page
 */
-if (offset == 0)
+if (offset == 0 && length == PAGE_CACHE_SIZE)
 v9fs_fscache_invalidate_page(page);
 }
 
@@ -19,7 +19,8 @@
 #include "internal.h"
 
 static int afs_readpage(struct file *file, struct page *page);
-static void afs_invalidatepage(struct page *page, unsigned long offset);
+static void afs_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length);
 static int afs_releasepage(struct page *page, gfp_t gfp_flags);
 static int afs_launder_page(struct page *page);
 
@@ -310,16 +311,17 @@ static int afs_launder_page(struct page *page)
 * - release a page and clean up its private data if offset is 0 (indicating
 * the entire page)
 */
-static void afs_invalidatepage(struct page *page, unsigned long offset)
+static void afs_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length)
 {
 struct afs_writeback *wb = (struct afs_writeback *) page_private(page);
 
-_enter("{%lu},%lu", page->index, offset);
+_enter("{%lu},%u,%u", page->index, offset, length);
 
 BUG_ON(!PageLocked(page));
 
 /* we clean up only if the entire page is being invalidated */
-if (offset == 0) {
+if (offset == 0 && length == PAGE_CACHE_SIZE) {
 #ifdef CONFIG_AFS_FSCACHE
 if (PageFsCache(page)) {
 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
@@ -1013,7 +1013,8 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags)
 return try_release_extent_buffer(page);
 }
 
-static void btree_invalidatepage(struct page *page, unsigned long offset)
+static void btree_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length)
 {
 struct extent_io_tree *tree;
 tree = &BTRFS_I(page->mapping->host)->io_tree;
@@ -2957,7 +2957,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
 if (page->index > end_index ||
 (page->index == end_index && !pg_offset)) {
-page->mapping->a_ops->invalidatepage(page, 0);
+page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
 unlock_page(page);
 return 0;
 }
@@ -7510,7 +7510,8 @@ static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
 }
 
-static void btrfs_invalidatepage(struct page *page, unsigned long offset)
+static void btrfs_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length)
 {
 struct inode *inode = page->mapping->host;
 struct extent_io_tree *tree;
fs/buffer.c
@@ -1454,7 +1454,8 @@ static void discard_buffer(struct buffer_head * bh)
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
-* @offset: the index of the truncation point
+* @offset: start of the range to invalidate
+* @length: length of the range to invalidate
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
@@ -1465,21 +1466,34 @@ static void discard_buffer(struct buffer_head * bh)
 * point. Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
-void block_invalidatepage(struct page *page, unsigned long offset)
+void block_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length)
 {
 struct buffer_head *head, *bh, *next;
 unsigned int curr_off = 0;
+unsigned int stop = length + offset;
 
 BUG_ON(!PageLocked(page));
 if (!page_has_buffers(page))
 goto out;
 
+/*
+* Check for overflow
+*/
+BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+
 head = page_buffers(page);
 bh = head;
 do {
 unsigned int next_off = curr_off + bh->b_size;
 next = bh->b_this_page;
 
+/*
+* Are we still fully in range ?
+*/
+if (next_off > stop)
+goto out;
+
 /*
 * is this block fully invalidated?
 */
@@ -1501,6 +1515,7 @@ out:
 }
 EXPORT_SYMBOL(block_invalidatepage);
 
+
 /*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
@@ -2841,7 +2856,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block,
 * they may have been added in ext3_writepage(). Make them
 * freeable here, so the page does not leak.
 */
-do_invalidatepage(page, 0);
+do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
 unlock_page(page);
 return 0; /* don't care */
 }
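The interesting part of the fs/buffer.c change is the new stop bound: buffers are walked in page order and only those lying entirely inside [offset, offset + length) are discarded, and the walk ends as soon as a buffer crosses the end of the range. Below is a simplified, stand-alone model of that bound (plain C with hypothetical sizes, not the real buffer_head walk).

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096u

/* Report which fixed-size blocks of a page a range invalidation covers. */
static void sketch_range_walk(unsigned int offset, unsigned int length,
                              unsigned int blocksize)
{
        unsigned int stop = offset + length;    /* exclusive end of the range */
        unsigned int curr_off = 0;

        while (curr_off < PAGE_CACHE_SIZE) {
                unsigned int next_off = curr_off + blocksize;

                if (next_off > stop)            /* block crosses the end: stop */
                        break;
                if (curr_off >= offset)         /* fully inside the range: drop */
                        printf("discard buffer at %u..%u\n", curr_off, next_off);
                curr_off = next_off;
        }
}

int main(void)
{
        /* e.g. punching a hole over the middle 2 KiB of a 4 KiB page */
        sketch_range_walk(1024, 2048, 1024);
        return 0;
}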
@@ -143,7 +143,8 @@ static int ceph_set_page_dirty(struct page *page)
 * dirty page counters appropriately. Only called if there is private
 * data on the page.
 */
-static void ceph_invalidatepage(struct page *page, unsigned long offset)
+static void ceph_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length)
 {
 struct inode *inode;
 struct ceph_inode_info *ci;
@@ -168,7 +169,7 @@ static void ceph_invalidatepage(struct page *page, unsigned long offset)
 
 ci = ceph_inode(inode);
 if (offset == 0) {
-dout("%p invalidatepage %p idx %lu full dirty page %lu\n",
+dout("%p invalidatepage %p idx %lu full dirty page %u\n",
 inode, page, page->index, offset);
 ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
 ceph_put_snap_context(snapc);
@@ -3546,11 +3546,12 @@ static int cifs_release_page(struct page *page, gfp_t gfp)
 return cifs_fscache_release_page(page, gfp);
 }
 
-static void cifs_invalidate_page(struct page *page, unsigned long offset)
+static void cifs_invalidate_page(struct page *page, unsigned int offset,
+unsigned int length)
 {
 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
 
-if (offset == 0)
+if (offset == 0 && length == PAGE_CACHE_SIZE)
 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
 }
 
@@ -953,9 +953,11 @@ static int exofs_releasepage(struct page *page, gfp_t gfp)
 return 0;
 }
 
-static void exofs_invalidatepage(struct page *page, unsigned long offset)
+static void exofs_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length)
 {
-EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset);
+EXOFS_DBGMSG("page 0x%lx offset 0x%x length 0x%x\n",
+page->index, offset, length);
 WARN_ON(1);
 }
 
@@ -1825,7 +1825,8 @@ ext3_readpages(struct file *file, struct address_space *mapping,
 return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
 }
 
-static void ext3_invalidatepage(struct page *page, unsigned long offset)
+static void ext3_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length)
 {
 journal_t *journal = EXT3_JOURNAL(page->mapping->host);
 
@@ -132,7 +132,8 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode,
 new_size);
 }
 
-static void ext4_invalidatepage(struct page *page, unsigned long offset);
+static void ext4_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length);
 static int __ext4_journalled_writepage(struct page *page, unsigned int len);
 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
 static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
@@ -1606,7 +1607,7 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
 break;
 BUG_ON(!PageLocked(page));
 BUG_ON(PageWriteback(page));
-block_invalidatepage(page, 0);
+block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
 ClearPageUptodate(page);
 unlock_page(page);
 }
@@ -2829,7 +2830,8 @@ static int ext4_da_write_end(struct file *file,
 return ret ? ret : copied;
 }
 
-static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
+static void ext4_da_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length)
 {
 /*
 * Drop reserved blocks
@@ -2841,7 +2843,7 @@ static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
 ext4_da_page_release_reservation(page, offset);
 
 out:
-ext4_invalidatepage(page, offset);
+ext4_invalidatepage(page, offset, length);
 
 return;
 }
@@ -2989,14 +2991,15 @@ ext4_readpages(struct file *file, struct address_space *mapping,
 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
 }
 
-static void ext4_invalidatepage(struct page *page, unsigned long offset)
+static void ext4_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length)
 {
 trace_ext4_invalidatepage(page, offset);
 
 /* No journalling happens on data buffers when this function is used */
 WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
 
-block_invalidatepage(page, offset);
+block_invalidatepage(page, offset, PAGE_CACHE_SIZE - offset);
 }
 
 static int __ext4_journalled_invalidatepage(struct page *page,
@@ -3017,7 +3020,8 @@ static int __ext4_journalled_invalidatepage(struct page *page,
 
 /* Wrapper for aops... */
 static void ext4_journalled_invalidatepage(struct page *page,
-unsigned long offset)
+unsigned int offset,
+unsigned int length)
 {
 WARN_ON(__ext4_journalled_invalidatepage(page, offset) < 0);
 }
@@ -698,7 +698,8 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
 get_data_block_ro);
 }
 
-static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
+static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
+unsigned int length)
 {
 struct inode *inode = page->mapping->host;
 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
@@ -1205,7 +1205,8 @@ static int f2fs_set_node_page_dirty(struct page *page)
 return 0;
 }
 
-static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
+static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
+unsigned int length)
 {
 struct inode *inode = page->mapping->host;
 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
@@ -110,7 +110,7 @@ static int gfs2_writepage_common(struct page *page,
 /* Is the page fully outside i_size? (truncate in progress) */
 offset = i_size & (PAGE_CACHE_SIZE-1);
 if (page->index > end_index || (page->index == end_index && !offset)) {
-page->mapping->a_ops->invalidatepage(page, 0);
+page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
 goto out;
 }
 return 1;
@@ -299,7 +299,8 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
 
 /* Is the page fully outside i_size? (truncate in progress) */
 if (page->index > end_index || (page->index == end_index && !offset)) {
-page->mapping->a_ops->invalidatepage(page, 0);
+page->mapping->a_ops->invalidatepage(page, 0,
+PAGE_CACHE_SIZE);
 unlock_page(page);
 continue;
 }
@@ -943,7 +944,8 @@ static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
 unlock_buffer(bh);
 }
 
-static void gfs2_invalidatepage(struct page *page, unsigned long offset)
+static void gfs2_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length)
 {
 struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
 struct buffer_head *bh, *head;
@@ -571,9 +571,10 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
 return ret;
 }
 
-static void metapage_invalidatepage(struct page *page, unsigned long offset)
+static void metapage_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length)
 {
-BUG_ON(offset);
+BUG_ON(offset || length < PAGE_CACHE_SIZE);
 
 BUG_ON(PageWriteback(page));
 
@@ -159,7 +159,8 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
 return __logfs_writepage(page);
 }
 
-static void logfs_invalidatepage(struct page *page, unsigned long offset)
+static void logfs_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length)
 {
 struct logfs_block *block = logfs_block(page);
 
@@ -884,7 +884,8 @@ static struct logfs_area *alloc_area(struct super_block *sb)
 return area;
 }
 
-static void map_invalidatepage(struct page *page, unsigned long l)
+static void map_invalidatepage(struct page *page, unsigned int o,
+unsigned int l)
 {
 return;
 }
@@ -451,11 +451,13 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 */
-static void nfs_invalidate_page(struct page *page, unsigned long offset)
+static void nfs_invalidate_page(struct page *page, unsigned int offset,
+unsigned int length)
 {
-dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %lu)\n", page, offset);
+dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
+page, offset, length);
 
-if (offset != 0)
+if (offset != 0 || length < PAGE_CACHE_SIZE)
 return;
 /* Cancel any unstarted writes on this page */
 nfs_wb_page_cancel(page_file_mapping(page)->host, page);
@@ -1372,7 +1372,7 @@ retry_writepage:
 * The page may have dirty, unmapped buffers. Make them
 * freeable here, so the page does not leak.
 */
-block_invalidatepage(page, 0);
+block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
 unlock_page(page);
 ntfs_debug("Write outside i_size - truncated?");
 return 0;
@@ -603,7 +603,8 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
 * from ext3. PageChecked() bits have been removed as OCFS2 does not
 * do journalled data.
 */
-static void ocfs2_invalidatepage(struct page *page, unsigned long offset)
+static void ocfs2_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length)
 {
 journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;
 
@@ -2970,7 +2970,8 @@ static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
 }
 
 /* clm -- taken from fs/buffer.c:block_invalidate_page */
-static void reiserfs_invalidatepage(struct page *page, unsigned long offset)
+static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length)
 {
 struct buffer_head *head, *bh, *next;
 struct inode *inode = page->mapping->host;
@@ -1277,13 +1277,14 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
 return err;
 }
 
-static void ubifs_invalidatepage(struct page *page, unsigned long offset)
+static void ubifs_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length)
 {
 struct inode *inode = page->mapping->host;
 struct ubifs_info *c = inode->i_sb->s_fs_info;
 
 ubifs_assert(PagePrivate(page));
-if (offset)
+if (offset || length < PAGE_CACHE_SIZE)
 /* Partial page remains dirty */
 return;
 
@@ -824,10 +824,11 @@ xfs_cluster_write(
 STATIC void
 xfs_vm_invalidatepage(
 struct page *page,
-unsigned long offset)
+unsigned int offset,
+unsigned int length)
 {
 trace_xfs_invalidatepage(page->mapping->host, page, offset);
-block_invalidatepage(page, offset);
+block_invalidatepage(page, offset, PAGE_CACHE_SIZE - offset);
 }
 
 /*
@@ -891,7 +892,7 @@ next_buffer:
 
 xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out_invalidate:
-xfs_vm_invalidatepage(page, 0);
+xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
 return;
 }
 
@@ -198,7 +198,8 @@ extern int buffer_heads_over_limit;
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
-void block_invalidatepage(struct page *page, unsigned long offset);
+void block_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length);
 int block_write_full_page(struct page *page, get_block_t *get_block,
 struct writeback_control *wbc);
 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
@@ -364,7 +364,7 @@ struct address_space_operations {
 
 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
 sector_t (*bmap)(struct address_space *, sector_t);
-void (*invalidatepage) (struct page *, unsigned long);
+void (*invalidatepage) (struct page *, unsigned int, unsigned int);
 int (*releasepage) (struct page *, gfp_t);
 void (*freepage)(struct page *);
 ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
@@ -1041,7 +1041,8 @@ int get_kernel_page(unsigned long start, int write, struct page **pages);
 struct page *get_dump_page(unsigned long addr);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
-extern void do_invalidatepage(struct page *page, unsigned long offset);
+extern void do_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length);
 
 int __set_page_dirty_nobuffers(struct page *page);
 int __set_page_dirty_no_writeback(struct page *page);
@@ -48,7 +48,7 @@ static void read_cache_pages_invalidate_page(struct address_space *mapping,
 if (!trylock_page(page))
 BUG();
 page->mapping = mapping;
-do_invalidatepage(page, 0);
+do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
 page->mapping = NULL;
 unlock_page(page);
 }
@@ -26,7 +26,8 @@
 /**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
-* @offset: the index of the truncation point
+* @offset: start of the range to invalidate
+* @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
@@ -37,16 +38,18 @@
 * point. Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
-void do_invalidatepage(struct page *page, unsigned long offset)
+void do_invalidatepage(struct page *page, unsigned int offset,
+unsigned int length)
 {
-void (*invalidatepage)(struct page *, unsigned long);
+void (*invalidatepage)(struct page *, unsigned int, unsigned int);
 
 invalidatepage = page->mapping->a_ops->invalidatepage;
 #ifdef CONFIG_BLOCK
 if (!invalidatepage)
 invalidatepage = block_invalidatepage;
 #endif
 if (invalidatepage)
-(*invalidatepage)(page, offset);
+(*invalidatepage)(page, offset, length);
 }
 
 static inline void truncate_partial_page(struct page *page, unsigned partial)
@@ -54,7 +57,7 @@ static inline void truncate_partial_page(struct page *page, unsigned partial)
 zero_user_segment(page, partial, PAGE_CACHE_SIZE);
 cleancache_invalidate_page(page->mapping, page);
 if (page_has_private(page))
-do_invalidatepage(page, partial);
+do_invalidatepage(page, partial, PAGE_CACHE_SIZE - partial);
 }
 
 /*
@@ -103,7 +106,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
 return -EIO;
 
 if (page_has_private(page))
-do_invalidatepage(page, 0);
+do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
 
 cancel_dirty_page(page, PAGE_CACHE_SIZE);
 
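For the callers converted above, the pair always describes the invalidated bytes of a single page: truncate_complete_page() passes (0, PAGE_CACHE_SIZE) and truncate_partial_page() passes (partial, PAGE_CACHE_SIZE - partial). The commit message's longer-term goal is ranges that start and end inside a page; the arithmetic for that case is sketched below with a hypothetical helper that is not part of this patch (it assumes the byte range [lstart, lend] intersects the locked page).

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Clip an inode byte range [lstart, lend] to one page and express it as the
 * (offset, length) pair that do_invalidatepage() now expects.  Illustration
 * only; the caller is assumed to hold the page lock.
 */
static void sketch_invalidate_range_in_page(struct page *page,
                                            loff_t lstart, loff_t lend)
{
        loff_t page_start = (loff_t)page->index << PAGE_CACHE_SHIFT;
        loff_t page_end = page_start + PAGE_CACHE_SIZE - 1;
        unsigned int offset = 0;
        unsigned int length;

        if (lstart > page_start)
                offset = lstart - page_start;           /* range starts mid-page */
        length = PAGE_CACHE_SIZE - offset;              /* default: to end of page */
        if (lend < page_end)
                length = lend - page_start + 1 - offset;/* range ends mid-page */

        if (page_has_private(page))
                do_invalidatepage(page, offset, length);
}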