commit 09cbfeaf1a
The PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long* time ago with the promise that one day it would be possible to implement the page cache with bigger chunks than PAGE_SIZE. This promise never materialized, and it is unlikely it ever will. We have many places where PAGE_CACHE_SIZE is assumed to be equal to PAGE_SIZE, and it's a constant source of confusion whether PAGE_CACHE_* or PAGE_* constants should be used in a particular case, especially on the border between fs and mm.

Globally switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause too much breakage to be doable. Let's stop pretending that pages in the page cache are special. They are not.

The changes are pretty straightforward:

 - <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
 - <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
 - PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
 - page_cache_get() -> get_page();
 - page_cache_release() -> put_page();

This patch contains automated changes generated with coccinelle using the script below. For some reason, coccinelle doesn't patch header files; I've called spatch for them manually. The only adjustment after coccinelle is reverting the changes to the PAGE_CACHE_ALIGN definition: we are going to drop it later.

There are a few places in the code that coccinelle didn't reach. I'll fix them manually in a separate patch. Comments and documentation will also be addressed in a separate patch.

virtual patch

@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT

@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE

@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK

@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)

@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)

@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
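Applied to the mmap.c listed below, the conversion reduces to one-for-one substitutions. The before/after pair that follows is reconstructed from the coccinelle rules above, not copied from the actual diff, so treat it as illustrative only:

        /* Before (reconstructed): page-cache-specific constants. */
        unsigned int len = PAGE_CACHE_SIZE;
        last_index = (size - 1) >> PAGE_CACHE_SHIFT;
        len = ((size - 1) & ~PAGE_CACHE_MASK) + 1;

        /* After: plain page constants; same values, since PAGE_CACHE_SIZE == PAGE_SIZE. */
        unsigned int len = PAGE_SIZE;
        last_index = (size - 1) >> PAGE_SHIFT;
        len = ((size - 1) & ~PAGE_MASK) + 1;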
fs/ocfs2/mmap.c · 198 lines · 5.1 KiB · C
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * mmap.c
 *
 * Code to deal with the mess that is clustered mmap.
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/signal.h>
#include <linux/rbtree.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "aops.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "mmap.h"
#include "super.h"
#include "ocfs2_trace.h"

static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
        sigset_t oldset;
        int ret;

        ocfs2_block_signals(&oldset);
        ret = filemap_fault(area, vmf);
        ocfs2_unblock_signals(&oldset);

        trace_ocfs2_fault(OCFS2_I(area->vm_file->f_mapping->host)->ip_blkno,
                          area, vmf->page, vmf->pgoff);
        return ret;
}
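ocfs2_block_signals() and ocfs2_unblock_signals() are defined elsewhere in ocfs2 (its super.c), not in this file. A minimal sketch of the save-and-restore pattern they are assumed to implement — hypothetical helper names, not the real ocfs2 code — looks like this:

        /*
         * Hedged sketch: hypothetical helpers illustrating the assumed
         * block-all-signals / restore-mask pattern around the fault handlers.
         */
        static void example_block_signals(sigset_t *oldset)
        {
                sigset_t blocked;

                sigfillset(&blocked);                     /* mask every signal */
                sigprocmask(SIG_BLOCK, &blocked, oldset); /* remember old mask */
        }

        static void example_unblock_signals(sigset_t *oldset)
        {
                sigprocmask(SIG_SETMASK, oldset, NULL);   /* restore old mask */
        }

The intent, presumably, is that the cluster-locking work done under the fault path is not interrupted by a signal part-way through.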

static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
                                struct page *page)
{
        int ret = VM_FAULT_NOPAGE;
        struct inode *inode = file_inode(file);
        struct address_space *mapping = inode->i_mapping;
        loff_t pos = page_offset(page);
        unsigned int len = PAGE_SIZE;
        pgoff_t last_index;
        struct page *locked_page = NULL;
        void *fsdata;
        loff_t size = i_size_read(inode);

        last_index = (size - 1) >> PAGE_SHIFT;

        /*
         * There are cases that lead to the page no longer belonging to
         * the mapping:
         * 1) pagecache truncates locally due to memory pressure.
         * 2) pagecache truncates when another node takes an EX lock
         *    against the inode lock; see ocfs2_data_convert_worker.
         *
         * The i_size check doesn't catch the case where nodes truncated and
         * then re-extended the file. We'll re-check the page mapping after
         * taking the page lock inside of ocfs2_write_begin_nolock().
         *
         * Let the VM retry in these cases.
         */
        if ((page->mapping != inode->i_mapping) ||
            (!PageUptodate(page)) ||
            (page_offset(page) >= size))
                goto out;

        /*
         * Call ocfs2_write_begin() and ocfs2_write_end() to take
         * advantage of the allocation code there. We pass a write
         * length of the whole page (chopped to i_size) to make sure
         * the whole thing is allocated.
         *
         * Since we know the page is up to date, we don't have to
         * worry about ocfs2_write_begin() skipping some buffer reads
         * because the "write" would invalidate their data.
         */
        if (page->index == last_index)
                len = ((size - 1) & ~PAGE_MASK) + 1;

        ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
                                       &locked_page, &fsdata, di_bh, page);
        if (ret) {
                if (ret != -ENOSPC)
                        mlog_errno(ret);
                if (ret == -ENOMEM)
                        ret = VM_FAULT_OOM;
                else
                        ret = VM_FAULT_SIGBUS;
                goto out;
        }

        if (!locked_page) {
                ret = VM_FAULT_NOPAGE;
                goto out;
        }
        ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page,
                                     fsdata);
        BUG_ON(ret != len);
        ret = VM_FAULT_LOCKED;
out:
        return ret;
}
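The last_index/len computation above is the usual "length of the final partial page" arithmetic. A worked example with illustrative numbers (chosen here, not taken from the source), assuming PAGE_SIZE == 4096 so that PAGE_MASK == ~4095 and ~PAGE_MASK == 4095:

        /* Illustrative only: i_size_read(inode) == 9000 bytes, 4 KiB pages. */
        loff_t size = 9000;
        pgoff_t last_index = (size - 1) >> 12;          /* 8999 >> 12        == 2   */
        unsigned int len = ((size - 1) & 4095) + 1;     /* (8999 & 4095) + 1 == 808 */
        /*
         * Pages 0 and 1 lie fully inside i_size (4096 bytes each); page 2,
         * the last_index page, holds only 808 valid bytes, so a write fault
         * there is clamped to len == 808 rather than the whole page.
         */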

static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vma->vm_file);
        struct buffer_head *di_bh = NULL;
        sigset_t oldset;
        int ret;

        sb_start_pagefault(inode->i_sb);
        ocfs2_block_signals(&oldset);

        /*
         * The cluster locks taken will block a truncate from another
         * node. Taking the data lock will also ensure that we don't
         * attempt page truncation as part of a downconvert.
         */
        ret = ocfs2_inode_lock(inode, &di_bh, 1);
        if (ret < 0) {
                mlog_errno(ret);
                if (ret == -ENOMEM)
                        ret = VM_FAULT_OOM;
                else
                        ret = VM_FAULT_SIGBUS;
                goto out;
        }

        /*
         * The alloc sem should be enough to serialize with
         * ocfs2_truncate_file() changing i_size as well as any thread
         * modifying the inode btree.
         */
        down_write(&OCFS2_I(inode)->ip_alloc_sem);

        ret = __ocfs2_page_mkwrite(vma->vm_file, di_bh, page);

        up_write(&OCFS2_I(inode)->ip_alloc_sem);

        brelse(di_bh);
        ocfs2_inode_unlock(inode, 1);

out:
        ocfs2_unblock_signals(&oldset);
        sb_end_pagefault(inode->i_sb);
        return ret;
}

static const struct vm_operations_struct ocfs2_file_vm_ops = {
        .fault          = ocfs2_fault,
        .page_mkwrite   = ocfs2_page_mkwrite,
};

int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
        int ret = 0, lock_level = 0;

        ret = ocfs2_inode_lock_atime(file_inode(file),
                                     file->f_path.mnt, &lock_level);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }
        ocfs2_inode_unlock(file_inode(file), lock_level);
out:
        vma->vm_ops = &ocfs2_file_vm_ops;
        return 0;
}
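For context, the handlers registered above are what ultimately run when a process writes through a shared mapping of an ocfs2 file. A minimal user-space sketch (hypothetical mount path and file; plain POSIX calls, nothing ocfs2-specific, and it assumes the file is at least one page long):

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                /* Hypothetical file on an ocfs2 mount. */
                int fd = open("/mnt/ocfs2/data.bin", O_RDWR);
                if (fd < 0) {
                        perror("open");
                        return 1;
                }

                /* ocfs2_mmap() installs ocfs2_file_vm_ops on this VMA. */
                char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                               MAP_SHARED, fd, 0);
                if (p == MAP_FAILED) {
                        perror("mmap");
                        return 1;
                }

                char c = p[0];  /* read fault  -> ocfs2_fault()        */
                p[0] = c + 1;   /* write fault -> ocfs2_page_mkwrite() */

                munmap(p, 4096);
                close(fd);
                return 0;
        }

The first access faults the page in read-only through ocfs2_fault(); the subsequent store write-protect-faults and goes through ocfs2_page_mkwrite(), which takes the cluster locks before marking the page writable.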