fs: don't open code lru_to_page()

Multiple filesystems open code lru_to_page().  Rectify this by moving
the macro from mm_inline.h (which is specific to LRU handling) to the
more generic mm.h header, and switch the open-coded sites over to the
macro.

No functional changes.

Link: http://lkml.kernel.org/r/20181129104810.23361-1-nborisov@suse.com
Link: https://lkml.kernel.org/r/20181129075301.29087-1-nborisov@suse.com
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Pankaj gupta <pagupta@redhat.com>
Acked-by: "Yan, Zheng" <zyan@redhat.com>		[ceph]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit f86196ea87
parent 08d405c8b8
Author:    Nikolay Borisov <nborisov@suse.com>
Date:      2019-01-03 15:29:02 -08:00
Committer: Linus Torvalds <torvalds@linux-foundation.org>

 10 files changed, 15 insertions(+), 15 deletions(-)
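
For context, lru_to_page() simply returns the page linked at the tail of
a list head.  Readahead code builds these page lists prev-wards (see the
afs comment below), so head->prev is the next page to take off the list.
Below is a minimal sketch of the consumer pattern the macro captures,
mirroring put_pages_list() in the final hunk; drain_pages() is a
hypothetical name, not part of this commit:

	#include <linux/list.h>
	#include <linux/mm.h>	/* lru_to_page(), put_page() */

	/*
	 * Drain a readahead-style page list, oldest entry first.
	 * lru_to_page(head) expands to
	 * list_entry((head)->prev, struct page, lru).
	 */
	static void drain_pages(struct list_head *pages)
	{
		while (!list_empty(pages)) {
			struct page *page = lru_to_page(pages);

			list_del(&page->lru);	/* unlink before use */
			put_page(page);		/* drop the list's reference */
		}
	}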

--- a/fs/afs/file.c
+++ b/fs/afs/file.c

@@ -17,6 +17,7 @@
 #include <linux/writeback.h>
 #include <linux/gfp.h>
 #include <linux/task_io_accounting_ops.h>
+#include <linux/mm.h>
 #include "internal.h"
 
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
@@ -441,7 +442,7 @@ static int afs_readpages_one(struct file *file, struct address_space *mapping,
 	/* Count the number of contiguous pages at the front of the list.  Note
 	 * that the list goes prev-wards rather than next-wards.
 	 */
-	first = list_entry(pages->prev, struct page, lru);
+	first = lru_to_page(pages);
 	index = first->index + 1;
 	n = 1;
 	for (p = first->lru.prev; p != pages; p = p->prev) {
@@ -473,7 +474,7 @@ static int afs_readpages_one(struct file *file, struct address_space *mapping,
 	 * page at the end of the file.
 	 */
 	do {
-		page = list_entry(pages->prev, struct page, lru);
+		page = lru_to_page(pages);
 		list_del(&page->lru);
 		index = page->index;
 		if (add_to_page_cache_lru(page, mapping, index,

--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c

@@ -4103,8 +4103,7 @@ int extent_readpages(struct address_space *mapping, struct list_head *pages,
 
 	while (!list_empty(pages)) {
 		for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) {
-			struct page *page = list_entry(pages->prev,
-						       struct page, lru);
+			struct page *page = lru_to_page(pages);
 
 			prefetchw(&page->flags);
 			list_del(&page->lru);

--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c

@@ -306,7 +306,7 @@ static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx,
 	struct ceph_osd_client *osdc =
 		&ceph_inode_to_client(inode)->client->osdc;
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	struct page *page = list_entry(page_list->prev, struct page, lru);
+	struct page *page = lru_to_page(page_list);
 	struct ceph_vino vino;
 	struct ceph_osd_request *req;
 	u64 off;
@@ -333,8 +333,7 @@ static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx,
 	if (got)
 		ceph_put_cap_refs(ci, got);
 	while (!list_empty(page_list)) {
-		page = list_entry(page_list->prev,
-				  struct page, lru);
+		page = lru_to_page(page_list);
 		list_del(&page->lru);
 		put_page(page);
 	}

--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c

@@ -33,6 +33,7 @@
 #include <linux/mount.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
+#include <linux/mm.h>
 #include <asm/div64.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
@@ -3964,7 +3965,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 
 	INIT_LIST_HEAD(tmplist);
 
-	page = list_entry(page_list->prev, struct page, lru);
+	page = lru_to_page(page_list);
 
 	/*
 	 * Lock the page and put it in the cache. Since no one else

--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c

@@ -128,7 +128,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 
 		prefetchw(&page->flags);
 		if (pages) {
-			page = list_entry(pages->prev, struct page, lru);
+			page = lru_to_page(pages);
 			list_del(&page->lru);
 			if (add_to_page_cache_lru(page, mapping, page->index,
 				  readahead_gfp_mask(mapping)))

--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c

@@ -30,6 +30,7 @@
 #include <linux/quotaops.h>
 #include <linux/blkdev.h>
 #include <linux/uio.h>
+#include <linux/mm.h>
 
 #include <cluster/masklog.h>
@@ -397,7 +398,7 @@ static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
 	 * Check whether a remote node truncated this file - we just
 	 * drop out in that case as it's not worth handling here.
 	 */
-	last = list_entry(pages->prev, struct page, lru);
+	last = lru_to_page(pages);
 	start = (loff_t)last->index << PAGE_SHIFT;
 	if (start >= i_size_read(inode))
 		goto out_unlock;

--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c

@@ -77,7 +77,7 @@ static int orangefs_readpages(struct file *file,
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		struct page *page;
 
-		page = list_entry(pages->prev, struct page, lru);
+		page = lru_to_page(pages);
 		list_del(&page->lru);
 		if (!add_to_page_cache(page,
 				       mapping,

--- a/include/linux/mm.h
+++ b/include/linux/mm.h

@@ -171,6 +171,8 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
 /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
 #define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
 
+#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
+
 /*
  * Linux kernel virtual memory manager primitives.
  * The idea being to have a "virtual" mm in the same way

--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h

@@ -124,7 +124,4 @@ static __always_inline enum lru_list page_lru(struct page *page)
 	}
 	return lru;
 }
-
-#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
-
 #endif

--- a/mm/swap.c
+++ b/mm/swap.c

@@ -126,7 +126,7 @@ void put_pages_list(struct list_head *pages)
 	while (!list_empty(pages)) {
 		struct page *victim;
 
-		victim = list_entry(pages->prev, struct page, lru);
+		victim = lru_to_page(pages);
 		list_del(&victim->lru);
 		put_page(victim);
 	}