mirror of https://github.com/torvalds/linux.git
mm: set_page_dirty_balance() vs ->page_mkwrite()
All the current page_mkwrite() implementations also set the page dirty, which results in the set_page_dirty_balance() call _not_ calling balance_dirty_pages(), because the page is already found dirty. This allows us to dirty a _lot_ of pages without ever hitting balance_dirty_pages(). Not good (tm).

Force a balance call if ->page_mkwrite() was successful.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3eb215de26
commit a200ee182a
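For illustration, here is a minimal user-space sketch of the hole being closed and of the patched check. The struct and helpers below are simplified stand-ins (assumed names, not the kernel implementation); only the shape of the condition matches the patch.

/*
 * Sketch only: when ->page_mkwrite() has already dirtied the page,
 * set_page_dirty() reports "was already dirty" (returns 0), so keying
 * the balance call on its return value alone never throttles the writer.
 */
#include <stdio.h>

struct page { int dirty; };

static int set_page_dirty(struct page *page)
{
	int newly_dirtied = !page->dirty;

	page->dirty = 1;
	return newly_dirtied;	/* 1 only if the page was clean before */
}

static void balance_dirty_pages(void)
{
	printf("balance_dirty_pages(): throttling the dirtier\n");
}

/* Old check: balance only when set_page_dirty() reports a transition. */
static void set_page_dirty_balance_old(struct page *page)
{
	if (set_page_dirty(page))
		balance_dirty_pages();
}

/* Patched check: also balance when ->page_mkwrite() was successful. */
static void set_page_dirty_balance_new(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite)
		balance_dirty_pages();
}

int main(void)
{
	/* ->page_mkwrite() has already dirtied the page before the fault returns. */
	struct page page = { .dirty = 1 };

	printf("old behaviour:\n");
	set_page_dirty_balance_old(&page);	/* never throttles */

	printf("patched behaviour:\n");
	set_page_dirty_balance_new(&page, 1);	/* forces the balance call */
	return 0;
}

With the old check, every fault on a page that ->page_mkwrite() already dirtied skips balance_dirty_pages() entirely; passing page_mkwrite down forces the balance call in exactly that case.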
@@ -127,7 +127,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
 			loff_t pos, loff_t count);
 int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
 			loff_t pos, loff_t count);
-void set_page_dirty_balance(struct page *page);
+void set_page_dirty_balance(struct page *page, int page_mkwrite);
 void writeback_set_ratelimit(void);
 
 /* pdflush.c */
@@ -1639,6 +1639,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *old_page, *new_page;
 	pte_t entry;
 	int reuse = 0, ret = 0;
+	int page_mkwrite = 0;
 	struct page *dirty_page = NULL;
 
 	old_page = vm_normal_page(vma, address, orig_pte);
@@ -1687,6 +1688,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			page_cache_release(old_page);
 			if (!pte_same(*page_table, orig_pte))
 				goto unlock;
+
+			page_mkwrite = 1;
 		}
 		dirty_page = old_page;
 		get_page(dirty_page);
@@ -1774,7 +1777,7 @@ unlock:
 		 * do_no_page is protected similarly.
 		 */
 		wait_on_page_locked(dirty_page);
-		set_page_dirty_balance(dirty_page);
+		set_page_dirty_balance(dirty_page, page_mkwrite);
 		put_page(dirty_page);
 	}
 	return ret;
@@ -2322,6 +2325,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *dirty_page = NULL;
 	struct vm_fault vmf;
 	int ret;
+	int page_mkwrite = 0;
 
 	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
 	vmf.pgoff = pgoff;
@@ -2398,6 +2402,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 				anon = 1; /* no anon but release vmf.page */
 				goto out;
 			}
+			page_mkwrite = 1;
 		}
 	}
 
@@ -2453,7 +2458,7 @@ out_unlocked:
 	if (anon)
 		page_cache_release(vmf.page);
 	else if (dirty_page) {
-		set_page_dirty_balance(dirty_page);
+		set_page_dirty_balance(dirty_page, page_mkwrite);
 		put_page(dirty_page);
 	}
 
@@ -274,9 +274,9 @@ static void balance_dirty_pages(struct address_space *mapping)
 		pdflush_operation(background_writeout, 0);
 }
 
-void set_page_dirty_balance(struct page *page)
+void set_page_dirty_balance(struct page *page, int page_mkwrite)
 {
-	if (set_page_dirty(page)) {
+	if (set_page_dirty(page) || page_mkwrite) {
 		struct address_space *mapping = page_mapping(page);
 
 		if (mapping)