[PATCH] Allow migration of mlocked pages

Hugh clarified the role of VM_LOCKED.  So we can now implement page
migration for mlocked pages.

Allow the migration of mlocked pages.  This means that try_to_unmap must
unmap mlocked pages in the migration case.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Christoph Lameter 2006-06-25 05:46:49 -07:00 committed by Linus Torvalds
parent 7b2259b3e5
commit e6a1530d69
2 changed files with 8 additions and 11 deletions

mm/migrate.c

@@ -616,15 +616,13 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	/*
 	 * Establish migration ptes or remove ptes
 	 */
-	if (try_to_unmap(page, 1) != SWAP_FAIL) {
-		if (!page_mapped(page))
-			rc = move_to_new_page(newpage, page);
-	} else
-		/* A vma has VM_LOCKED set -> permanent failure */
-		rc = -EPERM;
+	try_to_unmap(page, 1);
+
+	if (!page_mapped(page))
+		rc = move_to_new_page(newpage, page);
 
 	if (rc)
 		remove_migration_ptes(page, page);
 unlock:
 	unlock_page(page);

mm/rmap.c

@@ -562,9 +562,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * If it's recently referenced (perhaps page_referenced
 	 * skipped over this mm) then we should reactivate it.
 	 */
-	if ((vma->vm_flags & VM_LOCKED) ||
-			(ptep_clear_flush_young(vma, address, pte)
-				&& !migration)) {
+	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
+			(ptep_clear_flush_young(vma, address, pte)))) {
 		ret = SWAP_FAIL;
 		goto out_unmap;
 	}
@@ -771,7 +770,7 @@ static int try_to_unmap_file(struct page *page, int migration)
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-		if (vma->vm_flags & VM_LOCKED)
+		if ((vma->vm_flags & VM_LOCKED) && !migration)
 			continue;
 		cursor = (unsigned long) vma->vm_private_data;
 		if (cursor > max_nl_cursor)
@@ -805,7 +804,7 @@ static int try_to_unmap_file(struct page *page, int migration)
 	do {
 		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-			if (vma->vm_flags & VM_LOCKED)
+			if ((vma->vm_flags & VM_LOCKED) && !migration)
 				continue;
 			cursor = (unsigned long) vma->vm_private_data;
 			while ( cursor < max_nl_cursor &&