mm: use mm_populate() for mremap() of VM_LOCKED vmas

Signed-off-by: Michel Lespinasse <walken@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Tested-by: Andy Lutomirski <luto@amacapital.net>
Cc: Greg Ungerer <gregungerer@westnet.com.au>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 81909b8421
parent 128557ffe1
Author: Michel Lespinasse <walken@google.com>
Date:   2013-02-22 16:32:41 -0800
Committer: Linus Torvalds <torvalds@linux-foundation.org>


diff --git a/mm/mremap.c b/mm/mremap.c
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -209,7 +209,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 
 static unsigned long move_vma(struct vm_area_struct *vma,
 		unsigned long old_addr, unsigned long old_len,
-		unsigned long new_len, unsigned long new_addr)
+		unsigned long new_len, unsigned long new_addr, bool *locked)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *new_vma;
@@ -300,9 +300,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 
 	if (vm_flags & VM_LOCKED) {
 		mm->locked_vm += new_len >> PAGE_SHIFT;
-		if (new_len > old_len)
-			mlock_vma_pages_range(new_vma, new_addr + old_len,
-					      new_addr + new_len);
+		*locked = true;
 	}
 
 	return new_addr;
@@ -367,9 +365,8 @@ Eagain:
 	return ERR_PTR(-EAGAIN);
 }
 
-static unsigned long mremap_to(unsigned long addr,
-	unsigned long old_len, unsigned long new_addr,
-	unsigned long new_len)
+static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
+		unsigned long new_addr, unsigned long new_len, bool *locked)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
@@ -419,7 +416,7 @@ static unsigned long mremap_to(unsigned long addr,
 	if (ret & ~PAGE_MASK)
 		goto out1;
 
-	ret = move_vma(vma, addr, old_len, new_len, new_addr);
+	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
 	if (!(ret & ~PAGE_MASK))
 		goto out;
 out1:
@@ -457,6 +454,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	struct vm_area_struct *vma;
 	unsigned long ret = -EINVAL;
 	unsigned long charged = 0;
+	bool locked = false;
 
 	down_write(&current->mm->mmap_sem);
 
@@ -479,7 +477,8 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 
 	if (flags & MREMAP_FIXED) {
 		if (flags & MREMAP_MAYMOVE)
-			ret = mremap_to(addr, old_len, new_addr, new_len);
+			ret = mremap_to(addr, old_len, new_addr, new_len,
+					&locked);
 		goto out;
 	}
 
@@ -521,8 +520,8 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
 			if (vma->vm_flags & VM_LOCKED) {
 				mm->locked_vm += pages;
-				mlock_vma_pages_range(vma, addr + old_len,
-						      addr + new_len);
+				locked = true;
+				new_addr = addr;
 			}
 			ret = addr;
 			goto out;
@@ -548,11 +547,13 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 			goto out;
 		}
 
-		ret = move_vma(vma, addr, old_len, new_len, new_addr);
+		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
 	}
 out:
 	if (ret & ~PAGE_MASK)
 		vm_unacct_memory(charged);
 	up_write(&current->mm->mmap_sem);
+	if (locked && new_len > old_len)
+		mm_populate(new_addr + old_len, new_len - old_len);
 	return ret;
 }
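
Illustration (not part of the commit): the case this patch changes is an mremap() that grows or moves a VM_LOCKED mapping. Previously the new range was faulted in by mlock_vma_pages_range() while mmap_sem was still held; with this patch mremap() only records that the vma was locked and calls mm_populate() on the extension after releasing the semaphore, so the added pages should still be resident by the time the syscall returns. Below is a minimal userspace sketch of that case, assuming a Linux system with mlock(), mremap() and mincore() available; the sizes and file name are arbitrary.

/* locked_mremap_demo.c - hypothetical demo, not from the patch.
 * Grow an mlock()ed anonymous mapping with mremap() and use mincore()
 * to observe whether the newly added pages are already resident.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t old_len = 2 * page, new_len = 8 * page;
	size_t i, resident = 0;
	unsigned char *vec;
	void *p, *q;

	p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	if (mlock(p, old_len)) {	/* the vma becomes VM_LOCKED */
		perror("mlock");
		return 1;
	}

	/* Grow the locked mapping; the kernel may move it (MREMAP_MAYMOVE). */
	q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED) {
		perror("mremap");
		return 1;
	}

	/* Check residency of the grown tail without touching it ourselves. */
	vec = calloc(new_len / page, 1);
	if (!vec || mincore(q, new_len, vec)) {
		perror("mincore");
		return 1;
	}
	for (i = old_len / page; i < new_len / page; i++)
		resident += vec[i] & 1;
	printf("%zu of %zu new pages resident\n",
	       resident, (new_len - old_len) / page);
	free(vec);
	return 0;
}

Build with e.g. "gcc -O2 locked_mremap_demo.c"; the printed count is expected to match the number of added pages on a kernel with this change, although mincore() reporting can vary under memory pressure, so treat the output as illustrative rather than a strict test.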