mm: gup: use get_user_pages_unlocked

This allows these get_user_pages() call sites to pass FAULT_FLAG_ALLOW_RETRY to
the page fault handler, so that mmap_sem can be released while the fault waits on I/O.
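
For illustration only, here is a minimal before/after sketch of the conversion
pattern this patch applies, written against the 2015-era GUP signatures used by
the call sites below; the pin_user_buffer_* helper names are hypothetical and
not part of the patch.

/*
 * Illustrative sketch, not part of this patch.  Helper names are made up;
 * the get_user_pages()/get_user_pages_unlocked() signatures are the
 * 2015-era ones (still taking the task/mm pair) seen in the hunks below.
 */
#include <linux/mm.h>
#include <linux/sched.h>

/* Before: the caller holds mmap_sem across the fault, so a fault that
 * needs I/O keeps the semaphore held for the entire wait. */
static long pin_user_buffer_locked(unsigned long uaddr, int nr_pages,
				   struct page **pages)
{
	long ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr, nr_pages,
			     1 /* write */, 0 /* force */, pages, NULL);
	up_read(&current->mm->mmap_sem);
	return ret;
}

/* After: get_user_pages_unlocked() takes mmap_sem itself and can pass
 * FAULT_FLAG_ALLOW_RETRY to the fault handler, so the semaphore is
 * dropped while the fault waits on I/O and retaken for the retry. */
static long pin_user_buffer_unlocked(unsigned long uaddr, int nr_pages,
				     struct page **pages)
{
	return get_user_pages_unlocked(current, current->mm, uaddr, nr_pages,
				       1 /* write */, 0 /* force */, pages);
}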

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andres Lagar-Cavilla <andreslc@google.com>
Cc: Peter Feiner <pfeiner@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 7e33912849
parent a7b780750e
Author: Andrea Arcangeli <aarcange@redhat.com>
Date: 2015-02-11 15:27:26 -08:00
Committer: Linus Torvalds <torvalds@linux-foundation.org>
5 files changed, 10 insertions(+), 22 deletions(-)


@@ -124,10 +124,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
 	}
 
 	/* Get user pages for DMA Xfer */
-	down_read(&current->mm->mmap_sem);
-	err = get_user_pages(current, current->mm,
-			user_dma.uaddr, user_dma.page_count, 0, 1, dma->map, NULL);
-	up_read(&current->mm->mmap_sem);
+	err = get_user_pages_unlocked(current, current->mm,
+			user_dma.uaddr, user_dma.page_count, 0, 1, dma->map);
 
 	if (user_dma.page_count != err) {
 		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",


@@ -4551,18 +4551,15 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
 		return -ENOMEM;
 
 	/* Try to fault in all of the necessary pages */
-	down_read(&current->mm->mmap_sem);
 	/* rw==READ means read from drive, write into memory area */
-	res = get_user_pages(
+	res = get_user_pages_unlocked(
 		current,
 		current->mm,
 		uaddr,
 		nr_pages,
 		rw == READ,
 		0, /* don't force */
-		pages,
-		NULL);
-	up_read(&current->mm->mmap_sem);
+		pages);
 
 	/* Errors and no page mapped should return here */
 	if (res < nr_pages)


@@ -686,10 +686,8 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
 	if (!pages)
 		return -ENOMEM;
 
-	down_read(&current->mm->mmap_sem);
-	ret = get_user_pages(current, current->mm, (unsigned long)buf,
-			     nr_pages, WRITE, 0, pages, NULL);
-	up_read(&current->mm->mmap_sem);
+	ret = get_user_pages_unlocked(current, current->mm, (unsigned long)buf,
+				      nr_pages, WRITE, 0, pages);
 
 	if (ret < nr_pages) {
 		nr_pages = ret;


@@ -99,11 +99,8 @@ static int process_vm_rw_single_vec(unsigned long addr,
 		size_t bytes;
 
 		/* Get the pages we're interested in */
-		down_read(&mm->mmap_sem);
-		pages = get_user_pages(task, mm, pa, pages,
-				       vm_write, 0, process_pages, NULL);
-		up_read(&mm->mmap_sem);
-
+		pages = get_user_pages_unlocked(task, mm, pa, pages,
+						vm_write, 0, process_pages);
 		if (pages <= 0)
 			return -EFAULT;


@@ -23,17 +23,15 @@ struct page **ceph_get_direct_page_vector(const void __user *data,
 	if (!pages)
 		return ERR_PTR(-ENOMEM);
 
-	down_read(&current->mm->mmap_sem);
 	while (got < num_pages) {
-		rc = get_user_pages(current, current->mm,
+		rc = get_user_pages_unlocked(current, current->mm,
 		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
-		    num_pages - got, write_page, 0, pages + got, NULL);
+		    num_pages - got, write_page, 0, pages + got);
 		if (rc < 0)
 			break;
 		BUG_ON(rc == 0);
 		got += rc;
 	}
-	up_read(&current->mm->mmap_sem);
 	if (rc < 0)
 		goto fail;
 	return pages;