Mirror of https://github.com/torvalds/linux.git
ia64 vm patch series that was cooking in -mm tree
Merge tag 'please-pull-vm_unwrapped' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux

Pull ia64 update from Tony Luck:
 "ia64 vm patch series that was cooking in -mm tree"

* tag 'please-pull-vm_unwrapped' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux:
  mm: use vm_unmapped_area() in hugetlbfs on ia64 architecture
  mm: use vm_unmapped_area() on ia64 architecture
This commit is contained in: commit d414c104e2
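Both patches in this pull convert ia64's hand-rolled first-fit search loops to the generic vm_unmapped_area() helper. As a minimal, hedged sketch (not the ia64 code itself), a caller fills a struct vm_unmapped_area_info and lets the helper find a suitable gap; the field names follow the assignments visible in the hunks below, and example_get_unmapped_area() is a hypothetical wrapper invented for illustration:

/*
 * Minimal sketch of the vm_unmapped_area() calling convention the two
 * patches below adopt. struct vm_unmapped_area_info and
 * vm_unmapped_area() come from include/linux/mm.h; this wrapper is
 * illustrative only, not code from the patches.
 */
#include <linux/mm.h>
#include <linux/sched.h>

static unsigned long example_get_unmapped_area(unsigned long low,
					       unsigned long len,
					       unsigned long align_mask)
{
	struct vm_unmapped_area_info info;

	info.flags = 0;			/* bottom-up search */
	info.length = len;		/* size of the mapping being placed */
	info.low_limit = low;		/* lowest address to consider */
	info.high_limit = TASK_SIZE;	/* upper bound of the search */
	info.align_mask = align_mask;	/* address bits that must be zero */
	info.align_offset = 0;		/* no offset within the alignment */
	return vm_unmapped_area(&info);	/* suitable address, or an error value */
}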
arch/ia64/kernel/sys_ia64.c

@@ -25,9 +25,9 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 			unsigned long pgoff, unsigned long flags)
 {
 	long map_shared = (flags & MAP_SHARED);
-	unsigned long start_addr, align_mask = PAGE_SIZE - 1;
+	unsigned long align_mask = 0;
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_unmapped_area_info info;
 
 	if (len > RGN_MAP_LIMIT)
 		return -ENOMEM;
@@ -44,7 +44,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 		addr = 0;
 #endif
 	if (!addr)
-		addr = mm->free_area_cache;
+		addr = TASK_UNMAPPED_BASE;
 
 	if (map_shared && (TASK_SIZE > 0xfffffffful))
 		/*
@@ -53,28 +53,15 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 	 * tasks, we prefer to avoid exhausting the address space too quickly by
 	 * limiting alignment to a single page.
 	 */
-		align_mask = SHMLBA - 1;
+		align_mask = PAGE_MASK & (SHMLBA - 1);
 
-  full_search:
-	start_addr = addr = (addr + align_mask) & ~align_mask;
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				/* Start a new search --- just in case we missed some holes. */
-				addr = TASK_UNMAPPED_BASE;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			/* Remember the address where we stopped this search: */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		addr = (vma->vm_end + align_mask) & ~align_mask;
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = addr;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = align_mask;
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 asmlinkage long
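The removed loop rounded each candidate address up by hand with (addr + align_mask) & ~align_mask and restarted from TASK_UNMAPPED_BASE when it ran past the limits; the replacement expresses the same alignment constraint through info.align_mask and leaves the searching to vm_unmapped_area(). A small, self-contained illustration of that rounding arithmetic (userspace, values chosen only for the example; the 1MB figure comes from the SHMLBA comment in the hunk above):

#include <stdio.h>

int main(void)
{
	unsigned long shmlba = 1UL << 20;          /* 1MB shared-mapping alignment, per the comment above */
	unsigned long align_mask = shmlba - 1;     /* low bits that must end up zero */
	unsigned long addr = 0x2000000000012345UL; /* arbitrary candidate address for the example */

	/* Same expression the removed "start_addr = addr = ..." line used. */
	unsigned long rounded = (addr + align_mask) & ~align_mask;

	printf("%#lx rounds up to %#lx\n", addr, rounded);
	return 0;
}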
arch/ia64/mm/hugetlbpage.c

@@ -148,7 +148,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		unsigned long pgoff, unsigned long flags)
 {
-	struct vm_area_struct *vmm;
+	struct vm_unmapped_area_info info;
 
 	if (len > RGN_MAP_LIMIT)
 		return -ENOMEM;
@@ -165,16 +165,14 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
 	/* This code assumes that RGN_HPAGE != 0. */
 	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
 		addr = HPAGE_REGION_BASE;
-	else
-		addr = ALIGN(addr, HPAGE_SIZE);
-	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
-		/* At this point:  (!vmm || addr < vmm->vm_end). */
-		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
-			return -ENOMEM;
-		if (!vmm || (addr + len) <= vmm->vm_start)
-			return addr;
-		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
-	}
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = addr;
+	info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
+	info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 static int __init hugetlb_setup_sz(char *str)
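In the hugetlb conversion the search window is pinned to the huge-page region (low_limit starting at or above HPAGE_REGION_BASE, high_limit at HPAGE_REGION_BASE + RGN_MAP_LIMIT), and huge-page alignment is requested as PAGE_MASK & (HPAGE_SIZE - 1). ANDing with PAGE_MASK drops the sub-page bits, which a page-granular search never varies anyway, so the constraint is unchanged. A hedged userspace illustration with assumed sizes (16KB pages and 256MB huge pages, chosen only for the example; real values depend on the kernel configuration):

#include <stdio.h>

int main(void)
{
	unsigned long page_size  = 16UL << 10;             /* assumed PAGE_SIZE: 16KB */
	unsigned long page_mask  = ~(page_size - 1);       /* PAGE_MASK */
	unsigned long hpage_size = 256UL << 20;            /* assumed HPAGE_SIZE: 256MB */

	unsigned long full_mask  = hpage_size - 1;         /* would also cover sub-page bits */
	unsigned long align_mask = page_mask & full_mask;  /* only the page-granular bits remain */

	printf("HPAGE_SIZE - 1:             %#lx\n", full_mask);
	printf("PAGE_MASK & (HPAGE_SIZE-1): %#lx\n", align_mask);
	return 0;
}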