mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 22:21:40 +00:00
mm, hugetlb: fix subpool accounting handling
If we allocate a hugepage with avoid_reserve, we don't dequeue a reserved one, so we should also check the subpool counter in the avoid_reserve case. This patch implements that. Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Davidlohr Bueso <davidlohr@hp.com> Cc: David Gibson <david@gibson.dropbear.id.au> Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com> Cc: Hillf Danton <dhillf@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
f522c3ac00
commit
8bb3f12e7d
10
mm/hugetlb.c
10
mm/hugetlb.c
@@ -1164,13 +1164,14 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
|
||||
chg = vma_needs_reservation(h, vma, addr);
|
||||
if (chg < 0)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
if (chg)
|
||||
if (hugepage_subpool_get_pages(spool, chg))
|
||||
if (chg || avoid_reserve)
|
||||
if (hugepage_subpool_get_pages(spool, 1))
|
||||
return ERR_PTR(-ENOSPC);
|
||||
|
||||
ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
|
||||
if (ret) {
|
||||
hugepage_subpool_put_pages(spool, chg);
|
||||
if (chg || avoid_reserve)
|
||||
hugepage_subpool_put_pages(spool, 1);
|
||||
return ERR_PTR(-ENOSPC);
|
||||
}
|
||||
spin_lock(&hugetlb_lock);
|
||||
@@ -1182,7 +1183,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
|
||||
hugetlb_cgroup_uncharge_cgroup(idx,
|
||||
pages_per_huge_page(h),
|
||||
h_cg);
|
||||
hugepage_subpool_put_pages(spool, chg);
|
||||
if (chg || avoid_reserve)
|
||||
hugepage_subpool_put_pages(spool, 1);
|
||||
return ERR_PTR(-ENOSPC);
|
||||
}
|
||||
spin_lock(&hugetlb_lock);
|
||||
|
Loading…
Reference in New Issue
Block a user