mirror of
https://github.com/torvalds/linux.git
synced 2024-12-27 21:33:00 +00:00
mm/memory-failure.c: fix wrong num_poisoned_pages in handling memory error on thp
num_poisoned_pages counts up the number of pages isolated by memory errors. But for thp, only one subpage is isolated because memory error handler splits it, so it's wrong to add (1 << compound_trans_order). [akpm@linux-foundation.org: tweak comment] Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Andi Kleen <andi@firstfloor.org> Cc: Tony Luck <tony.luck@intel.com> Cc: Wu Fengguang <fengguang.wu@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
af8fae7c08
commit
4db0e950c5
@@ -1039,7 +1039,17 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 		return 0;
 	}
 
-	nr_pages = 1 << compound_trans_order(hpage);
+	/*
+	 * Currently errors on hugetlbfs pages are measured in hugepage units,
+	 * so nr_pages should be 1 << compound_order.  OTOH when errors are on
+	 * transparent hugepages, they are supposed to be split and error
+	 * measurement is done in normal page units.  So nr_pages should be one
+	 * in this case.
+	 */
+	if (PageHuge(p))
+		nr_pages = 1 << compound_order(hpage);
+	else /* normal page or thp */
+		nr_pages = 1;
 	atomic_long_add(nr_pages, &num_poisoned_pages);
 
 	/*
Loading…
Reference in New Issue
Block a user