mm/hwpoison: drop forward reference declarations __soft_offline_page()
Drop the forward declaration of __soft_offline_page(); the definition of soft_offline_page() now follows it instead.

Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Tony Luck <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 86e057734b
parent 0be35096a1
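
The change is a pure reordering: soft_offline_page() moves below its callee __soft_offline_page(), so the callee no longer needs to be declared ahead of its definition. A standalone sketch of the same pattern, using hypothetical helper()/api() names rather than kernel code:

#include <stdio.h>

/* With the callee defined before its caller, no forward declaration
 * ("static int helper(int x);") is required. */
static int helper(int x)
{
	return x * 2;
}

int api(int x)
{
	return helper(x);
}

int main(void)
{
	printf("%d\n", api(21));	/* prints 42 */
	return 0;
}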
mm/memory-failure.c
@@ -1523,72 +1523,6 @@ static int soft_offline_huge_page(struct page *page, int flags)
 	return ret;
 }
 
-static int __soft_offline_page(struct page *page, int flags);
-
-/**
- * soft_offline_page - Soft offline a page.
- * @page: page to offline
- * @flags: flags. Same as memory_failure().
- *
- * Returns 0 on success, otherwise negated errno.
- *
- * Soft offline a page, by migration or invalidation,
- * without killing anything. This is for the case when
- * a page is not corrupted yet (so it's still valid to access),
- * but has had a number of corrected errors and is better taken
- * out.
- *
- * The actual policy on when to do that is maintained by
- * user space.
- *
- * This should never impact any application or cause data loss,
- * however it might take some time.
- *
- * This is not a 100% solution for all memory, but tries to be
- * ``good enough'' for the majority of memory.
- */
-int soft_offline_page(struct page *page, int flags)
-{
-	int ret;
-	unsigned long pfn = page_to_pfn(page);
-	struct page *hpage = compound_trans_head(page);
-
-	if (PageHWPoison(page)) {
-		pr_info("soft offline: %#lx page already poisoned\n", pfn);
-		return -EBUSY;
-	}
-	if (!PageHuge(page) && PageTransHuge(hpage)) {
-		if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
-			pr_info("soft offline: %#lx: failed to split THP\n",
-				pfn);
-			return -EBUSY;
-		}
-	}
-
-	ret = get_any_page(page, pfn, flags);
-	if (ret < 0)
-		goto unset;
-	if (ret) { /* for in-use pages */
-		if (PageHuge(page))
-			ret = soft_offline_huge_page(page, flags);
-		else
-			ret = __soft_offline_page(page, flags);
-	} else { /* for free pages */
-		if (PageHuge(page)) {
-			set_page_hwpoison_huge_page(hpage);
-			dequeue_hwpoisoned_huge_page(hpage);
-			atomic_long_add(1 << compound_order(hpage),
-					&num_poisoned_pages);
-		} else {
-			SetPageHWPoison(page);
-			atomic_long_inc(&num_poisoned_pages);
-		}
-	}
-unset:
-	unset_migratetype_isolate(page, MIGRATE_MOVABLE);
-	return ret;
-}
-
 static int __soft_offline_page(struct page *page, int flags)
 {
 	int ret;
@@ -1675,3 +1609,67 @@ static int __soft_offline_page(struct page *page, int flags)
 	}
 	return ret;
 }
+
+/**
+ * soft_offline_page - Soft offline a page.
+ * @page: page to offline
+ * @flags: flags. Same as memory_failure().
+ *
+ * Returns 0 on success, otherwise negated errno.
+ *
+ * Soft offline a page, by migration or invalidation,
+ * without killing anything. This is for the case when
+ * a page is not corrupted yet (so it's still valid to access),
+ * but has had a number of corrected errors and is better taken
+ * out.
+ *
+ * The actual policy on when to do that is maintained by
+ * user space.
+ *
+ * This should never impact any application or cause data loss,
+ * however it might take some time.
+ *
+ * This is not a 100% solution for all memory, but tries to be
+ * ``good enough'' for the majority of memory.
+ */
+int soft_offline_page(struct page *page, int flags)
+{
+	int ret;
+	unsigned long pfn = page_to_pfn(page);
+	struct page *hpage = compound_trans_head(page);
+
+	if (PageHWPoison(page)) {
+		pr_info("soft offline: %#lx page already poisoned\n", pfn);
+		return -EBUSY;
+	}
+	if (!PageHuge(page) && PageTransHuge(hpage)) {
+		if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
+			pr_info("soft offline: %#lx: failed to split THP\n",
+				pfn);
+			return -EBUSY;
+		}
+	}
+
+	ret = get_any_page(page, pfn, flags);
+	if (ret < 0)
+		goto unset;
+	if (ret) { /* for in-use pages */
+		if (PageHuge(page))
+			ret = soft_offline_huge_page(page, flags);
+		else
+			ret = __soft_offline_page(page, flags);
+	} else { /* for free pages */
+		if (PageHuge(page)) {
+			set_page_hwpoison_huge_page(hpage);
+			dequeue_hwpoisoned_huge_page(hpage);
+			atomic_long_add(1 << compound_order(hpage),
+					&num_poisoned_pages);
+		} else {
+			SetPageHWPoison(page);
+			atomic_long_inc(&num_poisoned_pages);
+		}
+	}
+unset:
+	unset_migratetype_isolate(page, MIGRATE_MOVABLE);
+	return ret;
+}
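
The kernel-doc above notes that the policy for when to soft offline a page lives in user space. A rough, illustrative user-space sketch of one way to exercise this path (not part of the commit; it assumes a kernel built with CONFIG_MEMORY_FAILURE and a caller with CAP_SYS_ADMIN):

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_SOFT_OFFLINE
#define MADV_SOFT_OFFLINE 101	/* value from the kernel UAPI mman headers */
#endif

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);

	/* Map and touch one anonymous page so it has backing memory. */
	char *buf = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}
	buf[0] = 1;

	/* Request soft offlining of the page backing this address; on
	 * success the contents are migrated and the old page is taken
	 * out of use without killing the process. */
	if (madvise(buf, page_size, MADV_SOFT_OFFLINE) != 0)
		perror("madvise(MADV_SOFT_OFFLINE)");

	munmap(buf, page_size);
	return EXIT_SUCCESS;
}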