[PATCH] swsusp: rework memory freeing on resume

The following patch makes swsusp use the PG_nosave and PG_nosave_free flags to
mark pages that should be freed in case of an error during resume.

This allows us to simplify the code and to use swsusp_free() in all of the
swsusp's resume error paths, which makes them actually work.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Rafael J. Wysocki 2005-10-30 14:59:58 -08:00 committed by Linus Torvalds
parent a0f496517f
commit 2c1b4a5ca4
6 changed files with 65 additions and 150 deletions

View File

@@ -147,57 +147,7 @@ extern int restore_image(void)
pgd_t *temp_level4_pgt; pgd_t *temp_level4_pgt;
static void **pages; static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
static inline void *__add_page(void)
{
void **c;
c = (void **)get_usable_page(GFP_ATOMIC);
if (c) {
*c = pages;
pages = c;
}
return c;
}
static inline void *__next_page(void)
{
void **c;
c = pages;
if (c) {
pages = *c;
*c = NULL;
}
return c;
}
/*
* Try to allocate as many usable pages as needed and daisy chain them.
* If one allocation fails, free the pages allocated so far
*/
static int alloc_usable_pages(unsigned long n)
{
void *p;
pages = NULL;
do
if (!__add_page())
break;
while (--n);
if (n) {
p = __next_page();
while (p) {
free_page((unsigned long)p);
p = __next_page();
}
return -ENOMEM;
}
return 0;
}
static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{ {
long i, j; long i, j;
@@ -211,7 +161,9 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long e
if (paddr >= end) if (paddr >= end)
break; break;
pmd = (pmd_t *)__next_page(); pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
if (!pmd)
return -ENOMEM;
set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) { for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
unsigned long pe; unsigned long pe;
@@ -223,13 +175,17 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long e
set_pmd(pmd, __pmd(pe)); set_pmd(pmd, __pmd(pe));
} }
} }
return 0;
} }
static void set_up_temporary_mappings(void) static int set_up_temporary_mappings(void)
{ {
unsigned long start, end, next; unsigned long start, end, next;
int error;
temp_level4_pgt = (pgd_t *)__next_page(); temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
if (!temp_level4_pgt)
return -ENOMEM;
/* It is safe to reuse the original kernel mapping */ /* It is safe to reuse the original kernel mapping */
set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map), set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
@@ -240,29 +196,27 @@ static void set_up_temporary_mappings(void)
end = (unsigned long)pfn_to_kaddr(end_pfn); end = (unsigned long)pfn_to_kaddr(end_pfn);
for (; start < end; start = next) { for (; start < end; start = next) {
pud_t *pud = (pud_t *)__next_page(); pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
if (!pud)
return -ENOMEM;
next = start + PGDIR_SIZE; next = start + PGDIR_SIZE;
if (next > end) if (next > end)
next = end; next = end;
res_phys_pud_init(pud, __pa(start), __pa(next)); if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
return error;
set_pgd(temp_level4_pgt + pgd_index(start), set_pgd(temp_level4_pgt + pgd_index(start),
mk_kernel_pgd(__pa(pud))); mk_kernel_pgd(__pa(pud)));
} }
return 0;
} }
int swsusp_arch_resume(void) int swsusp_arch_resume(void)
{ {
unsigned long n; int error;
n = ((end_pfn << PAGE_SHIFT) + PUD_SIZE - 1) >> PUD_SHIFT;
n += (n + PTRS_PER_PUD - 1) / PTRS_PER_PUD + 1;
pr_debug("swsusp_arch_resume(): pages needed = %lu\n", n);
if (alloc_usable_pages(n)) {
free_eaten_memory();
return -ENOMEM;
}
/* We have got enough memory and from now on we cannot recover */ /* We have got enough memory and from now on we cannot recover */
set_up_temporary_mappings(); if ((error = set_up_temporary_mappings()))
return error;
restore_image(); restore_image();
return 0; return 0;
} }

View File

@@ -71,8 +71,7 @@ void restore_processor_state(void);
struct saved_context; struct saved_context;
void __save_processor_state(struct saved_context *ctxt); void __save_processor_state(struct saved_context *ctxt);
void __restore_processor_state(struct saved_context *ctxt); void __restore_processor_state(struct saved_context *ctxt);
extern unsigned long get_usable_page(gfp_t gfp_mask); unsigned long get_safe_page(gfp_t gfp_mask);
extern void free_eaten_memory(void);
/* /*
* XXX: We try to keep some more pages free so that I/O operations succeed * XXX: We try to keep some more pages free so that I/O operations succeed

View File

@@ -30,7 +30,6 @@ extern int swsusp_check(void);
extern int swsusp_read(void); extern int swsusp_read(void);
extern void swsusp_close(void); extern void swsusp_close(void);
extern int swsusp_resume(void); extern int swsusp_resume(void);
extern int swsusp_free(void);
static int noresume = 0; static int noresume = 0;
@@ -252,14 +251,17 @@ static int software_resume(void)
pr_debug("PM: Reading swsusp image.\n"); pr_debug("PM: Reading swsusp image.\n");
if ((error = swsusp_read())) if ((error = swsusp_read())) {
goto Cleanup; swsusp_free();
goto Thaw;
}
pr_debug("PM: Preparing devices for restore.\n"); pr_debug("PM: Preparing devices for restore.\n");
if ((error = device_suspend(PMSG_FREEZE))) { if ((error = device_suspend(PMSG_FREEZE))) {
printk("Some devices failed to suspend\n"); printk("Some devices failed to suspend\n");
goto Free; swsusp_free();
goto Thaw;
} }
mb(); mb();
@@ -268,9 +270,7 @@ static int software_resume(void)
swsusp_resume(); swsusp_resume();
pr_debug("PM: Restore failed, recovering.n"); pr_debug("PM: Restore failed, recovering.n");
device_resume(); device_resume();
Free: Thaw:
swsusp_free();
Cleanup:
unprepare_processes(); unprepare_processes();
Done: Done:
/* For success case, the suspend path will release the lock */ /* For success case, the suspend path will release the lock */

View File

@@ -66,7 +66,7 @@ extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void); extern asmlinkage int swsusp_arch_resume(void);
extern int restore_highmem(void); extern int restore_highmem(void);
extern void free_pagedir(struct pbe *pblist);
extern struct pbe * alloc_pagedir(unsigned nr_pages); extern struct pbe * alloc_pagedir(unsigned nr_pages);
extern void create_pbe_list(struct pbe *pblist, unsigned nr_pages); extern void create_pbe_list(struct pbe *pblist, unsigned nr_pages);
extern void swsusp_free(void);
extern int enough_swap(unsigned nr_pages); extern int enough_swap(unsigned nr_pages);

View File

@@ -240,7 +240,7 @@ static void copy_data_pages(struct pbe *pblist)
* free_pagedir - free pages allocated with alloc_pagedir() * free_pagedir - free pages allocated with alloc_pagedir()
*/ */
void free_pagedir(struct pbe *pblist) static void free_pagedir(struct pbe *pblist)
{ {
struct pbe *pbe; struct pbe *pbe;

View File

@@ -629,6 +629,11 @@ int swsusp_resume(void)
* execution continues at place where swsusp_arch_suspend was called * execution continues at place where swsusp_arch_suspend was called
*/ */
BUG_ON(!error); BUG_ON(!error);
/* The only reason why swsusp_arch_resume() can fail is memory being
* very tight, so we have to free it as soon as we can to avoid
* subsequent failures
*/
swsusp_free();
restore_processor_state(); restore_processor_state();
restore_highmem(); restore_highmem();
touch_softlockup_watchdog(); touch_softlockup_watchdog();
@@ -644,54 +649,28 @@ int swsusp_resume(void)
* *
* We don't know which pages are usable until we allocate them. * We don't know which pages are usable until we allocate them.
* *
* Allocated but unusable (ie eaten) memory pages are linked together * Allocated but unusable (ie eaten) memory pages are marked so that
* to create a list, so that we can free them easily * swsusp_free() can release them
*
* We could have used a type other than (void *)
* for this purpose, but ...
*/ */
static void **eaten_memory = NULL;
static inline void eat_page(void *page) unsigned long get_safe_page(gfp_t gfp_mask)
{
void **c;
c = eaten_memory;
eaten_memory = page;
*eaten_memory = c;
}
unsigned long get_usable_page(gfp_t gfp_mask)
{ {
unsigned long m; unsigned long m;
do {
m = get_zeroed_page(gfp_mask); m = get_zeroed_page(gfp_mask);
while (!PageNosaveFree(virt_to_page(m))) { if (m && PageNosaveFree(virt_to_page(m)))
eat_page((void *)m); /* This is for swsusp_free() */
m = get_zeroed_page(gfp_mask); SetPageNosave(virt_to_page(m));
if (!m) } while (m && PageNosaveFree(virt_to_page(m)));
break; if (m) {
/* This is for swsusp_free() */
SetPageNosave(virt_to_page(m));
SetPageNosaveFree(virt_to_page(m));
} }
return m; return m;
} }
void free_eaten_memory(void)
{
unsigned long m;
void **c;
int i = 0;
c = eaten_memory;
while (c) {
m = (unsigned long)c;
c = *c;
free_page(m);
i++;
}
eaten_memory = NULL;
pr_debug("swsusp: %d unused pages freed\n", i);
}
/** /**
* check_pagedir - We ensure here that pages that the PBEs point to * check_pagedir - We ensure here that pages that the PBEs point to
* won't collide with pages where we're going to restore from the loaded * won't collide with pages where we're going to restore from the loaded
@@ -709,7 +688,7 @@ static int check_pagedir(struct pbe *pblist)
p->address = 0UL; p->address = 0UL;
for_each_pbe (p, pblist) { for_each_pbe (p, pblist) {
p->address = get_usable_page(GFP_ATOMIC); p->address = get_safe_page(GFP_ATOMIC);
if (!p->address) if (!p->address)
return -ENOMEM; return -ENOMEM;
} }
@@ -728,7 +707,7 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
unsigned long zone_pfn; unsigned long zone_pfn;
struct pbe *pbpage, *tail, *p; struct pbe *pbpage, *tail, *p;
void *m; void *m;
int rel = 0, error = 0; int rel = 0;
if (!pblist) /* a sanity check */ if (!pblist) /* a sanity check */
return NULL; return NULL;
@@ -736,41 +715,37 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
pr_debug("swsusp: Relocating pagedir (%lu pages to check)\n", pr_debug("swsusp: Relocating pagedir (%lu pages to check)\n",
swsusp_info.pagedir_pages); swsusp_info.pagedir_pages);
/* Set page flags */ /* Clear page flags */
for_each_zone (zone) { for_each_zone (zone) {
for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
SetPageNosaveFree(pfn_to_page(zone_pfn + if (pfn_valid(zone_pfn + zone->zone_start_pfn))
ClearPageNosaveFree(pfn_to_page(zone_pfn +
zone->zone_start_pfn)); zone->zone_start_pfn));
} }
/* Clear orig addresses */ /* Mark orig addresses */
for_each_pbe (p, pblist) for_each_pbe (p, pblist)
ClearPageNosaveFree(virt_to_page(p->orig_address)); SetPageNosaveFree(virt_to_page(p->orig_address));
tail = pblist + PB_PAGE_SKIP; tail = pblist + PB_PAGE_SKIP;
/* Relocate colliding pages */ /* Relocate colliding pages */
for_each_pb_page (pbpage, pblist) { for_each_pb_page (pbpage, pblist) {
if (!PageNosaveFree(virt_to_page((unsigned long)pbpage))) { if (PageNosaveFree(virt_to_page((unsigned long)pbpage))) {
m = (void *)get_usable_page(GFP_ATOMIC | __GFP_COLD); m = (void *)get_safe_page(GFP_ATOMIC | __GFP_COLD);
if (!m) { if (!m)
error = -ENOMEM; return NULL;
break;
}
memcpy(m, (void *)pbpage, PAGE_SIZE); memcpy(m, (void *)pbpage, PAGE_SIZE);
if (pbpage == pblist) if (pbpage == pblist)
pblist = (struct pbe *)m; pblist = (struct pbe *)m;
else else
tail->next = (struct pbe *)m; tail->next = (struct pbe *)m;
eat_page((void *)pbpage);
pbpage = (struct pbe *)m; pbpage = (struct pbe *)m;
/* We have to link the PBEs again */ /* We have to link the PBEs again */
for (p = pbpage; p < pbpage + PB_PAGE_SKIP; p++) for (p = pbpage; p < pbpage + PB_PAGE_SKIP; p++)
if (p->next) /* needed to save the end */ if (p->next) /* needed to save the end */
p->next = p + 1; p->next = p + 1;
@@ -780,14 +755,12 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
tail = pbpage + PB_PAGE_SKIP; tail = pbpage + PB_PAGE_SKIP;
} }
if (error) { /* This is for swsusp_free() */
printk("\nswsusp: Out of memory\n\n"); for_each_pb_page (pbpage, pblist) {
free_pagedir(pblist); SetPageNosave(virt_to_page(pbpage));
free_eaten_memory(); SetPageNosaveFree(virt_to_page(pbpage));
pblist = NULL; }
/* Is this even worth handling? It should never ever happen, and we
have just lost user's state, anyway... */
} else
printk("swsusp: Relocated %d pages\n", rel); printk("swsusp: Relocated %d pages\n", rel);
return pblist; return pblist;
@@ -1006,9 +979,7 @@ static int read_pagedir(struct pbe *pblist)
break; break;
} }
if (error) if (!error)
free_pagedir(pblist);
else
BUG_ON(i != swsusp_info.pagedir_pages); BUG_ON(i != swsusp_info.pagedir_pages);
return error; return error;
@@ -1051,15 +1022,6 @@ static int read_suspend_image(void)
if (!error) if (!error)
error = data_read(pagedir_nosave); error = data_read(pagedir_nosave);
if (error) { /* We fail cleanly */
free_eaten_memory();
for_each_pbe (p, pagedir_nosave)
if (p->address) {
free_page(p->address);
p->address = 0UL;
}
free_pagedir(pagedir_nosave);
}
return error; return error;
} }