forked from Minki/linux
Merge branch 'freezer'
* freezer:
  PM / freezer: Clean up code after recent fixes
  PM: convert do_each_thread to for_each_process_thread
  OOM, PM: OOM killed task shouldn't escape PM suspend
  freezer: remove obsolete comments in __thaw_task()
  freezer: Do not freeze tasks killed by OOM killer
commit 96ed753235
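The functional core of this merge is the race check that freeze_processes() gains in kernel/power/process.c (see the hunks below): snapshot an OOM-kill counter before freezing user space, re-check it once freezing finishes, and back out with -EBUSY if an OOM kill may have raced with the freeze. The following is a minimal user-space sketch of that control flow only; the stub functions (oom_kills_count(), note_oom_kill(), try_to_freeze_tasks(), check_frozen_processes()) are stand-ins for the kernel primitives shown in the diff, not real implementations.

/* Sketch of the freeze/OOM race check; stubs stand in for kernel code. */
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int oom_kills;                 /* bumped by note_oom_kill() */

static int  oom_kills_count(void) { return atomic_load(&oom_kills); }
static void note_oom_kill(void)   { atomic_fetch_add(&oom_kills, 1); }

/* Stubs: pretend freezing always succeeds and everything is frozen. */
static int  try_to_freeze_tasks(bool user_only) { (void)user_only; return 0; }
static bool check_frozen_processes(void)        { return true; }

static int freeze_processes_sketch(void)
{
        int oom_kills_saved = oom_kills_count();
        int error = try_to_freeze_tasks(true);

        if (error)
                return error;

        /*
         * An OOM kill may have happened while we were freezing; the victim
         * could still be on its way out, so double-check before declaring
         * the system frozen.
         */
        if (oom_kills_count() != oom_kills_saved && !check_frozen_processes())
                return -EBUSY;

        return 0;
}

int main(void)
{
        note_oom_kill();        /* pretend the OOM killer fired earlier */
        printf("freeze_processes_sketch() -> %d\n", freeze_processes_sketch());
        return 0;
}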
include/linux/oom.h
@@ -50,6 +50,9 @@ static inline bool oom_task_origin(const struct task_struct *p)
 extern unsigned long oom_badness(struct task_struct *p,
                struct mem_cgroup *memcg, const nodemask_t *nodemask,
                unsigned long totalpages);
+
+extern int oom_kills_count(void);
+extern void note_oom_kill(void);
 extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
                             unsigned int points, unsigned long totalpages,
                             struct mem_cgroup *memcg, nodemask_t *nodemask,
kernel/freezer.c
@@ -42,6 +42,9 @@ bool freezing_slow_path(struct task_struct *p)
        if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
                return false;
 
+       if (test_thread_flag(TIF_MEMDIE))
+               return false;
+
        if (pm_nosig_freezing || cgroup_freezing(p))
                return true;
 
@@ -147,12 +150,6 @@ void __thaw_task(struct task_struct *p)
 {
        unsigned long flags;
 
-       /*
-        * Clear freezing and kick @p if FROZEN. Clearing is guaranteed to
-        * be visible to @p as waking up implies wmb. Waking up inside
-        * freezer_lock also prevents wakeups from leaking outside
-        * refrigerator.
-        */
        spin_lock_irqsave(&freezer_lock, flags);
        if (frozen(p))
                wake_up_process(p);
kernel/power/process.c
@@ -46,13 +46,13 @@ static int try_to_freeze_tasks(bool user_only)
        while (true) {
                todo = 0;
                read_lock(&tasklist_lock);
-               do_each_thread(g, p) {
+               for_each_process_thread(g, p) {
                        if (p == current || !freeze_task(p))
                                continue;
 
                        if (!freezer_should_skip(p))
                                todo++;
-               } while_each_thread(g, p);
+               }
                read_unlock(&tasklist_lock);
 
                if (!user_only) {
@@ -93,11 +93,11 @@ static int try_to_freeze_tasks(bool user_only)
 
                if (!wakeup) {
                        read_lock(&tasklist_lock);
-                       do_each_thread(g, p) {
+                       for_each_process_thread(g, p) {
                                if (p != current && !freezer_should_skip(p)
                                    && freezing(p) && !frozen(p))
                                        sched_show_task(p);
-                       } while_each_thread(g, p);
+                       }
                        read_unlock(&tasklist_lock);
                }
        } else {
@@ -108,6 +108,30 @@ static int try_to_freeze_tasks(bool user_only)
        return todo ? -EBUSY : 0;
 }
 
+static bool __check_frozen_processes(void)
+{
+       struct task_struct *g, *p;
+
+       for_each_process_thread(g, p)
+               if (p != current && !freezer_should_skip(p) && !frozen(p))
+                       return false;
+
+       return true;
+}
+
+/*
+ * Returns true if all freezable tasks (except for current) are frozen already
+ */
+static bool check_frozen_processes(void)
+{
+       bool ret;
+
+       read_lock(&tasklist_lock);
+       ret = __check_frozen_processes();
+       read_unlock(&tasklist_lock);
+       return ret;
+}
+
 /**
  * freeze_processes - Signal user space processes to enter the refrigerator.
  * The current thread will not be frozen. The same process that calls
@@ -118,6 +142,7 @@ static int try_to_freeze_tasks(bool user_only)
 int freeze_processes(void)
 {
        int error;
+       int oom_kills_saved;
 
        error = __usermodehelper_disable(UMH_FREEZING);
        if (error)
@@ -132,11 +157,25 @@ int freeze_processes(void)
        pm_wakeup_clear();
        printk("Freezing user space processes ... ");
        pm_freezing = true;
+       oom_kills_saved = oom_kills_count();
        error = try_to_freeze_tasks(true);
        if (!error) {
-               printk("done.");
                __usermodehelper_set_disable_depth(UMH_DISABLED);
                oom_killer_disable();
+
+               /*
+                * There might have been an OOM kill while we were
+                * freezing tasks and the killed task might be still
+                * on the way out so we have to double check for race.
+                */
+               if (oom_kills_count() != oom_kills_saved &&
+                   !check_frozen_processes()) {
+                       __usermodehelper_set_disable_depth(UMH_ENABLED);
+                       printk("OOM in progress.");
+                       error = -EBUSY;
+               } else {
+                       printk("done.");
+               }
        }
        printk("\n");
        BUG_ON(in_atomic());
@@ -191,11 +230,11 @@ void thaw_processes(void)
        thaw_workqueues();
 
        read_lock(&tasklist_lock);
-       do_each_thread(g, p) {
+       for_each_process_thread(g, p) {
                /* No other threads should have PF_SUSPEND_TASK set */
                WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
                __thaw_task(p);
-       } while_each_thread(g, p);
+       }
        read_unlock(&tasklist_lock);
 
        WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
@@ -218,10 +257,10 @@ void thaw_kernel_threads(void)
        thaw_workqueues();
 
        read_lock(&tasklist_lock);
-       do_each_thread(g, p) {
+       for_each_process_thread(g, p) {
                if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
                        __thaw_task(p);
-       } while_each_thread(g, p);
+       }
        read_unlock(&tasklist_lock);
 
        schedule();
mm/oom_kill.c
@@ -404,6 +404,23 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
                dump_tasks(memcg, nodemask);
 }
 
+/*
+ * Number of OOM killer invocations (including memcg OOM killer).
+ * Primarily used by PM freezer to check for potential races with
+ * OOM killed frozen task.
+ */
+static atomic_t oom_kills = ATOMIC_INIT(0);
+
+int oom_kills_count(void)
+{
+       return atomic_read(&oom_kills);
+}
+
+void note_oom_kill(void)
+{
+       atomic_inc(&oom_kills);
+}
+
 #define K(x) ((x) << (PAGE_SHIFT-10))
 /*
  * Must be called while holding a reference to p, which will be released upon
mm/page_alloc.c
@@ -2251,6 +2251,14 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                return NULL;
        }
 
+       /*
+        * PM-freezer should be notified that there might be an OOM killer on
+        * its way to kill and wake somebody up. This is too early and we might
+        * end up not killing anything but false positives are acceptable.
+        * See freeze_processes.
+        */
+       note_oom_kill();
+
        /*
         * Go through the zonelist yet one more time, keep very high watermark
         * here, this is only to catch a parallel oom killing, we must fail if