freezer: unexport refrigerator() and update try_to_freeze() slightly
There is no reason to export two functions for entering the
refrigerator.  Calling refrigerator() instead of try_to_freeze()
doesn't save anything noticeable or remove any race condition.

* Rename refrigerator() to __refrigerator() and make it return bool
  indicating whether it scheduled out for freezing.

* Update try_to_freeze() to return bool and relay the return value of
  __refrigerator() if freezing().

* Convert all refrigerator() users to try_to_freeze().

* Update documentation accordingly.

* While at it, add might_sleep() to try_to_freeze().

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Samuel Ortiz <samuel@sortiz.org>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Jan Kara <jack@suse.cz>
Cc: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
Cc: Christoph Hellwig <hch@infradead.org>
parent 3a7cbd50f7
commit a0acae0e88
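For illustration only (not part of this commit): with try_to_freeze() now returning bool, a freezable kernel thread can drop the open-coded freezing(current)/refrigerator() sequence and use the return value to decide whether to skip its normal sleep, much like the btrfs conversion further down. A minimal sketch, assuming a hypothetical demo_thread() and do_pending_work():

	#include <linux/kthread.h>
	#include <linux/freezer.h>
	#include <linux/sched.h>

	static int demo_thread(void *data)
	{
		set_freezable();	/* kernel threads are not freezable by default */

		while (!kthread_should_stop()) {
			do_pending_work();	/* hypothetical work function */

			/*
			 * try_to_freeze() returns true if we were frozen and
			 * have just been thawed; in that case recheck for work
			 * instead of sleeping.
			 */
			if (!try_to_freeze()) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (!kthread_should_stop())
					schedule();
				__set_current_state(TASK_RUNNING);
			}
		}
		return 0;
	}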
Documentation/power/freezing-of-tasks.txt

@@ -21,7 +21,7 @@ freeze_processes() (defined in kernel/power/process.c) is called. It executes
 try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
 either wakes them up, if they are kernel threads, or sends fake signals to them,
 if they are user space processes. A task that has TIF_FREEZE set, should react
-to it by calling the function called refrigerator() (defined in
+to it by calling the function called __refrigerator() (defined in
 kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
 to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
 Then, we say that the task is 'frozen' and therefore the set of functions
@@ -29,10 +29,10 @@ handling this mechanism is referred to as 'the freezer' (these functions are
 defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
 User space processes are generally frozen before kernel threads.
 
-It is not recommended to call refrigerator() directly. Instead, it is
-recommended to use the try_to_freeze() function (defined in
-include/linux/freezer.h), that checks the task's TIF_FREEZE flag and makes the
-task enter refrigerator() if the flag is set.
+__refrigerator() must not be called directly. Instead, use the
+try_to_freeze() function (defined in include/linux/freezer.h), that checks
+the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the
+flag is set.
 
 For user space processes try_to_freeze() is called automatically from the
 signal-handling code, but the freezable kernel threads need to call it
@@ -61,7 +61,7 @@ wait_event_freezable() and wait_event_freezable_timeout() macros.
 After the system memory state has been restored from a hibernation image and
 devices have been reinitialized, the function thaw_processes() is called in
 order to clear the PF_FROZEN flag for each frozen task. Then, the tasks that
-have been frozen leave refrigerator() and continue running.
+have been frozen leave __refrigerator() and continue running.
 
 III. Which kernel threads are freezable?
 
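As a hedged sketch of the usage the documentation describes (not part of this commit): a freezable kernel thread marks itself freezable and then either calls try_to_freeze() in its main loop or sleeps via the wait_event_freezable*() macros mentioned above. The demo_waitq and demo_work_pending names below are hypothetical.

	#include <linux/kthread.h>
	#include <linux/freezer.h>
	#include <linux/wait.h>
	#include <linux/jiffies.h>

	static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);	/* hypothetical wait queue */
	static bool demo_work_pending;			/* hypothetical condition */

	static int demo_freezable_thread(void *unused)
	{
		set_freezable();

		while (!kthread_should_stop()) {
			/* Sleeps interruptibly and freezes transparently if asked to. */
			wait_event_freezable_timeout(demo_waitq,
						     demo_work_pending ||
						     kthread_should_stop(),
						     msecs_to_jiffies(1000));
			if (demo_work_pending) {
				demo_work_pending = false;
				/* ... process the work ... */
			}
		}
		return 0;
	}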
drivers/net/irda/stir4200.c

@@ -750,7 +750,7 @@ static int stir_transmit_thread(void *arg)
 
 			write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
 
-			refrigerator();
+			try_to_freeze();
 
 			if (change_speed(stir, stir->speed))
 				break;
fs/btrfs/async-thread.c

@@ -340,7 +340,7 @@ again:
 		if (freezing(current)) {
 			worker->working = 0;
 			spin_unlock_irq(&worker->lock);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			spin_unlock_irq(&worker->lock);
 			if (!kthread_should_stop()) {
fs/btrfs/disk-io.c

@@ -1579,9 +1579,7 @@ static int cleaner_kthread(void *arg)
 			btrfs_run_defrag_inodes(root->fs_info);
 		}
 
-		if (freezing(current)) {
-			refrigerator();
-		} else {
+		if (!try_to_freeze()) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop())
 				schedule();
@@ -1635,9 +1633,7 @@ sleep:
 		wake_up_process(root->fs_info->cleaner_kthread);
 		mutex_unlock(&root->fs_info->transaction_kthread_mutex);
 
-		if (freezing(current)) {
-			refrigerator();
-		} else {
+		if (!try_to_freeze()) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop() &&
 			    !btrfs_transaction_blocked(root->fs_info))
fs/ext4/super.c

@@ -2882,8 +2882,7 @@ cont_thread:
 		}
 		mutex_unlock(&eli->li_list_mtx);
 
-		if (freezing(current))
-			refrigerator();
+		try_to_freeze();
 
 		cur = jiffies;
 		if ((time_after_eq(cur, next_wakeup)) ||
fs/gfs2/log.c

@@ -951,8 +951,8 @@ int gfs2_logd(void *data)
 			wake_up(&sdp->sd_log_waitq);
 
 		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
-		if (freezing(current))
-			refrigerator();
+
+		try_to_freeze();
 
 		do {
 			prepare_to_wait(&sdp->sd_logd_waitq, &wait,

fs/gfs2/quota.c

@@ -1427,8 +1427,8 @@ int gfs2_quotad(void *data)
 		/* Check for & recover partially truncated inodes */
 		quotad_check_trunc_list(sdp);
 
-		if (freezing(current))
-			refrigerator();
+		try_to_freeze();
+
 		t = min(quotad_timeo, statfs_timeo);
 
 		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
fs/jbd/journal.c

@@ -166,7 +166,7 @@ loop:
 		 */
 		jbd_debug(1, "Now suspending kjournald\n");
 		spin_unlock(&journal->j_state_lock);
-		refrigerator();
+		try_to_freeze();
 		spin_lock(&journal->j_state_lock);
 	} else {
 		/*

fs/jbd2/journal.c

@@ -173,7 +173,7 @@ loop:
 		 */
 		jbd_debug(1, "Now suspending kjournald2\n");
 		write_unlock(&journal->j_state_lock);
-		refrigerator();
+		try_to_freeze();
 		write_lock(&journal->j_state_lock);
 	} else {
 		/*
fs/jfs/jfs_logmgr.c

@@ -2349,7 +2349,7 @@ int jfsIOWait(void *arg)
 
 		if (freezing(current)) {
 			spin_unlock_irq(&log_redrive_lock);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_irq(&log_redrive_lock);

fs/jfs/jfs_txnmgr.c

@@ -2800,7 +2800,7 @@ int jfs_lazycommit(void *arg)
 
 		if (freezing(current)) {
 			LAZY_UNLOCK(flags);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			DECLARE_WAITQUEUE(wq, current);
 
@@ -2994,7 +2994,7 @@ int jfs_sync(void *arg)
 
 		if (freezing(current)) {
 			TXN_UNLOCK();
-			refrigerator();
+			try_to_freeze();
 		} else {
 			set_current_state(TASK_INTERRUPTIBLE);
 			TXN_UNLOCK();
fs/nilfs2/segment.c

@@ -2470,7 +2470,7 @@ static int nilfs_segctor_thread(void *arg)
 
 	if (freezing(current)) {
 		spin_unlock(&sci->sc_state_lock);
-		refrigerator();
+		try_to_freeze();
 		spin_lock(&sci->sc_state_lock);
 	} else {
 		DEFINE_WAIT(wait);
fs/xfs/xfs_buf.c

@@ -1703,7 +1703,7 @@ xfsbufd(
 
 		if (unlikely(freezing(current))) {
 			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
 		}
include/linux/freezer.h

@@ -47,18 +47,17 @@ static inline bool should_send_signal(struct task_struct *p)
 /* Takes and releases task alloc lock using task_lock() */
 extern int thaw_process(struct task_struct *p);
 
-extern void refrigerator(void);
+extern bool __refrigerator(void);
 extern int freeze_processes(void);
 extern int freeze_kernel_threads(void);
 extern void thaw_processes(void);
 
-static inline int try_to_freeze(void)
+static inline bool try_to_freeze(void)
 {
-	if (freezing(current)) {
-		refrigerator();
-		return 1;
-	} else
-		return 0;
+	might_sleep();
+	if (likely(!freezing(current)))
+		return false;
+	return __refrigerator();
 }
 
 extern bool freeze_task(struct task_struct *p, bool sig_only);
@@ -181,12 +180,12 @@ static inline void set_freeze_flag(struct task_struct *p) {}
 static inline void clear_freeze_flag(struct task_struct *p) {}
 static inline int thaw_process(struct task_struct *p) { return 1; }
 
-static inline void refrigerator(void) {}
+static inline bool __refrigerator(void) { return false; }
 static inline int freeze_processes(void) { return -ENOSYS; }
 static inline int freeze_kernel_threads(void) { return -ENOSYS; }
 static inline void thaw_processes(void) {}
 
-static inline int try_to_freeze(void) { return 0; }
+static inline bool try_to_freeze(void) { return false; }
 
 static inline void freezer_do_not_count(void) {}
 static inline void freezer_count(void) {}
kernel/freezer.c

@@ -23,10 +23,11 @@ static inline void frozen_process(void)
 }
 
 /* Refrigerator is place where frozen processes are stored :-). */
-void refrigerator(void)
+bool __refrigerator(void)
 {
 	/* Hmm, should we be allowed to suspend when there are realtime
 	   processes around? */
+	bool was_frozen = false;
 	long save;
 
 	task_lock(current);
@@ -35,7 +36,7 @@ void refrigerator(void)
 		task_unlock(current);
 	} else {
 		task_unlock(current);
-		return;
+		return was_frozen;
 	}
 	save = current->state;
 	pr_debug("%s entered refrigerator\n", current->comm);
@@ -51,6 +52,7 @@ void refrigerator(void)
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		if (!frozen(current))
 			break;
+		was_frozen = true;
 		schedule();
 	}
 
@@ -65,8 +67,10 @@ void refrigerator(void)
 	 * synchronization which depends on ordered task state change.
 	 */
 	set_current_state(save);
+
+	return was_frozen;
 }
-EXPORT_SYMBOL(refrigerator);
+EXPORT_SYMBOL(__refrigerator);
 
 static void fake_signal_wake_up(struct task_struct *p)
 {