sched/wait: Fix the signal handling fix
Jan Stancek reported that I wrecked things for him by fixing things for
Vladimir :/
His report was due to an UNINTERRUPTIBLE wait getting -EINTR, which
should not be possible; however, my previous patch made it possible by
unconditionally checking signal_pending().
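Concretely, after 68985633bc the default bit-wait action checked for any
pending signal, regardless of how the task had gone to sleep. The old side
of the kernel/sched/wait.c hunk below shows it, roughly:

        __sched int bit_wait(struct wait_bit_key *word)
        {
                schedule();
                if (signal_pending(current))    /* true even for an UNINTERRUPTIBLE sleeper */
                        return -EINTR;
                return 0;
        }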
We cannot use current->state as was done previously, because it can
already have been changed by the time the instruction after the store to
that variable runs. We must instead pass the initial state along and use
that.
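Passing the initial state lets the helpers use signal_pending_state(),
which gates the signal check on the sleep mode. A simplified sketch of
that helper (assumed from include/linux/sched.h of this era, not part of
this patch):

        static inline int signal_pending_state(long state, struct task_struct *p)
        {
                if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
                        return 0;               /* uninterruptible sleep: signals are ignored */
                if (!signal_pending(p))
                        return 0;               /* nothing pending */
                /* INTERRUPTIBLE reacts to any signal, KILLABLE only to fatal ones */
                return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
        }

With that, a TASK_UNINTERRUPTIBLE bit wait can no longer return -EINTR,
while TASK_KILLABLE waits still react to fatal signals.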
Fixes: 68985633bc ("sched/wait: Fix signal handling in bit wait helpers")
Reported-by: Jan Stancek <jstancek@redhat.com>
Reported-by: Chris Mason <clm@fb.com>
Tested-by: Jan Stancek <jstancek@redhat.com>
Tested-by: Vladimir Murzin <vladimir.murzin@arm.com>
Tested-by: Chris Mason <clm@fb.com>
Reviewed-by: Paul Turner <pjt@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: tglx@linutronix.de
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: hpa@zytor.com
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit dfd01f0260 (parent fc89182834)
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1831,11 +1831,11 @@ cifs_invalidate_mapping(struct inode *inode)
  * @word: long word containing the bit lock
  */
 static int
-cifs_wait_bit_killable(struct wait_bit_key *key)
+cifs_wait_bit_killable(struct wait_bit_key *key, int mode)
 {
-        if (fatal_signal_pending(current))
-                return -ERESTARTSYS;
         freezable_schedule_unsafe();
+        if (signal_pending_state(mode, current))
+                return -ERESTARTSYS;
         return 0;
 }
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -75,11 +75,11 @@ nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
  * nfs_wait_bit_killable - helper for functions that are sleeping on bit locks
  * @word: long word containing the bit lock
  */
-int nfs_wait_bit_killable(struct wait_bit_key *key)
+int nfs_wait_bit_killable(struct wait_bit_key *key, int mode)
 {
-        if (fatal_signal_pending(current))
-                return -ERESTARTSYS;
         freezable_schedule_unsafe();
+        if (signal_pending_state(mode, current))
+                return -ERESTARTSYS;
         return 0;
 }
 EXPORT_SYMBOL_GPL(nfs_wait_bit_killable);
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -379,7 +379,7 @@ extern int nfs_drop_inode(struct inode *);
 extern void nfs_clear_inode(struct inode *);
 extern void nfs_evict_inode(struct inode *);
 void nfs_zap_acl_cache(struct inode *inode);
-extern int nfs_wait_bit_killable(struct wait_bit_key *key);
+extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);

 /* super.c */
 extern const struct super_operations nfs_sops;
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -129,7 +129,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c)
                 set_bit(NFS_IO_INPROGRESS, &c->flags);
                 if (atomic_read(&c->io_count) == 0)
                         break;
-                ret = nfs_wait_bit_killable(&q.key);
+                ret = nfs_wait_bit_killable(&q.key, TASK_KILLABLE);
         } while (atomic_read(&c->io_count) != 0 && !ret);
         finish_wait(wq, &q.wait);
         return ret;
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1466,11 +1466,11 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
 }

 /* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */
-static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key)
+static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key, int mode)
 {
         if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags))
                 return 1;
-        return nfs_wait_bit_killable(key);
+        return nfs_wait_bit_killable(key, mode);
 }

 static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -145,7 +145,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
         list_del(&old->task_list);
 }

-typedef int wait_bit_action_f(struct wait_bit_key *);
+typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
@@ -960,10 +960,10 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 } while (0)


-extern int bit_wait(struct wait_bit_key *);
-extern int bit_wait_io(struct wait_bit_key *);
-extern int bit_wait_timeout(struct wait_bit_key *);
-extern int bit_wait_io_timeout(struct wait_bit_key *);
+extern int bit_wait(struct wait_bit_key *, int);
+extern int bit_wait_io(struct wait_bit_key *, int);
+extern int bit_wait_timeout(struct wait_bit_key *, int);
+extern int bit_wait_io_timeout(struct wait_bit_key *, int);

 /**
  * wait_on_bit - wait for a bit to be cleared
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -392,7 +392,7 @@ __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
         do {
                 prepare_to_wait(wq, &q->wait, mode);
                 if (test_bit(q->key.bit_nr, q->key.flags))
-                        ret = (*action)(&q->key);
+                        ret = (*action)(&q->key, mode);
         } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
         finish_wait(wq, &q->wait);
         return ret;
@@ -431,7 +431,7 @@ __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
                 prepare_to_wait_exclusive(wq, &q->wait, mode);
                 if (!test_bit(q->key.bit_nr, q->key.flags))
                         continue;
-                ret = action(&q->key);
+                ret = action(&q->key, mode);
                 if (!ret)
                         continue;
                 abort_exclusive_wait(wq, &q->wait, mode, &q->key);
@@ -581,43 +581,43 @@ void wake_up_atomic_t(atomic_t *p)
 }
 EXPORT_SYMBOL(wake_up_atomic_t);

-__sched int bit_wait(struct wait_bit_key *word)
+__sched int bit_wait(struct wait_bit_key *word, int mode)
 {
         schedule();
-        if (signal_pending(current))
+        if (signal_pending_state(mode, current))
                 return -EINTR;
         return 0;
 }
 EXPORT_SYMBOL(bit_wait);

-__sched int bit_wait_io(struct wait_bit_key *word)
+__sched int bit_wait_io(struct wait_bit_key *word, int mode)
 {
         io_schedule();
-        if (signal_pending(current))
+        if (signal_pending_state(mode, current))
                 return -EINTR;
         return 0;
 }
 EXPORT_SYMBOL(bit_wait_io);

-__sched int bit_wait_timeout(struct wait_bit_key *word)
+__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
 {
         unsigned long now = READ_ONCE(jiffies);
         if (time_after_eq(now, word->timeout))
                 return -EAGAIN;
         schedule_timeout(word->timeout - now);
-        if (signal_pending(current))
+        if (signal_pending_state(mode, current))
                 return -EINTR;
         return 0;
 }
 EXPORT_SYMBOL_GPL(bit_wait_timeout);

-__sched int bit_wait_io_timeout(struct wait_bit_key *word)
+__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
 {
         unsigned long now = READ_ONCE(jiffies);
         if (time_after_eq(now, word->timeout))
                 return -EAGAIN;
         io_schedule_timeout(word->timeout - now);
-        if (signal_pending(current))
+        if (signal_pending_state(mode, current))
                 return -EINTR;
         return 0;
 }
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -250,11 +250,11 @@ void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
 }
 EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

-static int rpc_wait_bit_killable(struct wait_bit_key *key)
+static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
 {
-        if (fatal_signal_pending(current))
-                return -ERESTARTSYS;
         freezable_schedule_unsafe();
+        if (signal_pending_state(mode, current))
+                return -ERESTARTSYS;
         return 0;
 }
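A usage sketch (hypothetical caller, bit name and flags word are
placeholders, not part of this patch): the mode a caller hands to
wait_on_bit() now travels through __wait_on_bit() into the action, so the
signal check matches the sleep state:

        /* Hypothetical example; MY_BIT and flags are placeholders. */
        err = wait_on_bit(&flags, MY_BIT, TASK_UNINTERRUPTIBLE); /* can no longer return -EINTR */
        err = wait_on_bit(&flags, MY_BIT, TASK_KILLABLE);        /* -EINTR only on a fatal signal */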