fasync: Fix deadlock between task-context and interrupt-context kill_fasync()
I observed the following deadlock between them:

[task 1]                           [task 2]                          [task 3]
kill_fasync()                      mm_update_next_owner()            copy_process()
 spin_lock_irqsave(&fa->fa_lock)    read_lock(&tasklist_lock)         write_lock_irq(&tasklist_lock)
  send_sigio()                     <IRQ>                              ...
   read_lock(&fown->lock)          kill_fasync()                      ...
    read_lock(&tasklist_lock)       spin_lock_irqsave(&fa->fa_lock)   ...

Task 1 can't take tasklist_lock for reading, since task 3 has already
expressed its wish to take the lock exclusively. Task 2 holds tasklist_lock
for reading, but it can't take the spin lock.

Another deadlock is also possible (which I haven't observed):

[task 1]                             [task 2]
f_getown()                           kill_fasync()
 read_lock(&f_own->lock)              spin_lock_irqsave(&fa->fa_lock,)
 <IRQ>                                 send_sigio()                     write_lock_irq(&f_own->lock)
 kill_fasync()                          read_lock(&fown->lock)
  spin_lock_irqsave(&fa->fa_lock,)

Actually, we do not need an exclusive fa->fa_lock in kill_fasync_rcu(), as it
only guarantees the integrity of fa->fa_file->f_owner. It may seem that it
used to give a task a small chance of receiving two sequential signals, if
there are two parallel kill_fasync() callers and the task handles the first
signal quickly, but the behaviour does not change, since do_send_sig_info()
takes the exclusive sighand lock.

The patch converts fa_lock into an rwlock_t, and this fixes the two deadlocks
above, as an rwlock is allowed to be taken from an interrupt handler by the
qrwlock design.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: Jeff Layton <jlayton@redhat.com>
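To make the resulting locking discipline easier to follow, here is a minimal
sketch of the pattern the patch arrives at (illustration only, not the kernel
sources: struct demo_async, demo_init(), demo_set_file() and demo_notify()
are made-up names). The write side runs in process context with interrupts
disabled, while the notification path takes only the read side and may
therefore be entered from an interrupt handler:

#include <linux/spinlock.h>	/* rwlock_t, rwlock_init(), read_lock(), write_lock_irq() */
#include <linux/fs.h>		/* struct file */

/* Hypothetical stand-in for struct fasync_struct. */
struct demo_async {
	rwlock_t	lock;	/* was a spinlock_t before the conversion */
	struct file	*file;
};

static void demo_init(struct demo_async *da)
{
	rwlock_init(&da->lock);
	da->file = NULL;
}

/* Process context only: exclusive access while repointing ->file.
 * Interrupts are disabled, so the IRQ-side reader below cannot nest
 * on top of a held write lock on this CPU. */
static void demo_set_file(struct demo_async *da, struct file *filp)
{
	write_lock_irq(&da->lock);
	da->file = filp;
	write_unlock_irq(&da->lock);
}

/* May be called from an interrupt handler: shared access is enough,
 * since the lock only has to keep ->file (and hence its f_owner)
 * stable while a signal is sent.  By qrwlock design a reader in
 * interrupt context is granted the lock even while a writer is
 * queued, which is what breaks the cycles shown above. */
static void demo_notify(struct demo_async *da)
{
	read_lock(&da->lock);
	if (da->file) {
		/* send_sigio()-style delivery keyed off da->file->f_owner */
	}
	read_unlock(&da->lock);
}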
parent fff75eb2a0
commit 7a107c0f55
fs/fcntl.c | 15
@@ -871,9 +871,9 @@ int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
 		if (fa->fa_file != filp)
 			continue;
 
-		spin_lock_irq(&fa->fa_lock);
+		write_lock_irq(&fa->fa_lock);
 		fa->fa_file = NULL;
-		spin_unlock_irq(&fa->fa_lock);
+		write_unlock_irq(&fa->fa_lock);
 
 		*fp = fa->fa_next;
 		call_rcu(&fa->fa_rcu, fasync_free_rcu);
@@ -918,13 +918,13 @@ struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasy
 		if (fa->fa_file != filp)
 			continue;
 
-		spin_lock_irq(&fa->fa_lock);
+		write_lock_irq(&fa->fa_lock);
 		fa->fa_fd = fd;
-		spin_unlock_irq(&fa->fa_lock);
+		write_unlock_irq(&fa->fa_lock);
 		goto out;
 	}
 
-	spin_lock_init(&new->fa_lock);
+	rwlock_init(&new->fa_lock);
 	new->magic = FASYNC_MAGIC;
 	new->fa_file = filp;
 	new->fa_fd = fd;
@@ -987,14 +987,13 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
 {
 	while (fa) {
 		struct fown_struct *fown;
-		unsigned long flags;
 
 		if (fa->magic != FASYNC_MAGIC) {
 			printk(KERN_ERR "kill_fasync: bad magic number in "
 			       "fasync_struct!\n");
 			return;
 		}
-		spin_lock_irqsave(&fa->fa_lock, flags);
+		read_lock(&fa->fa_lock);
 		if (fa->fa_file) {
 			fown = &fa->fa_file->f_owner;
 			/* Don't send SIGURG to processes which have not set a
@@ -1003,7 +1002,7 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
 			if (!(sig == SIGURG && fown->signum == 0))
 				send_sigio(fown, fa->fa_fd, band);
 		}
-		spin_unlock_irqrestore(&fa->fa_lock, flags);
+		read_unlock(&fa->fa_lock);
 		fa = rcu_dereference(fa->fa_next);
 	}
 }
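A hedged side note on the hunk above (my reading, not wording from the patch):
dropping the irqsave/irqrestore pair is safe because the write side of fa_lock
is now only taken in process context with interrupts disabled (write_lock_irq()
in fasync_remove_entry() and fasync_insert_entry()), so a read-side holder that
gets interrupted can only meet another reader, and read locks are shared. A
rough sketch, with reader_path() a made-up name standing in for any
process-context caller that reaches kill_fasync_rcu():

#include <linux/spinlock.h>
#include <linux/fs.h>		/* struct fasync_struct after the conversion */

static void reader_path(struct fasync_struct *fa)
{
	read_lock(&fa->fa_lock);	/* interrupts stay enabled */
	/*
	 * <IRQ> kill_fasync() -> kill_fasync_rcu() -> read_lock(&fa->fa_lock)
	 * succeeds here: read locks are shared, and no write_lock of fa_lock
	 * ever runs in interrupt context, so there is no self-deadlock.
	 */
	read_unlock(&fa->fa_lock);
}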
include/linux/fs.h

@@ -1250,7 +1250,7 @@ static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
 }
 
 struct fasync_struct {
-	spinlock_t		fa_lock;
+	rwlock_t		fa_lock;
 	int			magic;
 	int			fa_fd;
 	struct fasync_struct	*fa_next;	/* singly linked list */