um: irq/sigio: Support suspend/resume handling of workaround IRQs
If the sigio workaround needed to be applied to a file descriptor,
set_irq_wake() wouldn't work for it since it would get polled by the
thread instead of causing SIGIO, and thus could never really cause a
wakeup, since the thread notification FD wasn't marked as being able
to wake up the system.

Fix this by marking the thread's notification FD explicitly as a wake
source FD, i.e. not suppressing SIGIO for it in suspend. In order to
not cause spurious wakeups, we then need to remove all FDs that
shouldn't wake up the system from the polling thread.

In order to do this, add unlocked versions of ignore_sigio_fd() and
add_sigio_fd() (nothing else is happening in suspend, so this is fine),
and also modify ignore_sigio_fd() to return -ENOENT if the FD wasn't
originally in there. This doesn't matter because nothing else currently
checks the return value, but the irq code needs to know which ones to
restore the workaround for.

All told, this lets us use a timerfd for the RTC clock in the next
patch, which doesn't send SIGIO.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
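As an illustration (not part of the patch), here is a minimal sketch of the pattern that the um_irqs_suspend()/um_irqs_resume() hunks below implement; struct example_entry, example_suspend() and example_resume() are made-up names for this sketch only:

/*
 * Hypothetical sketch only -- these names do not exist in the tree;
 * the real code lives in um_irqs_suspend()/um_irqs_resume() below.
 */
struct example_entry {
	int fd;
	bool sigio_workaround;
};

static void example_suspend(struct example_entry *e)
{
	/*
	 * __ignore_sigio_fd() returns -ENOENT if the FD was not being
	 * polled by the workaround thread, so "!err" records that the
	 * workaround was in use and must be restored on resume.
	 */
	e->sigio_workaround = !__ignore_sigio_fd(e->fd);
}

static void example_resume(struct example_entry *e)
{
	if (e->sigio_workaround) {
		int err = __add_sigio_fd(e->fd);

		WARN(err < 0, "__add_sigio_fd returned %d\n", err);
	}
}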
commit cae20ba0a1
parent 452f94cecf
@@ -316,6 +316,14 @@ extern int add_sigio_fd(int fd);
 extern int ignore_sigio_fd(int fd);
 extern void maybe_sigio_broken(int fd);
 extern void sigio_broken(int fd);
+/*
+ * unlocked versions for IRQ controller code.
+ *
+ * This is safe because it's used at suspend/resume and nothing
+ * else is running.
+ */
+extern int __add_sigio_fd(int fd);
+extern int __ignore_sigio_fd(int fd);
 
 /* prctl.c */
 extern int os_arch_prctl(int pid, int option, unsigned long *arg2);
@@ -45,6 +45,7 @@ struct irq_entry {
 	int fd;
 	struct irq_reg reg[NUM_IRQ_TYPES];
 	bool suspended;
+	bool sigio_workaround;
 };
 
 static DEFINE_SPINLOCK(irq_lock);
@@ -392,7 +393,14 @@ void um_irqs_suspend(void)
 			if (!entry->reg[t].events)
 				continue;
 
-			if (entry->reg[t].wakeup) {
+			/*
+			 * For the SIGIO_WRITE_IRQ, which is used to handle the
+			 * SIGIO workaround thread, we need special handling:
+			 * enable wake for it itself, but below we tell it about
+			 * any FDs that should be suspended.
+			 */
+			if (entry->reg[t].wakeup ||
+			    entry->reg[t].irq == SIGIO_WRITE_IRQ) {
 				wake = true;
 				break;
 			}
@@ -401,6 +409,8 @@ void um_irqs_suspend(void)
 		if (!wake) {
 			entry->suspended = true;
 			os_clear_fd_async(entry->fd);
+			entry->sigio_workaround =
+				!__ignore_sigio_fd(entry->fd);
 		}
 	}
 	spin_unlock_irqrestore(&irq_lock, flags);
@@ -418,6 +428,11 @@ void um_irqs_resume(void)
 
 			WARN(err < 0, "os_set_fd_async returned %d\n", err);
 			entry->suspended = false;
+
+			if (entry->sigio_workaround) {
+				err = __add_sigio_fd(entry->fd);
+				WARN(err < 0, "add_sigio_returned %d\n", err);
+			}
 		}
 	}
 	spin_unlock_irqrestore(&irq_lock, flags);
@@ -164,47 +164,55 @@ static void update_thread(void)
 	set_signals_trace(flags);
 }
 
-int add_sigio_fd(int fd)
+int __add_sigio_fd(int fd)
 {
 	struct pollfd *p;
-	int err = 0, i, n;
+	int err, i, n;
 
-	sigio_lock();
 	for (i = 0; i < all_sigio_fds.used; i++) {
 		if (all_sigio_fds.poll[i].fd == fd)
 			break;
 	}
-	if (i == all_sigio_fds.used) {
-		err = -ENOSPC;
-		goto out;
-	}
+	if (i == all_sigio_fds.used)
+		return -ENOSPC;
 
 	p = &all_sigio_fds.poll[i];
 
 	for (i = 0; i < current_poll.used; i++) {
 		if (current_poll.poll[i].fd == fd)
-			goto out;
+			return 0;
 	}
 
 	n = current_poll.used;
 	err = need_poll(&next_poll, n + 1);
 	if (err)
-		goto out;
+		return err;
 
 	memcpy(next_poll.poll, current_poll.poll,
 	       current_poll.used * sizeof(struct pollfd));
 	next_poll.poll[n] = *p;
 	next_poll.used = n + 1;
 	update_thread();
- out:
-	sigio_unlock();
+
+	return 0;
+}
+
+int add_sigio_fd(int fd)
+{
+	int err;
+
+	sigio_lock();
+	err = __add_sigio_fd(fd);
+	sigio_unlock();
+
 	return err;
 }
 
-int ignore_sigio_fd(int fd)
+int __ignore_sigio_fd(int fd)
 {
 	struct pollfd *p;
-	int err = 0, i, n = 0;
+	int err, i, n = 0;
 
 	/*
 	 * This is called from exitcalls elsewhere in UML - if
@@ -214,17 +222,16 @@ int ignore_sigio_fd(int fd)
 	if (write_sigio_pid == -1)
 		return -EIO;
 
-	sigio_lock();
 	for (i = 0; i < current_poll.used; i++) {
 		if (current_poll.poll[i].fd == fd)
 			break;
 	}
 	if (i == current_poll.used)
-		goto out;
+		return -ENOENT;
 
 	err = need_poll(&next_poll, current_poll.used - 1);
 	if (err)
-		goto out;
+		return err;
 
 	for (i = 0; i < current_poll.used; i++) {
 		p = &current_poll.poll[i];
@@ -234,8 +241,18 @@ int ignore_sigio_fd(int fd)
 	next_poll.used = current_poll.used - 1;
 
 	update_thread();
- out:
-	sigio_unlock();
+
+	return 0;
+}
+
+int ignore_sigio_fd(int fd)
+{
+	int err;
+
+	sigio_lock();
+	err = __ignore_sigio_fd(fd);
+	sigio_unlock();
+
+	return err;
 }