forked from Minki/linux
ep_insert(): don't open-code ep_remove() on failure exits
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
This commit is contained in:
parent
57804b1cc4
commit
e3e096e7fc
@@ -1384,12 +1384,16 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
|
||||
epi->next = EP_UNACTIVE_PTR;
|
||||
if (epi->event.events & EPOLLWAKEUP) {
|
||||
error = ep_create_wakeup_source(epi);
|
||||
if (error)
|
||||
goto error_create_wakeup_source;
|
||||
if (error) {
|
||||
kmem_cache_free(epi_cache, epi);
|
||||
return error;
|
||||
}
|
||||
} else {
|
||||
RCU_INIT_POINTER(epi->ws, NULL);
|
||||
}
|
||||
|
||||
atomic_long_inc(&ep->user->epoll_watches);
|
||||
|
||||
/* Add the current item to the list of active epoll hook for this file */
|
||||
spin_lock(&tfile->f_lock);
|
||||
list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
|
||||
@@ -1402,9 +1406,10 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
|
||||
ep_rbtree_insert(ep, epi);
|
||||
|
||||
/* now check if we've created too many backpaths */
|
||||
error = -EINVAL;
|
||||
if (full_check && reverse_path_check())
|
||||
goto error_remove_epi;
|
||||
if (unlikely(full_check && reverse_path_check())) {
|
||||
ep_remove(ep, epi);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Initialize the poll table using the queue callback */
|
||||
epq.epi = epi;
|
||||
@@ -1424,9 +1429,10 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
|
||||
* install process. Namely an allocation for a wait queue failed due
|
||||
* high memory pressure.
|
||||
*/
|
||||
error = -ENOMEM;
|
||||
if (!epq.epi)
|
||||
goto error_unregister;
|
||||
if (unlikely(!epq.epi)) {
|
||||
ep_remove(ep, epi);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* We have to drop the new item inside our item list to keep track of it */
|
||||
write_lock_irq(&ep->lock);
|
||||
@@ -1448,40 +1454,11 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
|
||||
|
||||
write_unlock_irq(&ep->lock);
|
||||
|
||||
atomic_long_inc(&ep->user->epoll_watches);
|
||||
|
||||
/* We have to call this outside the lock */
|
||||
if (pwake)
|
||||
ep_poll_safewake(ep, NULL);
|
||||
|
||||
return 0;
|
||||
|
||||
error_unregister:
|
||||
ep_unregister_pollwait(ep, epi);
|
||||
error_remove_epi:
|
||||
spin_lock(&tfile->f_lock);
|
||||
list_del_rcu(&epi->fllink);
|
||||
spin_unlock(&tfile->f_lock);
|
||||
|
||||
rb_erase_cached(&epi->rbn, &ep->rbr);
|
||||
|
||||
/*
|
||||
* We need to do this because an event could have been arrived on some
|
||||
* allocated wait queue. Note that we don't care about the ep->ovflist
|
||||
* list, since that is used/cleaned only inside a section bound by "mtx".
|
||||
* And ep_insert() is called with "mtx" held.
|
||||
*/
|
||||
write_lock_irq(&ep->lock);
|
||||
if (ep_is_linked(epi))
|
||||
list_del_init(&epi->rdllink);
|
||||
write_unlock_irq(&ep->lock);
|
||||
|
||||
wakeup_source_unregister(ep_wakeup_source(epi));
|
||||
|
||||
error_create_wakeup_source:
|
||||
kmem_cache_free(epi_cache, epi);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
|
Loading…
Reference in New Issue
Block a user