Merge branch 'sctp'

Wang Weidong says:

====================
sctp: remove some macro locking wrappers

In sctp.h we can find some macro locking wrappers. As Neil pointed out:

"Its because in the origional implementation of the sctp protocol, there was a
user space test harness which built the kernel module for userspace execution to
carry out some unit testing on the code.  It did so by redefining some of those
locking macros to user space friendly code.  IIRC we haven't used those unit
tests in years, and so should be removing them, not adding them to other
locations."

So remove them.
====================

Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Vlad Yasevich <vyasevich@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2014-01-21 18:41:46 -08:00
commit 656edac678
8 changed files with 100 additions and 119 deletions
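
To make the nature of the change concrete: the wrappers being removed are one-to-one macro aliases around the generic kernel locking primitives, so every call site can simply call the primitive directly. The following is a minimal, self-contained userspace sketch of the same before/after pattern; it uses pthread mutexes only as a stand-in for the kernel's lock_sock()/spin_lock() primitives, and the my_lock()/my_unlock() names are hypothetical, purely for illustration.

#include <pthread.h>
#include <stdio.h>

/* "Before": thin wrapper macros, analogous to the sctp_* locking aliases
 * deleted from sctp.h.  They add no behaviour of their own. */
#define my_lock(m)   pthread_mutex_lock(m)
#define my_unlock(m) pthread_mutex_unlock(m)

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

static void bump_via_wrapper(void)
{
	my_lock(&lock);              /* old style: go through the alias */
	counter++;
	my_unlock(&lock);
}

static void bump_direct(void)
{
	pthread_mutex_lock(&lock);   /* new style: call the primitive directly */
	counter++;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	bump_via_wrapper();
	bump_direct();
	printf("counter = %d\n", counter);
	return 0;
}

Because each alias expands to exactly one primitive, the conversion in the diff below is mechanical: it is large, but there is no functional change.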

View File

@@ -713,11 +713,11 @@ static void process_sctp_notification(struct connection *con,
return;
/* Peel off a new sock */
-sctp_lock_sock(con->sock->sk);
+lock_sock(con->sock->sk);
ret = sctp_do_peeloff(con->sock->sk,
sn->sn_assoc_change.sac_assoc_id,
&new_con->sock);
-sctp_release_sock(con->sock->sk);
+release_sock(con->sock->sk);
if (ret < 0) {
log_print("Can't peel off a socket for "
"connection %d to node %d: err=%d",

View File

@@ -170,25 +170,6 @@ extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
* Section: Macros, externs, and inlines
*/
-/* spin lock wrappers. */
-#define sctp_spin_lock_irqsave(lock, flags) spin_lock_irqsave(lock, flags)
-#define sctp_spin_unlock_irqrestore(lock, flags) \
-spin_unlock_irqrestore(lock, flags)
-#define sctp_local_bh_disable() local_bh_disable()
-#define sctp_local_bh_enable() local_bh_enable()
-#define sctp_spin_lock(lock) spin_lock(lock)
-#define sctp_spin_unlock(lock) spin_unlock(lock)
-#define sctp_write_lock(lock) write_lock(lock)
-#define sctp_write_unlock(lock) write_unlock(lock)
-#define sctp_read_lock(lock) read_lock(lock)
-#define sctp_read_unlock(lock) read_unlock(lock)
-/* sock lock wrappers. */
-#define sctp_lock_sock(sk) lock_sock(sk)
-#define sctp_release_sock(sk) release_sock(sk)
-#define sctp_bh_lock_sock(sk) bh_lock_sock(sk)
-#define sctp_bh_unlock_sock(sk) bh_unlock_sock(sk)
/* SCTP SNMP MIB stats handlers */
#define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
#define SCTP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
@@ -353,13 +334,13 @@ static inline void sctp_skb_list_tail(struct sk_buff_head *list,
{
unsigned long flags;
-sctp_spin_lock_irqsave(&head->lock, flags);
-sctp_spin_lock(&list->lock);
+spin_lock_irqsave(&head->lock, flags);
+spin_lock(&list->lock);
skb_queue_splice_tail_init(list, head);
-sctp_spin_unlock(&list->lock);
-sctp_spin_unlock_irqrestore(&head->lock, flags);
+spin_unlock(&list->lock);
+spin_unlock_irqrestore(&head->lock, flags);
}
/**

View File

@@ -368,9 +368,9 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
{
struct sctp_association *asoc;
-sctp_local_bh_disable();
+local_bh_disable();
asoc = __sctp_endpoint_lookup_assoc(ep, paddr, transport);
-sctp_local_bh_enable();
+local_bh_enable();
return asoc;
}

View File

@@ -238,7 +238,7 @@ int sctp_rcv(struct sk_buff *skb)
* bottom halves on this lock, but a user may be in the lock too,
* so check if it is busy.
*/
-sctp_bh_lock_sock(sk);
+bh_lock_sock(sk);
if (sk != rcvr->sk) {
/* Our cached sk is different from the rcvr->sk. This is
@@ -248,14 +248,14 @@ int sctp_rcv(struct sk_buff *skb)
* be doing something with the new socket. Switch our veiw
* of the current sk.
*/
-sctp_bh_unlock_sock(sk);
+bh_unlock_sock(sk);
sk = rcvr->sk;
-sctp_bh_lock_sock(sk);
+bh_lock_sock(sk);
}
if (sock_owned_by_user(sk)) {
if (sctp_add_backlog(sk, skb)) {
-sctp_bh_unlock_sock(sk);
+bh_unlock_sock(sk);
sctp_chunk_free(chunk);
skb = NULL; /* sctp_chunk_free already freed the skb */
goto discard_release;
@@ -266,7 +266,7 @@ int sctp_rcv(struct sk_buff *skb)
sctp_inq_push(&chunk->rcvr->inqueue, chunk);
}
-sctp_bh_unlock_sock(sk);
+bh_unlock_sock(sk);
/* Release the asoc/ep ref we took in the lookup calls. */
if (asoc)
@@ -327,7 +327,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
*/
sk = rcvr->sk;
-sctp_bh_lock_sock(sk);
+bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
@@ -337,7 +337,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
} else
sctp_inq_push(inqueue, chunk);
-sctp_bh_unlock_sock(sk);
+bh_unlock_sock(sk);
/* If the chunk was backloged again, don't drop refs */
if (backloged)
@@ -522,7 +522,7 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
goto out;
}
-sctp_bh_lock_sock(sk);
+bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
* servers this needs to be solved differently.
@@ -542,7 +542,7 @@ out:
/* Common cleanup code for icmp/icmpv6 error handler. */
void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
{
-sctp_bh_unlock_sock(sk);
+bh_unlock_sock(sk);
sctp_association_put(asoc);
}
@@ -718,17 +718,17 @@ static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
head = &sctp_ep_hashtable[epb->hashent];
-sctp_write_lock(&head->lock);
+write_lock(&head->lock);
hlist_add_head(&epb->node, &head->chain);
-sctp_write_unlock(&head->lock);
+write_unlock(&head->lock);
}
/* Add an endpoint to the hash. Local BH-safe. */
void sctp_hash_endpoint(struct sctp_endpoint *ep)
{
-sctp_local_bh_disable();
+local_bh_disable();
__sctp_hash_endpoint(ep);
-sctp_local_bh_enable();
+local_bh_enable();
}
/* Remove endpoint from the hash table. */
@@ -744,17 +744,17 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
head = &sctp_ep_hashtable[epb->hashent];
-sctp_write_lock(&head->lock);
+write_lock(&head->lock);
hlist_del_init(&epb->node);
-sctp_write_unlock(&head->lock);
+write_unlock(&head->lock);
}
/* Remove endpoint from the hash. Local BH-safe. */
void sctp_unhash_endpoint(struct sctp_endpoint *ep)
{
-sctp_local_bh_disable();
+local_bh_disable();
__sctp_unhash_endpoint(ep);
-sctp_local_bh_enable();
+local_bh_enable();
}
/* Look up an endpoint. */
@@ -798,9 +798,9 @@ static void __sctp_hash_established(struct sctp_association *asoc)
head = &sctp_assoc_hashtable[epb->hashent];
-sctp_write_lock(&head->lock);
+write_lock(&head->lock);
hlist_add_head(&epb->node, &head->chain);
-sctp_write_unlock(&head->lock);
+write_unlock(&head->lock);
}
/* Add an association to the hash. Local BH-safe. */
@@ -809,9 +809,9 @@ void sctp_hash_established(struct sctp_association *asoc)
if (asoc->temp)
return;
-sctp_local_bh_disable();
+local_bh_disable();
__sctp_hash_established(asoc);
-sctp_local_bh_enable();
+local_bh_enable();
}
/* Remove association from the hash table. */
@@ -828,9 +828,9 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
head = &sctp_assoc_hashtable[epb->hashent];
-sctp_write_lock(&head->lock);
+write_lock(&head->lock);
hlist_del_init(&epb->node);
-sctp_write_unlock(&head->lock);
+write_unlock(&head->lock);
}
/* Remove association from the hash table. Local BH-safe. */
@@ -839,9 +839,9 @@ void sctp_unhash_established(struct sctp_association *asoc)
if (asoc->temp)
return;
-sctp_local_bh_disable();
+local_bh_disable();
__sctp_unhash_established(asoc);
-sctp_local_bh_enable();
+local_bh_enable();
}
/* Look up an association. */
@@ -891,9 +891,9 @@ struct sctp_association *sctp_lookup_association(struct net *net,
{
struct sctp_association *asoc;
-sctp_local_bh_disable();
+local_bh_disable();
asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
-sctp_local_bh_enable();
+local_bh_enable();
return asoc;
}

View File

@@ -218,7 +218,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
return -ENOMEM;
head = &sctp_ep_hashtable[hash];
-sctp_local_bh_disable();
+local_bh_disable();
read_lock(&head->lock);
sctp_for_each_hentry(epb, &head->chain) {
ep = sctp_ep(epb);
@@ -235,7 +235,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "\n");
}
read_unlock(&head->lock);
-sctp_local_bh_enable();
+local_bh_enable();
return 0;
}
@@ -326,7 +326,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
return -ENOMEM;
head = &sctp_assoc_hashtable[hash];
-sctp_local_bh_disable();
+local_bh_disable();
read_lock(&head->lock);
sctp_for_each_hentry(epb, &head->chain) {
assoc = sctp_assoc(epb);
@@ -362,7 +362,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "\n");
}
read_unlock(&head->lock);
-sctp_local_bh_enable();
+local_bh_enable();
return 0;
}
@@ -446,7 +446,7 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
return -ENOMEM;
head = &sctp_assoc_hashtable[hash];
-sctp_local_bh_disable();
+local_bh_disable();
read_lock(&head->lock);
rcu_read_lock();
sctp_for_each_hentry(epb, &head->chain) {
@@ -505,7 +505,7 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
rcu_read_unlock();
read_unlock(&head->lock);
-sctp_local_bh_enable();
+local_bh_enable();
return 0;

View File

@@ -634,10 +634,10 @@ static void sctp_addr_wq_timeout_handler(unsigned long arg)
/* ignore bound-specific endpoints */
if (!sctp_is_ep_boundall(sk))
continue;
-sctp_bh_lock_sock(sk);
+bh_lock_sock(sk);
if (sctp_asconf_mgmt(sp, addrw) < 0)
pr_debug("%s: sctp_asconf_mgmt failed\n", __func__);
-sctp_bh_unlock_sock(sk);
+bh_unlock_sock(sk);
}
#if IS_ENABLED(CONFIG_IPV6)
free_next:

View File

@@ -248,7 +248,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
/* Check whether a task is in the sock. */
-sctp_bh_lock_sock(asoc->base.sk);
+bh_lock_sock(asoc->base.sk);
if (sock_owned_by_user(asoc->base.sk)) {
pr_debug("%s: sock is busy\n", __func__);
@@ -275,7 +275,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
asoc->base.sk->sk_err = -error;
out_unlock:
-sctp_bh_unlock_sock(asoc->base.sk);
+bh_unlock_sock(asoc->base.sk);
sctp_transport_put(transport);
}
@@ -288,7 +288,7 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
struct net *net = sock_net(asoc->base.sk);
int error = 0;
-sctp_bh_lock_sock(asoc->base.sk);
+bh_lock_sock(asoc->base.sk);
if (sock_owned_by_user(asoc->base.sk)) {
pr_debug("%s: sock is busy: timer %d\n", __func__,
timeout_type);
@@ -315,7 +315,7 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
asoc->base.sk->sk_err = -error;
out_unlock:
-sctp_bh_unlock_sock(asoc->base.sk);
+bh_unlock_sock(asoc->base.sk);
sctp_association_put(asoc);
}
@@ -367,7 +367,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
struct sctp_association *asoc = transport->asoc;
struct net *net = sock_net(asoc->base.sk);
-sctp_bh_lock_sock(asoc->base.sk);
+bh_lock_sock(asoc->base.sk);
if (sock_owned_by_user(asoc->base.sk)) {
pr_debug("%s: sock is busy\n", __func__);
@@ -392,7 +392,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
asoc->base.sk->sk_err = -error;
out_unlock:
-sctp_bh_unlock_sock(asoc->base.sk);
+bh_unlock_sock(asoc->base.sk);
sctp_transport_put(transport);
}
@@ -405,7 +405,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
struct sctp_association *asoc = transport->asoc;
struct net *net = sock_net(asoc->base.sk);
-sctp_bh_lock_sock(asoc->base.sk);
+bh_lock_sock(asoc->base.sk);
if (sock_owned_by_user(asoc->base.sk)) {
pr_debug("%s: sock is busy\n", __func__);
@@ -427,7 +427,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
out_unlock:
-sctp_bh_unlock_sock(asoc->base.sk);
+bh_unlock_sock(asoc->base.sk);
sctp_association_put(asoc);
}

View File

@@ -272,7 +272,7 @@ static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
{
int retval = 0;
-sctp_lock_sock(sk);
+lock_sock(sk);
pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
addr, addr_len);
@@ -284,7 +284,7 @@ static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
else
retval = -EINVAL;
-sctp_release_sock(sk);
+release_sock(sk);
return retval;
}
@@ -1461,7 +1461,7 @@ static void sctp_close(struct sock *sk, long timeout)
pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
-sctp_lock_sock(sk);
+lock_sock(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
sk->sk_state = SCTP_SS_CLOSING;
@@ -1505,13 +1505,13 @@ static void sctp_close(struct sock *sk, long timeout)
sctp_wait_for_close(sk, timeout);
/* This will run the backlog queue. */
-sctp_release_sock(sk);
+release_sock(sk);
/* Supposedly, no process has access to the socket, but
* the net layers still may.
*/
-sctp_local_bh_disable();
-sctp_bh_lock_sock(sk);
+local_bh_disable();
+bh_lock_sock(sk);
/* Hold the sock, since sk_common_release() will put sock_put()
* and we have just a little more cleanup.
@@ -1519,8 +1519,8 @@ static void sctp_close(struct sock *sk, long timeout)
sock_hold(sk);
sk_common_release(sk);
-sctp_bh_unlock_sock(sk);
-sctp_local_bh_enable();
+bh_unlock_sock(sk);
+local_bh_enable();
sock_put(sk);
@@ -1665,7 +1665,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
pr_debug("%s: about to look up association\n", __func__);
-sctp_lock_sock(sk);
+lock_sock(sk);
/* If a msg_name has been specified, assume this is to be used. */
if (msg_name) {
@@ -1949,7 +1949,7 @@ out_free:
sctp_association_free(asoc);
}
out_unlock:
-sctp_release_sock(sk);
+release_sock(sk);
out_nounlock:
return sctp_error(sk, msg_flags, err);
@@ -2035,7 +2035,7 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
"addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
addr_len);
-sctp_lock_sock(sk);
+lock_sock(sk);
if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) {
err = -ENOTCONN;
@@ -2119,7 +2119,7 @@ out_free:
sctp_ulpevent_free(event);
}
out:
-sctp_release_sock(sk);
+release_sock(sk);
return err;
}
@@ -3590,7 +3590,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
goto out_nounlock;
}
-sctp_lock_sock(sk);
+lock_sock(sk);
switch (optname) {
case SCTP_SOCKOPT_BINDX_ADD:
@@ -3708,7 +3708,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
break;
}
-sctp_release_sock(sk);
+release_sock(sk);
out_nounlock:
return retval;
@@ -3736,7 +3736,7 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr,
int err = 0;
struct sctp_af *af;
-sctp_lock_sock(sk);
+lock_sock(sk);
pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
addr, addr_len);
@@ -3752,7 +3752,7 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr,
err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
}
-sctp_release_sock(sk);
+release_sock(sk);
return err;
}
@@ -3778,7 +3778,7 @@ static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
long timeo;
int error = 0;
-sctp_lock_sock(sk);
+lock_sock(sk);
sp = sctp_sk(sk);
ep = sp->ep;
@@ -3816,7 +3816,7 @@ static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
out:
-sctp_release_sock(sk);
+release_sock(sk);
*err = error;
return newsk;
}
@@ -3826,7 +3826,7 @@ static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
int rc = -ENOTCONN;
-sctp_lock_sock(sk);
+lock_sock(sk);
/*
* SEQPACKET-style sockets in LISTENING state are valid, for
@@ -3856,7 +3856,7 @@ static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
break;
}
out:
-sctp_release_sock(sk);
+release_sock(sk);
return rc;
}
@@ -5754,7 +5754,7 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
-sctp_lock_sock(sk);
+lock_sock(sk);
switch (optname) {
case SCTP_STATUS:
@@ -5878,7 +5878,7 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
break;
}
-sctp_release_sock(sk);
+release_sock(sk);
return retval;
}
@@ -5918,7 +5918,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
pr_debug("%s: begins, snum:%d\n", __func__, snum);
-sctp_local_bh_disable();
+local_bh_disable();
if (snum == 0) {
/* Search for an available port. */
@@ -5937,14 +5937,14 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
continue;
index = sctp_phashfn(sock_net(sk), rover);
head = &sctp_port_hashtable[index];
-sctp_spin_lock(&head->lock);
+spin_lock(&head->lock);
sctp_for_each_hentry(pp, &head->chain)
if ((pp->port == rover) &&
net_eq(sock_net(sk), pp->net))
goto next;
break;
next:
-sctp_spin_unlock(&head->lock);
+spin_unlock(&head->lock);
} while (--remaining > 0);
/* Exhausted local port range during search? */
@@ -5965,7 +5965,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
* port iterator, pp being NULL.
*/
head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
-sctp_spin_lock(&head->lock);
+spin_lock(&head->lock);
sctp_for_each_hentry(pp, &head->chain) {
if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
goto pp_found;
@@ -6049,10 +6049,10 @@ success:
ret = 0;
fail_unlock:
-sctp_spin_unlock(&head->lock);
+spin_unlock(&head->lock);
fail:
-sctp_local_bh_enable();
+local_bh_enable();
return ret;
}
@@ -6144,7 +6144,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
if (unlikely(backlog < 0))
return err;
-sctp_lock_sock(sk);
+lock_sock(sk);
/* Peeled-off sockets are not allowed to listen(). */
if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
@@ -6177,7 +6177,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
err = 0;
out:
-sctp_release_sock(sk);
+release_sock(sk);
return err;
}
@@ -6286,20 +6286,20 @@ static inline void __sctp_put_port(struct sock *sk)
inet_sk(sk)->inet_num)];
struct sctp_bind_bucket *pp;
-sctp_spin_lock(&head->lock);
+spin_lock(&head->lock);
pp = sctp_sk(sk)->bind_hash;
__sk_del_bind_node(sk);
sctp_sk(sk)->bind_hash = NULL;
inet_sk(sk)->inet_num = 0;
sctp_bucket_destroy(pp);
-sctp_spin_unlock(&head->lock);
+spin_unlock(&head->lock);
}
void sctp_put_port(struct sock *sk)
{
-sctp_local_bh_disable();
+local_bh_disable();
__sctp_put_port(sk);
-sctp_local_bh_enable();
+local_bh_enable();
}
/*
@@ -6474,9 +6474,9 @@ static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p)
* does not fit in the user's buffer, but this seems to be the
* only way to honor MSG_DONTWAIT realistically.
*/
-sctp_release_sock(sk);
+release_sock(sk);
*timeo_p = schedule_timeout(*timeo_p);
-sctp_lock_sock(sk);
+lock_sock(sk);
ready:
finish_wait(sk_sleep(sk), &wait);
@@ -6659,10 +6659,10 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
/* Let another process have a go. Since we are going
* to sleep anyway.
*/
-sctp_release_sock(sk);
+release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
BUG_ON(sk != asoc->base.sk);
-sctp_lock_sock(sk);
+lock_sock(sk);
*timeo_p = current_timeo;
}
@@ -6767,9 +6767,9 @@ static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
/* Let another process have a go. Since we are going
* to sleep anyway.
*/
-sctp_release_sock(sk);
+release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
-sctp_lock_sock(sk);
+lock_sock(sk);
*timeo_p = current_timeo;
}
@@ -6812,9 +6812,9 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
TASK_INTERRUPTIBLE);
if (list_empty(&ep->asocs)) {
-sctp_release_sock(sk);
+release_sock(sk);
timeo = schedule_timeout(timeo);
-sctp_lock_sock(sk);
+lock_sock(sk);
}
err = -EINVAL;
@@ -6847,9 +6847,9 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (list_empty(&sctp_sk(sk)->ep->asocs))
break;
-sctp_release_sock(sk);
+release_sock(sk);
timeout = schedule_timeout(timeout);
-sctp_lock_sock(sk);
+lock_sock(sk);
} while (!signal_pending(current) && timeout);
finish_wait(sk_sleep(sk), &wait);
@@ -6950,14 +6950,14 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
/* Hook this new socket in to the bind_hash list. */
head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
inet_sk(oldsk)->inet_num)];
-sctp_local_bh_disable();
-sctp_spin_lock(&head->lock);
+local_bh_disable();
+spin_lock(&head->lock);
pp = sctp_sk(oldsk)->bind_hash;
sk_add_bind_node(newsk, &pp->owner);
sctp_sk(newsk)->bind_hash = pp;
inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
-sctp_spin_unlock(&head->lock);
-sctp_local_bh_enable();
+spin_unlock(&head->lock);
+local_bh_enable();
/* Copy the bind_addr list from the original endpoint to the new
* endpoint so that we can handle restarts properly
@@ -7046,7 +7046,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
newsk->sk_shutdown |= RCV_SHUTDOWN;
newsk->sk_state = SCTP_SS_ESTABLISHED;
-sctp_release_sock(newsk);
+release_sock(newsk);
}