[AF_UNIX]: Use spinlock for unix_table_lock

This lock is actually taken mostly as a writer, so using a rwlock
actually just makes performance worse, especially on chips like the
Intel P4.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit fbe9cc4a87 (parent d83d8461f9)
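The rationale is that unix_table_lock guards the AF_UNIX socket hash table and is taken for writing on nearly every path (insert, remove, bind, autobind); the few read-side users hold it only for short lookups. With almost no read-side concurrency to exploit, the rwlock's extra cost is pure overhead, and a plain spinlock wins. Below is a minimal, hedged sketch of the same conversion pattern on a hypothetical table; the names demo_table, demo_table_lock and demo_insert() are illustrative only and are not part of this commit.

#include <linux/spinlock.h>
#include <linux/list.h>

#define DEMO_HASH_SIZE 256

static struct hlist_head demo_table[DEMO_HASH_SIZE];

/* Before the conversion this would have been DEFINE_RWLOCK(demo_table_lock),
 * with almost every caller taking write_lock(). */
static DEFINE_SPINLOCK(demo_table_lock);

/* Writer path: same shape as before, but spin_lock() replaces write_lock(). */
static void demo_insert(struct hlist_head *list, struct hlist_node *node)
{
	spin_lock(&demo_table_lock);
	hlist_add_head(node, list);
	spin_unlock(&demo_table_lock);
}

/* Former read_lock() users simply serialize now; their critical sections
 * are short lookups, so the loss of reader parallelism costs little. */
static struct hlist_node *demo_first(unsigned int hash)
{
	struct hlist_node *node;

	spin_lock(&demo_table_lock);
	node = demo_table[hash & (DEMO_HASH_SIZE - 1)].first;
	spin_unlock(&demo_table_lock);
	return node;
}

The commit itself, reconstructed as a unified diff: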
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -13,7 +13,7 @@ extern void unix_gc(void);
 #define UNIX_HASH_SIZE	256
 
 extern struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
-extern rwlock_t unix_table_lock;
+extern spinlock_t unix_table_lock;
 
 extern atomic_t unix_tot_inflight;
 
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -121,7 +121,7 @@
 int sysctl_unix_max_dgram_qlen = 10;
 
 struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
-DEFINE_RWLOCK(unix_table_lock);
+DEFINE_SPINLOCK(unix_table_lock);
 static atomic_t unix_nr_socks = ATOMIC_INIT(0);
 
 #define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])
@@ -130,7 +130,7 @@ static atomic_t unix_nr_socks = ATOMIC_INIT(0);
 
 /*
  *  SMP locking strategy:
- *    hash table is protected with rwlock unix_table_lock
+ *    hash table is protected with spinlock unix_table_lock
  *    each socket state is protected by separate rwlock.
  */
 
@@ -214,16 +214,16 @@ static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
 
 static inline void unix_remove_socket(struct sock *sk)
 {
-	write_lock(&unix_table_lock);
+	spin_lock(&unix_table_lock);
 	__unix_remove_socket(sk);
-	write_unlock(&unix_table_lock);
+	spin_unlock(&unix_table_lock);
 }
 
 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
 {
-	write_lock(&unix_table_lock);
+	spin_lock(&unix_table_lock);
 	__unix_insert_socket(list, sk);
-	write_unlock(&unix_table_lock);
+	spin_unlock(&unix_table_lock);
 }
 
 static struct sock *__unix_find_socket_byname(struct sockaddr_un *sunname,
@@ -250,11 +250,11 @@ static inline struct sock *unix_find_socket_byname(struct sockaddr_un *sunname,
 {
 	struct sock *s;
 
-	read_lock(&unix_table_lock);
+	spin_lock(&unix_table_lock);
 	s = __unix_find_socket_byname(sunname, len, type, hash);
 	if (s)
 		sock_hold(s);
-	read_unlock(&unix_table_lock);
+	spin_unlock(&unix_table_lock);
 	return s;
 }
 
@@ -263,7 +263,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
 	struct sock *s;
 	struct hlist_node *node;
 
-	read_lock(&unix_table_lock);
+	spin_lock(&unix_table_lock);
 	sk_for_each(s, node,
 		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
 		struct dentry *dentry = unix_sk(s)->dentry;
@@ -276,7 +276,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
 	}
 	s = NULL;
 found:
-	read_unlock(&unix_table_lock);
+	spin_unlock(&unix_table_lock);
 	return s;
 }
 
@@ -642,12 +642,12 @@ retry:
 	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
 	addr->hash = unix_hash_fold(csum_partial((void*)addr->name, addr->len, 0));
 
-	write_lock(&unix_table_lock);
+	spin_lock(&unix_table_lock);
 	ordernum = (ordernum+1)&0xFFFFF;
 
 	if (__unix_find_socket_byname(addr->name, addr->len, sock->type,
 				      addr->hash)) {
-		write_unlock(&unix_table_lock);
+		spin_unlock(&unix_table_lock);
 		/* Sanity yield. It is unusual case, but yet... */
 		if (!(ordernum&0xFF))
 			yield();
@@ -658,7 +658,7 @@ retry:
 	__unix_remove_socket(sk);
 	u->addr = addr;
 	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
-	write_unlock(&unix_table_lock);
+	spin_unlock(&unix_table_lock);
 	err = 0;
 
 out:	up(&u->readsem);
@@ -791,7 +791,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		addr->hash = UNIX_HASH_SIZE;
 	}
 
-	write_lock(&unix_table_lock);
+	spin_lock(&unix_table_lock);
 
 	if (!sunaddr->sun_path[0]) {
 		err = -EADDRINUSE;
@@ -814,7 +814,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	__unix_insert_socket(list, sk);
 
 out_unlock:
-	write_unlock(&unix_table_lock);
+	spin_unlock(&unix_table_lock);
 out_up:
 	up(&u->readsem);
 out:
@@ -1916,7 +1916,7 @@ static struct sock *unix_seq_idx(int *iter, loff_t pos)
 
 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	read_lock(&unix_table_lock);
+	spin_lock(&unix_table_lock);
 	return *pos ? unix_seq_idx(seq->private, *pos - 1) : ((void *) 1);
 }
 
@@ -1931,7 +1931,7 @@ static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
 static void unix_seq_stop(struct seq_file *seq, void *v)
 {
-	read_unlock(&unix_table_lock);
+	spin_unlock(&unix_table_lock);
 }
 
 static int unix_seq_show(struct seq_file *seq, void *v)
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -182,7 +182,7 @@ void unix_gc(void)
 	if (down_trylock(&unix_gc_sem))
 		return;
 
-	read_lock(&unix_table_lock);
+	spin_lock(&unix_table_lock);
 
 	forall_unix_sockets(i, s)
 	{
@@ -301,7 +301,7 @@ void unix_gc(void)
 		}
 		u->gc_tree = GC_ORPHAN;
 	}
-	read_unlock(&unix_table_lock);
+	spin_unlock(&unix_table_lock);
 
 	/*
 	 *	Here we are. Hitlist is filled. Die.