raw: complete rcu conversion
raw_diag_dump() can use rcu_read_lock() instead of read_lock().

Now that the hashinfo lock is only used from process context, in write mode only, we can convert it to a spinlock, and we no longer need to block BH.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20220620100509.3493504-1-eric.dumazet@gmail.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit af185d8c76 (parent f9aefd6b2a), committed by Paolo Abeni
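For context, the locking model this patch completes looks like the sketch below. It is illustrative kernel-style code, not the tree contents: the demo_* names and DEMO_HTABLE_SIZE are made up for the example. Writers (hash/unhash) run in process context only and serialize with a plain spinlock, while insertion and removal use the _rcu list helpers so lockless readers stay safe:

	/* Illustrative sketch, hypothetical demo_* names. */
	#include <linux/spinlock.h>
	#include <linux/rculist_nulls.h>
	#include <net/sock.h>

	#define DEMO_HTABLE_SIZE 256			/* stand-in for RAW_HTABLE_SIZE */

	struct demo_hashinfo {
		spinlock_t lock;			/* taken by writers only, process context */
		struct hlist_nulls_head ht[DEMO_HTABLE_SIZE];
	};

	static int demo_hash_sk(struct demo_hashinfo *h, struct sock *sk, unsigned int slot)
	{
		spin_lock(&h->lock);			/* no _bh suffix: never taken from softirq */
		__sk_nulls_add_node_rcu(sk, &h->ht[slot % DEMO_HTABLE_SIZE]);
		sock_set_flag(sk, SOCK_RCU_FREE);	/* sk is freed only after an RCU grace period */
		spin_unlock(&h->lock);
		return 0;
	}

	static void demo_unhash_sk(struct demo_hashinfo *h, struct sock *sk)
	{
		spin_lock(&h->lock);
		__sk_nulls_del_node_init_rcu(sk);	/* removal stays safe for concurrent RCU readers */
		spin_unlock(&h->lock);
	}

The actual hunks of the patch follow.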
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -32,7 +32,7 @@ int raw_rcv(struct sock *, struct sk_buff *);
 #define RAW_HTABLE_SIZE	MAX_INET_PROTOS
 
 struct raw_hashinfo {
-	rwlock_t lock;
+	spinlock_t lock;
 	struct hlist_nulls_head ht[RAW_HTABLE_SIZE];
 };
 
@@ -40,7 +40,7 @@ static inline void raw_hashinfo_init(struct raw_hashinfo *hashinfo)
 {
 	int i;
 
-	rwlock_init(&hashinfo->lock);
+	spin_lock_init(&hashinfo->lock);
 	for (i = 0; i < RAW_HTABLE_SIZE; i++)
 		INIT_HLIST_NULLS_HEAD(&hashinfo->ht[i], i);
 }
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -95,10 +95,10 @@ int raw_hash_sk(struct sock *sk)
 
 	hlist = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)];
 
-	write_lock_bh(&h->lock);
+	spin_lock(&h->lock);
 	__sk_nulls_add_node_rcu(sk, hlist);
 	sock_set_flag(sk, SOCK_RCU_FREE);
-	write_unlock_bh(&h->lock);
+	spin_unlock(&h->lock);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 
 	return 0;
@@ -109,10 +109,10 @@ void raw_unhash_sk(struct sock *sk)
 {
 	struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
 
-	write_lock_bh(&h->lock);
+	spin_lock(&h->lock);
 	if (__sk_nulls_del_node_init_rcu(sk))
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
-	write_unlock_bh(&h->lock);
+	spin_unlock(&h->lock);
 }
 EXPORT_SYMBOL_GPL(raw_unhash_sk);
 
--- a/net/ipv4/raw_diag.c
+++ b/net/ipv4/raw_diag.c
@@ -156,7 +156,7 @@ static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
 	s_slot = cb->args[0];
 	num = s_num = cb->args[1];
 
-	read_lock(&hashinfo->lock);
+	rcu_read_lock();
 	for (slot = s_slot; slot < RAW_HTABLE_SIZE; s_num = 0, slot++) {
 		num = 0;
 
@@ -184,7 +184,7 @@ next:
 	}
 
 out_unlock:
-	read_unlock(&hashinfo->lock);
+	rcu_read_unlock();
 
 	cb->args[0] = slot;
 	cb->args[1] = num;
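The reader side is the mirror image of the writer sketch above: with SOCK_RCU_FREE set at hash time, a dumper can walk the nulls lists under rcu_read_lock() alone, which is what the raw_diag_dump() hunks switch to. A hypothetical sketch, reusing the demo_hashinfo example from earlier (again illustrative, not the tree contents):

	static void demo_dump(struct demo_hashinfo *h)
	{
		struct hlist_nulls_node *node;
		struct sock *sk;
		unsigned int slot;

		rcu_read_lock();			/* replaces read_lock(&h->lock) */
		for (slot = 0; slot < DEMO_HTABLE_SIZE; slot++) {
			sk_nulls_for_each_rcu(sk, node, &h->ht[slot]) {
				/* sk may be unhashed concurrently, but
				 * SOCK_RCU_FREE guarantees it is not freed
				 * while we are in this RCU read-side section.
				 */
			}
		}
		rcu_read_unlock();
	}

Because readers never touch h->lock, the writers no longer have any softirq or reader exclusion to worry about, which is why the plain spin_lock()/spin_unlock() pair is sufficient.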