netfilter: conntrack: simplify early_drop
We don't need to acquire the bucket lock during early drop; we can use lockless traversal, just like ____nf_conntrack_find.

The timer deletion serves as the synchronization point: if another CPU attempts to evict the same entry, only one will succeed with the timer deletion.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 8786a9716d
commit 242922a027
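The synchronization claim above rests on del_timer()'s contract: it returns true only for the one caller that actually deactivates a pending timer, so when several CPUs race to evict the same entry, at most one of them proceeds to nf_ct_delete(). Below is a minimal userspace model of that "claim" step — a sketch only, with C11 atomics standing in for the kernel timer API and all names hypothetical:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of a conntrack entry: timer_pending stands in for the armed
 * timeout timer of a real nf_conn. */
struct entry {
	atomic_int timer_pending;	/* 1 while the timeout is armed */
};

/* Hypothetical stand-in for del_timer(): of any number of concurrent
 * callers, at most one flips 1 -> 0 and thereby owns the teardown. */
static bool try_steal_timer(struct entry *e)
{
	int armed = 1;

	return atomic_compare_exchange_strong(&e->timer_pending, &armed, 0);
}

int main(void)
{
	struct entry e = { .timer_pending = 1 };

	/* Two "CPUs" racing to evict the same entry: only the first
	 * claim succeeds; the second sees the timer already gone. */
	printf("cpu0 wins eviction: %d\n", try_steal_timer(&e));	/* 1 */
	printf("cpu1 wins eviction: %d\n", try_steal_timer(&e));	/* 0 */
	return 0;
}
```

In the patch itself this exclusivity comes for free from the timer subsystem, which is why the eviction path no longer needs the bucket lock.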
@@ -301,6 +301,7 @@ void nf_ct_tmpl_free(struct nf_conn *tmpl);
 
 #define NF_CT_STAT_INC(net, count)	  __this_cpu_inc((net)->ct.stat->count)
 #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
+#define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v))
 
 #define MODULE_ALIAS_NFCT_HELPER(helper) \
 	MODULE_ALIAS("nfct-helper-" helper)
@@ -834,67 +834,66 @@ EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
 
 /* There's a small race here where we may free a just-assured
    connection. Too bad: we're in trouble anyway. */
+static unsigned int early_drop_list(struct net *net,
+				    struct hlist_nulls_head *head)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_node *n;
+	unsigned int drops = 0;
+	struct nf_conn *tmp;
+
+	hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
+		tmp = nf_ct_tuplehash_to_ctrack(h);
+
+		if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
+		    !net_eq(nf_ct_net(tmp), net) ||
+		    nf_ct_is_dying(tmp))
+			continue;
+
+		if (!atomic_inc_not_zero(&tmp->ct_general.use))
+			continue;
+
+		/* kill only if still in same netns -- might have moved due to
+		 * SLAB_DESTROY_BY_RCU rules.
+		 *
+		 * We steal the timer reference. If that fails timer has
+		 * already fired or someone else deleted it. Just drop ref
+		 * and move to next entry.
+		 */
+		if (net_eq(nf_ct_net(tmp), net) &&
+		    nf_ct_is_confirmed(tmp) &&
+		    del_timer(&tmp->timeout) &&
+		    nf_ct_delete(tmp, 0, 0))
+			drops++;
+
+		nf_ct_put(tmp);
+	}
+
+	return drops;
+}
+
 static noinline int early_drop(struct net *net, unsigned int _hash)
 {
-	/* Use oldest entry, which is roughly LRU */
-	struct nf_conntrack_tuple_hash *h;
-	struct nf_conn *tmp;
-	struct hlist_nulls_node *n;
-	unsigned int i, hash, sequence;
-	struct nf_conn *ct = NULL;
-	spinlock_t *lockp;
-	bool ret = false;
-
-	i = 0;
-
-	local_bh_disable();
-restart:
-	sequence = read_seqcount_begin(&nf_conntrack_generation);
-	for (; i < NF_CT_EVICTION_RANGE; i++) {
-		hash = scale_hash(_hash++);
-		lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
-		nf_conntrack_lock(lockp);
-		if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
-			spin_unlock(lockp);
-			goto restart;
-		}
-		hlist_nulls_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash],
-					       hnnode) {
-			tmp = nf_ct_tuplehash_to_ctrack(h);
-
-			if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
-			    !net_eq(nf_ct_net(tmp), net) ||
-			    nf_ct_is_dying(tmp))
-				continue;
-
-			if (atomic_inc_not_zero(&tmp->ct_general.use)) {
-				ct = tmp;
-				break;
-			}
-		}
-
-		spin_unlock(lockp);
-		if (ct)
-			break;
-	}
-
-	local_bh_enable();
-
-	if (!ct)
-		return false;
-
-	/* kill only if in same netns -- might have moved due to
-	 * SLAB_DESTROY_BY_RCU rules
-	 */
-	if (net_eq(nf_ct_net(ct), net) && del_timer(&ct->timeout)) {
-		if (nf_ct_delete(ct, 0, 0)) {
-			NF_CT_STAT_INC_ATOMIC(net, early_drop);
-			ret = true;
-		}
-	}
-
-	nf_ct_put(ct);
-	return ret;
+	unsigned int i;
+
+	for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
+		struct hlist_nulls_head *ct_hash;
+		unsigned hash, sequence, drops;
+
+		do {
+			sequence = read_seqcount_begin(&nf_conntrack_generation);
+			hash = scale_hash(_hash++);
+			ct_hash = nf_conntrack_hash;
+		} while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+
+		drops = early_drop_list(net, &ct_hash[hash]);
+		if (drops) {
+			NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
+			return true;
+		}
+	}
+
+	return false;
 }
 
 static struct nf_conn *
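The other half of the scheme is the lockless traversal itself: an entry found without the bucket lock may already be dying, so early_drop_list() only takes it via atomic_inc_not_zero() and skips it otherwise. The sketch below models that inc-not-zero step in self-contained C11; struct and function names are hypothetical, while the kernel's actual call is atomic_inc_not_zero(&tmp->ct_general.use).

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of a refcounted object found by lockless traversal. */
struct obj {
	atomic_int use;		/* reference count; 0 means being freed */
};

/* Take a reference only if the count is still nonzero, mirroring
 * atomic_inc_not_zero(): a CAS loop that never resurrects a dead object. */
static bool get_unless_zero(struct obj *o)
{
	int old = atomic_load(&o->use);

	while (old != 0) {
		/* on failure, 'old' is reloaded and the zero check redone */
		if (atomic_compare_exchange_weak(&o->use, &old, old + 1))
			return true;
	}
	return false;	/* already dying: caller must skip this entry */
}

int main(void)
{
	struct obj live = { .use = 1 };
	struct obj dying = { .use = 0 };

	printf("live entry grabbed: %d\n", get_unless_zero(&live));	/* 1 */
	printf("dying entry grabbed: %d\n", get_unless_zero(&dying));	/* 0 */
	return 0;
}
```

Only after the reference is secured does early_drop_list() re-check the netns and confirmed status: under SLAB_DESTROY_BY_RCU the slab slot may have been reused for a different connection between the lookup and the refcount acquisition, so the kill conditions must be validated again before del_timer().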