Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for your net tree,
they are:

1) Fix compilation warning in xt_hashlimit on m68k 32-bits, from
   Geert Uytterhoeven.

2) Fix wrong timeout in set elements added from packet path via
   nft_dynset, from Anders K. Pedersen.

3) Remove obsolete nf_conntrack_events_retry_timeout sysctl
   documentation, from Nicolas Dichtel.

4) Ensure proper initialization of log flags via xt_NFLOG, from
   Liping Zhang.

5) Missing alias to autoload ipcomp, also from Liping Zhang.

6) Missing NFTA_HASH_OFFSET attribute validation, again from Liping.

7) Wrong integer type in the new nft_parse_u32_check() function,
   from Dan Carpenter.

8) Another wrong integer type declaration in nft_exthdr_init, also
   from Dan Carpenter.

9) Fix insufficient mode validation in nft_range.

10) Fix compilation warning in nft_range due to possible
    uninitialized value, from Arnd Bergmann.

11) Zero nf_hook_ops allocated via xt_hook_alloc() in x_tables to
    calm down kmemcheck, from Florian Westphal.

12) Schedule gc_worker() to run again if GC_MAX_EVICTS quota is
    reached, from Nicolas Dichtel.

13) Fix nf_queue() after conversion to single-linked hook list,
    related to incorrect bypass flag handling and incorrect hook
    point of reinjection.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
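For context on fix 2: the nft_dynset problem is a unit mismatch — userspace passes the element timeout in milliseconds over netlink, while the kernel timer wheel counts in jiffies (HZ ticks per second). The following is a minimal userspace sketch of that mismatch, not kernel code; the HZ value and the simplified msecs_to_jiffies() helper below are illustrative stand-ins, not the kernel's implementation.

/* Sketch of netfilter fix 2 (nft_dynset timeouts): storing a raw
 * millisecond value as if it were jiffies gives the wrong expiry
 * whenever HZ != 1000.  HZ here is an assumed example tick rate. */
#include <stdio.h>
#include <stdint.h>

#define HZ 250	/* assumed tick rate for the example */

static uint64_t msecs_to_jiffies(uint64_t ms)
{
	return (ms * HZ + 999) / 1000;	/* round up so short timeouts never become zero ticks */
}

int main(void)
{
	uint64_t timeout_ms = 30000;	/* userspace requested 30 seconds */

	uint64_t unconverted = timeout_ms;			/* old behaviour: ms stored as jiffies */
	uint64_t converted   = msecs_to_jiffies(timeout_ms);	/* fixed behaviour */

	printf("stored without conversion: %llu jiffies = %llu s\n",
	       (unsigned long long)unconverted,
	       (unsigned long long)(unconverted / HZ));
	printf("converted to jiffies:      %llu jiffies = %llu s\n",
	       (unsigned long long)converted,
	       (unsigned long long)(converted / HZ));
	return 0;
}

With HZ=250, the unconverted value would expire after 120 seconds instead of the requested 30, which is why the patch converts with msecs_to_jiffies() on input and jiffies_to_msecs() on dump.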
commit 8dbad1a811
@@ -33,24 +33,6 @@ nf_conntrack_events - BOOLEAN
 	If this option is enabled, the connection tracking code will
 	provide userspace with connection tracking events via ctnetlink.
 
-nf_conntrack_events_retry_timeout - INTEGER (seconds)
-	default 15
-
-	This option is only relevant when "reliable connection tracking
-	events" are used. Normally, ctnetlink is "lossy", that is,
-	events are normally dropped when userspace listeners can't keep up.
-
-	Userspace can request "reliable event mode". When this mode is
-	active, the conntrack will only be destroyed after the event was
-	delivered. If event delivery fails, the kernel periodically
-	re-tries to send the event to userspace.
-
-	This is the maximum interval the kernel should use when re-trying
-	to deliver the destroy event.
-
-	A higher number means there will be fewer delivery retries and it
-	will take longer for a backlog to be processed.
-
 nf_conntrack_expect_max - INTEGER
 	Maximum size of expectation table. Default value is
 	nf_conntrack_buckets / 256. Minimum is 1.
@@ -361,16 +361,9 @@ next_hook:
 		if (ret == 0)
 			ret = -EPERM;
 	} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
-		int err;
-
-		RCU_INIT_POINTER(state->hook_entries, entry);
-		err = nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
-		if (err < 0) {
-			if (err == -ESRCH &&
-			    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
-				goto next_hook;
-			kfree_skb(skb);
-		}
+		ret = nf_queue(skb, state, &entry, verdict);
+		if (ret == 1 && entry)
+			goto next_hook;
 	}
 	return ret;
 }
@@ -983,7 +983,7 @@ static void gc_worker(struct work_struct *work)
 		return;
 
 	ratio = scanned ? expired_count * 100 / scanned : 0;
-	if (ratio >= 90)
+	if (ratio >= 90 || expired_count == GC_MAX_EVICTS)
 		next_run = 0;
 
 	gc_work->last_bucket = i;
@@ -18,7 +18,7 @@ unsigned int nf_iterate(struct sk_buff *skb, struct nf_hook_state *state,
 
 /* nf_queue.c */
 int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
-	     unsigned int queuenum);
+	     struct nf_hook_entry **entryp, unsigned int verdict);
 void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry);
 int __init netfilter_queue_init(void);
 
@@ -107,13 +107,8 @@ void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry)
 	rcu_read_unlock();
 }
 
-/*
- * Any packet that leaves via this function must come back
- * through nf_reinject().
- */
-int nf_queue(struct sk_buff *skb,
-	     struct nf_hook_state *state,
-	     unsigned int queuenum)
+static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
+		      unsigned int queuenum)
 {
 	int status = -ENOENT;
 	struct nf_queue_entry *entry = NULL;
@@ -161,6 +156,27 @@ err:
 	return status;
 }
 
+/* Packets leaving via this function must come back through nf_reinject(). */
+int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
+	     struct nf_hook_entry **entryp, unsigned int verdict)
+{
+	struct nf_hook_entry *entry = *entryp;
+	int ret;
+
+	RCU_INIT_POINTER(state->hook_entries, entry);
+	ret = __nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
+	if (ret < 0) {
+		if (ret == -ESRCH &&
+		    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) {
+			*entryp = rcu_dereference(entry->next);
+			return 1;
+		}
+		kfree_skb(skb);
+	}
+
+	return 0;
+}
+
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 {
 	struct nf_hook_entry *hook_entry;
@@ -187,26 +203,26 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 	entry->state.thresh = INT_MIN;
 
 	if (verdict == NF_ACCEPT) {
-	next_hook:
-		verdict = nf_iterate(skb, &entry->state, &hook_entry);
+		hook_entry = rcu_dereference(hook_entry->next);
+		if (hook_entry)
+next_hook:
+			verdict = nf_iterate(skb, &entry->state, &hook_entry);
 	}
 
 	switch (verdict & NF_VERDICT_MASK) {
 	case NF_ACCEPT:
 	case NF_STOP:
+okfn:
 		local_bh_disable();
 		entry->state.okfn(entry->state.net, entry->state.sk, skb);
 		local_bh_enable();
 		break;
 	case NF_QUEUE:
-		RCU_INIT_POINTER(entry->state.hook_entries, hook_entry);
-		err = nf_queue(skb, &entry->state,
-			       verdict >> NF_VERDICT_QBITS);
-		if (err < 0) {
-			if (err == -ESRCH &&
-			    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+		err = nf_queue(skb, &entry->state, &hook_entry, verdict);
+		if (err == 1) {
+			if (hook_entry)
 				goto next_hook;
 			kfree_skb(skb);
+			goto okfn;
 		}
 		break;
 	case NF_STOLEN:
@@ -4423,7 +4423,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
  */
 unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
 {
-	int val;
+	u32 val;
 
 	val = ntohl(nla_get_be32(attr));
 	if (val > max)
@@ -158,7 +158,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
 	if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
 		if (!(set->flags & NFT_SET_TIMEOUT))
 			return -EINVAL;
-		timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT]));
+		timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
+						tb[NFTA_DYNSET_TIMEOUT])));
 	}
 
 	priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
@@ -246,7 +247,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
 		goto nla_put_failure;
 	if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
 		goto nla_put_failure;
-	if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout),
+	if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
+			 cpu_to_be64(jiffies_to_msecs(priv->timeout)),
 			 NFTA_DYNSET_PAD))
 		goto nla_put_failure;
 	if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
@@ -59,7 +59,8 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
 			   const struct nlattr * const tb[])
 {
 	struct nft_exthdr *priv = nft_expr_priv(expr);
-	u32 offset, len, err;
+	u32 offset, len;
+	int err;
 
 	if (tb[NFTA_EXTHDR_DREG] == NULL ||
 	    tb[NFTA_EXTHDR_TYPE] == NULL ||
@@ -44,6 +44,7 @@ static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = {
 	[NFTA_HASH_LEN]		= { .type = NLA_U32 },
 	[NFTA_HASH_MODULUS]	= { .type = NLA_U32 },
 	[NFTA_HASH_SEED]	= { .type = NLA_U32 },
+	[NFTA_HASH_OFFSET]	= { .type = NLA_U32 },
 };
 
 static int nft_hash_init(const struct nft_ctx *ctx,
@@ -28,22 +28,20 @@ static void nft_range_eval(const struct nft_expr *expr,
 			 const struct nft_pktinfo *pkt)
 {
 	const struct nft_range_expr *priv = nft_expr_priv(expr);
-	bool mismatch;
 	int d1, d2;
 
 	d1 = memcmp(&regs->data[priv->sreg], &priv->data_from, priv->len);
 	d2 = memcmp(&regs->data[priv->sreg], &priv->data_to, priv->len);
 	switch (priv->op) {
 	case NFT_RANGE_EQ:
-		mismatch = (d1 < 0 || d2 > 0);
+		if (d1 < 0 || d2 > 0)
+			regs->verdict.code = NFT_BREAK;
 		break;
 	case NFT_RANGE_NEQ:
-		mismatch = (d1 >= 0 && d2 <= 0);
+		if (d1 >= 0 && d2 <= 0)
+			regs->verdict.code = NFT_BREAK;
 		break;
 	}
-
-	if (mismatch)
-		regs->verdict.code = NFT_BREAK;
 }
 
 static const struct nla_policy nft_range_policy[NFTA_RANGE_MAX + 1] = {
@@ -59,6 +57,7 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
 	struct nft_range_expr *priv = nft_expr_priv(expr);
 	struct nft_data_desc desc_from, desc_to;
 	int err;
+	u32 op;
 
 	err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from),
 			    &desc_from, tb[NFTA_RANGE_FROM_DATA]);
@@ -80,7 +79,20 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
 	if (err < 0)
 		goto err2;
 
-	priv->op = ntohl(nla_get_be32(tb[NFTA_RANGE_OP]));
+	err = nft_parse_u32_check(tb[NFTA_RANGE_OP], U8_MAX, &op);
+	if (err < 0)
+		goto err2;
+
+	switch (op) {
+	case NFT_RANGE_EQ:
+	case NFT_RANGE_NEQ:
+		break;
+	default:
+		err = -EINVAL;
+		goto err2;
+	}
+
+	priv->op = op;
 	priv->len = desc_from.len;
 	return 0;
 err2:
@@ -1513,7 +1513,7 @@ xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
 	if (!num_hooks)
 		return ERR_PTR(-EINVAL);
 
-	ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
+	ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
 	if (ops == NULL)
 		return ERR_PTR(-ENOMEM);
 
@@ -32,6 +32,7 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
 	li.u.ulog.copy_len = info->len;
 	li.u.ulog.group = info->group;
 	li.u.ulog.qthreshold = info->threshold;
+	li.u.ulog.flags = 0;
 
 	if (info->flags & XT_NFLOG_F_COPY_LEN)
 		li.u.ulog.flags |= NF_LOG_F_COPY_LEN;
@@ -431,7 +431,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
    CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
 */
 #define MAX_CPJ_v1 (0xFFFFFFFF / (HZ*60*60*24))
-#define MAX_CPJ (0xFFFFFFFFFFFFFFFF / (HZ*60*60*24))
+#define MAX_CPJ (0xFFFFFFFFFFFFFFFFULL / (HZ*60*60*24))
 
 /* Repeated shift and or gives us all 1s, final shift and add 1 gives
  * us the power of 2 below the theoretical max, so GCC simply does a
@@ -473,7 +473,7 @@ static u64 user2credits(u64 user, int revision)
 		return div64_u64(user * HZ * CREDITS_PER_JIFFY_v1,
 				 XT_HASHLIMIT_SCALE);
 	} else {
-		if (user > 0xFFFFFFFFFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
+		if (user > 0xFFFFFFFFFFFFFFFFULL / (HZ*CREDITS_PER_JIFFY))
 			return div64_u64(user, XT_HASHLIMIT_SCALE_v2)
 				* HZ * CREDITS_PER_JIFFY;
@@ -26,6 +26,8 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Fan Du <fan.du@windriver.com>");
 MODULE_DESCRIPTION("Xtables: IPv4/6 IPsec-IPComp SPI match");
+MODULE_ALIAS("ipt_ipcomp");
+MODULE_ALIAS("ip6t_ipcomp");
 
 /* Returns 1 if the spi is matched by the range, 0 otherwise */
 static inline bool