Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

1) Move existing bridge packet reject infra to nf_reject_{ipv4,ipv6}.c
   from Jose M. Guisado.

2) Consolidate nft_reject_inet initialization and dump, also from Jose.

3) Add the netdev reject action, from Jose; a short usage sketch of the new
   reject skb helpers follows the changed-files summary below.

4) Allow combining the exist flag with the destroy command in ipset,
   from Jozsef Kadlecsik.

5) Expose bucket size parameter for hashtables, also from Jozsef.

6) Expose the init value for reproducible ipset listings, from Jozsef.

7) Use __printf attribute in nft_request_module, from Andrew Lunn.

8) Allow using reject from the inet ingress chain.

* git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next:
  netfilter: nft_reject_inet: allow to use reject from inet ingress
  netfilter: nftables: Add __printf() attribute
  netfilter: ipset: Expose the initval hash parameter to userspace
  netfilter: ipset: Add bucketsize parameter to all hash types
  netfilter: ipset: Support the -exist flag with the destroy command
  netfilter: nft_reject: add reject verdict support for netdev
  netfilter: nft_reject: unify reject init and dump into nft_reject
  netfilter: nf_reject: add reject skbuff creation helpers
====================

Link: https://lore.kernel.org/r/20201104141149.30082-1-pablo@netfilter.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Committed by Jakub Kicinski on 2020-11-04 18:05:56 -08:00, commit b65ca4c388.
28 changed files with 615 additions and 355 deletions.
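
As a usage note for items 1) and 3) in the summary above: the new
nf_reject_skb_v4_*() / nf_reject_skb_v6_*() helpers only build the reply skb;
the caller decides how to deliver it. A minimal sketch, closely following the
nft_reject_netdev code added by this series (error paths trimmed; the function
name here is illustrative only):

/* Build an ICMP unreachable reply for an IPv4 packet seen on a netdev-family
 * (ingress) hook and send it back out of the ingress device. */
static void example_send_v4_unreach(struct net *net, struct sk_buff *oldskb,
				    const struct net_device *dev, int hook,
				    u8 code)
{
	struct sk_buff *nskb;

	/* Validates the original packet, allocates and fills the reply;
	 * returns NULL on any failure. */
	nskb = nf_reject_skb_v4_unreach(net, oldskb, dev, hook, code);
	if (!nskb)
		return;

	/* The helper does not transmit: the caller prepends an Ethernet header
	 * with the addresses swapped and queues the reply itself, as the netdev
	 * reject expression does; the bridge version uses br_forward() instead. */
	dev_hard_header(nskb, nskb->dev, ntohs(oldskb->protocol),
			eth_hdr(oldskb)->h_source, eth_hdr(oldskb)->h_dest,
			nskb->len);
	dev_queue_xmit(nskb);
}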


@ -198,6 +198,9 @@ struct ip_set_region {
u32 elements; /* Number of elements vs timeout */
};
/* The max revision number supported by any set type + 1 */
#define IPSET_REVISION_MAX 9
/* The core set type structure */
struct ip_set_type {
struct list_head list;
@ -215,6 +218,8 @@ struct ip_set_type {
u8 family;
/* Type revisions */
u8 revision_min, revision_max;
/* Revision-specific supported (create) flags */
u8 create_flags[IPSET_REVISION_MAX+1];
/* Set features to control swapping */
u16 features;
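
The new create_flags[] array is indexed by type revision, so a set type can
advertise create-time features only for the revisions that understand them;
ip_set_create() then ORs the matching entry into set->flags. A minimal sketch
of the pattern (hypothetical type, most fields omitted), mirroring what the
hash types do later in this series:

static struct ip_set_type example_hash_type __read_mostly = {
	.name		= "hash:example",	/* hypothetical type name */
	.revision_min	= 0,
	.revision_max	= 4,	/* newest revision adds bucketsize/initval */
	/* Only sets created with revision 4 get IPSET_CREATE_FLAG_BUCKETSIZE
	 * ORed into set->flags; sets of older revisions keep the old behaviour. */
	.create_flags[4] = IPSET_CREATE_FLAG_BUCKETSIZE,
	/* .family, .create, .create_policy, ... as usual */
};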


@ -18,4 +18,14 @@ struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
const struct tcphdr *oth);
struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook, u8 code);
struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook);
#endif /* _IPV4_NF_REJECT_H */


@ -20,4 +20,13 @@ void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
const struct tcphdr *oth, unsigned int otcplen);
struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook);
struct sk_buff *nf_reject_skb_v6_unreach(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook, u8 code);
#endif /* _IPV6_NF_REJECT_H */


@ -92,11 +92,11 @@ enum {
/* Reserve empty slots */
IPSET_ATTR_CADT_MAX = 16,
/* Create-only specific attributes */
IPSET_ATTR_GC,
IPSET_ATTR_INITVAL, /* was unused IPSET_ATTR_GC */
IPSET_ATTR_HASHSIZE,
IPSET_ATTR_MAXELEM,
IPSET_ATTR_NETMASK,
IPSET_ATTR_PROBES,
IPSET_ATTR_BUCKETSIZE, /* was unused IPSET_ATTR_PROBES */
IPSET_ATTR_RESIZE,
IPSET_ATTR_SIZE,
/* Kernel-only */
@ -214,6 +214,8 @@ enum ipset_cadt_flags {
enum ipset_create_flags {
IPSET_CREATE_FLAG_BIT_FORCEADD = 0,
IPSET_CREATE_FLAG_FORCEADD = (1 << IPSET_CREATE_FLAG_BIT_FORCEADD),
IPSET_CREATE_FLAG_BIT_BUCKETSIZE = 1,
IPSET_CREATE_FLAG_BUCKETSIZE = (1 << IPSET_CREATE_FLAG_BIT_BUCKETSIZE),
IPSET_CREATE_FLAG_BIT_MAX = 7,
};
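
A note on the two repurposed attributes above: IPSET_ATTR_INITVAL and
IPSET_ATTR_BUCKETSIZE take over enum slots that were defined but unused
(IPSET_ATTR_GC and IPSET_ATTR_PROBES), so no attribute number on the netlink
wire shifts. A small compile-time sketch of that invariant, assuming the
post-patch uapi header is installed:

#include <linux/netfilter/ipset/ip_set.h>

/* IPSET_ATTR_INITVAL sits in the first create-only slot after
 * IPSET_ATTR_CADT_MAX (formerly IPSET_ATTR_GC), IPSET_ATTR_BUCKETSIZE in the
 * fifth (formerly IPSET_ATTR_PROBES). */
_Static_assert(IPSET_ATTR_INITVAL == IPSET_ATTR_CADT_MAX + 1,
	       "INITVAL reuses the old GC slot");
_Static_assert(IPSET_ATTR_BUCKETSIZE == IPSET_ATTR_CADT_MAX + 5,
	       "BUCKETSIZE reuses the old PROBES slot");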


@ -17,7 +17,7 @@ config NFT_BRIDGE_META
config NFT_BRIDGE_REJECT
tristate "Netfilter nf_tables bridge reject support"
depends on NFT_REJECT && NFT_REJECT_IPV4 && NFT_REJECT_IPV6
depends on NFT_REJECT
help
Add support to reject packets.


@ -39,30 +39,6 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
}
}
static int nft_bridge_iphdr_validate(struct sk_buff *skb)
{
struct iphdr *iph;
u32 len;
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
return 0;
iph = ip_hdr(skb);
if (iph->ihl < 5 || iph->version != 4)
return 0;
len = ntohs(iph->tot_len);
if (skb->len < len)
return 0;
else if (len < (iph->ihl*4))
return 0;
if (!pskb_may_pull(skb, iph->ihl*4))
return 0;
return 1;
}
/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
* or the bridge port (NF_BRIDGE PREROUTING).
*/
@ -72,29 +48,11 @@ static void nft_reject_br_send_v4_tcp_reset(struct net *net,
int hook)
{
struct sk_buff *nskb;
struct iphdr *niph;
const struct tcphdr *oth;
struct tcphdr _oth;
if (!nft_bridge_iphdr_validate(oldskb))
return;
oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
if (!oth)
return;
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
LL_MAX_HEADER, GFP_ATOMIC);
nskb = nf_reject_skb_v4_tcp_reset(net, oldskb, dev, hook);
if (!nskb)
return;
skb_reserve(nskb, LL_MAX_HEADER);
niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
net->ipv4.sysctl_ip_default_ttl);
nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
niph->tot_len = htons(nskb->len);
ip_send_check(niph);
nft_reject_br_push_etherhdr(oldskb, nskb);
br_forward(br_port_get_rcu(dev), nskb, false, true);
@ -106,139 +64,32 @@ static void nft_reject_br_send_v4_unreach(struct net *net,
int hook, u8 code)
{
struct sk_buff *nskb;
struct iphdr *niph;
struct icmphdr *icmph;
unsigned int len;
__wsum csum;
u8 proto;
if (!nft_bridge_iphdr_validate(oldskb))
return;
/* IP header checks: fragment. */
if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
return;
/* RFC says return as much as we can without exceeding 576 bytes. */
len = min_t(unsigned int, 536, oldskb->len);
if (!pskb_may_pull(oldskb, len))
return;
if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
return;
proto = ip_hdr(oldskb)->protocol;
if (!skb_csum_unnecessary(oldskb) &&
nf_reject_verify_csum(proto) &&
nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
return;
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
LL_MAX_HEADER + len, GFP_ATOMIC);
nskb = nf_reject_skb_v4_unreach(net, oldskb, dev, hook, code);
if (!nskb)
return;
skb_reserve(nskb, LL_MAX_HEADER);
niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
net->ipv4.sysctl_ip_default_ttl);
skb_reset_transport_header(nskb);
icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
icmph->type = ICMP_DEST_UNREACH;
icmph->code = code;
skb_put_data(nskb, skb_network_header(oldskb), len);
csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
icmph->checksum = csum_fold(csum);
niph->tot_len = htons(nskb->len);
ip_send_check(niph);
nft_reject_br_push_etherhdr(oldskb, nskb);
br_forward(br_port_get_rcu(dev), nskb, false, true);
}
static int nft_bridge_ip6hdr_validate(struct sk_buff *skb)
{
struct ipv6hdr *hdr;
u32 pkt_len;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
return 0;
hdr = ipv6_hdr(skb);
if (hdr->version != 6)
return 0;
pkt_len = ntohs(hdr->payload_len);
if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
return 0;
return 1;
}
static void nft_reject_br_send_v6_tcp_reset(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook)
{
struct sk_buff *nskb;
const struct tcphdr *oth;
struct tcphdr _oth;
unsigned int otcplen;
struct ipv6hdr *nip6h;
if (!nft_bridge_ip6hdr_validate(oldskb))
return;
oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
if (!oth)
return;
nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
LL_MAX_HEADER, GFP_ATOMIC);
nskb = nf_reject_skb_v6_tcp_reset(net, oldskb, dev, hook);
if (!nskb)
return;
skb_reserve(nskb, LL_MAX_HEADER);
nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
net->ipv6.devconf_all->hop_limit);
nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
nft_reject_br_push_etherhdr(oldskb, nskb);
br_forward(br_port_get_rcu(dev), nskb, false, true);
}
static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
int thoff;
__be16 fo;
u8 proto = ip6h->nexthdr;
if (skb_csum_unnecessary(skb))
return true;
if (ip6h->payload_len &&
pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
return false;
ip6h = ipv6_hdr(skb);
thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
return false;
if (!nf_reject_verify_csum(proto))
return true;
return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
}
static void nft_reject_br_send_v6_unreach(struct net *net,
struct sk_buff *oldskb,
@ -246,49 +97,11 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
int hook, u8 code)
{
struct sk_buff *nskb;
struct ipv6hdr *nip6h;
struct icmp6hdr *icmp6h;
unsigned int len;
if (!nft_bridge_ip6hdr_validate(oldskb))
return;
/* Include "As much of invoking packet as possible without the ICMPv6
* packet exceeding the minimum IPv6 MTU" in the ICMP payload.
*/
len = min_t(unsigned int, 1220, oldskb->len);
if (!pskb_may_pull(oldskb, len))
return;
if (!reject6_br_csum_ok(oldskb, hook))
return;
nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
LL_MAX_HEADER + len, GFP_ATOMIC);
nskb = nf_reject_skb_v6_unreach(net, oldskb, dev, hook, code);
if (!nskb)
return;
skb_reserve(nskb, LL_MAX_HEADER);
nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
net->ipv6.devconf_all->hop_limit);
skb_reset_transport_header(nskb);
icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
icmp6h->icmp6_code = code;
skb_put_data(nskb, skb_network_header(oldskb), len);
nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
icmp6h->icmp6_cksum =
csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
nskb->len - sizeof(struct ipv6hdr),
IPPROTO_ICMPV6,
csum_partial(icmp6h,
nskb->len - sizeof(struct ipv6hdr),
0));
nft_reject_br_push_etherhdr(oldskb, nskb);
br_forward(br_port_get_rcu(dev), nskb, false, true);
@ -364,69 +177,13 @@ static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
(1 << NF_BR_LOCAL_IN));
}
static int nft_reject_bridge_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_reject *priv = nft_expr_priv(expr);
int icmp_code;
if (tb[NFTA_REJECT_TYPE] == NULL)
return -EINVAL;
priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
case NFT_REJECT_ICMPX_UNREACH:
if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
return -EINVAL;
icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
icmp_code > NFT_REJECT_ICMPX_MAX)
return -EINVAL;
priv->icmp_code = icmp_code;
break;
case NFT_REJECT_TCP_RST:
break;
default:
return -EINVAL;
}
return 0;
}
static int nft_reject_bridge_dump(struct sk_buff *skb,
const struct nft_expr *expr)
{
const struct nft_reject *priv = nft_expr_priv(expr);
if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
goto nla_put_failure;
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
case NFT_REJECT_ICMPX_UNREACH:
if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
goto nla_put_failure;
break;
default:
break;
}
return 0;
nla_put_failure:
return -1;
}
static struct nft_expr_type nft_reject_bridge_type;
static const struct nft_expr_ops nft_reject_bridge_ops = {
.type = &nft_reject_bridge_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
.eval = nft_reject_bridge_eval,
.init = nft_reject_bridge_init,
.dump = nft_reject_bridge_dump,
.init = nft_reject_init,
.dump = nft_reject_dump,
.validate = nft_reject_bridge_validate,
};


@ -12,6 +12,128 @@
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
static int nf_reject_iphdr_validate(struct sk_buff *skb)
{
struct iphdr *iph;
u32 len;
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
return 0;
iph = ip_hdr(skb);
if (iph->ihl < 5 || iph->version != 4)
return 0;
len = ntohs(iph->tot_len);
if (skb->len < len)
return 0;
else if (len < (iph->ihl*4))
return 0;
if (!pskb_may_pull(skb, iph->ihl*4))
return 0;
return 1;
}
struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook)
{
const struct tcphdr *oth;
struct sk_buff *nskb;
struct iphdr *niph;
struct tcphdr _oth;
if (!nf_reject_iphdr_validate(oldskb))
return NULL;
oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
if (!oth)
return NULL;
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
LL_MAX_HEADER, GFP_ATOMIC);
if (!nskb)
return NULL;
nskb->dev = (struct net_device *)dev;
skb_reserve(nskb, LL_MAX_HEADER);
niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
net->ipv4.sysctl_ip_default_ttl);
nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
niph->tot_len = htons(nskb->len);
ip_send_check(niph);
return nskb;
}
EXPORT_SYMBOL_GPL(nf_reject_skb_v4_tcp_reset);
struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook, u8 code)
{
struct sk_buff *nskb;
struct iphdr *niph;
struct icmphdr *icmph;
unsigned int len;
__wsum csum;
u8 proto;
if (!nf_reject_iphdr_validate(oldskb))
return NULL;
/* IP header checks: fragment. */
if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
return NULL;
/* RFC says return as much as we can without exceeding 576 bytes. */
len = min_t(unsigned int, 536, oldskb->len);
if (!pskb_may_pull(oldskb, len))
return NULL;
if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
return NULL;
proto = ip_hdr(oldskb)->protocol;
if (!skb_csum_unnecessary(oldskb) &&
nf_reject_verify_csum(proto) &&
nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
return NULL;
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
LL_MAX_HEADER + len, GFP_ATOMIC);
if (!nskb)
return NULL;
nskb->dev = (struct net_device *)dev;
skb_reserve(nskb, LL_MAX_HEADER);
niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
net->ipv4.sysctl_ip_default_ttl);
skb_reset_transport_header(nskb);
icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
icmph->type = ICMP_DEST_UNREACH;
icmph->code = code;
skb_put_data(nskb, skb_network_header(oldskb), len);
csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
icmph->checksum = csum_fold(csum);
niph->tot_len = htons(nskb->len);
ip_send_check(niph);
return nskb;
}
EXPORT_SYMBOL_GPL(nf_reject_skb_v4_unreach);
const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *_oth, int hook)
{
@ -124,7 +246,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
if (!oth)
return;
if (hook == NF_INET_PRE_ROUTING && nf_reject_fill_skb_dst(oldskb))
if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
nf_reject_fill_skb_dst(oldskb) < 0)
return;
if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@ -193,7 +316,8 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
if (iph->frag_off & htons(IP_OFFSET))
return;
if (hook == NF_INET_PRE_ROUTING && nf_reject_fill_skb_dst(skb_in))
if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
nf_reject_fill_skb_dst(skb_in) < 0)
return;
if (skb_csum_unnecessary(skb_in) || !nf_reject_verify_csum(proto)) {


@ -12,6 +12,140 @@
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
static bool nf_reject_v6_csum_ok(struct sk_buff *skb, int hook)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
int thoff;
__be16 fo;
u8 proto = ip6h->nexthdr;
if (skb_csum_unnecessary(skb))
return true;
if (ip6h->payload_len &&
pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
return false;
ip6h = ipv6_hdr(skb);
thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
return false;
if (!nf_reject_verify_csum(proto))
return true;
return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
}
static int nf_reject_ip6hdr_validate(struct sk_buff *skb)
{
struct ipv6hdr *hdr;
u32 pkt_len;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
return 0;
hdr = ipv6_hdr(skb);
if (hdr->version != 6)
return 0;
pkt_len = ntohs(hdr->payload_len);
if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
return 0;
return 1;
}
struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook)
{
struct sk_buff *nskb;
const struct tcphdr *oth;
struct tcphdr _oth;
unsigned int otcplen;
struct ipv6hdr *nip6h;
if (!nf_reject_ip6hdr_validate(oldskb))
return NULL;
oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
if (!oth)
return NULL;
nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
LL_MAX_HEADER, GFP_ATOMIC);
if (!nskb)
return NULL;
nskb->dev = (struct net_device *)dev;
skb_reserve(nskb, LL_MAX_HEADER);
nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
net->ipv6.devconf_all->hop_limit);
nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
return nskb;
}
EXPORT_SYMBOL_GPL(nf_reject_skb_v6_tcp_reset);
struct sk_buff *nf_reject_skb_v6_unreach(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook, u8 code)
{
struct sk_buff *nskb;
struct ipv6hdr *nip6h;
struct icmp6hdr *icmp6h;
unsigned int len;
if (!nf_reject_ip6hdr_validate(oldskb))
return NULL;
/* Include "As much of invoking packet as possible without the ICMPv6
* packet exceeding the minimum IPv6 MTU" in the ICMP payload.
*/
len = min_t(unsigned int, 1220, oldskb->len);
if (!pskb_may_pull(oldskb, len))
return NULL;
if (!nf_reject_v6_csum_ok(oldskb, hook))
return NULL;
nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
LL_MAX_HEADER + len, GFP_ATOMIC);
if (!nskb)
return NULL;
nskb->dev = (struct net_device *)dev;
skb_reserve(nskb, LL_MAX_HEADER);
nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
net->ipv6.devconf_all->hop_limit);
skb_reset_transport_header(nskb);
icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
icmp6h->icmp6_code = code;
skb_put_data(nskb, skb_network_header(oldskb), len);
nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
icmp6h->icmp6_cksum =
csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
nskb->len - sizeof(struct ipv6hdr),
IPPROTO_ICMPV6,
csum_partial(icmp6h,
nskb->len - sizeof(struct ipv6hdr),
0));
return nskb;
}
EXPORT_SYMBOL_GPL(nf_reject_skb_v6_unreach);
const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *otcph,
unsigned int *otcplen, int hook)
@ -170,7 +304,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
fl6.fl6_sport = otcph->dest;
fl6.fl6_dport = otcph->source;
if (hook == NF_INET_PRE_ROUTING) {
if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) {
nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
if (!dst)
return;
@ -268,7 +402,8 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
skb_in->dev = net->loopback_dev;
if (hooknum == NF_INET_PRE_ROUTING && nf_reject6_fill_skb_dst(skb_in))
if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) &&
nf_reject6_fill_skb_dst(skb_in) < 0)
return;
icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);


@ -682,6 +682,16 @@ config NFT_FIB_NETDEV
The lookup will be delegated to the IPv4 or IPv6 FIB depending
on the protocol of the packet.
config NFT_REJECT_NETDEV
depends on NFT_REJECT_IPV4
depends on NFT_REJECT_IPV6
tristate "Netfilter nf_tables netdev REJECT support"
help
This option enables the REJECT support from the netdev table.
The return packet generation will be delegated to the IPv4
or IPv6 ICMP or TCP RST implementation depending on the
protocol of the packet.
endif # NF_TABLES_NETDEV
endif # NF_TABLES


@ -101,6 +101,7 @@ obj-$(CONFIG_NFT_QUEUE) += nft_queue.o
obj-$(CONFIG_NFT_QUOTA) += nft_quota.o
obj-$(CONFIG_NFT_REJECT) += nft_reject.o
obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o
obj-$(CONFIG_NFT_REJECT_NETDEV) += nft_reject_netdev.o
obj-$(CONFIG_NFT_TUNNEL) += nft_tunnel.o
obj-$(CONFIG_NFT_COUNTER) += nft_counter.o
obj-$(CONFIG_NFT_LOG) += nft_log.o


@ -1109,6 +1109,8 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
ret = -IPSET_ERR_PROTOCOL;
goto put_out;
}
/* Set create flags depending on the type revision */
set->flags |= set->type->create_flags[revision];
ret = set->type->create(net, set, tb, flags);
if (ret != 0)
@ -1239,10 +1241,12 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl,
/* Modified by ip_set_destroy() only, which is serialized */
inst->is_destroyed = false;
} else {
u32 flags = flag_exist(nlh);
s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
&i);
if (!s) {
ret = -ENOENT;
if (!(flags & IPSET_FLAG_EXIST))
ret = -ENOENT;
goto out;
} else if (s->ref || s->ref_netlink) {
ret = -IPSET_ERR_BUSY;


@ -37,18 +37,18 @@
*/
/* Number of elements to store in an initial array block */
#define AHASH_INIT_SIZE 4
#define AHASH_INIT_SIZE 2
/* Max number of elements to store in an array block */
#define AHASH_MAX_SIZE (3 * AHASH_INIT_SIZE)
#define AHASH_MAX_SIZE (6 * AHASH_INIT_SIZE)
/* Max muber of elements in the array block when tuned */
#define AHASH_MAX_TUNED 64
#define AHASH_MAX(h) ((h)->bucketsize)
/* Max number of elements can be tuned */
#ifdef IP_SET_HASH_WITH_MULTI
#define AHASH_MAX(h) ((h)->ahash_max)
static u8
tune_ahash_max(u8 curr, u32 multi)
tune_bucketsize(u8 curr, u32 multi)
{
u32 n;
@ -61,12 +61,10 @@ tune_ahash_max(u8 curr, u32 multi)
*/
return n > curr && n <= AHASH_MAX_TUNED ? n : curr;
}
#define TUNE_AHASH_MAX(h, multi) \
((h)->ahash_max = tune_ahash_max((h)->ahash_max, multi))
#define TUNE_BUCKETSIZE(h, multi) \
((h)->bucketsize = tune_bucketsize((h)->bucketsize, multi))
#else
#define AHASH_MAX(h) AHASH_MAX_SIZE
#define TUNE_AHASH_MAX(h, multi)
#define TUNE_BUCKETSIZE(h, multi)
#endif
/* A hash bucket */
@ -321,9 +319,7 @@ struct htype {
#ifdef IP_SET_HASH_WITH_MARKMASK
u32 markmask; /* markmask value for mark mask to store */
#endif
#ifdef IP_SET_HASH_WITH_MULTI
u8 ahash_max; /* max elements in an array block */
#endif
u8 bucketsize; /* max elements in an array block */
#ifdef IP_SET_HASH_WITH_NETMASK
u8 netmask; /* netmask value for subnets to store */
#endif
@ -950,7 +946,7 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
goto set_full;
/* Create a new slot */
if (n->pos >= n->size) {
TUNE_AHASH_MAX(h, multi);
TUNE_BUCKETSIZE(h, multi);
if (n->size >= AHASH_MAX(h)) {
/* Trigger rehashing */
mtype_data_next(&h->next, d);
@ -1305,6 +1301,11 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask))
goto nla_put_failure;
#endif
if (set->flags & IPSET_CREATE_FLAG_BUCKETSIZE) {
if (nla_put_u8(skb, IPSET_ATTR_BUCKETSIZE, h->bucketsize) ||
nla_put_net32(skb, IPSET_ATTR_INITVAL, htonl(h->initval)))
goto nla_put_failure;
}
if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(elements)))
@ -1547,8 +1548,20 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
#ifdef IP_SET_HASH_WITH_MARKMASK
h->markmask = markmask;
#endif
get_random_bytes(&h->initval, sizeof(h->initval));
if (tb[IPSET_ATTR_INITVAL])
h->initval = ntohl(nla_get_be32(tb[IPSET_ATTR_INITVAL]));
else
get_random_bytes(&h->initval, sizeof(h->initval));
h->bucketsize = AHASH_MAX_SIZE;
if (tb[IPSET_ATTR_BUCKETSIZE]) {
h->bucketsize = nla_get_u8(tb[IPSET_ATTR_BUCKETSIZE]);
if (h->bucketsize < AHASH_INIT_SIZE)
h->bucketsize = AHASH_INIT_SIZE;
else if (h->bucketsize > AHASH_MAX_SIZE)
h->bucketsize = AHASH_MAX_SIZE;
else if (h->bucketsize % 2)
h->bucketsize += 1;
}
t->htable_bits = hbits;
t->maxelem = h->maxelem / ahash_numof_locks(hbits);
RCU_INIT_POINTER(h->table, t);
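
The create path above clamps a user-supplied IPSET_ATTR_BUCKETSIZE into
[AHASH_INIT_SIZE, AHASH_MAX_SIZE] and rounds odd values up to the next even
number. A small stand-alone sketch of that rule with the constants from this
patch (2 and 12), for illustration only:

#include <stdio.h>

#define AHASH_INIT_SIZE	2			/* minimum bucket size */
#define AHASH_MAX_SIZE	(6 * AHASH_INIT_SIZE)	/* create-time maximum: 12 */

/* Mirrors the clamping done in IPSET_TOKEN(HTYPE, _create)() above. */
static unsigned int clamp_bucketsize(unsigned int requested)
{
	if (requested < AHASH_INIT_SIZE)
		return AHASH_INIT_SIZE;
	if (requested > AHASH_MAX_SIZE)
		return AHASH_MAX_SIZE;
	if (requested % 2)			/* odd values are rounded up */
		return requested + 1;
	return requested;
}

int main(void)
{
	unsigned int samples[] = { 0, 1, 2, 5, 7, 12, 200 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("requested %3u -> used %2u\n", samples[i],
		       clamp_bucketsize(samples[i]));
	return 0;
}

At run time, hash types built with IP_SET_HASH_WITH_MULTI may still grow the
bucket size further via TUNE_BUCKETSIZE(), up to AHASH_MAX_TUNED (64).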


@ -23,7 +23,8 @@
/* 1 Counters support */
/* 2 Comments support */
/* 3 Forceadd support */
#define IPSET_TYPE_REV_MAX 4 /* skbinfo support */
/* 4 skbinfo support */
#define IPSET_TYPE_REV_MAX 5 /* bucketsize, initval support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@ -277,11 +278,13 @@ static struct ip_set_type hash_ip_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ip_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_PROBES] = { .type = NLA_U8 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_NETMASK] = { .type = NLA_U8 },


@ -23,7 +23,7 @@
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
#define IPSET_TYPE_REV_MAX 0
#define IPSET_TYPE_REV_MAX 1 /* bucketsize, initval support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tomasz Chilinski <tomasz.chilinski@chilan.com>");
@ -268,11 +268,13 @@ static struct ip_set_type hash_ipmac_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipmac_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_PROBES] = { .type = NLA_U8 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },


@ -21,7 +21,8 @@
#define IPSET_TYPE_REV_MIN 0
/* 1 Forceadd support */
#define IPSET_TYPE_REV_MAX 2 /* skbinfo support */
/* 2 skbinfo support */
#define IPSET_TYPE_REV_MAX 3 /* bucketsize, initval support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vytas Dauksa <vytas.dauksa@smoothwall.net>");
@ -274,12 +275,14 @@ static struct ip_set_type hash_ipmark_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipmark_create,
.create_policy = {
[IPSET_ATTR_MARKMASK] = { .type = NLA_U32 },
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_PROBES] = { .type = NLA_U8 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },


@ -25,7 +25,8 @@
/* 2 Counters support added */
/* 3 Comments support added */
/* 4 Forceadd support added */
#define IPSET_TYPE_REV_MAX 5 /* skbinfo support added */
/* 5 skbinfo support added */
#define IPSET_TYPE_REV_MAX 6 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@ -341,11 +342,13 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipport_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_PROBES] = { .type = NLA_U8 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },


@ -25,7 +25,8 @@
/* 2 Counters support added */
/* 3 Comments support added */
/* 4 Forceadd support added */
#define IPSET_TYPE_REV_MAX 5 /* skbinfo support added */
/* 5 skbinfo support added */
#define IPSET_TYPE_REV_MAX 6 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@ -356,11 +357,13 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipportip_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_PROBES] = { .type = NLA_U8 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },


@ -27,7 +27,8 @@
/* 4 Counters support added */
/* 5 Comments support added */
/* 6 Forceadd support added */
#define IPSET_TYPE_REV_MAX 7 /* skbinfo support added */
/* 7 skbinfo support added */
#define IPSET_TYPE_REV_MAX 8 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@ -513,11 +514,13 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipportnet_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_PROBES] = { .type = NLA_U8 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },


@ -16,7 +16,7 @@
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
#define IPSET_TYPE_REV_MAX 0
#define IPSET_TYPE_REV_MAX 1 /* bucketsize, initval support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@ -125,11 +125,13 @@ static struct ip_set_type hash_mac_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_mac_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_PROBES] = { .type = NLA_U8 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },


@ -24,7 +24,8 @@
/* 3 Counters support added */
/* 4 Comments support added */
/* 5 Forceadd support added */
#define IPSET_TYPE_REV_MAX 6 /* skbinfo mapping support added */
/* 6 skbinfo support added */
#define IPSET_TYPE_REV_MAX 7 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@ -354,11 +355,13 @@ static struct ip_set_type hash_net_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_net_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_PROBES] = { .type = NLA_U8 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },


@ -26,7 +26,8 @@
/* 4 Comments support added */
/* 5 Forceadd support added */
/* 6 skbinfo support added */
#define IPSET_TYPE_REV_MAX 7 /* interface wildcard support added */
/* 7 interface wildcard support added */
#define IPSET_TYPE_REV_MAX 8 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@ -470,11 +471,13 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_netiface_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_PROBES] = { .type = NLA_U8 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },


@ -22,7 +22,8 @@
#define IPSET_TYPE_REV_MIN 0
/* 1 Forceadd support added */
#define IPSET_TYPE_REV_MAX 2 /* skbinfo support added */
/* 2 skbinfo support added */
#define IPSET_TYPE_REV_MAX 3 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
@ -459,11 +460,13 @@ static struct ip_set_type hash_netnet_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_netnet_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_PROBES] = { .type = NLA_U8 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },


@ -26,7 +26,8 @@
/* 4 Counters support added */
/* 5 Comments support added */
/* 6 Forceadd support added */
#define IPSET_TYPE_REV_MAX 7 /* skbinfo support added */
/* 7 skbinfo support added */
#define IPSET_TYPE_REV_MAX 8 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@ -460,11 +461,13 @@ static struct ip_set_type hash_netport_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_netport_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_PROBES] = { .type = NLA_U8 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },


@ -23,7 +23,8 @@
#define IPSET_TYPE_REV_MIN 0
/* 0 Comments support added */
/* 1 Forceadd support added */
#define IPSET_TYPE_REV_MAX 2 /* skbinfo support added */
/* 2 skbinfo support added */
#define IPSET_TYPE_REV_MAX 3 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
@ -558,11 +559,13 @@ static struct ip_set_type hash_netportnet_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_netportnet_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_PROBES] = { .type = NLA_U8 },
[IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
[IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },


@ -581,7 +581,8 @@ struct nft_module_request {
};
#ifdef CONFIG_MODULES
static int nft_request_module(struct net *net, const char *fmt, ...)
static __printf(2, 3) int nft_request_module(struct net *net, const char *fmt,
...)
{
char module_name[MODULE_NAME_LEN];
struct nft_module_request *req;
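
__printf(2, 3) tells the compiler that argument 2 of nft_request_module() is a
printf-style format string consumed by the variadic arguments, so -Wformat can
now catch mismatched callers at build time. A tiny user-space sketch of the
effect (the function below is a stand-in, not the kernel one):

#include <stdarg.h>
#include <stdio.h>

#define __printf(a, b) __attribute__((format(printf, a, b)))

/* Stand-in with the same shape as nft_request_module(): argument 2 is the
 * format, arguments from position 3 onwards are checked against it. */
static __printf(2, 3) int request_module_like(void *ctx, const char *fmt, ...)
{
	va_list ap;
	int ret;

	(void)ctx;
	va_start(ap, fmt);
	ret = vprintf(fmt, ap);
	va_end(ap);
	return ret;
}

int main(void)
{
	request_module_like(NULL, "%s-%u\n", "reject", 5u);	/* checks out */
	/* request_module_like(NULL, "%s-%u\n", 5u, "reject"); */
	/* ^ uncommenting this line now draws a -Wformat warning */
	return 0;
}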


@ -40,6 +40,7 @@ int nft_reject_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_reject *priv = nft_expr_priv(expr);
int icmp_code;
if (tb[NFTA_REJECT_TYPE] == NULL)
return -EINVAL;
@ -47,9 +48,17 @@ int nft_reject_init(const struct nft_ctx *ctx,
priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
case NFT_REJECT_ICMPX_UNREACH:
if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
return -EINVAL;
priv->icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
icmp_code > NFT_REJECT_ICMPX_MAX)
return -EINVAL;
priv->icmp_code = icmp_code;
break;
case NFT_REJECT_TCP_RST:
break;
default:
@ -69,6 +78,7 @@ int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
case NFT_REJECT_ICMPX_UNREACH:
if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
goto nla_put_failure;
break;


@ -58,60 +58,16 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
regs->verdict.code = NF_DROP;
}
static int nft_reject_inet_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
static int nft_reject_inet_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
struct nft_reject *priv = nft_expr_priv(expr);
int icmp_code;
if (tb[NFTA_REJECT_TYPE] == NULL)
return -EINVAL;
priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
case NFT_REJECT_ICMPX_UNREACH:
if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
return -EINVAL;
icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
icmp_code > NFT_REJECT_ICMPX_MAX)
return -EINVAL;
priv->icmp_code = icmp_code;
break;
case NFT_REJECT_TCP_RST:
break;
default:
return -EINVAL;
}
return 0;
}
static int nft_reject_inet_dump(struct sk_buff *skb,
const struct nft_expr *expr)
{
const struct nft_reject *priv = nft_expr_priv(expr);
if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
goto nla_put_failure;
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
case NFT_REJECT_ICMPX_UNREACH:
if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
goto nla_put_failure;
break;
default:
break;
}
return 0;
nla_put_failure:
return -1;
return nft_chain_validate_hooks(ctx->chain,
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_INGRESS));
}
static struct nft_expr_type nft_reject_inet_type;
@ -119,9 +75,9 @@ static const struct nft_expr_ops nft_reject_inet_ops = {
.type = &nft_reject_inet_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
.eval = nft_reject_inet_eval,
.init = nft_reject_inet_init,
.dump = nft_reject_inet_dump,
.validate = nft_reject_validate,
.init = nft_reject_init,
.dump = nft_reject_dump,
.validate = nft_reject_inet_validate,
};
static struct nft_expr_type nft_reject_inet_type __read_mostly = {


@ -0,0 +1,189 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020 Laura Garcia Liebana <nevola@gmail.com>
* Copyright (c) 2020 Jose M. Guisado <guigom@riseup.net>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_reject.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <net/netfilter/ipv6/nf_reject.h>
static void nft_reject_queue_xmit(struct sk_buff *nskb, struct sk_buff *oldskb)
{
dev_hard_header(nskb, nskb->dev, ntohs(oldskb->protocol),
eth_hdr(oldskb)->h_source, eth_hdr(oldskb)->h_dest,
nskb->len);
dev_queue_xmit(nskb);
}
static void nft_reject_netdev_send_v4_tcp_reset(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook)
{
struct sk_buff *nskb;
nskb = nf_reject_skb_v4_tcp_reset(net, oldskb, dev, hook);
if (!nskb)
return;
nft_reject_queue_xmit(nskb, oldskb);
}
static void nft_reject_netdev_send_v4_unreach(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook, u8 code)
{
struct sk_buff *nskb;
nskb = nf_reject_skb_v4_unreach(net, oldskb, dev, hook, code);
if (!nskb)
return;
nft_reject_queue_xmit(nskb, oldskb);
}
static void nft_reject_netdev_send_v6_tcp_reset(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook)
{
struct sk_buff *nskb;
nskb = nf_reject_skb_v6_tcp_reset(net, oldskb, dev, hook);
if (!nskb)
return;
nft_reject_queue_xmit(nskb, oldskb);
}
static void nft_reject_netdev_send_v6_unreach(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook, u8 code)
{
struct sk_buff *nskb;
nskb = nf_reject_skb_v6_unreach(net, oldskb, dev, hook, code);
if (!nskb)
return;
nft_reject_queue_xmit(nskb, oldskb);
}
static void nft_reject_netdev_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct ethhdr *eth = eth_hdr(pkt->skb);
struct nft_reject *priv = nft_expr_priv(expr);
const unsigned char *dest = eth->h_dest;
if (is_broadcast_ether_addr(dest) ||
is_multicast_ether_addr(dest))
goto out;
switch (eth->h_proto) {
case htons(ETH_P_IP):
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
nft_reject_netdev_send_v4_unreach(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt),
priv->icmp_code);
break;
case NFT_REJECT_TCP_RST:
nft_reject_netdev_send_v4_tcp_reset(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt));
break;
case NFT_REJECT_ICMPX_UNREACH:
nft_reject_netdev_send_v4_unreach(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt),
nft_reject_icmp_code(priv->icmp_code));
break;
}
break;
case htons(ETH_P_IPV6):
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
nft_reject_netdev_send_v6_unreach(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt),
priv->icmp_code);
break;
case NFT_REJECT_TCP_RST:
nft_reject_netdev_send_v6_tcp_reset(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt));
break;
case NFT_REJECT_ICMPX_UNREACH:
nft_reject_netdev_send_v6_unreach(nft_net(pkt), pkt->skb,
nft_in(pkt),
nft_hook(pkt),
nft_reject_icmpv6_code(priv->icmp_code));
break;
}
break;
default:
/* No explicit way to reject this protocol, drop it. */
break;
}
out:
regs->verdict.code = NF_DROP;
}
static int nft_reject_netdev_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS));
}
static struct nft_expr_type nft_reject_netdev_type;
static const struct nft_expr_ops nft_reject_netdev_ops = {
.type = &nft_reject_netdev_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
.eval = nft_reject_netdev_eval,
.init = nft_reject_init,
.dump = nft_reject_dump,
.validate = nft_reject_netdev_validate,
};
static struct nft_expr_type nft_reject_netdev_type __read_mostly = {
.family = NFPROTO_NETDEV,
.name = "reject",
.ops = &nft_reject_netdev_ops,
.policy = nft_reject_policy,
.maxattr = NFTA_REJECT_MAX,
.owner = THIS_MODULE,
};
static int __init nft_reject_netdev_module_init(void)
{
return nft_register_expr(&nft_reject_netdev_type);
}
static void __exit nft_reject_netdev_module_exit(void)
{
nft_unregister_expr(&nft_reject_netdev_type);
}
module_init(nft_reject_netdev_module_init);
module_exit(nft_reject_netdev_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Laura Garcia Liebana <nevola@gmail.com>");
MODULE_AUTHOR("Jose M. Guisado <guigom@riseup.net>");
MODULE_DESCRIPTION("Reject packets from netdev via nftables");
MODULE_ALIAS_NFT_AF_EXPR(5, "reject");