[NETFILTER]: Move reroute-after-queue code up to the nf_queue layer.

The rerouting functionality is required by the core, therefore it has
to be implemented by the core and not in individual queue handlers.

Signed-off-by: Harald Welte <laforge@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Harald Welte, 2005-08-09 19:42:34 -07:00, committed by David S. Miller
commit 2cc7d57309 (parent 4fdb3bb723)
8 changed files with 199 additions and 65 deletions
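
For orientation before the per-file hunks: the queueing core now drives the
reroute handling itself. A condensed sketch of the new nf_queue() sequence,
pieced together from the core netfilter hunks below (the callback names are
the ones this patch introduces):

	/* condensed sketch of nf_queue() for protocol family pf -- not the full function */
	if (queue_rerouter[pf].save)
		queue_rerouter[pf].save(*skb, info);	/* family records its routing keys */

	status = queue_handler[pf].outfn(*skb, info, queue_handler[pf].data);

	if (status >= 0 && queue_rerouter[pf].reroute)
		status = queue_rerouter[pf].reroute(skb, info);	/* family re-routes if needed */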


@@ -198,6 +198,17 @@ extern void nf_invalidate_cache(int pf);
Returns true or false. */
extern int skb_make_writable(struct sk_buff **pskb, unsigned int writable_len);
struct nf_queue_rerouter {
void (*save)(const struct sk_buff *skb, struct nf_info *info);
int (*reroute)(struct sk_buff **skb, const struct nf_info *info);
int rer_size;
};
#define nf_info_reroute(x) ((void *)x + sizeof(struct nf_info))
extern int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer);
extern int nf_unregister_queue_rerouter(int pf);
#else /* !CONFIG_NETFILTER */
#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb)
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
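
A minimal sketch of how a protocol family is expected to hook into this
interface (the names here are illustrative only; the real IPv4 and IPv6
rerouters are added further down in this patch):

	/* illustrative only -- see the real IPv4/IPv6 rerouters below */
	static struct nf_queue_rerouter my_rerouter = {
		.rer_size = sizeof(struct my_rt_info),	/* per-packet state the core allocates behind nf_info */
		.save     = my_save,	/* called before the packet goes to the queue handler */
		.reroute  = my_reroute,	/* called by the core once the handler has accepted the packet */
	};

	/* from the family's init/exit paths: */
	err = nf_register_queue_rerouter(PF_INET, &my_rerouter);
	...
	nf_unregister_queue_rerouter(PF_INET);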


@@ -71,4 +71,7 @@ enum nf_ip6_hook_priorities {
NF_IP6_PRI_LAST = INT_MAX,
};
int ipv6_netfilter_init(void);
void ipv6_netfilter_fini(void);
#endif /*__LINUX_IP6_NETFILTER_H*/


@@ -53,6 +53,9 @@ static struct nf_queue_handler_t {
nf_queue_outfn_t outfn;
void *data;
} queue_handler[NPROTO];
static struct nf_queue_rerouter *queue_rerouter;
static DEFINE_RWLOCK(queue_handler_lock);
int nf_register_hook(struct nf_hook_ops *reg)
@@ -260,11 +263,34 @@ int nf_unregister_queue_handler(int pf)
return 0;
}
int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer)
{
if (pf >= NPROTO)
return -EINVAL;
write_lock_bh(&queue_handler_lock);
memcpy(&queue_rerouter[pf], rer, sizeof(queue_rerouter[pf]));
write_unlock_bh(&queue_handler_lock);
return 0;
}
int nf_unregister_queue_rerouter(int pf)
{
if (pf >= NPROTO)
return -EINVAL;
write_lock_bh(&queue_handler_lock);
memset(&queue_rerouter[pf], 0, sizeof(queue_rerouter[pf]));
write_unlock_bh(&queue_handler_lock);
return 0;
}
/*
* Any packet that leaves via this function must come back
* through nf_reinject().
*/
static int nf_queue(struct sk_buff *skb,
static int nf_queue(struct sk_buff **skb,
struct list_head *elem,
int pf, unsigned int hook,
struct net_device *indev,
@@ -282,17 +308,17 @@ static int nf_queue(struct sk_buff *skb,
read_lock(&queue_handler_lock);
if (!queue_handler[pf].outfn) {
read_unlock(&queue_handler_lock);
kfree_skb(skb);
kfree_skb(*skb);
return 1;
}
info = kmalloc(sizeof(*info), GFP_ATOMIC);
info = kmalloc(sizeof(*info)+queue_rerouter[pf].rer_size, GFP_ATOMIC);
if (!info) {
if (net_ratelimit())
printk(KERN_ERR "OOM queueing packet %p\n",
skb);
*skb);
read_unlock(&queue_handler_lock);
kfree_skb(skb);
kfree_skb(*skb);
return 1;
}
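
The kmalloc() change above is what carries the per-family state: the reroute
scratch area is co-allocated directly behind the struct nf_info, which is all
the nf_info_reroute() helper from the header relies on. Roughly:

	/*
	 * One allocation, two regions (sketch):
	 *
	 *   info = kmalloc(sizeof(*info) + queue_rerouter[pf].rer_size, GFP_ATOMIC);
	 *
	 *   +--------------------+-----------------------------------+
	 *   |  struct nf_info    |  rer_size bytes of family state   |
	 *   +--------------------+-----------------------------------+
	 *   ^ info               ^ nf_info_reroute(info)
	 *
	 * so save()/reroute() get their scratch space without a second
	 * allocation, and a single kfree(info) releases both.
	 */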
@@ -311,15 +337,21 @@ static int nf_queue(struct sk_buff *skb,
if (outdev) dev_hold(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
if (skb->nf_bridge) {
physindev = skb->nf_bridge->physindev;
if ((*skb)->nf_bridge) {
physindev = (*skb)->nf_bridge->physindev;
if (physindev) dev_hold(physindev);
physoutdev = skb->nf_bridge->physoutdev;
physoutdev = (*skb)->nf_bridge->physoutdev;
if (physoutdev) dev_hold(physoutdev);
}
#endif
if (queue_rerouter[pf].save)
queue_rerouter[pf].save(*skb, info);
status = queue_handler[pf].outfn(*skb, info, queue_handler[pf].data);
if (status >= 0 && queue_rerouter[pf].reroute)
status = queue_rerouter[pf].reroute(skb, info);
status = queue_handler[pf].outfn(skb, info, queue_handler[pf].data);
read_unlock(&queue_handler_lock);
if (status < 0) {
@@ -332,9 +364,11 @@ static int nf_queue(struct sk_buff *skb,
#endif
module_put(info->elem->owner);
kfree(info);
kfree_skb(skb);
kfree_skb(*skb);
return 1;
}
return 1;
}
@@ -365,7 +399,7 @@ next_hook:
ret = -EPERM;
} else if (verdict == NF_QUEUE) {
NFDEBUG("nf_hook: Verdict = QUEUE.\n");
if (!nf_queue(*pskb, elem, pf, hook, indev, outdev, okfn))
if (!nf_queue(pskb, elem, pf, hook, indev, outdev, okfn))
goto next_hook;
}
unlock:
@@ -428,7 +462,7 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
break;
case NF_QUEUE:
if (!nf_queue(skb, elem, info->pf, info->hook,
if (!nf_queue(&skb, elem, info->pf, info->hook,
info->indev, info->outdev, info->okfn))
goto next_hook;
break;
@@ -555,6 +589,12 @@ void __init netfilter_init(void)
{
int i, h;
queue_rerouter = kmalloc(NPROTO * sizeof(struct nf_queue_rerouter),
GFP_KERNEL);
if (!queue_rerouter)
panic("netfilter: cannot allocate queue rerouter array\n");
memset(queue_rerouter, 0, NPROTO * sizeof(struct nf_queue_rerouter));
for (i = 0; i < NPROTO; i++) {
for (h = 0; h < NF_MAX_HOOKS; h++)
INIT_LIST_HEAD(&nf_hooks[i][h]);
@@ -573,4 +613,6 @@ EXPORT_SYMBOL(nf_reinject);
EXPORT_SYMBOL(nf_setsockopt);
EXPORT_SYMBOL(nf_unregister_hook);
EXPORT_SYMBOL(nf_unregister_queue_handler);
EXPORT_SYMBOL_GPL(nf_register_queue_rerouter);
EXPORT_SYMBOL_GPL(nf_unregister_queue_rerouter);
EXPORT_SYMBOL(nf_unregister_sockopt);


@@ -1,10 +1,11 @@
#include <linux/config.h>
/* IPv4 specific functions of netfilter core */
#include <linux/config.h>
#ifdef CONFIG_NETFILTER
/* IPv4 specific functions of netfilter core */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/tcp.h>
#include <linux/udp.h>
@@ -76,4 +77,63 @@ int ip_route_me_harder(struct sk_buff **pskb)
return 0;
}
EXPORT_SYMBOL(ip_route_me_harder);
/*
* Extra routing may be needed on local out, as the QUEUE target never
* returns control to the table.
*/
struct ip_rt_info {
u_int32_t daddr;
u_int32_t saddr;
u_int8_t tos;
};
static void queue_save(const struct sk_buff *skb, struct nf_info *info)
{
struct ip_rt_info *rt_info = nf_info_reroute(info);
if (info->hook == NF_IP_LOCAL_OUT) {
const struct iphdr *iph = skb->nh.iph;
rt_info->tos = iph->tos;
rt_info->daddr = iph->daddr;
rt_info->saddr = iph->saddr;
}
}
static int queue_reroute(struct sk_buff **pskb, const struct nf_info *info)
{
const struct ip_rt_info *rt_info = nf_info_reroute(info);
if (info->hook == NF_IP_LOCAL_OUT) {
struct iphdr *iph = (*pskb)->nh.iph;
if (!(iph->tos == rt_info->tos
&& iph->daddr == rt_info->daddr
&& iph->saddr == rt_info->saddr))
return ip_route_me_harder(pskb);
}
return 0;
}
static struct nf_queue_rerouter ip_reroute = {
.rer_size = sizeof(struct ip_rt_info),
.save = queue_save,
.reroute = queue_reroute,
};
static int init(void)
{
return nf_register_queue_rerouter(PF_INET, &ip_reroute);
}
static void fini(void)
{
nf_unregister_queue_rerouter(PF_INET);
}
module_init(init);
module_exit(fini);
#endif /* CONFIG_NETFILTER */


@@ -43,17 +43,10 @@
#define NET_IPQ_QMAX 2088
#define NET_IPQ_QMAX_NAME "ip_queue_maxlen"
struct ipq_rt_info {
__u8 tos;
__u32 daddr;
__u32 saddr;
};
struct ipq_queue_entry {
struct list_head list;
struct nf_info *info;
struct sk_buff *skb;
struct ipq_rt_info rt_info;
};
typedef int (*ipq_cmpfn)(struct ipq_queue_entry *, unsigned long);
@@ -305,14 +298,6 @@ ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info, void *data)
entry->info = info;
entry->skb = skb;
if (entry->info->hook == NF_IP_LOCAL_OUT) {
struct iphdr *iph = skb->nh.iph;
entry->rt_info.tos = iph->tos;
entry->rt_info.daddr = iph->daddr;
entry->rt_info.saddr = iph->saddr;
}
nskb = ipq_build_packet_message(entry, &status);
if (nskb == NULL)
goto err_out_free;
@@ -393,18 +378,6 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
memcpy(e->skb->data, v->payload, v->data_len);
e->skb->ip_summed = CHECKSUM_NONE;
/*
* Extra routing may needed on local out, as the QUEUE target never
* returns control to the table.
*/
if (e->info->hook == NF_IP_LOCAL_OUT) {
struct iphdr *iph = e->skb->nh.iph;
if (!(iph->tos == e->rt_info.tos
&& iph->daddr == e->rt_info.daddr
&& iph->saddr == e->rt_info.saddr))
return ip_route_me_harder(&e->skb);
}
return 0;
}


@@ -44,6 +44,7 @@
#include <linux/netdevice.h>
#include <linux/icmpv6.h>
#include <linux/smp_lock.h>
#include <linux/netfilter_ipv6.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -757,6 +758,9 @@ static int __init inet6_init(void)
err = igmp6_init(&inet6_family_ops);
if (err)
goto igmp_fail;
err = ipv6_netfilter_init();
if (err)
goto netfilter_fail;
/* Create /proc/foo6 entries. */
#ifdef CONFIG_PROC_FS
err = -ENOMEM;
@@ -813,6 +817,8 @@ proc_tcp6_fail:
raw6_proc_exit();
proc_raw6_fail:
#endif
ipv6_netfilter_fini();
netfilter_fail:
igmp6_cleanup();
igmp_fail:
ndisc_cleanup();
@@ -852,6 +858,7 @@ static void __exit inet6_exit(void)
ip6_route_cleanup();
ipv6_packet_cleanup();
igmp6_cleanup();
ipv6_netfilter_fini();
ndisc_cleanup();
icmpv6_cleanup();
#ifdef CONFIG_SYSCTL


@@ -5,6 +5,8 @@
#include <linux/kernel.h>
#include <linux/ipv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <net/dst.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
@@ -40,4 +42,64 @@ int ip6_route_me_harder(struct sk_buff *skb)
}
EXPORT_SYMBOL(ip6_route_me_harder);
/*
* Extra routing may be needed on local out, as the QUEUE target never
* returns control to the table.
*/
struct ip6_rt_info {
struct in6_addr daddr;
struct in6_addr saddr;
};
static void save(const struct sk_buff *skb, struct nf_info *info)
{
struct ip6_rt_info *rt_info = nf_info_reroute(info);
if (info->hook == NF_IP6_LOCAL_OUT) {
struct ipv6hdr *iph = skb->nh.ipv6h;
rt_info->daddr = iph->daddr;
rt_info->saddr = iph->saddr;
}
}
static int reroute(struct sk_buff **pskb, const struct nf_info *info)
{
struct ip6_rt_info *rt_info = nf_info_reroute(info);
if (info->hook == NF_IP6_LOCAL_OUT) {
struct ipv6hdr *iph = (*pskb)->nh.ipv6h;
if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
!ipv6_addr_equal(&iph->saddr, &rt_info->saddr))
return ip6_route_me_harder(*pskb);
}
return 0;
}
static struct nf_queue_rerouter ip6_reroute = {
.rer_size = sizeof(struct ip6_rt_info),
.save = &save,
.reroute = &reroute,
};
int __init ipv6_netfilter_init(void)
{
return nf_register_queue_rerouter(PF_INET6, &ip6_reroute);
}
void ipv6_netfilter_fini(void)
{
nf_unregister_queue_rerouter(PF_INET6);
}
#else /* CONFIG_NETFILTER */
int __init ipv6_netfilter_init(void)
{
return 0;
}
void ipv6_netfilter_fini(void)
{
}
#endif /* CONFIG_NETFILTER */


@@ -47,16 +47,10 @@
#define NET_IPQ_QMAX 2088
#define NET_IPQ_QMAX_NAME "ip6_queue_maxlen"
struct ipq_rt_info {
struct in6_addr daddr;
struct in6_addr saddr;
};
struct ipq_queue_entry {
struct list_head list;
struct nf_info *info;
struct sk_buff *skb;
struct ipq_rt_info rt_info;
};
typedef int (*ipq_cmpfn)(struct ipq_queue_entry *, unsigned long);
@@ -302,13 +296,6 @@ ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info, void *data)
entry->info = info;
entry->skb = skb;
if (entry->info->hook == NF_IP_LOCAL_OUT) {
struct ipv6hdr *iph = skb->nh.ipv6h;
entry->rt_info.daddr = iph->daddr;
entry->rt_info.saddr = iph->saddr;
}
nskb = ipq_build_packet_message(entry, &status);
if (nskb == NULL)
goto err_out_free;
@@ -389,17 +376,6 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
memcpy(e->skb->data, v->payload, v->data_len);
e->skb->ip_summed = CHECKSUM_NONE;
/*
* Extra routing may needed on local out, as the QUEUE target never
* returns control to the table.
* Not a nice way to cmp, but works
*/
if (e->info->hook == NF_IP_LOCAL_OUT) {
struct ipv6hdr *iph = e->skb->nh.ipv6h;
if (!ipv6_addr_equal(&iph->daddr, &e->rt_info.daddr) ||
!ipv6_addr_equal(&iph->saddr, &e->rt_info.saddr))
return ip6_route_me_harder(e->skb);
}
return 0;
}