vlan: uninline __vlan_hwaccel_rx

The function is huge and included at least once in every VLAN acceleration
capable driver. Uninline it; to avoid having drivers depend on the VLAN
module, the function is always built in statically when VLAN is enabled.

With all VLAN acceleration capable drivers that build on x86_64 enabled,
this results in:

   text    data     bss     dec     hex filename
6515227  854044  343968 7713239  75b1d7 vmlinux.inlined
6505637  854044  343968 7703649  758c61 vmlinux.uninlined
----------------------------------------------------------
  -9590

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Patrick McHardy <kaber@trash.net>
Date:      2008-07-08 03:23:36 -07:00
Committer: David S. Miller <davem@davemloft.net>
parent 75b8846acd
commit 7750f403cb
5 changed files with 72 additions and 61 deletions
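For context on where the savings come from: VLAN-acceleration-capable drivers hand tagged frames to the VLAN layer through the vlan_hwaccel_rx()/vlan_hwaccel_receive_skb() wrappers in <linux/if_vlan.h>, which call __vlan_hwaccel_rx() with polling set to 0 or 1, so the old inline body was duplicated into each such driver. A minimal sketch of a driver RX path of this era is shown below; struct foo_adapter, its vlgrp field and the tag_valid flag are hypothetical, and only vlan_hwaccel_receive_skb() and netif_receive_skb() are real kernel interfaces here.

/* Sketch of a hardware-accelerated VLAN RX path (circa 2.6.26).
 * struct foo_adapter and its fields are hypothetical; the VLAN calls
 * come from <linux/if_vlan.h>, the rest from <linux/netdevice.h>.
 */
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_adapter {
	struct vlan_group *vlgrp;	/* set by the driver's ->vlan_rx_register() hook */
};

static int foo_receive_skb(struct foo_adapter *adapter, struct sk_buff *skb,
			   unsigned short vlan_tag, int tag_valid)
{
	if (adapter->vlgrp && tag_valid)
		/* NAPI path; expands to __vlan_hwaccel_rx(skb, grp, tag, 1) */
		return vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vlan_tag);

	return netif_receive_skb(skb);
}

After this commit each such call site compiles down to a plain call into the always-built-in vlan_core.o, which is where the 9590 bytes of text saved in the numbers above come from.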

include/linux/if_vlan.h

@@ -150,15 +150,6 @@ static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
 	return netdev_priv(dev);
 }
-/* inline functions */
-static inline __u32 vlan_get_ingress_priority(struct net_device *dev,
-					      unsigned short vlan_tag)
-{
-	struct vlan_dev_info *vip = vlan_dev_info(dev);
-	return vip->ingress_priority_map[(vlan_tag >> 13) & 0x7];
-}
 /* VLAN tx hw acceleration helpers. */
 struct vlan_skb_tx_cookie {
 	u32 magic;
@@ -171,56 +162,17 @@ struct vlan_skb_tx_cookie {
 	(VLAN_TX_SKB_CB(__skb)->magic == VLAN_TX_COOKIE_MAGIC)
 #define vlan_tx_tag_get(__skb) (VLAN_TX_SKB_CB(__skb)->vlan_tag)
 /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
-static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
-				    struct vlan_group *grp,
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
+			     unsigned short vlan_tag, int polling);
+#else
+static inline int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 				    unsigned short vlan_tag, int polling)
 {
-	struct net_device_stats *stats;
-	if (skb_bond_should_drop(skb)) {
-		dev_kfree_skb_any(skb);
-		return NET_RX_DROP;
-	}
-	skb->dev = vlan_group_get_device(grp, vlan_tag & VLAN_VID_MASK);
-	if (skb->dev == NULL) {
-		dev_kfree_skb_any(skb);
-		/* Not NET_RX_DROP, this is not being dropped
-		 * due to congestion.
-		 */
-		return 0;
-	}
-	skb->dev->last_rx = jiffies;
-	stats = &skb->dev->stats;
-	stats->rx_packets++;
-	stats->rx_bytes += skb->len;
-	skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tag);
-	switch (skb->pkt_type) {
-	case PACKET_BROADCAST:
-		break;
-	case PACKET_MULTICAST:
-		stats->multicast++;
-		break;
-	case PACKET_OTHERHOST:
-		/* Our lower layer thinks this is not local, let's make sure.
-		 * This allows the VLAN to have a different MAC than the underlying
-		 * device, and still route correctly.
-		 */
-		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
-					skb->dev->dev_addr))
-			skb->pkt_type = PACKET_HOST;
-		break;
-	};
-	return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+	BUG();
+	return NET_XMIT_SUCCESS;
 }
+#endif
 static inline int vlan_hwaccel_rx(struct sk_buff *skb,
 				  struct vlan_group *grp,
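The #else stub keeps callers compiling when CONFIG_VLAN_8021Q is not set at all; it is not meant to be reached at runtime, which is presumably why it simply BUG()s.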

net/8021q/Makefile

@@ -1,9 +1,10 @@
 #
 # Makefile for the Linux VLAN layer.
 #
+obj-$(subst m,y,$(CONFIG_VLAN_8021Q)) += vlan_core.o
 obj-$(CONFIG_VLAN_8021Q) += 8021q.o
 8021q-y := vlan.o vlan_dev.o vlan_netlink.o
 8021q-$(CONFIG_VLAN_8021Q_GVRP) += vlan_gvrp.o
 8021q-$(CONFIG_PROC_FS) += vlanproc.o
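The $(subst m,y,$(CONFIG_VLAN_8021Q)) idiom is what implements the "always built in statically" rule from the commit message: with CONFIG_VLAN_8021Q=y the line stays obj-y, with =m the substitution still yields obj-y, so vlan_core.o is linked into the kernel image even when the rest of 8021q is built as a module; when the option is unset it expands to the ignored obj- list.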

net/8021q/vlan.h

@@ -37,6 +37,14 @@ void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev);
 void unregister_vlan_dev(struct net_device *dev);
+static inline u32 vlan_get_ingress_priority(struct net_device *dev,
+					    unsigned short vlan_tag)
+{
+	struct vlan_dev_info *vip = vlan_dev_info(dev);
+	return vip->ingress_priority_map[(vlan_tag >> 13) & 0x7];
+}
+
 #ifdef CONFIG_VLAN_8021Q_GVRP
 extern int vlan_gvrp_request_join(const struct net_device *dev);
 extern void vlan_gvrp_request_leave(const struct net_device *dev);
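vlan_get_ingress_priority() moves from the public <linux/if_vlan.h> into the private net/8021q/vlan.h because, with the RX helper out of line, its remaining callers all live inside the 8021q code itself.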

net/8021q/vlan_core.c (new file, 48 lines)

@@ -0,0 +1,48 @@
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include "vlan.h"
+/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
+int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
+		      unsigned short vlan_tag, int polling)
+{
+	struct net_device_stats *stats;
+	if (skb_bond_should_drop(skb)) {
+		dev_kfree_skb_any(skb);
+		return NET_RX_DROP;
+	}
+	skb->dev = vlan_group_get_device(grp, vlan_tag & VLAN_VID_MASK);
+	if (skb->dev == NULL) {
+		dev_kfree_skb_any(skb);
+		/* Not NET_RX_DROP, this is not being dropped
+		 * due to congestion. */
+		return NET_RX_SUCCESS;
+	}
+	skb->dev->last_rx = jiffies;
+	stats = &skb->dev->stats;
+	stats->rx_packets++;
+	stats->rx_bytes += skb->len;
+	skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tag);
+	switch (skb->pkt_type) {
+	case PACKET_BROADCAST:
+		break;
+	case PACKET_MULTICAST:
+		stats->multicast++;
+		break;
+	case PACKET_OTHERHOST:
+		/* Our lower layer thinks this is not local, let's make sure.
+		 * This allows the VLAN to have a different MAC than the
+		 * underlying device, and still route correctly. */
+		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
+					skb->dev->dev_addr))
+			skb->pkt_type = PACKET_HOST;
+		break;
+	};
+	return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+}
+EXPORT_SYMBOL(__vlan_hwaccel_rx);
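Apart from returning NET_RX_SUCCESS instead of a literal 0 (the same value) and reflowed comments, the body is a verbatim move of the old inline; the EXPORT_SYMBOL() is what lets drivers built as modules keep calling the now out-of-line helper.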

net/Makefile

@@ -42,7 +42,9 @@ obj-$(CONFIG_AF_RXRPC) += rxrpc/
 obj-$(CONFIG_ATM) += atm/
 obj-$(CONFIG_DECNET) += decnet/
 obj-$(CONFIG_ECONET) += econet/
-obj-$(CONFIG_VLAN_8021Q) += 8021q/
+ifneq ($(CONFIG_VLAN_8021Q),)
+obj-y += 8021q/
+endif
 obj-$(CONFIG_IP_DCCP) += dccp/
 obj-$(CONFIG_IP_SCTP) += sctp/
 obj-y += wireless/
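The ifneq guard in net/Makefile mirrors the 8021q Makefile change: with CONFIG_VLAN_8021Q=m, the old obj-$(CONFIG_VLAN_8021Q) rule would only descend into net/8021q/ for the module build, so the built-in vlan_core.o would never be linked into vmlinux; descending with obj-y whenever the option is y or m lets kbuild pick up both the built-in and the modular pieces.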