vlan: Implement vlan_dev_get_egress_qos_mask as an inline.

This is to avoid very silly Kconfig dependencies for modules
using this routine.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2013-11-11 00:42:07 -05:00
parent 170e85430b
commit e267cb960a
3 changed files with 99 additions and 107 deletions
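
Editor's note: to illustrate the commit message, once vlan_dev_get_egress_qos_mask() is a static inline in <linux/if_vlan.h>, any module that builds against the header can compute the egress PCP bits without a symbol dependency on the 8021q module. The function below is a hypothetical caller, a sketch only and not code from this commit; the name example_egress_tci is made up.

/* Hypothetical caller -- a sketch, not part of this commit.  Builds the
 * TCI for a frame leaving a VLAN device using the now-inline
 * vlan_dev_get_egress_qos_mask(); no EXPORT_SYMBOL from 8021q is needed.
 * Caller is assumed to know that vlan_dev really is a VLAN device
 * (see is_vlan_dev()).
 */
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static u16 example_egress_tci(struct net_device *vlan_dev,
			      const struct sk_buff *skb)
{
	u16 tci = vlan_dev_vlan_id(vlan_dev);

	/* vlan_qos comes back already shifted into the PCP field (bits 15:13) */
	tci |= vlan_dev_get_egress_qos_mask(vlan_dev, skb->priority);
	return tci;
}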

--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h

@@ -88,8 +88,102 @@ extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
 					       __be16 vlan_proto, u16 vlan_id);
 extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
 extern u16 vlan_dev_vlan_id(const struct net_device *dev);
-extern u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
-					u32 skprio);
+
+/**
+ * struct vlan_priority_tci_mapping - vlan egress priority mappings
+ * @priority: skb priority
+ * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
+ * @next: pointer to next struct
+ */
+struct vlan_priority_tci_mapping {
+	u32 priority;
+	u16 vlan_qos;
+	struct vlan_priority_tci_mapping *next;
+};
+
+/**
+ * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
+ * @rx_packets: number of received packets
+ * @rx_bytes: number of received bytes
+ * @rx_multicast: number of received multicast packets
+ * @tx_packets: number of transmitted packets
+ * @tx_bytes: number of transmitted bytes
+ * @syncp: synchronization point for 64bit counters
+ * @rx_errors: number of rx errors
+ * @tx_dropped: number of tx drops
+ */
+struct vlan_pcpu_stats {
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 rx_multicast;
+	u64 tx_packets;
+	u64 tx_bytes;
+	struct u64_stats_sync syncp;
+	u32 rx_errors;
+	u32 tx_dropped;
+};
+
+struct proc_dir_entry;
+struct netpoll;
+
+/**
+ * struct vlan_dev_priv - VLAN private device data
+ * @nr_ingress_mappings: number of ingress priority mappings
+ * @ingress_priority_map: ingress priority mappings
+ * @nr_egress_mappings: number of egress priority mappings
+ * @egress_priority_map: hash of egress priority mappings
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_id: VLAN identifier
+ * @flags: device flags
+ * @real_dev: underlying netdevice
+ * @real_dev_addr: address of underlying netdevice
+ * @dent: proc dir entry
+ * @vlan_pcpu_stats: ptr to percpu rx stats
+ */
+struct vlan_dev_priv {
+	unsigned int nr_ingress_mappings;
+	u32 ingress_priority_map[8];
+	unsigned int nr_egress_mappings;
+	struct vlan_priority_tci_mapping *egress_priority_map[16];
+
+	__be16 vlan_proto;
+	u16 vlan_id;
+	u16 flags;
+
+	struct net_device *real_dev;
+	unsigned char real_dev_addr[ETH_ALEN];
+
+	struct proc_dir_entry *dent;
+	struct vlan_pcpu_stats __percpu *vlan_pcpu_stats;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	struct netpoll *netpoll;
+#endif
+};
+
+static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
+{
+	return netdev_priv(dev);
+}
+
+static inline u16
+vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
+{
+	struct vlan_priority_tci_mapping *mp;
+
+	smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
+
+	mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
+	while (mp) {
+		if (mp->priority == skprio) {
+			return mp->vlan_qos; /* This should already be shifted
+					      * to mask correctly with the
+					      * VLAN's TCI */
+		}
+		mp = mp->next;
+	}
+	return 0;
+}
+
 extern bool vlan_do_receive(struct sk_buff **skb);
 extern struct sk_buff *vlan_untag(struct sk_buff *skb);
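
Editor's note: the vlan_dev_get_egress_qos_mask() inline added above walks a 16-bucket chain hash keyed by the low four bits of skb->priority, looking for an exact priority match; the stored vlan_qos is already shifted into the PCP field, and unmapped priorities fall through to 0 (PCP 0). Below is a small standalone model of that lookup with one worked value, in userspace C with made-up names, not kernel code: priority 6 mapped to egress priority 5 stores 5 << 13 = 0xa000, which ORs cleanly into the TCI.

/* Standalone model of the egress_priority_map lookup -- a sketch with
 * hypothetical names, not kernel code.  Sixteen buckets indexed by the
 * low four bits of the skb priority, each a chain searched for an exact
 * priority match.
 */
#include <stdint.h>
#include <stdio.h>

struct mapping {
	uint32_t priority;	/* skb->priority key              */
	uint16_t vlan_qos;	/* (egress prio << 13) & 0xE000   */
	struct mapping *next;
};

static struct mapping *buckets[16];

static uint16_t lookup_qos(uint32_t skprio)
{
	struct mapping *mp;

	for (mp = buckets[skprio & 0xF]; mp; mp = mp->next)
		if (mp->priority == skprio)
			return mp->vlan_qos;
	return 0;				/* unmapped: PCP 0 */
}

int main(void)
{
	static struct mapping m = { .priority = 6, .vlan_qos = 5u << 13 };

	buckets[6 & 0xF] = &m;
	printf("0x%04x\n", lookup_qos(6));	/* prints 0xa000 */
	return 0;
}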

--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h

@@ -5,83 +5,6 @@
 #include <linux/u64_stats_sync.h>
 #include <linux/list.h>
 
-/**
- * struct vlan_priority_tci_mapping - vlan egress priority mappings
- * @priority: skb priority
- * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
- * @next: pointer to next struct
- */
-struct vlan_priority_tci_mapping {
-	u32 priority;
-	u16 vlan_qos;
-	struct vlan_priority_tci_mapping *next;
-};
-
-/**
- * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
- * @rx_packets: number of received packets
- * @rx_bytes: number of received bytes
- * @rx_multicast: number of received multicast packets
- * @tx_packets: number of transmitted packets
- * @tx_bytes: number of transmitted bytes
- * @syncp: synchronization point for 64bit counters
- * @rx_errors: number of rx errors
- * @tx_dropped: number of tx drops
- */
-struct vlan_pcpu_stats {
-	u64 rx_packets;
-	u64 rx_bytes;
-	u64 rx_multicast;
-	u64 tx_packets;
-	u64 tx_bytes;
-	struct u64_stats_sync syncp;
-	u32 rx_errors;
-	u32 tx_dropped;
-};
-
-struct netpoll;
-
-/**
- * struct vlan_dev_priv - VLAN private device data
- * @nr_ingress_mappings: number of ingress priority mappings
- * @ingress_priority_map: ingress priority mappings
- * @nr_egress_mappings: number of egress priority mappings
- * @egress_priority_map: hash of egress priority mappings
- * @vlan_proto: VLAN encapsulation protocol
- * @vlan_id: VLAN identifier
- * @flags: device flags
- * @real_dev: underlying netdevice
- * @real_dev_addr: address of underlying netdevice
- * @dent: proc dir entry
- * @vlan_pcpu_stats: ptr to percpu rx stats
- */
-struct vlan_dev_priv {
-	unsigned int nr_ingress_mappings;
-	u32 ingress_priority_map[8];
-	unsigned int nr_egress_mappings;
-	struct vlan_priority_tci_mapping *egress_priority_map[16];
-
-	__be16 vlan_proto;
-	u16 vlan_id;
-	u16 flags;
-
-	struct net_device *real_dev;
-	unsigned char real_dev_addr[ETH_ALEN];
-
-	struct proc_dir_entry *dent;
-	struct vlan_pcpu_stats __percpu *vlan_pcpu_stats;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	struct netpoll *netpoll;
-#endif
-};
-
-static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
-{
-	return netdev_priv(dev);
-}
-
 /* if this changes, algorithm will have to be reworked because this
  * depends on completely exhausting the VLAN identifier space. Thus
  * it gives constant time look-up, but in many cases it wastes memory.

--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c

@@ -68,31 +68,6 @@ static int vlan_dev_rebuild_header(struct sk_buff *skb)
 	return 0;
 }
 
-static inline u16
-__vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
-{
-	struct vlan_priority_tci_mapping *mp;
-
-	smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
-
-	mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
-	while (mp) {
-		if (mp->priority == skprio) {
-			return mp->vlan_qos; /* This should already be shifted
-					      * to mask correctly with the
-					      * VLAN's TCI */
-		}
-		mp = mp->next;
-	}
-	return 0;
-}
-
-u16 vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
-{
-	return __vlan_dev_get_egress_qos_mask(dev, skprio);
-}
-EXPORT_SYMBOL(vlan_dev_get_egress_qos_mask);
-
 /*
  * Create the VLAN header for an arbitrary protocol layer
  *
@@ -117,7 +92,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 		vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
 		vlan_tci = vlan->vlan_id;
-		vlan_tci |= __vlan_dev_get_egress_qos_mask(dev, skb->priority);
+		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
 		vhdr->h_vlan_TCI = htons(vlan_tci);
 
 		/*
@@ -174,7 +149,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 	    vlan->flags & VLAN_FLAG_REORDER_HDR) {
 		u16 vlan_tci;
 		vlan_tci = vlan->vlan_id;
-		vlan_tci |= __vlan_dev_get_egress_qos_mask(dev, skb->priority);
+		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
 		skb = __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
 	}
 
@@ -259,7 +234,7 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
 	np->vlan_qos = vlan_qos;
 	/* Before inserting this element in hash table, make sure all its fields
 	 * are committed to memory.
-	 * coupled with smp_rmb() in __vlan_dev_get_egress_qos_mask()
+	 * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask()
 	 */
 	smp_wmb();
 	vlan->egress_priority_map[skb_prio & 0xF] = np;
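
Editor's note: the last hunk keeps the write-side comment in sync. vlan_dev_set_egress_priority() fully initialises the new mapping, issues smp_wmb(), and only then links it into the bucket, while the now-inline reader orders its loads with smp_rmb() and never takes a lock. Below is a standalone C11 model of that publish/lookup pairing, an illustration of the idea under that assumption, not the kernel primitives themselves; all names are hypothetical.

/* Standalone model of the smp_wmb()/smp_rmb() pairing -- a sketch in C11
 * atomics, not kernel code.  The writer fully initialises the mapping,
 * orders those stores with a release fence, then publishes the pointer;
 * the reader loads the pointer, orders with an acquire fence, and can
 * then read the fields without any lock.
 */
#include <stdatomic.h>

struct mapping {
	unsigned int   priority;
	unsigned short vlan_qos;
};

static struct mapping *_Atomic slot;	/* stands in for one hash bucket */

static void publish(struct mapping *np, unsigned int prio, unsigned short qos)
{
	np->priority = prio;
	np->vlan_qos = qos;
	atomic_thread_fence(memory_order_release);	/* plays the smp_wmb() role */
	atomic_store_explicit(&slot, np, memory_order_relaxed);
}

static unsigned short lookup(unsigned int prio)
{
	struct mapping *mp = atomic_load_explicit(&slot, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);	/* plays the smp_rmb() role */
	if (mp && mp->priority == prio)
		return mp->vlan_qos;			/* fields are fully visible */
	return 0;
}

int main(void)
{
	static struct mapping m;

	publish(&m, 6, 5u << 13);
	return lookup(6) == 0xa000 ? 0 : 1;	/* exits 0 on the expected value */
}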