net: stmmac: convert to hw_features

This also removes TSO, as it was implemented entirely in software; better to
leave that to the networking core.
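
For background, a rough sketch (an illustrative paraphrase, not code from this
patch and not the exact net/core implementation; sw_gso_fallback() is a made-up
name): when NETIF_F_TSO is not advertised, the stack segments oversized GSO
skbs in software before they ever reach ndo_start_xmit, which is essentially
what the removed stmmac_sw_tso() duplicated by hand:

/* Paraphrase of the stack's software-GSO fallback; the real logic lives in
 * net/core/dev.c and handles requeueing and error paths more carefully. */
static netdev_tx_t sw_gso_fallback(struct sk_buff *skb, struct net_device *dev)
{
        struct sk_buff *segs, *next;

        if (!skb_is_gso(skb))
                return dev->netdev_ops->ndo_start_xmit(skb, dev);

        /* Split into MTU-sized segments using the remaining device features. */
        segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
        if (IS_ERR(segs)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        while (segs) {
                next = segs->next;
                segs->next = NULL;
                dev->netdev_ops->ndo_start_xmit(segs, dev);
                segs = next;
        }
        dev_kfree_skb_any(skb);         /* original skb replaced by the segments */
        return NETDEV_TX_OK;
}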

If the MAC features can be detected at probe time rather than at open time,
stmmac_fix_features() could be simplified further by limiting hw_features at
probe. That would also be better for users, who would no longer see offloads
that can be toggled but never actually take effect.
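
Purely to illustrate that suggestion (not part of this patch;
stmmac_set_hw_features() is a hypothetical helper): if rx_coe and tx_coe were
already known when stmmac_probe() runs, the clamping could happen once there,
and ndo_fix_features would only be left with the MTU-dependent case:

/* Hypothetical probe-time variant, assuming priv->rx_coe and
 * priv->plat->tx_coe are already valid at probe time. */
static void stmmac_set_hw_features(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);

        dev->hw_features = NETIF_F_SG;
        if (priv->plat->tx_coe)
                dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        if (priv->rx_coe)
                dev->hw_features |= NETIF_F_RXCSUM;

        /* Offloads the hardware cannot do are never exposed as togglable,
         * so ethtool shows no switches that silently do nothing. */
        dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
}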

The redundant software fallback for TX checksumming is removed, as the
networking core already handles it.
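
For context, a rough sketch of why that fallback is redundant (paraphrased
behaviour; resolve_tx_csum() is not a real kernel function): the core's
transmit path checks the device features and resolves CHECKSUM_PARTIAL itself
before ndo_start_xmit runs, so a driver without TX csum offload never has to
call skb_checksum_help() on its own:

/* Paraphrased core behaviour for a CHECKSUM_PARTIAL skb. */
static int resolve_tx_csum(struct sk_buff *skb, struct net_device *dev)
{
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;                       /* nothing to resolve */

        if (dev->features & NETIF_F_ALL_CSUM)
                return 0;                       /* hardware inserts the checksum */

        /* No TX csum offload advertised: compute it in software here. */
        return skb_checksum_help(skb);
}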

Signed-off-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
Signed-off-by: David S. Miller <davem@davemloft.net>
Michał Mirosław, 2011-04-09 02:46:55 +00:00 (committed by David S. Miller)
parent 044a890c5a
commit 5e982f3bfd
2 changed files with 23 additions and 74 deletions


@@ -197,13 +197,6 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
         }
 }
 
-static u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
-{
-        struct stmmac_priv *priv = netdev_priv(dev);
-
-        return priv->rx_coe;
-}
-
 static void
 stmmac_get_pauseparam(struct net_device *netdev,
                       struct ethtool_pauseparam *pause)
@@ -358,11 +351,6 @@ static struct ethtool_ops stmmac_ethtool_ops = {
         .get_regs = stmmac_ethtool_gregs,
         .get_regs_len = stmmac_ethtool_get_regs_len,
         .get_link = ethtool_op_get_link,
-        .get_rx_csum = stmmac_ethtool_get_rx_csum,
-        .get_tx_csum = ethtool_op_get_tx_csum,
-        .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-        .get_sg = ethtool_op_get_sg,
-        .set_sg = ethtool_op_set_sg,
         .get_pauseparam = stmmac_get_pauseparam,
         .set_pauseparam = stmmac_set_pauseparam,
         .get_ethtool_stats = stmmac_get_ethtool_stats,
@@ -370,8 +358,6 @@ static struct ethtool_ops stmmac_ethtool_ops = {
         .get_wol = stmmac_get_wol,
         .set_wol = stmmac_set_wol,
         .get_sset_count = stmmac_get_sset_count,
-        .get_tso = ethtool_op_get_tso,
-        .set_tso = ethtool_op_set_tso,
 };
 
 void stmmac_set_ethtool_ops(struct net_device *netdev)


@@ -139,7 +139,6 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
 
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
-static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev);
 
 /**
  * stmmac_verify_args - verify the driver parameters.
@@ -843,6 +842,7 @@ static int stmmac_open(struct net_device *dev)
                 pr_info("stmmac: Rx Checksum Offload Engine supported\n");
         if (priv->plat->tx_coe)
                 pr_info("\tTX Checksum insertion supported\n");
+        netdev_update_features(dev);
 
         /* Initialise the MMC (if present) to disable all interrupts. */
         writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
@@ -927,46 +927,6 @@ static int stmmac_release(struct net_device *dev)
         return 0;
 }
 
-/*
- * To perform emulated hardware segmentation on skb.
- */
-static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
-{
-        struct sk_buff *segs, *curr_skb;
-        int gso_segs = skb_shinfo(skb)->gso_segs;
-
-        /* Estimate the number of fragments in the worst case */
-        if (unlikely(stmmac_tx_avail(priv) < gso_segs)) {
-                netif_stop_queue(priv->dev);
-                TX_DBG(KERN_ERR "%s: TSO BUG! Tx Ring full when queue awake\n",
-                       __func__);
-                if (stmmac_tx_avail(priv) < gso_segs)
-                        return NETDEV_TX_BUSY;
-
-                netif_wake_queue(priv->dev);
-        }
-        TX_DBG("\tstmmac_sw_tso: segmenting: skb %p (len %d)\n",
-               skb, skb->len);
-
-        segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
-        if (IS_ERR(segs))
-                goto sw_tso_end;
-
-        do {
-                curr_skb = segs;
-                segs = segs->next;
-                TX_DBG("\t\tcurrent skb->len: %d, *curr %p,"
-                       "*next %p\n", curr_skb->len, curr_skb, segs);
-                curr_skb->next = NULL;
-                stmmac_xmit(curr_skb, priv->dev);
-        } while (segs);
-
-sw_tso_end:
-        dev_kfree_skb(skb);
-
-        return NETDEV_TX_OK;
-}
-
 static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
                                                struct net_device *dev,
                                                int csum_insertion)
@@ -1044,16 +1004,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                        !skb_is_gso(skb) ? "isn't" : "is");
 #endif
 
-        if (unlikely(skb_is_gso(skb)))
-                return stmmac_sw_tso(priv, skb);
-
-        if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
-                if (unlikely((!priv->plat->tx_coe) ||
-                             (priv->no_csum_insertion)))
-                        skb_checksum_help(skb);
-                else
-                        csum_insertion = 1;
-        }
+        csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
 
         desc = priv->dma_tx + entry;
         first = desc;
@@ -1373,18 +1324,29 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
                 return -EINVAL;
         }
 
+        dev->mtu = new_mtu;
+        netdev_update_features(dev);
+
+        return 0;
+}
+
+static u32 stmmac_fix_features(struct net_device *dev, u32 features)
+{
+        struct stmmac_priv *priv = netdev_priv(dev);
+
+        if (!priv->rx_coe)
+                features &= ~NETIF_F_RXCSUM;
+        if (!priv->plat->tx_coe)
+                features &= ~NETIF_F_ALL_CSUM;
+
         /* Some GMAC devices have a bugged Jumbo frame support that
          * needs to have the Tx COE disabled for oversized frames
          * (due to limited buffer sizes). In this case we disable
          * the TX csum insertionin the TDES and not use SF. */
-        if ((priv->plat->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
-                priv->no_csum_insertion = 1;
-        else
-                priv->no_csum_insertion = 0;
+        if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
+                features &= ~NETIF_F_ALL_CSUM;
 
-        dev->mtu = new_mtu;
-
-        return 0;
+        return features;
 }
 
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
@@ -1464,6 +1426,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
         .ndo_start_xmit = stmmac_xmit,
         .ndo_stop = stmmac_release,
         .ndo_change_mtu = stmmac_change_mtu,
+        .ndo_fix_features = stmmac_fix_features,
         .ndo_set_multicast_list = stmmac_multicast_list,
         .ndo_tx_timeout = stmmac_tx_timeout,
         .ndo_do_ioctl = stmmac_ioctl,
@@ -1494,8 +1457,8 @@ static int stmmac_probe(struct net_device *dev)
         dev->netdev_ops = &stmmac_netdev_ops;
         stmmac_set_ethtool_ops(dev);
 
-        dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA |
-                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+        dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+        dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
         dev->watchdog_timeo = msecs_to_jiffies(watchdog);
 #ifdef STMMAC_VLAN_TAG_USED
         /* Both mac100 and gmac support receive VLAN tag detection */