Merge branch 'sh_eth'

Ben Hutchings says:

====================
Fixes for sh_eth #3

I'm continuing review and testing of Ethernet support on the R-Car H2
chip.  This series fixes the last of the more serious issues I've found.

These are not tested on any of the other supported chips.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committer: David S. Miller <davem@davemloft.net>
Date:      2015-01-27 00:18:57 -08:00
commit 225776098b

--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c

@@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
 	[TSU_ADRL31]	= 0x01fc,
 };
 
+static void sh_eth_rcv_snd_disable(struct net_device *ndev);
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
+
 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
 {
 	return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -1120,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
 	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+	dma_addr_t dma_addr;
 
 	mdp->cur_rx = 0;
 	mdp->cur_tx = 0;
@@ -1133,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		/* skb */
 		mdp->rx_skbuff[i] = NULL;
 		skb = netdev_alloc_skb(ndev, skbuff_size);
-		mdp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
 		sh_eth_set_receive_align(skb);
@@ -1142,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		rxdesc = &mdp->rx_ring[i];
 		/* The size of the buffer is a multiple of 16 bytes. */
 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
-		dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
-			       DMA_FROM_DEVICE);
-		rxdesc->addr = virt_to_phys(skb->data);
+		dma_addr = dma_map_single(&ndev->dev, skb->data,
+					  rxdesc->buffer_length,
+					  DMA_FROM_DEVICE);
+		if (dma_mapping_error(&ndev->dev, dma_addr)) {
+			kfree_skb(skb);
+			break;
+		}
+		mdp->rx_skbuff[i] = skb;
+		rxdesc->addr = dma_addr;
 		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 
 		/* Rx descriptor address set */
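
The hunk above is the heart of this fix: rxdesc->addr was being derived with virt_to_phys(), which is not a DMA address on systems with an IOMMU or bounce buffering, and the handle actually returned by dma_map_single() was discarded unchecked. A minimal sketch of the corrected pattern as a standalone illustration (example_fill_rx() and its parameters are ours, not the driver's):

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative only: map one RX buffer and publish it to a descriptor
 * address slot only once the mapping is known to be good.
 */
static int example_fill_rx(struct net_device *ndev, struct sk_buff *skb,
			   u32 buf_len, dma_addr_t *desc_addr)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
				  DMA_FROM_DEVICE);
	/* dma_map_single() can fail; the handle must be checked,
	 * and virt_to_phys() is never a substitute for it.
	 */
	if (dma_mapping_error(&ndev->dev, dma_addr)) {
		kfree_skb(skb);		/* buffer never reaches the ring */
		return -ENOMEM;
	}
	*desc_addr = dma_addr;		/* hardware may see it from here on */
	return 0;
}

Note also the ordering fix folded in: mdp->rx_skbuff[i] is now assigned only after the mapping succeeds, so the free path never has to unmap a buffer that was never successfully mapped.
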
@@ -1358,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
 	return ret;
 }
 
+static void sh_eth_dev_exit(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int i;
+
+	/* Deactivate all TX descriptors, so DMA should stop at next
+	 * packet boundary if it's currently running
+	 */
+	for (i = 0; i < mdp->num_tx_ring; i++)
+		mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);
+
+	/* Disable TX FIFO egress to MAC */
+	sh_eth_rcv_snd_disable(ndev);
+
+	/* Stop RX DMA at next packet boundary */
+	sh_eth_write(ndev, 0, EDRRR);
+
+	/* Aside from TX DMA, we can't tell when the hardware is
+	 * really stopped, so we need to reset to make sure.
+	 * Before doing that, wait for long enough to *probably*
+	 * finish transmitting the last packet and poll stats.
+	 */
+	msleep(2); /* max frame time at 10 Mbps < 1250 us */
+	sh_eth_get_stats(ndev);
+	sh_eth_reset(ndev);
+}
+
 /* free Tx skb function */
 static int sh_eth_txfree(struct net_device *ndev)
 {
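
The magic numbers in sh_eth_dev_exit() above check out: the longest standard frame occupies 1538 byte times on the wire (1518 bytes of frame plus 8 of preamble/SFD and 12 of inter-frame gap), i.e. 1538 * 8 = 12304 bit times, which at 10 Mbit/s is about 1231 us, consistent with the "< 1250 us" comment. msleep(2) therefore comfortably covers one last in-flight frame at the slowest supported link speed before the MAC is reset.
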
@@ -1402,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 	u16 pkt_len = 0;
 	u32 desc_status;
 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+	dma_addr_t dma_addr;
 
 	boguscnt = min(boguscnt, *quota);
 	limit = boguscnt;
@@ -1449,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 			mdp->rx_skbuff[entry] = NULL;
 			if (mdp->cd->rpadir)
 				skb_reserve(skb, NET_IP_ALIGN);
-			dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
-						ALIGN(mdp->rx_buf_sz, 16),
-						DMA_FROM_DEVICE);
+			dma_unmap_single(&ndev->dev, rxdesc->addr,
+					 ALIGN(mdp->rx_buf_sz, 16),
+					 DMA_FROM_DEVICE);
 			skb_put(skb, pkt_len);
 			skb->protocol = eth_type_trans(skb, ndev);
 			netif_receive_skb(skb);
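
Replacing dma_sync_single_for_cpu() with dma_unmap_single() above is a correctness fix, not a cleanup: this skb leaves the driver for good via netif_receive_skb(), and under the streaming DMA API a mapping whose buffer the device will never touch again must be unmapped, not merely synced; syncing only transfers ownership to the CPU temporarily and leaks the mapping if nothing ever unmaps it. A compressed sketch of the completion path, with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative only: RX completion under the streaming DMA API. The
 * buffer goes to the stack and is never handed back to the device, so
 * the mapping must be released entirely.
 */
static void example_rx_complete(struct net_device *ndev, struct sk_buff *skb,
				dma_addr_t dma_addr, size_t buf_len,
				u16 pkt_len)
{
	/* final CPU ownership: release the mapping, don't just sync it */
	dma_unmap_single(&ndev->dev, dma_addr, buf_len, DMA_FROM_DEVICE);
	skb_put(skb, pkt_len);
	skb->protocol = eth_type_trans(skb, ndev);
	netif_receive_skb(skb);	/* skb now belongs to the stack */
}
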
@@ -1471,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 		if (mdp->rx_skbuff[entry] == NULL) {
 			skb = netdev_alloc_skb(ndev, skbuff_size);
-			mdp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */
 			sh_eth_set_receive_align(skb);
-			dma_map_single(&ndev->dev, skb->data,
-				       rxdesc->buffer_length, DMA_FROM_DEVICE);
+			dma_addr = dma_map_single(&ndev->dev, skb->data,
+						  rxdesc->buffer_length,
+						  DMA_FROM_DEVICE);
+			if (dma_mapping_error(&ndev->dev, dma_addr)) {
+				kfree_skb(skb);
+				break;
+			}
+			mdp->rx_skbuff[entry] = skb;
 
 			skb_checksum_none_assert(skb);
-			rxdesc->addr = virt_to_phys(skb->data);
+			rxdesc->addr = dma_addr;
 		}
 		if (entry >= mdp->num_rx_ring - 1)
 			rxdesc->status |=
@@ -1575,7 +1617,6 @@ ignore_link:
 		if (intr_status & EESR_RFRMER) {
 			/* Receive Frame Overflow int */
 			ndev->stats.rx_frame_errors++;
-			netif_err(mdp, rx_err, ndev, "Receive Abort\n");
 		}
 	}
 
@@ -1594,13 +1635,11 @@ ignore_link:
 	if (intr_status & EESR_RDE) {
 		/* Receive Descriptor Empty int */
 		ndev->stats.rx_over_errors++;
-		netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
 	}
 
 	if (intr_status & EESR_RFE) {
 		/* Receive FIFO Overflow int */
 		ndev->stats.rx_fifo_errors++;
-		netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
 	}
 
 	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
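
The netif_err() calls removed above fired once per error interrupt; during a receive overrun that is exactly when the CPU can least afford console output, and the rx_*_errors counters already record the events. If a diagnostic message were still wanted, the conventional compromise is to rate-limit it; a sketch of what that could look like (not part of this series):

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
		/* hypothetical: log, but at most a burst per interval */
		if (net_ratelimit())
			netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
	}
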
@@ -1989,9 +2028,7 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
 		napi_synchronize(&mdp->napi);
 		sh_eth_write(ndev, 0x0000, EESIPR);
 
-		/* Stop the chip's Tx and Rx processes. */
-		sh_eth_write(ndev, 0, EDTRR);
-		sh_eth_write(ndev, 0, EDRRR);
+		sh_eth_dev_exit(ndev);
 
 		/* Free all the skbuffs in the Rx queue. */
 		sh_eth_ring_free(ndev);
@@ -2149,6 +2186,10 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 				 skb->len + 2);
 	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
 				      DMA_TO_DEVICE);
+	if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
 	txdesc->buffer_length = skb->len;
 
 	if (entry >= mdp->num_tx_ring - 1)
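
Note the return value in the TX fix above: when the mapping fails, the skb is freed and the function still returns NETDEV_TX_OK. That is the correct contract: NETDEV_TX_OK tells the core that the skb has been consumed (here, dropped), whereas NETDEV_TX_BUSY would make the queueing layer requeue and retry a packet whose mapping would most likely keep failing.
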
@@ -2210,11 +2251,8 @@ static int sh_eth_close(struct net_device *ndev)
 	napi_disable(&mdp->napi);
 	sh_eth_write(ndev, 0x0000, EESIPR);
 
-	/* Stop the chip's Tx and Rx processes. */
-	sh_eth_write(ndev, 0, EDTRR);
-	sh_eth_write(ndev, 0, EDRRR);
+	sh_eth_dev_exit(ndev);
 
-	sh_eth_get_stats(ndev);
 	/* PHY Disconnect */
 	if (mdp->phydev) {
 		phy_stop(mdp->phydev);