mirror of
https://github.com/torvalds/linux.git
synced 2024-11-16 09:02:00 +00:00
Merge branch 'mvpp2-fixes'
Marcin Wojtas says: ==================== Marvell Armada 375 mvpp2 fixes During my work on the mvneta driver I revised mvpp2, and it turned out that the initial version of the Marvell Armada 375 SoC driver contained bugs around DMA-unmapping in both ingress and egress paths - not all buffers were unmapped in the TX path and none(!) in the RX path. The three patches that I send fix this situation. Any feedback would be welcome. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
6001f340dd
@ -3413,16 +3413,23 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
|
||||
}
|
||||
|
||||
/* Free all buffers from the pool */
|
||||
static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
|
||||
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
|
||||
struct mvpp2_bm_pool *bm_pool)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < bm_pool->buf_num; i++) {
|
||||
dma_addr_t buf_phys_addr;
|
||||
u32 vaddr;
|
||||
|
||||
/* Get buffer virtual address (indirect access) */
|
||||
mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
|
||||
buf_phys_addr = mvpp2_read(priv,
|
||||
MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
|
||||
vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
|
||||
|
||||
dma_unmap_single(dev, buf_phys_addr,
|
||||
bm_pool->buf_size, DMA_FROM_DEVICE);
|
||||
|
||||
if (!vaddr)
|
||||
break;
|
||||
dev_kfree_skb_any((struct sk_buff *)vaddr);
|
||||
@ -3439,7 +3446,7 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
|
||||
{
|
||||
u32 val;
|
||||
|
||||
mvpp2_bm_bufs_free(priv, bm_pool);
|
||||
mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
|
||||
if (bm_pool->buf_num) {
|
||||
WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
|
||||
return 0;
|
||||
@ -3692,7 +3699,8 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
|
||||
MVPP2_BM_LONG_BUF_NUM :
|
||||
MVPP2_BM_SHORT_BUF_NUM;
|
||||
else
|
||||
mvpp2_bm_bufs_free(port->priv, new_pool);
|
||||
mvpp2_bm_bufs_free(port->dev->dev.parent,
|
||||
port->priv, new_pool);
|
||||
|
||||
new_pool->pkt_size = pkt_size;
|
||||
|
||||
@ -3756,7 +3764,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
|
||||
int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
|
||||
|
||||
/* Update BM pool with new buffer size */
|
||||
mvpp2_bm_bufs_free(port->priv, port_pool);
|
||||
mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
|
||||
if (port_pool->buf_num) {
|
||||
WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
|
||||
return -EIO;
|
||||
@ -4401,11 +4409,10 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
|
||||
|
||||
mvpp2_txq_inc_get(txq_pcpu);
|
||||
|
||||
if (!skb)
|
||||
continue;
|
||||
|
||||
dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
|
||||
skb_headlen(skb), DMA_TO_DEVICE);
|
||||
if (!skb)
|
||||
continue;
|
||||
dev_kfree_skb_any(skb);
|
||||
}
|
||||
}
|
||||
@ -5092,7 +5099,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
|
||||
struct mvpp2_rx_queue *rxq)
|
||||
{
|
||||
struct net_device *dev = port->dev;
|
||||
int rx_received, rx_filled, i;
|
||||
int rx_received;
|
||||
int rx_done = 0;
|
||||
u32 rcvd_pkts = 0;
|
||||
u32 rcvd_bytes = 0;
|
||||
|
||||
@ -5101,17 +5109,18 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
|
||||
if (rx_todo > rx_received)
|
||||
rx_todo = rx_received;
|
||||
|
||||
rx_filled = 0;
|
||||
for (i = 0; i < rx_todo; i++) {
|
||||
while (rx_done < rx_todo) {
|
||||
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
|
||||
struct mvpp2_bm_pool *bm_pool;
|
||||
struct sk_buff *skb;
|
||||
dma_addr_t phys_addr;
|
||||
u32 bm, rx_status;
|
||||
int pool, rx_bytes, err;
|
||||
|
||||
rx_filled++;
|
||||
rx_done++;
|
||||
rx_status = rx_desc->status;
|
||||
rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
|
||||
phys_addr = rx_desc->buf_phys_addr;
|
||||
|
||||
bm = mvpp2_bm_cookie_build(rx_desc);
|
||||
pool = mvpp2_bm_cookie_pool_get(bm);
|
||||
@ -5128,8 +5137,10 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
|
||||
* comprised by the RX descriptor.
|
||||
*/
|
||||
if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
|
||||
err_drop_frame:
|
||||
dev->stats.rx_errors++;
|
||||
mvpp2_rx_error(port, rx_desc);
|
||||
/* Return the buffer to the pool */
|
||||
mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
|
||||
rx_desc->buf_cookie);
|
||||
continue;
|
||||
@ -5137,6 +5148,15 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
|
||||
|
||||
skb = (struct sk_buff *)rx_desc->buf_cookie;
|
||||
|
||||
err = mvpp2_rx_refill(port, bm_pool, bm, 0);
|
||||
if (err) {
|
||||
netdev_err(port->dev, "failed to refill BM pools\n");
|
||||
goto err_drop_frame;
|
||||
}
|
||||
|
||||
dma_unmap_single(dev->dev.parent, phys_addr,
|
||||
bm_pool->buf_size, DMA_FROM_DEVICE);
|
||||
|
||||
rcvd_pkts++;
|
||||
rcvd_bytes += rx_bytes;
|
||||
atomic_inc(&bm_pool->in_use);
|
||||
@ -5147,12 +5167,6 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
|
||||
mvpp2_rx_csum(port, rx_status, skb);
|
||||
|
||||
napi_gro_receive(&port->napi, skb);
|
||||
|
||||
err = mvpp2_rx_refill(port, bm_pool, bm, 0);
|
||||
if (err) {
|
||||
netdev_err(port->dev, "failed to refill BM pools\n");
|
||||
rx_filled--;
|
||||
}
|
||||
}
|
||||
|
||||
if (rcvd_pkts) {
|
||||
@ -5166,7 +5180,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
|
||||
|
||||
/* Update Rx queue management counters */
|
||||
wmb();
|
||||
mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled);
|
||||
mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
|
||||
|
||||
return rx_todo;
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user