net: socionext: netsec: always grab descriptor lock
Always acquire the TX descriptor spinlock even if an XDP program is not
loaded on the netsec device, since ndo_xdp_xmit can run concurrently with
netsec_netdev_start_xmit and netsec_clean_tx_dring. This can happen when an
XDP program is loaded on a different device (e.g. virtio-net) and
xdp_do_redirect_map/xdp_do_redirect_slow redirects frames to netsec even
though no XDP program is loaded on it.

Fixes: ba2b232108 ("net: netsec: add XDP support")
Tested-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
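For context, this is the kind of setup the commit message describes: an XDP program attached to a different device (e.g. virtio-net) redirects frames through a devmap, and if the devmap entry points at a netsec port the kernel ends up calling netsec's ndo_xdp_xmit even though netsec itself has no XDP program loaded. The sketch below is purely illustrative and is not part of this patch; the map name tx_port and the program name are made up, while the map type (BPF_MAP_TYPE_DEVMAP) and the bpf_redirect_map() helper are standard kernel/libbpf API.

/* Hypothetical XDP program for the *redirecting* device, not for netsec. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);	/* ifindex of the egress device, e.g. a netsec port */
} tx_port SEC(".maps");

SEC("xdp")
int redirect_to_netsec(struct xdp_md *ctx)
{
	/* Redirect every frame to the ifindex stored at key 0. If that entry
	 * names a netsec interface, the redirect path invokes netsec's
	 * ndo_xdp_xmit, so its TX ring is written concurrently with the
	 * regular start_xmit and TX-completion paths.
	 */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";

Loading such a program on the other device (for example with ip link set dev <other-dev> xdp obj prog.o sec xdp) is enough to make netsec's TX descriptor ring a shared resource, which is why the lock below can no longer be conditional on dring->is_xdp.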
commit 55131dec2b (parent 68ce6688a5)
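The hunks below simply make the lock unconditional in the affected paths. As a simplified model of the resulting behaviour (the sketch_* names are hypothetical, the real driver functions are much longer, and netsec_xdp_xmit is not touched by this diff; it is only assumed here to serialize on the same ring lock):

/* Simplified sketch, not the driver's literal code: two producers and one
 * consumer share a single TX descriptor ring, so the ring lock is taken on
 * every path regardless of whether an XDP program is attached locally.
 */
#include <linux/spinlock.h>
#include <linux/netdevice.h>

struct sketch_tx_ring {
	spinlock_t lock;
	/* head/tail indices, descriptor memory, ... */
};

/* .ndo_start_xmit path: runs in process/softirq context for skb traffic,
 * so it disables bottom halves while holding the lock.
 */
static netdev_tx_t sketch_start_xmit(struct sketch_tx_ring *ring)
{
	spin_lock_bh(&ring->lock);	/* always, XDP program loaded or not */
	/* reserve a descriptor, map the buffer, write the descriptor */
	spin_unlock_bh(&ring->lock);
	return NETDEV_TX_OK;
}

/* .ndo_xdp_xmit path: entered via xdp_do_redirect() from a program running
 * on a different device, so it can race with the path above.
 */
static int sketch_xdp_xmit(struct sketch_tx_ring *ring, int n)
{
	spin_lock(&ring->lock);		/* already in softirq context */
	/* queue n xdp_frames on the same ring */
	spin_unlock(&ring->lock);
	return n;
}

/* TX completion (NAPI poll): reclaims descriptors written by either producer. */
static bool sketch_clean_tx(struct sketch_tx_ring *ring)
{
	spin_lock(&ring->lock);
	/* walk the ring and free completed entries */
	spin_unlock(&ring->lock);
	return true;
}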
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -282,7 +282,6 @@ struct netsec_desc_ring {
 	void *vaddr;
 	u16 head, tail;
 	u16 xdp_xmit; /* netsec_xdp_xmit packets */
-	bool is_xdp;
 	struct page_pool *page_pool;
 	struct xdp_rxq_info xdp_rxq;
 	spinlock_t lock; /* XDP tx queue locking */
@@ -634,8 +633,7 @@ static bool netsec_clean_tx_dring(struct netsec_priv *priv)
 	unsigned int bytes;
 	int cnt = 0;
 
-	if (dring->is_xdp)
-		spin_lock(&dring->lock);
+	spin_lock(&dring->lock);
 
 	bytes = 0;
 	entry = dring->vaddr + DESC_SZ * tail;
@@ -682,8 +680,8 @@ next:
 		entry = dring->vaddr + DESC_SZ * tail;
 		cnt++;
 	}
-	if (dring->is_xdp)
-		spin_unlock(&dring->lock);
+
+	spin_unlock(&dring->lock);
 
 	if (!cnt)
 		return false;
@@ -799,9 +797,6 @@ static void netsec_set_tx_de(struct netsec_priv *priv,
 	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
 	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
 	de->attr = attr;
-	/* under spin_lock if using XDP */
-	if (!dring->is_xdp)
-		dma_wmb();
 
 	dring->desc[idx] = *desc;
 	if (desc->buf_type == TYPE_NETSEC_SKB)
@@ -1123,12 +1118,10 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
 	u16 tso_seg_len = 0;
 	int filled;
 
-	if (dring->is_xdp)
-		spin_lock_bh(&dring->lock);
+	spin_lock_bh(&dring->lock);
 	filled = netsec_desc_used(dring);
 	if (netsec_check_stop_tx(priv, filled)) {
-		if (dring->is_xdp)
-			spin_unlock_bh(&dring->lock);
+		spin_unlock_bh(&dring->lock);
 		net_warn_ratelimited("%s %s Tx queue full\n",
 				     dev_name(priv->dev), ndev->name);
 		return NETDEV_TX_BUSY;
@@ -1161,8 +1154,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
 	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
 					  skb_headlen(skb), DMA_TO_DEVICE);
 	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
-		if (dring->is_xdp)
-			spin_unlock_bh(&dring->lock);
+		spin_unlock_bh(&dring->lock);
 		netif_err(priv, drv, priv->ndev,
 			  "%s: DMA mapping failed\n", __func__);
 		ndev->stats.tx_dropped++;
@@ -1177,8 +1169,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
 	netdev_sent_queue(priv->ndev, skb->len);
 
 	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
-	if (dring->is_xdp)
-		spin_unlock_bh(&dring->lock);
+	spin_unlock_bh(&dring->lock);
 	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
 
 	return NETDEV_TX_OK;
@@ -1262,7 +1253,6 @@ err:
 static void netsec_setup_tx_dring(struct netsec_priv *priv)
 {
 	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
-	struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
 	int i;
 
 	for (i = 0; i < DESC_NUM; i++) {
@@ -1275,12 +1265,6 @@ static void netsec_setup_tx_dring(struct netsec_priv *priv)
 		 */
 		de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
 	}
-
-	if (xdp_prog)
-		dring->is_xdp = true;
-	else
-		dring->is_xdp = false;
-
 }
 
 static int netsec_setup_rx_dring(struct netsec_priv *priv)