mlx5: basic XDP_REDIRECT forward support
This implements basic XDP redirect support in mlx5 driver.
Notice that the ndo_xdp_xmit() is NOT implemented, because that API
need some changes that this patchset is working towards.
The main purpose of this patch is to have different drivers doing
XDP_REDIRECT to show how different memory models behave in a cross
driver world.
Update(pre-RFCv2 Tariq): Need to DMA unmap page before xdp_do_redirect,
as the return API does not exist yet to keep this mapped.
Update(pre-RFCv3 Saeed): Don't mix XDP_TX and XDP_REDIRECT flushing,
introduce xdpsq.db.redirect_flush boolean.
V9: Adjust for commit 121e892754
("net/mlx5e: Refactor RQ XDP_TX indication")
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Acked-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
897ddc2483
commit
5168d73201
@ -392,6 +392,7 @@ struct mlx5e_xdpsq {
|
|||||||
struct {
|
struct {
|
||||||
struct mlx5e_dma_info *di;
|
struct mlx5e_dma_info *di;
|
||||||
bool doorbell;
|
bool doorbell;
|
||||||
|
bool redirect_flush;
|
||||||
} db;
|
} db;
|
||||||
|
|
||||||
/* read only */
|
/* read only */
|
||||||
|
@ -236,14 +236,20 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void mlx5e_page_dma_unmap(struct mlx5e_rq *rq,
|
||||||
|
struct mlx5e_dma_info *dma_info)
|
||||||
|
{
|
||||||
|
dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
|
||||||
|
rq->buff.map_dir);
|
||||||
|
}
|
||||||
|
|
||||||
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
|
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
|
||||||
bool recycle)
|
bool recycle)
|
||||||
{
|
{
|
||||||
if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
|
if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
|
mlx5e_page_dma_unmap(rq, dma_info);
|
||||||
rq->buff.map_dir);
|
|
||||||
put_page(dma_info->page);
|
put_page(dma_info->page);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -800,9 +806,10 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
|
|||||||
struct mlx5e_dma_info *di,
|
struct mlx5e_dma_info *di,
|
||||||
void *va, u16 *rx_headroom, u32 *len)
|
void *va, u16 *rx_headroom, u32 *len)
|
||||||
{
|
{
|
||||||
const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
|
struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
|
||||||
struct xdp_buff xdp;
|
struct xdp_buff xdp;
|
||||||
u32 act;
|
u32 act;
|
||||||
|
int err;
|
||||||
|
|
||||||
if (!prog)
|
if (!prog)
|
||||||
return false;
|
return false;
|
||||||
@ -823,6 +830,15 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
|
|||||||
if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
|
if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
|
||||||
trace_xdp_exception(rq->netdev, prog, act);
|
trace_xdp_exception(rq->netdev, prog, act);
|
||||||
return true;
|
return true;
|
||||||
|
case XDP_REDIRECT:
|
||||||
|
/* When XDP enabled then page-refcnt==1 here */
|
||||||
|
err = xdp_do_redirect(rq->netdev, &xdp, prog);
|
||||||
|
if (!err) {
|
||||||
|
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
|
||||||
|
rq->xdpsq.db.redirect_flush = true;
|
||||||
|
mlx5e_page_dma_unmap(rq, di);
|
||||||
|
}
|
||||||
|
return true;
|
||||||
default:
|
default:
|
||||||
bpf_warn_invalid_xdp_action(act);
|
bpf_warn_invalid_xdp_action(act);
|
||||||
case XDP_ABORTED:
|
case XDP_ABORTED:
|
||||||
@ -1140,6 +1156,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
|
|||||||
xdpsq->db.doorbell = false;
|
xdpsq->db.doorbell = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (xdpsq->db.redirect_flush) {
|
||||||
|
xdp_do_flush_map();
|
||||||
|
xdpsq->db.redirect_flush = false;
|
||||||
|
}
|
||||||
|
|
||||||
mlx5_cqwq_update_db_record(&cq->wq);
|
mlx5_cqwq_update_db_record(&cq->wq);
|
||||||
|
|
||||||
/* ensure cq space is freed before enabling more cqes */
|
/* ensure cq space is freed before enabling more cqes */
|
||||||
|
Loading…
Reference in New Issue
Block a user