bnxt: refactor bnxt_rx_xdp to separate xdp_init_buff/xdp_prepare_buff

Move initialization of the xdp_buff outside of bnxt_rx_xdp() to prepare
for allowing bnxt_rx_xdp() to operate on multi-buffer xdp_buffs.

v2: Fix uninitialized variable warnings in bnxt_xdp.c.
v3: Add new define BNXT_PAGE_MODE_BUF_SIZE.

Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit b231c3f341
parent d1f66ac69f
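For readers unfamiliar with the pair of helpers named in the subject, here is a minimal, driver-agnostic sketch of the xdp_init_buff()/xdp_prepare_buff() split this refactor isolates. It is not part of the commit: the function and parameter names are illustrative; only xdp_init_buff(), xdp_prepare_buff() and bpf_prog_run_xdp() are the real kernel APIs.

#include <linux/filter.h>
#include <net/xdp.h>

/* Sketch only: ring-level facts (total frame size, rxq binding) go into
 * xdp_init_buff(); per-packet facts (headroom, data length) go into
 * xdp_prepare_buff() just before the program runs.
 */
static u32 example_run_xdp(struct bpf_prog *prog, struct xdp_rxq_info *rxq,
			   unsigned char *hard_start, unsigned int headroom,
			   unsigned int pkt_len, u32 frame_sz)
{
	struct xdp_buff xdp;

	xdp_init_buff(&xdp, frame_sz, rxq);
	xdp_prepare_buff(&xdp, hard_start, headroom, pkt_len, false);

	return bpf_prog_run_xdp(prog, &xdp);
}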
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1731,6 +1731,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	u8 *data_ptr, agg_bufs, cmp_type;
 	dma_addr_t dma_addr;
 	struct sk_buff *skb;
+	struct xdp_buff xdp;
 	u32 flags, misc;
 	void *data;
 	int rc = 0;
@@ -1839,11 +1840,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	len = flags >> RX_CMP_LEN_SHIFT;
 	dma_addr = rx_buf->mapping;
 
-	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
-		rc = 1;
-		goto next_rx;
+	if (bnxt_xdp_attached(bp, rxr)) {
+		bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
+		if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
+			rc = 1;
+			goto next_rx;
+		}
 	}
 
 	if (len <= bp->rx_copy_thresh) {
 		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
 		bnxt_reuse_rx_data(rxr, cons, data);
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -591,10 +591,12 @@ struct nqe_cn {
 #define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
 
 #define BNXT_MAX_MTU		9500
-#define BNXT_MAX_PAGE_MODE_MTU	\
+#define BNXT_PAGE_MODE_BUF_SIZE \
 	((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN -	\
-	 XDP_PACKET_HEADROOM - \
-	 SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
+	 XDP_PACKET_HEADROOM)
+#define BNXT_MAX_PAGE_MODE_MTU	\
+	BNXT_PAGE_MODE_BUF_SIZE - \
+	SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info))
 
 #define BNXT_MIN_PKT_SIZE	52
 
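For orientation, a worked example of the reworked macros under common but configuration-dependent assumptions (4 KiB PAGE_SIZE, VLAN_ETH_HLEN = 18, NET_IP_ALIGN = 2, XDP_PACKET_HEADROOM = 256, SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) = 320):

	BNXT_PAGE_MODE_BUF_SIZE = 4096 - 18 - 2 - 256 = 3820
	BNXT_MAX_PAGE_MODE_MTU  = 3820 - 320          = 3500

That is, the buffer handed to the XDP program spans roughly 3820 bytes of the page, while the MTU advertised in page mode also leaves room for the skb_shared_info tail.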
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -106,18 +106,44 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 	}
 }
 
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
+
+	return !!xdp_prog;
+}
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+			u16 cons, u8 **data_ptr, unsigned int *len,
+			struct xdp_buff *xdp)
+{
+	struct bnxt_sw_rx_bd *rx_buf;
+	struct pci_dev *pdev;
+	dma_addr_t mapping;
+	u32 offset;
+
+	pdev = bp->pdev;
+	rx_buf = &rxr->rx_buf_ring[cons];
+	offset = bp->rx_offset;
+
+	mapping = rx_buf->mapping - bp->rx_dma_offset;
+	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
+
+	xdp_init_buff(xdp, BNXT_PAGE_MODE_BUF_SIZE + offset, &rxr->xdp_rxq);
+	xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
+}
+
 /* returns the following:
  * true    - packet consumed by XDP and new buffer is allocated.
  * false   - packet should be passed to the stack.
  */
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
-		 struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
+		 struct xdp_buff xdp, struct page *page, unsigned int *len, u8 *event)
 {
 	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
 	struct bnxt_tx_ring_info *txr;
 	struct bnxt_sw_rx_bd *rx_buf;
 	struct pci_dev *pdev;
-	struct xdp_buff xdp;
 	dma_addr_t mapping;
 	void *orig_data;
 	u32 tx_avail;
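As a usage sketch (mirroring the bnxt.c call-site change above; error handling and the rest of bnxt_rx_pkt() are omitted), the RX path wires the new helpers together like this:

	struct xdp_buff xdp;

	if (bnxt_xdp_attached(bp, rxr)) {
		/* sync the buffer for the CPU and fill in the xdp_buff ... */
		bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
		/* ... then run the program; true means XDP consumed the packet */
		if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
			rc = 1;
			goto next_rx;
		}
	}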
@@ -128,16 +154,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		return false;
 
 	pdev = bp->pdev;
-	rx_buf = &rxr->rx_buf_ring[cons];
 	offset = bp->rx_offset;
 
-	mapping = rx_buf->mapping - bp->rx_dma_offset;
-	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
-
 	txr = rxr->bnapi->tx_ring;
 	/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
-	xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
-	xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
 	orig_data = xdp.data;
 
 	act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -150,15 +170,17 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		*event &= ~BNXT_RX_EVENT;
 
 	*len = xdp.data_end - xdp.data;
-	if (orig_data != xdp.data) {
+	if (orig_data != xdp.data)
 		offset = xdp.data - xdp.data_hard_start;
-		*data_ptr = xdp.data_hard_start + offset;
-	}
+
 	switch (act) {
 	case XDP_PASS:
 		return false;
 
 	case XDP_TX:
+		rx_buf = &rxr->rx_buf_ring[cons];
+		mapping = rx_buf->mapping - bp->rx_dma_offset;
+
 		if (tx_avail < 1) {
 			trace_xdp_exception(bp->dev, xdp_prog, act);
 			bnxt_reuse_rx_data(rxr, cons, page);
@@ -177,6 +199,8 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		 * redirect is coming from a frame received by the
 		 * bnxt_en driver.
 		 */
+		rx_buf = &rxr->rx_buf_ring[cons];
+		mapping = rx_buf->mapping - bp->rx_dma_offset;
 		dma_unmap_page_attrs(&pdev->dev, mapping,
 				     PAGE_SIZE, bp->rx_dir,
 				     DMA_ATTR_WEAK_ORDERING);
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -17,10 +17,15 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
 			dma_addr_t mapping, u32 len);
 void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
-		 struct page *page, u8 **data_ptr, unsigned int *len,
+		 struct xdp_buff xdp, struct page *page, unsigned int *len,
 		 u8 *event);
 int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
 int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
 		  struct xdp_frame **frames, u32 flags);
 
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+			u16 cons, u8 **data_ptr, unsigned int *len,
+			struct xdp_buff *xdp);
 #endif