diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 34f8c439f42f..4d45f4573b57 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -50,14 +50,14 @@ #include "nfp_net_ctrl.h" -#define nn_err(nn, fmt, args...) netdev_err((nn)->netdev, fmt, ## args) -#define nn_warn(nn, fmt, args...) netdev_warn((nn)->netdev, fmt, ## args) -#define nn_info(nn, fmt, args...) netdev_info((nn)->netdev, fmt, ## args) -#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->netdev, fmt, ## args) -#define nn_warn_ratelimit(nn, fmt, args...) \ +#define nn_err(nn, fmt, args...) netdev_err((nn)->dp.netdev, fmt, ## args) +#define nn_warn(nn, fmt, args...) netdev_warn((nn)->dp.netdev, fmt, ## args) +#define nn_info(nn, fmt, args...) netdev_info((nn)->dp.netdev, fmt, ## args) +#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->dp.netdev, fmt, ## args) +#define nn_dp_warn(dp, fmt, args...) \ do { \ if (unlikely(net_ratelimit())) \ - netdev_warn((nn)->netdev, fmt, ## args); \ + netdev_warn((dp)->netdev, fmt, ## args); \ } while (0) /* Max time to wait for NFP to respond on updates (in seconds) */ @@ -316,8 +316,6 @@ struct nfp_net_rx_buf { * @rxds: Virtual address of FL/RX ring in host memory * @dma: DMA address of the FL/RX ring * @size: Size, in bytes, of the FL/RX ring (needed to free) - * @bufsz: Buffer allocation size for convenience of management routines - * (NOTE: this is in second cache line, do not use on fast path!) */ struct nfp_net_rx_ring { struct nfp_net_r_vector *r_vec; @@ -339,7 +337,6 @@ struct nfp_net_rx_ring { dma_addr_t dma; unsigned int size; - unsigned int bufsz; } ____cacheline_aligned; /** @@ -434,18 +431,73 @@ struct nfp_stat_pair { }; /** - * struct nfp_net - NFP network device structure + * struct nfp_net_dp - NFP network device datapath data structure * @dev: Backpointer to struct device - * @netdev: Backpointer to net_device structure - * @is_vf: Is the driver attached to a VF? + * @netdev: Backpointer to net_device structure + * @is_vf: Is the driver attached to a VF? * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf * @bpf_offload_xdp: Offloaded BPF program is XDP * @chained_metadata_format: Firmware will use new metadata format - * @ctrl: Local copy of the control register/word. - * @fl_bufsz: Currently configured size of the freelist buffers + * @rx_dma_dir: Mapping direction for RX buffers + * @rx_dma_off: Offset at which DMA data starts in RX buffers (for XDP headroom) * @rx_offset: Offset in the RX buffers where packet data starts + * @ctrl: Local copy of the control register/word. 
+ * @fl_bufsz: Currently configured size of the freelist buffers * @xdp_prog: Installed XDP program - * @fw_ver: Firmware version + * @tx_rings: Array of pre-allocated TX ring structures + * @rx_rings: Array of pre-allocated RX ring structures + * @ctrl_bar: Pointer to mapped control BAR + * + * @txd_cnt: Size of the TX ring in number of descriptors + * @rxd_cnt: Size of the RX ring in number of descriptors + * @num_r_vecs: Number of used ring vectors + * @num_tx_rings: Currently configured number of TX rings + * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP) + * @num_rx_rings: Currently configured number of RX rings + * @mtu: Device MTU + */ +struct nfp_net_dp { + struct device *dev; + struct net_device *netdev; + + u8 is_vf:1; + u8 bpf_offload_skip_sw:1; + u8 bpf_offload_xdp:1; + u8 chained_metadata_format:1; + + u8 rx_dma_dir; + u8 rx_dma_off; + + u8 rx_offset; + + u32 ctrl; + u32 fl_bufsz; + + struct bpf_prog *xdp_prog; + + struct nfp_net_tx_ring *tx_rings; + struct nfp_net_rx_ring *rx_rings; + + u8 __iomem *ctrl_bar; + + /* Cold data follows */ + + unsigned int txd_cnt; + unsigned int rxd_cnt; + + unsigned int num_r_vecs; + + unsigned int num_tx_rings; + unsigned int num_stack_tx_rings; + unsigned int num_rx_rings; + + unsigned int mtu; +}; + +/** + * struct nfp_net - NFP network device structure + * @dp: Datapath structure + * @fw_ver: Firmware version * @cap: Capabilities advertised by the Firmware * @max_mtu: Maximum support MTU advertised by the Firmware * @rss_hfunc: RSS selected hash function @@ -457,17 +509,9 @@ struct nfp_stat_pair { * @rx_filter_change: Jiffies when statistics last changed * @rx_filter_stats_timer: Timer for polling filter offload statistics * @rx_filter_lock: Lock protecting timer state changes (teardown) + * @max_r_vecs: Number of allocated interrupt vectors for RX/TX * @max_tx_rings: Maximum number of TX rings supported by the Firmware * @max_rx_rings: Maximum number of RX rings supported by the Firmware - * @num_tx_rings: Currently configured number of TX rings - * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP) - * @num_rx_rings: Currently configured number of RX rings - * @txd_cnt: Size of the TX ring in number of descriptors - * @rxd_cnt: Size of the RX ring in number of descriptors - * @tx_rings: Array of pre-allocated TX ring structures - * @rx_rings: Array of pre-allocated RX ring structures - * @max_r_vecs: Number of allocated interrupt vectors for RX/TX - * @num_r_vecs: Number of used ring vectors * @r_vecs: Pre-allocated array of ring vectors * @irq_entries: Pre-allocated array of MSI-X entries * @lsc_handler: Handler for Link State Change interrupt @@ -491,7 +535,6 @@ struct nfp_stat_pair { * @vxlan_ports: VXLAN ports for RX inner csum offload communicated to HW * @vxlan_usecnt: IPv4/IPv6 VXLAN port use counts * @qcp_cfg: Pointer to QCP queue used for configuration notification - * @ctrl_bar: Pointer to mapped control BAR * @tx_bar: Pointer to mapped TX queues * @rx_bar: Pointer to mapped FL/RX queues * @debugfs_dir: Device directory in debugfs @@ -502,25 +545,10 @@ struct nfp_stat_pair { * @eth_port: Translated ETH Table port entry */ struct nfp_net { - struct device *dev; - struct net_device *netdev; - - unsigned is_vf:1; - unsigned bpf_offload_skip_sw:1; - unsigned bpf_offload_xdp:1; - unsigned chained_metadata_format:1; - - u32 ctrl; - u32 fl_bufsz; - - u32 rx_offset; - - struct bpf_prog *xdp_prog; - - struct nfp_net_tx_ring *tx_rings; - struct nfp_net_rx_ring *rx_rings; + struct nfp_net_dp dp; struct 
nfp_net_fw_version fw_ver; + u32 cap; u32 max_mtu; @@ -537,18 +565,10 @@ struct nfp_net { unsigned int max_tx_rings; unsigned int max_rx_rings; - unsigned int num_tx_rings; - unsigned int num_stack_tx_rings; - unsigned int num_rx_rings; - int stride_tx; int stride_rx; - int txd_cnt; - int rxd_cnt; - unsigned int max_r_vecs; - unsigned int num_r_vecs; struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS]; struct msix_entry irq_entries[NFP_NET_MAX_IRQS]; @@ -582,7 +602,6 @@ struct nfp_net { u8 __iomem *qcp_cfg; - u8 __iomem *ctrl_bar; u8 __iomem *tx_bar; u8 __iomem *rx_bar; @@ -597,54 +616,47 @@ struct nfp_net { struct nfp_eth_table_port *eth_port; }; -struct nfp_net_ring_set { - unsigned int n_rings; - unsigned int mtu; - unsigned int dcnt; - void *rings; -}; - /* Functions to read/write from/to a BAR * Performs any endian conversion necessary. */ static inline u16 nn_readb(struct nfp_net *nn, int off) { - return readb(nn->ctrl_bar + off); + return readb(nn->dp.ctrl_bar + off); } static inline void nn_writeb(struct nfp_net *nn, int off, u8 val) { - writeb(val, nn->ctrl_bar + off); + writeb(val, nn->dp.ctrl_bar + off); } static inline u16 nn_readw(struct nfp_net *nn, int off) { - return readw(nn->ctrl_bar + off); + return readw(nn->dp.ctrl_bar + off); } static inline void nn_writew(struct nfp_net *nn, int off, u16 val) { - writew(val, nn->ctrl_bar + off); + writew(val, nn->dp.ctrl_bar + off); } static inline u32 nn_readl(struct nfp_net *nn, int off) { - return readl(nn->ctrl_bar + off); + return readl(nn->dp.ctrl_bar + off); } static inline void nn_writel(struct nfp_net *nn, int off, u32 val) { - writel(val, nn->ctrl_bar + off); + writel(val, nn->dp.ctrl_bar + off); } static inline u64 nn_readq(struct nfp_net *nn, int off) { - return readq(nn->ctrl_bar + off); + return readq(nn->dp.ctrl_bar + off); } static inline void nn_writeq(struct nfp_net *nn, int off, u64 val) { - writeq(val, nn->ctrl_bar + off); + writeq(val, nn->dp.ctrl_bar + off); } /* Flush posted PCI writes by reading something without side effects */ @@ -798,9 +810,9 @@ void nfp_net_irqs_disable(struct pci_dev *pdev); void nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries, unsigned int n); -int -nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog, - struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx); + +struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn); +int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new); #ifdef CONFIG_NFP_DEBUG void nfp_net_debugfs_create(void); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 2d964d030dbe..f134f1808b9a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -85,20 +85,18 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, put_unaligned_le32(reg, fw_ver); } -static dma_addr_t -nfp_net_dma_map_rx(struct nfp_net *nn, void *frag, unsigned int bufsz, - int direction) +static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag) { - return dma_map_single(nn->dev, frag + NFP_NET_RX_BUF_HEADROOM, - bufsz - NFP_NET_RX_BUF_NON_DATA, direction); + return dma_map_single(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM, + dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, + dp->rx_dma_dir); } -static void -nfp_net_dma_unmap_rx(struct nfp_net *nn, dma_addr_t dma_addr, - unsigned int bufsz, int direction) +static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr) { - 
dma_unmap_single(nn->dev, dma_addr, - bufsz - NFP_NET_RX_BUF_NON_DATA, direction); + dma_unmap_single(dp->dev, dma_addr, + dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, + dp->rx_dma_dir); } /* Firmware reconfig @@ -329,19 +327,22 @@ void nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries, unsigned int n) { + struct nfp_net_dp *dp = &nn->dp; + nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS; - nn->num_r_vecs = nn->max_r_vecs; + dp->num_r_vecs = nn->max_r_vecs; memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n); - if (nn->num_rx_rings > nn->num_r_vecs || - nn->num_tx_rings > nn->num_r_vecs) + if (dp->num_rx_rings > dp->num_r_vecs || + dp->num_tx_rings > dp->num_r_vecs) nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n", - nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs); + dp->num_rx_rings, dp->num_tx_rings, + dp->num_r_vecs); - nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings); - nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings); - nn->num_stack_tx_rings = nn->num_tx_rings; + dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings); + dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings); + dp->num_stack_tx_rings = dp->num_tx_rings; } /** @@ -396,11 +397,11 @@ static void nfp_net_read_link_status(struct nfp_net *nn) nn->link_up = link_up; if (nn->link_up) { - netif_carrier_on(nn->netdev); - netdev_info(nn->netdev, "NIC Link is Up\n"); + netif_carrier_on(nn->dp.netdev); + netdev_info(nn->dp.netdev, "NIC Link is Up\n"); } else { - netif_carrier_off(nn->netdev); - netdev_info(nn->netdev, "NIC Link is Down\n"); + netif_carrier_off(nn->dp.netdev); + netdev_info(nn->dp.netdev, "NIC Link is Down\n"); } out: spin_unlock_irqrestore(&nn->link_status_lock, flags); @@ -532,7 +533,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset, entry = &nn->irq_entries[vector_idx]; - snprintf(name, name_sz, format, netdev_name(nn->netdev)); + snprintf(name, name_sz, format, netdev_name(nn->dp.netdev)); err = request_irq(entry->vector, handler, 0, name, nn); if (err) { nn_err(nn, "Failed to request IRQ %d (err=%d).\n", @@ -619,7 +620,6 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q, /** * nfp_net_tx_tso() - Set up Tx descriptor for LSO - * @nn: NFP Net device * @r_vec: per-ring structure * @txbuf: Pointer to driver soft TX descriptor * @txd: Pointer to HW TX descriptor @@ -628,7 +628,7 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q, * Set up Tx descriptor for LSO, do nothing for non-LSO skbs. * Return error on packet header greater than maximum supported LSO header size. */ -static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, +static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_net_tx_buf *txbuf, struct nfp_net_tx_desc *txd, struct sk_buff *skb) { @@ -659,7 +659,7 @@ static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, /** * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor - * @nn: NFP Net device + * @dp: NFP Net data path struct * @r_vec: per-ring structure * @txbuf: Pointer to driver soft TX descriptor * @txd: Pointer to TX descriptor @@ -668,7 +668,8 @@ static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, * This function sets the TX checksum flags in the TX descriptor based * on the configuration and the protocol of the packet to be transmitted. 
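+ * Taking the datapath struct rather than the full nfp_net device keeps
+ * the TX fast path on the cache-hot datapath state.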
*/ -static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, +static void nfp_net_tx_csum(struct nfp_net_dp *dp, + struct nfp_net_r_vector *r_vec, struct nfp_net_tx_buf *txbuf, struct nfp_net_tx_desc *txd, struct sk_buff *skb) { @@ -676,7 +677,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, struct iphdr *iph; u8 l4_hdr; - if (!(nn->ctrl & NFP_NET_CFG_CTRL_TXCSUM)) + if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM)) return; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -695,8 +696,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, } else if (ipv6h->version == 6) { l4_hdr = ipv6h->nexthdr; } else { - nn_warn_ratelimit(nn, "partial checksum but ipv=%x!\n", - iph->version); + nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version); return; } @@ -708,8 +708,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, txd->flags |= PCIE_DESC_TX_UDP_CSUM; break; default: - nn_warn_ratelimit(nn, "partial checksum but l4 proto=%x!\n", - l4_hdr); + nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr); return; } @@ -744,22 +743,24 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) struct nfp_net_r_vector *r_vec; struct nfp_net_tx_buf *txbuf; struct netdev_queue *nd_q; + struct nfp_net_dp *dp; dma_addr_t dma_addr; unsigned int fsize; int f, nr_frags; int wr_idx; u16 qidx; + dp = &nn->dp; qidx = skb_get_queue_mapping(skb); - tx_ring = &nn->tx_rings[qidx]; + tx_ring = &dp->tx_rings[qidx]; r_vec = tx_ring->r_vec; - nd_q = netdev_get_tx_queue(nn->netdev, qidx); + nd_q = netdev_get_tx_queue(dp->netdev, qidx); nr_frags = skb_shinfo(skb)->nr_frags; if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) { - nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n", - qidx, tx_ring->wr_p, tx_ring->rd_p); + nn_dp_warn(dp, "TX ring %d busy. 
wrp=%u rdp=%u\n", + qidx, tx_ring->wr_p, tx_ring->rd_p); netif_tx_stop_queue(nd_q); u64_stats_update_begin(&r_vec->tx_sync); r_vec->tx_busy++; @@ -768,9 +769,9 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) } /* Start with the head skbuf */ - dma_addr = dma_map_single(nn->dev, skb->data, skb_headlen(skb), + dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (dma_mapping_error(nn->dev, dma_addr)) + if (dma_mapping_error(dp->dev, dma_addr)) goto err_free; wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1); @@ -794,11 +795,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) txd->mss = 0; txd->l4_offset = 0; - nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb); + nfp_net_tx_tso(r_vec, txbuf, txd, skb); - nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb); + nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb); - if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) { + if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) { txd->flags |= PCIE_DESC_TX_VLAN; txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb)); } @@ -812,9 +813,9 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) frag = &skb_shinfo(skb)->frags[f]; fsize = skb_frag_size(frag); - dma_addr = skb_frag_dma_map(nn->dev, frag, 0, + dma_addr = skb_frag_dma_map(dp->dev, frag, 0, fsize, DMA_TO_DEVICE); - if (dma_mapping_error(nn->dev, dma_addr)) + if (dma_mapping_error(dp->dev, dma_addr)) goto err_unmap; wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1); @@ -853,7 +854,7 @@ err_unmap: --f; while (f >= 0) { frag = &skb_shinfo(skb)->frags[f]; - dma_unmap_page(nn->dev, tx_ring->txbufs[wr_idx].dma_addr, + dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr, skb_frag_size(frag), DMA_TO_DEVICE); tx_ring->txbufs[wr_idx].skb = NULL; tx_ring->txbufs[wr_idx].dma_addr = 0; @@ -862,13 +863,13 @@ err_unmap: if (wr_idx < 0) wr_idx += tx_ring->cnt; } - dma_unmap_single(nn->dev, tx_ring->txbufs[wr_idx].dma_addr, + dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr, skb_headlen(skb), DMA_TO_DEVICE); tx_ring->txbufs[wr_idx].skb = NULL; tx_ring->txbufs[wr_idx].dma_addr = 0; tx_ring->txbufs[wr_idx].fidx = -2; err_free: - nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n"); + nn_dp_warn(dp, "Failed to map DMA TX buffer\n"); u64_stats_update_begin(&r_vec->tx_sync); r_vec->tx_errors++; u64_stats_update_end(&r_vec->tx_sync); @@ -885,7 +886,7 @@ err_free: static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; + struct nfp_net_dp *dp = &r_vec->nfp_net->dp; const struct skb_frag_struct *frag; struct netdev_queue *nd_q; u32 done_pkts = 0, done_bytes = 0; @@ -919,7 +920,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) if (fidx == -1) { /* unmap head */ - dma_unmap_single(nn->dev, tx_ring->txbufs[idx].dma_addr, + dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr, skb_headlen(skb), DMA_TO_DEVICE); done_pkts += tx_ring->txbufs[idx].pkt_cnt; @@ -927,7 +928,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) } else { /* unmap fragment */ frag = &skb_shinfo(skb)->frags[fidx]; - dma_unmap_page(nn->dev, tx_ring->txbufs[idx].dma_addr, + dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr, skb_frag_size(frag), DMA_TO_DEVICE); } @@ -947,7 +948,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) r_vec->tx_pkts += done_pkts; u64_stats_update_end(&r_vec->tx_sync); - nd_q = netdev_get_tx_queue(nn->netdev, 
tx_ring->idx); + nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx); netdev_tx_completed_queue(nd_q, done_pkts, done_bytes); if (nfp_net_tx_ring_should_wake(tx_ring)) { /* Make sure TX thread will see updated tx_ring->rd_p */ @@ -965,7 +966,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; + struct nfp_net_dp *dp = &r_vec->nfp_net->dp; u32 done_pkts = 0, done_bytes = 0; int idx, todo; u32 qcp_rd_p; @@ -988,8 +989,7 @@ static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring) if (!tx_ring->txbufs[idx].frag) continue; - nfp_net_dma_unmap_rx(nn, tx_ring->txbufs[idx].dma_addr, - nn->fl_bufsz, DMA_BIDIRECTIONAL); + nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[idx].dma_addr); __free_page(virt_to_page(tx_ring->txbufs[idx].frag)); done_pkts++; @@ -1014,13 +1014,13 @@ static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring) /** * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers - * @nn: NFP Net device + * @dp: NFP Net data path struct * @tx_ring: TX ring structure * * Assumes that the device is stopped */ static void -nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring) +nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; const struct skb_frag_struct *frag; @@ -1034,8 +1034,7 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring) tx_buf = &tx_ring->txbufs[idx]; if (tx_ring == r_vec->xdp_ring) { - nfp_net_dma_unmap_rx(nn, tx_buf->dma_addr, - nn->fl_bufsz, DMA_BIDIRECTIONAL); + nfp_net_dma_unmap_rx(dp, tx_buf->dma_addr); __free_page(virt_to_page(tx_ring->txbufs[idx].frag)); } else { struct sk_buff *skb = tx_ring->txbufs[idx].skb; @@ -1043,13 +1042,13 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring) if (tx_buf->fidx == -1) { /* unmap head */ - dma_unmap_single(nn->dev, tx_buf->dma_addr, + dma_unmap_single(dp->dev, tx_buf->dma_addr, skb_headlen(skb), DMA_TO_DEVICE); } else { /* unmap fragment */ frag = &skb_shinfo(skb)->frags[tx_buf->fidx]; - dma_unmap_page(nn->dev, tx_buf->dma_addr, + dma_unmap_page(dp->dev, tx_buf->dma_addr, skb_frag_size(frag), DMA_TO_DEVICE); } @@ -1076,7 +1075,7 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring) if (tx_ring == r_vec->xdp_ring) return; - nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx); + nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx); netdev_tx_reset_queue(nd_q); } @@ -1085,7 +1084,7 @@ static void nfp_net_tx_timeout(struct net_device *netdev) struct nfp_net *nn = netdev_priv(netdev); int i; - for (i = 0; i < nn->netdev->real_num_tx_queues; i++) { + for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) { if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i))) continue; nn_warn(nn, "TX timeout on ring: %d\n", i); @@ -1096,16 +1095,17 @@ static void nfp_net_tx_timeout(struct net_device *netdev) /* Receive processing */ static unsigned int -nfp_net_calc_fl_bufsz(struct nfp_net *nn, unsigned int mtu) +nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp) { unsigned int fl_bufsz; fl_bufsz = NFP_NET_RX_BUF_HEADROOM; - if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) + fl_bufsz += dp->rx_dma_off; + if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) fl_bufsz += NFP_NET_MAX_PREPEND; else - fl_bufsz += nn->rx_offset; - fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + mtu; + fl_bufsz 
+= dp->rx_offset; + fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu; fl_bufsz = SKB_DATA_ALIGN(fl_bufsz); fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); @@ -1124,62 +1124,56 @@ nfp_net_free_frag(void *frag, bool xdp) /** * nfp_net_rx_alloc_one() - Allocate and map page frag for RX + * @dp: NFP Net data path struct * @rx_ring: RX ring structure of the skb * @dma_addr: Pointer to storage for DMA address (output param) - * @fl_bufsz: size of freelist buffers - * @xdp: Whether XDP is enabled * * This function will allocate a new page frag and map it for DMA. * * Return: allocated page frag or NULL on failure. */ static void * -nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr, - unsigned int fl_bufsz, bool xdp) +nfp_net_rx_alloc_one(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring, + dma_addr_t *dma_addr) { - struct nfp_net *nn = rx_ring->r_vec->nfp_net; - int direction; void *frag; - if (!xdp) - frag = netdev_alloc_frag(fl_bufsz); + if (!dp->xdp_prog) + frag = netdev_alloc_frag(dp->fl_bufsz); else frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD)); if (!frag) { - nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n"); + nn_dp_warn(dp, "Failed to alloc receive page frag\n"); return NULL; } - direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; - - *dma_addr = nfp_net_dma_map_rx(nn, frag, fl_bufsz, direction); - if (dma_mapping_error(nn->dev, *dma_addr)) { - nfp_net_free_frag(frag, xdp); - nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n"); + *dma_addr = nfp_net_dma_map_rx(dp, frag); + if (dma_mapping_error(dp->dev, *dma_addr)) { + nfp_net_free_frag(frag, dp->xdp_prog); + nn_dp_warn(dp, "Failed to map DMA RX buffer\n"); return NULL; } return frag; } -static void * -nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr) +static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) { void *frag; - if (!nn->xdp_prog) - frag = napi_alloc_frag(nn->fl_bufsz); + if (!dp->xdp_prog) + frag = napi_alloc_frag(dp->fl_bufsz); else frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD)); if (!frag) { - nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n"); + nn_dp_warn(dp, "Failed to alloc receive page frag\n"); return NULL; } - *dma_addr = nfp_net_dma_map_rx(nn, frag, nn->fl_bufsz, direction); - if (dma_mapping_error(nn->dev, *dma_addr)) { - nfp_net_free_frag(frag, nn->xdp_prog); - nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n"); + *dma_addr = nfp_net_dma_map_rx(dp, frag); + if (dma_mapping_error(dp->dev, *dma_addr)) { + nfp_net_free_frag(frag, dp->xdp_prog); + nn_dp_warn(dp, "Failed to map DMA RX buffer\n"); return NULL; } @@ -1188,11 +1182,13 @@ nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr) /** * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings + * @dp: NFP Net data path struct * @rx_ring: RX ring structure * @frag: page fragment buffer * @dma_addr: DMA address of skb mapping */ -static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring, +static void nfp_net_rx_give_one(const struct nfp_net_dp *dp, + struct nfp_net_rx_ring *rx_ring, void *frag, dma_addr_t dma_addr) { unsigned int wr_idx; @@ -1206,7 +1202,8 @@ static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring, /* Fill freelist descriptor */ rx_ring->rxds[wr_idx].fld.reserved = 0; rx_ring->rxds[wr_idx].fld.meta_len_dd = 0; - nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, dma_addr); + nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, + dma_addr + dp->rx_dma_off); 
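+
+	/* Offsetting the address by rx_dma_off (above) makes the device
+	 * write packet data past the headroom reserved for XDP.
+	 */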
rx_ring->wr_p++; rx_ring->wr_ptr_add++; @@ -1247,19 +1244,17 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring) /** * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring - * @nn: NFP Net device + * @dp: NFP Net data path struct * @rx_ring: RX ring to remove buffers from - * @xdp: Whether XDP is enabled * * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1) * entries. After device is disabled nfp_net_rx_ring_reset() must be called * to restore required ring geometry. */ static void -nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, - bool xdp) +nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp, + struct nfp_net_rx_ring *rx_ring) { - int direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; unsigned int i; for (i = 0; i < rx_ring->cnt - 1; i++) { @@ -1270,9 +1265,8 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, if (!rx_ring->rxbufs[i].frag) continue; - nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[i].dma_addr, - rx_ring->bufsz, direction); - nfp_net_free_frag(rx_ring->rxbufs[i].frag, xdp); + nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr); + nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog); rx_ring->rxbufs[i].dma_addr = 0; rx_ring->rxbufs[i].frag = NULL; } @@ -1280,13 +1274,12 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, /** * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW) - * @nn: NFP Net device + * @dp: NFP Net data path struct * @rx_ring: RX ring to remove buffers from - * @xdp: Whether XDP is enabled */ static int -nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, - bool xdp) +nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp, + struct nfp_net_rx_ring *rx_ring) { struct nfp_net_rx_buf *rxbufs; unsigned int i; @@ -1295,10 +1288,9 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, for (i = 0; i < rx_ring->cnt - 1; i++) { rxbufs[i].frag = - nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr, - rx_ring->bufsz, xdp); + nfp_net_rx_alloc_one(dp, rx_ring, &rxbufs[i].dma_addr); if (!rxbufs[i].frag) { - nfp_net_rx_ring_bufs_free(nn, rx_ring, xdp); + nfp_net_rx_ring_bufs_free(dp, rx_ring); return -ENOMEM; } } @@ -1308,14 +1300,17 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, /** * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW + * @dp: NFP Net data path struct * @rx_ring: RX ring to fill */ -static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring) +static void +nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp, + struct nfp_net_rx_ring *rx_ring) { unsigned int i; for (i = 0; i < rx_ring->cnt - 1; i++) - nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].frag, + nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag, rx_ring->rxbufs[i].dma_addr); } @@ -1335,17 +1330,18 @@ static int nfp_net_rx_csum_has_errors(u16 flags) /** * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags - * @nn: NFP Net device + * @dp: NFP Net data path struct * @r_vec: per-ring structure * @rxd: Pointer to RX descriptor * @skb: Pointer to SKB */ -static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, +static void nfp_net_rx_csum(struct nfp_net_dp *dp, + struct nfp_net_r_vector *r_vec, struct nfp_net_rx_desc *rxd, struct sk_buff *skb) { skb_checksum_none_assert(skb); - if (!(nn->netdev->features & NETIF_F_RXCSUM)) + if (!(dp->netdev->features & 
NETIF_F_RXCSUM)) return; if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) { @@ -1396,24 +1392,21 @@ static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb, static void nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb, - struct nfp_net_rx_desc *rxd) + void *data, struct nfp_net_rx_desc *rxd) { - struct nfp_net_rx_hash *rx_hash; + struct nfp_net_rx_hash *rx_hash = data; if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS)) return; - rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash)); - nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type), &rx_hash->hash); } static void * nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb, - int meta_len) + void *data, int meta_len) { - u8 *data = skb->data - meta_len; u32 meta_info; meta_info = get_unaligned_be32(data); @@ -1443,8 +1436,9 @@ nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb, } static void -nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring, - struct nfp_net_rx_buf *rxbuf, struct sk_buff *skb) +nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, + struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf, + struct sk_buff *skb) { u64_stats_update_begin(&r_vec->rx_sync); r_vec->rx_drops++; @@ -1456,15 +1450,15 @@ nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring, if (skb && rxbuf && skb->head == rxbuf->frag) page_ref_inc(virt_to_head_page(rxbuf->frag)); if (rxbuf) - nfp_net_rx_give_one(rx_ring, rxbuf->frag, rxbuf->dma_addr); + nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr); if (skb) dev_kfree_skb_any(skb); } static bool -nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, +nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring, struct nfp_net_tx_ring *tx_ring, - struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off, + struct nfp_net_rx_buf *rxbuf, unsigned int dma_off, unsigned int pkt_len) { struct nfp_net_tx_buf *txbuf; @@ -1474,16 +1468,16 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, int wr_idx; if (unlikely(nfp_net_tx_full(tx_ring, 1))) { - nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); + nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL); return false; } - new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr); + new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr); if (unlikely(!new_frag)) { - nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); + nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL); return false; } - nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr); + nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1); @@ -1495,14 +1489,14 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, txbuf->pkt_cnt = 1; txbuf->real_len = pkt_len; - dma_sync_single_for_device(nn->dev, rxbuf->dma_addr + pkt_off, + dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off, pkt_len, DMA_BIDIRECTIONAL); /* Build TX descriptor */ txd = &tx_ring->txds[wr_idx]; txd->offset_eop = PCIE_DESC_TX_EOP; txd->dma_len = cpu_to_le16(pkt_len); - nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + pkt_off); + nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off); txd->data_len = cpu_to_le16(pkt_len); txd->flags = 0; @@ -1514,14 +1508,24 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, return true; } -static int nfp_net_run_xdp(struct bpf_prog *prog, void 
*data, unsigned int len) +static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start, + unsigned int *off, unsigned int *len) { struct xdp_buff xdp; + void *orig_data; + int ret; - xdp.data = data; - xdp.data_end = data + len; + xdp.data_hard_start = hard_start; + xdp.data = data + *off; + xdp.data_end = data + *off + *len; - return bpf_prog_run_xdp(prog, &xdp); + orig_data = xdp.data; + ret = bpf_prog_run_xdp(prog, &xdp); + + *len -= xdp.data - orig_data; + *off += xdp.data - orig_data; + + return ret; } /** @@ -1538,27 +1542,27 @@ static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len) static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) { struct nfp_net_r_vector *r_vec = rx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; + struct nfp_net_dp *dp = &r_vec->nfp_net->dp; struct nfp_net_tx_ring *tx_ring; struct bpf_prog *xdp_prog; unsigned int true_bufsz; struct sk_buff *skb; int pkts_polled = 0; - int rx_dma_map_dir; int idx; rcu_read_lock(); - xdp_prog = READ_ONCE(nn->xdp_prog); - rx_dma_map_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; - true_bufsz = xdp_prog ? PAGE_SIZE : nn->fl_bufsz; + xdp_prog = READ_ONCE(dp->xdp_prog); + true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz; tx_ring = r_vec->xdp_ring; while (pkts_polled < budget) { - unsigned int meta_len, data_len, data_off, pkt_len, pkt_off; + unsigned int meta_len, data_len, data_off, pkt_len; + u8 meta_prepend[NFP_NET_MAX_PREPEND]; struct nfp_net_rx_buf *rxbuf; struct nfp_net_rx_desc *rxd; dma_addr_t new_dma_addr; void *new_frag; + u8 *meta; idx = rx_ring->rd_p & (rx_ring->cnt - 1); @@ -1591,11 +1595,11 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) data_len = le16_to_cpu(rxd->rxd.data_len); pkt_len = data_len - meta_len; - if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) - pkt_off = meta_len; + if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) + data_off = NFP_NET_RX_BUF_HEADROOM + meta_len; else - pkt_off = nn->rx_offset; - data_off = NFP_NET_RX_BUF_HEADROOM + pkt_off; + data_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_offset; + data_off += dp->rx_dma_off; /* Stats update */ u64_stats_update_begin(&r_vec->rx_sync); @@ -1603,30 +1607,55 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) r_vec->rx_bytes += pkt_len; u64_stats_update_end(&r_vec->rx_sync); + /* Pointer to start of metadata */ + meta = rxbuf->frag + data_off - meta_len; + + if (unlikely(meta_len > NFP_NET_MAX_PREPEND || + (dp->rx_offset && meta_len > dp->rx_offset))) { + nn_dp_warn(dp, "oversized RX packet metadata %u\n", + meta_len); + nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); + continue; + } + if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF && - nn->bpf_offload_xdp)) { + dp->bpf_offload_xdp)) { + unsigned int dma_off; + void *hard_start; int act; - dma_sync_single_for_cpu(nn->dev, - rxbuf->dma_addr + pkt_off, - pkt_len, DMA_BIDIRECTIONAL); - act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off, - pkt_len); + hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM; + dma_off = data_off - NFP_NET_RX_BUF_HEADROOM; + dma_sync_single_for_cpu(dp->dev, rxbuf->dma_addr, + dma_off + pkt_len, + DMA_BIDIRECTIONAL); + + /* Move prepend out of the way */ + if (xdp_prog->xdp_adjust_head) { + memcpy(meta_prepend, meta, meta_len); + meta = meta_prepend; + } + + act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start, + &data_off, &pkt_len); switch (act) { case XDP_PASS: break; case XDP_TX: - if (unlikely(!nfp_net_tx_xdp_buf(nn, rx_ring, + dma_off = data_off - 
NFP_NET_RX_BUF_HEADROOM; + if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring, tx_ring, rxbuf, - pkt_off, pkt_len))) - trace_xdp_exception(nn->netdev, xdp_prog, act); + dma_off, + pkt_len))) + trace_xdp_exception(dp->netdev, + xdp_prog, act); continue; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: - trace_xdp_exception(nn->netdev, xdp_prog, act); + trace_xdp_exception(dp->netdev, xdp_prog, act); case XDP_DROP: - nfp_net_rx_give_one(rx_ring, rxbuf->frag, + nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr); continue; } @@ -1634,41 +1663,40 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) skb = build_skb(rxbuf->frag, true_bufsz); if (unlikely(!skb)) { - nfp_net_rx_drop(r_vec, rx_ring, rxbuf, NULL); + nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); continue; } - new_frag = nfp_net_napi_alloc_one(nn, rx_dma_map_dir, - &new_dma_addr); + new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr); if (unlikely(!new_frag)) { - nfp_net_rx_drop(r_vec, rx_ring, rxbuf, skb); + nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb); continue; } - nfp_net_dma_unmap_rx(nn, rxbuf->dma_addr, nn->fl_bufsz, - rx_dma_map_dir); + nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); - nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr); + nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); skb_reserve(skb, data_off); skb_put(skb, pkt_len); - if (!nn->chained_metadata_format) { - nfp_net_set_hash_desc(nn->netdev, skb, rxd); + if (!dp->chained_metadata_format) { + nfp_net_set_hash_desc(dp->netdev, skb, meta, rxd); } else if (meta_len) { void *end; - end = nfp_net_parse_meta(nn->netdev, skb, meta_len); - if (unlikely(end != skb->data)) { - nn_warn_ratelimit(nn, "invalid RX packet metadata\n"); - nfp_net_rx_drop(r_vec, rx_ring, NULL, skb); + end = nfp_net_parse_meta(dp->netdev, skb, meta, + meta_len); + if (unlikely(end != meta + meta_len)) { + nn_dp_warn(dp, "invalid RX packet metadata\n"); + nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb); continue; } } skb_record_rx_queue(skb, rx_ring->idx); - skb->protocol = eth_type_trans(skb, nn->netdev); + skb->protocol = eth_type_trans(skb, dp->netdev); - nfp_net_rx_csum(nn, r_vec, rxd, skb); + nfp_net_rx_csum(dp, r_vec, rxd, skb); if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), @@ -1722,12 +1750,12 @@ static int nfp_net_poll(struct napi_struct *napi, int budget) static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; + struct nfp_net_dp *dp = &r_vec->nfp_net->dp; kfree(tx_ring->txbufs); if (tx_ring->txds) - dma_free_coherent(nn->dev, tx_ring->size, + dma_free_coherent(dp->dev, tx_ring->size, tx_ring->txds, tx_ring->dma); tx_ring->cnt = 0; @@ -1739,23 +1767,23 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) /** * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring + * @dp: NFP Net data path struct * @tx_ring: TX Ring structure to allocate - * @cnt: Ring buffer count * @is_xdp: True if ring will be used for XDP * * Return: 0 on success, negative errno otherwise. 
*/ static int -nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp) +nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring, + bool is_xdp) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; int sz; - tx_ring->cnt = cnt; + tx_ring->cnt = dp->txd_cnt; tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt; - tx_ring->txds = dma_zalloc_coherent(nn->dev, tx_ring->size, + tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); if (!tx_ring->txds) goto err_alloc; @@ -1766,7 +1794,7 @@ nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp) goto err_alloc; if (!is_xdp) - netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, + netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask, tx_ring->idx); return 0; @@ -1776,62 +1804,45 @@ err_alloc: return -ENOMEM; } -static struct nfp_net_tx_ring * -nfp_net_tx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s, - unsigned int num_stack_tx_rings) +static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) { - struct nfp_net_tx_ring *rings; unsigned int r; - rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL); - if (!rings) - return NULL; + dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings), + GFP_KERNEL); + if (!dp->tx_rings) + return -ENOMEM; - for (r = 0; r < s->n_rings; r++) { + for (r = 0; r < dp->num_tx_rings; r++) { int bias = 0; - if (r >= num_stack_tx_rings) - bias = num_stack_tx_rings; + if (r >= dp->num_stack_tx_rings) + bias = dp->num_stack_tx_rings; - nfp_net_tx_ring_init(&rings[r], &nn->r_vecs[r - bias], r); + nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias], + r); - if (nfp_net_tx_ring_alloc(&rings[r], s->dcnt, bias)) + if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r], bias)) goto err_free_prev; } - return s->rings = rings; + return 0; err_free_prev: while (r--) - nfp_net_tx_ring_free(&rings[r]); - kfree(rings); - return NULL; + nfp_net_tx_ring_free(&dp->tx_rings[r]); + kfree(dp->tx_rings); + return -ENOMEM; } -static void -nfp_net_tx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s) +static void nfp_net_tx_rings_free(struct nfp_net_dp *dp) { - struct nfp_net_ring_set new = *s; - - s->dcnt = nn->txd_cnt; - s->rings = nn->tx_rings; - s->n_rings = nn->num_tx_rings; - - nn->txd_cnt = new.dcnt; - nn->tx_rings = new.rings; - nn->num_tx_rings = new.n_rings; -} - -static void -nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s) -{ - struct nfp_net_tx_ring *rings = s->rings; unsigned int r; - for (r = 0; r < s->n_rings; r++) - nfp_net_tx_ring_free(&rings[r]); + for (r = 0; r < dp->num_tx_rings; r++) + nfp_net_tx_ring_free(&dp->tx_rings[r]); - kfree(rings); + kfree(dp->tx_rings); } /** @@ -1841,12 +1852,12 @@ nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s) static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) { struct nfp_net_r_vector *r_vec = rx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; + struct nfp_net_dp *dp = &r_vec->nfp_net->dp; kfree(rx_ring->rxbufs); if (rx_ring->rxds) - dma_free_coherent(nn->dev, rx_ring->size, + dma_free_coherent(dp->dev, rx_ring->size, rx_ring->rxds, rx_ring->dma); rx_ring->cnt = 0; @@ -1858,25 +1869,19 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) /** * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring + * @dp: NFP Net data path struct * @rx_ring: RX ring to allocate - * @fl_bufsz: Size of buffers to allocate - * @cnt: 
Ring buffer count * * Return: 0 on success, negative errno otherwise. */ static int -nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz, - u32 cnt) +nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) { - struct nfp_net_r_vector *r_vec = rx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; int sz; - rx_ring->cnt = cnt; - rx_ring->bufsz = fl_bufsz; - + rx_ring->cnt = dp->rxd_cnt; rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; - rx_ring->rxds = dma_zalloc_coherent(nn->dev, rx_ring->size, + rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->rxds) goto err_alloc; @@ -1893,82 +1898,59 @@ err_alloc: return -ENOMEM; } -static struct nfp_net_rx_ring * -nfp_net_rx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s, - bool xdp) +static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) { - unsigned int fl_bufsz = nfp_net_calc_fl_bufsz(nn, s->mtu); - struct nfp_net_rx_ring *rings; unsigned int r; - rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL); - if (!rings) - return NULL; + dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings), + GFP_KERNEL); + if (!dp->rx_rings) + return -ENOMEM; - for (r = 0; r < s->n_rings; r++) { - nfp_net_rx_ring_init(&rings[r], &nn->r_vecs[r], r); + for (r = 0; r < dp->num_rx_rings; r++) { + nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r); - if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, s->dcnt)) + if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r])) goto err_free_prev; - if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r], xdp)) + if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r])) goto err_free_ring; } - return s->rings = rings; + return 0; err_free_prev: while (r--) { - nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp); + nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]); err_free_ring: - nfp_net_rx_ring_free(&rings[r]); + nfp_net_rx_ring_free(&dp->rx_rings[r]); } - kfree(rings); - return NULL; + kfree(dp->rx_rings); + return -ENOMEM; } -static void -nfp_net_rx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s) +static void nfp_net_rx_rings_free(struct nfp_net_dp *dp) { - struct nfp_net_ring_set new = *s; - - s->mtu = nn->netdev->mtu; - s->dcnt = nn->rxd_cnt; - s->rings = nn->rx_rings; - s->n_rings = nn->num_rx_rings; - - nn->netdev->mtu = new.mtu; - nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, new.mtu); - nn->rxd_cnt = new.dcnt; - nn->rx_rings = new.rings; - nn->num_rx_rings = new.n_rings; -} - -static void -nfp_net_rx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s, - bool xdp) -{ - struct nfp_net_rx_ring *rings = s->rings; unsigned int r; - for (r = 0; r < s->n_rings; r++) { - nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp); - nfp_net_rx_ring_free(&rings[r]); + for (r = 0; r < dp->num_rx_rings; r++) { + nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]); + nfp_net_rx_ring_free(&dp->rx_rings[r]); } - kfree(rings); + kfree(dp->rx_rings); } static void -nfp_net_vector_assign_rings(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, - int idx) +nfp_net_vector_assign_rings(struct nfp_net_dp *dp, + struct nfp_net_r_vector *r_vec, int idx) { - r_vec->rx_ring = idx < nn->num_rx_rings ? &nn->rx_rings[idx] : NULL; + r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL; r_vec->tx_ring = - idx < nn->num_stack_tx_rings ? &nn->tx_rings[idx] : NULL; + idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL; - r_vec->xdp_ring = idx < nn->num_tx_rings - nn->num_stack_tx_rings ? 
- &nn->tx_rings[nn->num_stack_tx_rings + idx] : NULL; + r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ? + &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL; } static int @@ -1978,11 +1960,11 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, int err; /* Setup NAPI */ - netif_napi_add(nn->netdev, &r_vec->napi, + netif_napi_add(nn->dp.netdev, &r_vec->napi, nfp_net_poll, NAPI_POLL_WEIGHT); snprintf(r_vec->name, sizeof(r_vec->name), - "%s-rxtx-%d", nn->netdev->name, idx); + "%s-rxtx-%d", nn->dp.netdev->name, idx); err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name, r_vec); if (err) { @@ -2053,13 +2035,13 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn) /* copy RX interrupt coalesce parameters */ value = (nn->rx_coalesce_max_frames << 16) | (factor * nn->rx_coalesce_usecs); - for (i = 0; i < nn->num_rx_rings; i++) + for (i = 0; i < nn->dp.num_rx_rings; i++) nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value); /* copy TX interrupt coalesce parameters */ value = (nn->tx_coalesce_max_frames << 16) | (factor * nn->tx_coalesce_usecs); - for (i = 0; i < nn->num_tx_rings; i++) + for (i = 0; i < nn->dp.num_tx_rings; i++) nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value); } @@ -2074,9 +2056,9 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn) static void nfp_net_write_mac_addr(struct nfp_net *nn) { nn_writel(nn, NFP_NET_CFG_MACADDR + 0, - get_unaligned_be32(nn->netdev->dev_addr)); + get_unaligned_be32(nn->dp.netdev->dev_addr)); nn_writew(nn, NFP_NET_CFG_MACADDR + 6, - get_unaligned_be16(nn->netdev->dev_addr + 4)); + get_unaligned_be16(nn->dp.netdev->dev_addr + 4)); } static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx) @@ -2100,7 +2082,7 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn) unsigned int r; int err; - new_ctrl = nn->ctrl; + new_ctrl = nn->dp.ctrl; new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE; update = NFP_NET_CFG_UPDATE_GEN; update |= NFP_NET_CFG_UPDATE_MSIX; @@ -2117,14 +2099,14 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn) if (err) nn_err(nn, "Could not disable device: %d\n", err); - for (r = 0; r < nn->num_rx_rings; r++) - nfp_net_rx_ring_reset(&nn->rx_rings[r]); - for (r = 0; r < nn->num_tx_rings; r++) - nfp_net_tx_ring_reset(nn, &nn->tx_rings[r]); - for (r = 0; r < nn->num_r_vecs; r++) + for (r = 0; r < nn->dp.num_rx_rings; r++) + nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]); + for (r = 0; r < nn->dp.num_tx_rings; r++) + nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]); + for (r = 0; r < nn->dp.num_r_vecs; r++) nfp_net_vec_clear_ring_data(nn, r); - nn->ctrl = new_ctrl; + nn->dp.ctrl = new_ctrl; } static void @@ -2152,7 +2134,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn) unsigned int r; int err; - new_ctrl = nn->ctrl; + new_ctrl = nn->dp.ctrl; if (nn->cap & NFP_NET_CFG_CTRL_RSS) { nfp_net_rss_write_key(nn); @@ -2168,22 +2150,22 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn) update |= NFP_NET_CFG_UPDATE_IRQMOD; } - for (r = 0; r < nn->num_tx_rings; r++) - nfp_net_tx_ring_hw_cfg_write(nn, &nn->tx_rings[r], r); - for (r = 0; r < nn->num_rx_rings; r++) - nfp_net_rx_ring_hw_cfg_write(nn, &nn->rx_rings[r], r); + for (r = 0; r < nn->dp.num_tx_rings; r++) + nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r); + for (r = 0; r < nn->dp.num_rx_rings; r++) + nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r); - nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ? 
- 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1); + nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ? + 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1); - nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ? - 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1); + nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ? + 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1); nfp_net_write_mac_addr(nn); - nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu); + nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.netdev->mtu); nn_writel(nn, NFP_NET_CFG_FLBUFSZ, - nn->fl_bufsz - NFP_NET_RX_BUF_NON_DATA); + nn->dp.fl_bufsz - NFP_NET_RX_BUF_NON_DATA); /* Enable device */ new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; @@ -2196,18 +2178,18 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn) nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); err = nfp_net_reconfig(nn, update); - nn->ctrl = new_ctrl; + nn->dp.ctrl = new_ctrl; - for (r = 0; r < nn->num_rx_rings; r++) - nfp_net_rx_ring_fill_freelist(&nn->rx_rings[r]); + for (r = 0; r < nn->dp.num_rx_rings; r++) + nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]); /* Since reconfiguration requests while NFP is down are ignored we * have to wipe the entire VXLAN configuration and reinitialize it. */ - if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) { + if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) { memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports)); memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt)); - udp_tunnel_get_rx_info(nn->netdev); + udp_tunnel_get_rx_info(nn->dp.netdev); } return err; @@ -2236,12 +2218,12 @@ static void nfp_net_open_stack(struct nfp_net *nn) { unsigned int r; - for (r = 0; r < nn->num_r_vecs; r++) { + for (r = 0; r < nn->dp.num_r_vecs; r++) { napi_enable(&nn->r_vecs[r].napi); enable_irq(nn->r_vecs[r].irq_vector); } - netif_tx_wake_all_queues(nn->netdev); + netif_tx_wake_all_queues(nn->dp.netdev); enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); nfp_net_read_link_status(nn); @@ -2250,19 +2232,10 @@ static void nfp_net_open_stack(struct nfp_net *nn) static int nfp_net_netdev_open(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - struct nfp_net_ring_set rx = { - .n_rings = nn->num_rx_rings, - .mtu = nn->netdev->mtu, - .dcnt = nn->rxd_cnt, - }; - struct nfp_net_ring_set tx = { - .n_rings = nn->num_tx_rings, - .dcnt = nn->txd_cnt, - }; int err, r; - if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) { - nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl); + if (nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE) { + nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->dp.ctrl); return -EBUSY; } @@ -2283,33 +2256,28 @@ static int nfp_net_netdev_open(struct net_device *netdev) goto err_free_exn; disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); - for (r = 0; r < nn->num_r_vecs; r++) { + for (r = 0; r < nn->dp.num_r_vecs; r++) { err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); if (err) goto err_cleanup_vec_p; } - nn->rx_rings = nfp_net_rx_ring_set_prepare(nn, &rx, nn->xdp_prog); - if (!nn->rx_rings) { - err = -ENOMEM; + err = nfp_net_rx_rings_prepare(nn, &nn->dp); + if (err) goto err_cleanup_vec; - } - nn->tx_rings = nfp_net_tx_ring_set_prepare(nn, &tx, - nn->num_stack_tx_rings); - if (!nn->tx_rings) { - err = -ENOMEM; + err = nfp_net_tx_rings_prepare(nn, &nn->dp); + if (err) goto err_free_rx_rings; - } for (r = 0; r < nn->max_r_vecs; r++) - nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r); + nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); - err = 
netif_set_real_num_tx_queues(netdev, nn->num_stack_tx_rings); + err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings); if (err) goto err_free_rings; - err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings); + err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings); if (err) goto err_free_rings; @@ -2335,11 +2303,11 @@ static int nfp_net_netdev_open(struct net_device *netdev) return 0; err_free_rings: - nfp_net_tx_ring_set_free(nn, &tx); + nfp_net_tx_rings_free(&nn->dp); err_free_rx_rings: - nfp_net_rx_ring_set_free(nn, &rx, nn->xdp_prog); + nfp_net_rx_rings_free(&nn->dp); err_cleanup_vec: - r = nn->num_r_vecs; + r = nn->dp.num_r_vecs; err_cleanup_vec_p: while (r--) nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); @@ -2358,15 +2326,15 @@ static void nfp_net_close_stack(struct nfp_net *nn) unsigned int r; disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); - netif_carrier_off(nn->netdev); + netif_carrier_off(nn->dp.netdev); nn->link_up = false; - for (r = 0; r < nn->num_r_vecs; r++) { + for (r = 0; r < nn->dp.num_r_vecs; r++) { disable_irq(nn->r_vecs[r].irq_vector); napi_disable(&nn->r_vecs[r].napi); } - netif_tx_disable(nn->netdev); + netif_tx_disable(nn->dp.netdev); } /** @@ -2377,17 +2345,17 @@ static void nfp_net_close_free_all(struct nfp_net *nn) { unsigned int r; - for (r = 0; r < nn->num_rx_rings; r++) { - nfp_net_rx_ring_bufs_free(nn, &nn->rx_rings[r], nn->xdp_prog); - nfp_net_rx_ring_free(&nn->rx_rings[r]); + for (r = 0; r < nn->dp.num_rx_rings; r++) { + nfp_net_rx_ring_bufs_free(&nn->dp, &nn->dp.rx_rings[r]); + nfp_net_rx_ring_free(&nn->dp.rx_rings[r]); } - for (r = 0; r < nn->num_tx_rings; r++) - nfp_net_tx_ring_free(&nn->tx_rings[r]); - for (r = 0; r < nn->num_r_vecs; r++) + for (r = 0; r < nn->dp.num_tx_rings; r++) + nfp_net_tx_ring_free(&nn->dp.tx_rings[r]); + for (r = 0; r < nn->dp.num_r_vecs; r++) nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); - kfree(nn->rx_rings); - kfree(nn->tx_rings); + kfree(nn->dp.rx_rings); + kfree(nn->dp.tx_rings); nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); @@ -2401,8 +2369,8 @@ static int nfp_net_netdev_close(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) { - nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl); + if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE)) { + nn_err(nn, "Dev is not up: 0x%08x\n", nn->dp.ctrl); return 0; } @@ -2427,7 +2395,7 @@ static void nfp_net_set_rx_mode(struct net_device *netdev) struct nfp_net *nn = netdev_priv(netdev); u32 new_ctrl; - new_ctrl = nn->ctrl; + new_ctrl = nn->dp.ctrl; if (netdev->flags & IFF_PROMISC) { if (nn->cap & NFP_NET_CFG_CTRL_PROMISC) @@ -2438,13 +2406,13 @@ static void nfp_net_set_rx_mode(struct net_device *netdev) new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC; } - if (new_ctrl == nn->ctrl) + if (new_ctrl == nn->dp.ctrl) return; nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN); - nn->ctrl = new_ctrl; + nn->dp.ctrl = new_ctrl; } static void nfp_net_rss_init_itbl(struct nfp_net *nn) @@ -2453,42 +2421,39 @@ static void nfp_net_rss_init_itbl(struct nfp_net *nn) for (i = 0; i < sizeof(nn->rss_itbl); i++) nn->rss_itbl[i] = - ethtool_rxfh_indir_default(i, nn->num_rx_rings); + ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings); } -static int -nfp_net_ring_swap_enable(struct nfp_net *nn, unsigned int *num_vecs, - unsigned int *stack_tx_rings, - struct bpf_prog **xdp_prog, - struct nfp_net_ring_set *rx, 
-			 struct nfp_net_ring_set *tx)
+static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
+{
+	struct nfp_net_dp new_dp = *dp;
+
+	*dp = nn->dp;
+	nn->dp = new_dp;
+
+	nn->dp.netdev->mtu = new_dp.mtu;
+
+	if (!netif_is_rxfh_configured(nn->dp.netdev))
+		nfp_net_rss_init_itbl(nn);
+}
+
+static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
 {
 	unsigned int r;
 	int err;

-	if (rx)
-		nfp_net_rx_ring_set_swap(nn, rx);
-	if (tx)
-		nfp_net_tx_ring_set_swap(nn, tx);
-
-	swap(*num_vecs, nn->num_r_vecs);
-	swap(*stack_tx_rings, nn->num_stack_tx_rings);
-	*xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);
+	nfp_net_dp_swap(nn, dp);

 	for (r = 0; r < nn->max_r_vecs; r++)
-		nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r);
+		nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);

-	if (!netif_is_rxfh_configured(nn->netdev))
-		nfp_net_rss_init_itbl(nn);
-
-	err = netif_set_real_num_rx_queues(nn->netdev,
-					   nn->num_rx_rings);
+	err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings);
 	if (err)
 		return err;

-	if (nn->netdev->real_num_tx_queues != nn->num_stack_tx_rings) {
-		err = netif_set_real_num_tx_queues(nn->netdev,
-						   nn->num_stack_tx_rings);
+	if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) {
+		err = netif_set_real_num_tx_queues(nn->dp.netdev,
+						   nn->dp.num_stack_tx_rings);
 		if (err)
 			return err;
 	}

@@ -2496,18 +2461,36 @@ nfp_net_ring_swap_enable(struct nfp_net *nn, unsigned int *num_vecs,
 	return __nfp_net_set_config_and_enable(nn);
 }

-static int
-nfp_net_check_config(struct nfp_net *nn, struct bpf_prog *xdp_prog,
-		     struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx)
+struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
+{
+	struct nfp_net_dp *new;
+
+	new = kmalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	*new = nn->dp;
+
+	/* Clear things which need to be recomputed */
+	new->fl_bufsz = 0;
+	new->tx_rings = NULL;
+	new->rx_rings = NULL;
+	new->num_r_vecs = 0;
+	new->num_stack_tx_rings = 0;
+
+	return new;
+}
+
+static int nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp)
 {
 	/* XDP-enabled tests */
-	if (!xdp_prog)
+	if (!dp->xdp_prog)
 		return 0;

-	if (rx && nfp_net_calc_fl_bufsz(nn, rx->mtu) > PAGE_SIZE) {
+	if (dp->fl_bufsz > PAGE_SIZE) {
 		nn_warn(nn, "MTU too large w/ XDP enabled\n");
 		return -EINVAL;
 	}
-	if (tx && tx->n_rings > nn->max_tx_rings) {
+	if (dp->num_tx_rings > nn->max_tx_rings) {
 		nn_warn(nn, "Insufficient number of TX rings w/ XDP enabled\n");
 		return -EINVAL;
 	}
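The three helpers above define the new reconfiguration API: callers no longer pass ad-hoc ring-set descriptors, they build a complete candidate datapath instead. The pattern every caller below follows is clone, modify, reconfig; nfp_net_ring_reconfig() consumes the clone, committing it on success and freeing it in every case. A minimal sketch of a caller (nfp_net_set_foo() and the field it changes are hypothetical, not part of this patch):

	/* Hypothetical caller showing the clone -> modify -> reconfig
	 * pattern; only the candidate copy is touched, the live nn->dp
	 * is exchanged by nfp_net_ring_reconfig() once it is safe.
	 */
	static int nfp_net_set_foo(struct nfp_net *nn, unsigned int foo_rings)
	{
		struct nfp_net_dp *dp;

		dp = nfp_net_clone_dp(nn);	/* copy of nn->dp, derived fields cleared */
		if (!dp)
			return -ENOMEM;

		dp->num_rx_rings = foo_rings;	/* stage the change on the copy */

		return nfp_net_ring_reconfig(nn, dp);	/* commits or frees dp */
	}
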
@@ -2515,119 +2498,94 @@ nfp_net_check_config(struct nfp_net *nn, struct bpf_prog *xdp_prog,
-static void
-nfp_net_ring_reconfig_down(struct nfp_net *nn, struct bpf_prog **xdp_prog,
-			   struct nfp_net_ring_set *rx,
-			   struct nfp_net_ring_set *tx,
-			   unsigned int stack_tx_rings, unsigned int num_vecs)
+int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp)
 {
-	nn->netdev->mtu = rx ? rx->mtu : nn->netdev->mtu;
-	nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, nn->netdev->mtu);
-	nn->rxd_cnt = rx ? rx->dcnt : nn->rxd_cnt;
-	nn->txd_cnt = tx ? tx->dcnt : nn->txd_cnt;
-	nn->num_rx_rings = rx ? rx->n_rings : nn->num_rx_rings;
-	nn->num_tx_rings = tx ? tx->n_rings : nn->num_tx_rings;
-	nn->num_stack_tx_rings = stack_tx_rings;
-	nn->num_r_vecs = num_vecs;
-	*xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);
+	int r, err;

-	if (!netif_is_rxfh_configured(nn->netdev))
-		nfp_net_rss_init_itbl(nn);
-}
+	dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);

-int
-nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
-		      struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx)
-{
-	unsigned int stack_tx_rings, num_vecs, r;
-	int err;
+	dp->num_stack_tx_rings = dp->num_tx_rings;
+	if (dp->xdp_prog)
+		dp->num_stack_tx_rings -= dp->num_rx_rings;

-	stack_tx_rings = tx ? tx->n_rings : nn->num_tx_rings;
-	if (*xdp_prog)
-		stack_tx_rings -= rx ? rx->n_rings : nn->num_rx_rings;
+	dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);

-	num_vecs = max(rx ? rx->n_rings : nn->num_rx_rings, stack_tx_rings);
-
-	err = nfp_net_check_config(nn, *xdp_prog, rx, tx);
+	err = nfp_net_check_config(nn, dp);
 	if (err)
-		return err;
+		goto exit_free_dp;

-	if (!netif_running(nn->netdev)) {
-		nfp_net_ring_reconfig_down(nn, xdp_prog, rx, tx,
-					   stack_tx_rings, num_vecs);
-		return 0;
+	if (!netif_running(dp->netdev)) {
+		nfp_net_dp_swap(nn, dp);
+		err = 0;
+		goto exit_free_dp;
 	}

 	/* Prepare new rings */
-	for (r = nn->num_r_vecs; r < num_vecs; r++) {
+	for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
 		err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
 		if (err) {
-			num_vecs = r;
+			dp->num_r_vecs = r;
 			goto err_cleanup_vecs;
 		}
 	}
-	if (rx) {
-		if (!nfp_net_rx_ring_set_prepare(nn, rx, *xdp_prog)) {
-			err = -ENOMEM;
-			goto err_cleanup_vecs;
-		}
-	}
-	if (tx) {
-		if (!nfp_net_tx_ring_set_prepare(nn, tx, stack_tx_rings)) {
-			err = -ENOMEM;
-			goto err_free_rx;
-		}
-	}
+
+	err = nfp_net_rx_rings_prepare(nn, dp);
+	if (err)
+		goto err_cleanup_vecs;
+
+	err = nfp_net_tx_rings_prepare(nn, dp);
+	if (err)
+		goto err_free_rx;

 	/* Stop device, swap in new rings, try to start the firmware */
 	nfp_net_close_stack(nn);
 	nfp_net_clear_config_and_disable(nn);

-	err = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings,
-				       xdp_prog, rx, tx);
+	err = nfp_net_dp_swap_enable(nn, dp);
 	if (err) {
 		int err2;

 		nfp_net_clear_config_and_disable(nn);

 		/* Try with old configuration and old rings */
-		err2 = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings,
-						xdp_prog, rx, tx);
+		err2 = nfp_net_dp_swap_enable(nn, dp);
 		if (err2)
 			nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
 			       err, err2);
 	}
-	for (r = num_vecs - 1; r >= nn->num_r_vecs; r--)
+	for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
 		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);

-	if (rx)
-		nfp_net_rx_ring_set_free(nn, rx, *xdp_prog);
-	if (tx)
-		nfp_net_tx_ring_set_free(nn, tx);
+	nfp_net_rx_rings_free(dp);
+	nfp_net_tx_rings_free(dp);

 	nfp_net_open_stack(nn);
+exit_free_dp:
+	kfree(dp);

 	return err;

 err_free_rx:
-	if (rx)
-		nfp_net_rx_ring_set_free(nn, rx, *xdp_prog);
+	nfp_net_rx_rings_free(dp);
 err_cleanup_vecs:
-	for (r = num_vecs - 1; r >= nn->num_r_vecs; r--)
+	for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
 		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+	kfree(dp);
 	return err;
 }

 static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct nfp_net *nn = netdev_priv(netdev);
-	struct nfp_net_ring_set rx = {
-		.n_rings = nn->num_rx_rings,
-		.mtu = new_mtu,
-		.dcnt = nn->rxd_cnt,
-	};
+	struct nfp_net_dp *dp;

-	return nfp_net_ring_reconfig(nn, &nn->xdp_prog, &rx, NULL);
+	dp = nfp_net_clone_dp(nn);
+	if (!dp)
+		return -ENOMEM;
+
+	dp->mtu = new_mtu;
+
+	return nfp_net_ring_reconfig(nn, dp);
 }

 static void nfp_net_stat64(struct net_device *netdev,
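Note how the error path above leans on nfp_net_dp_swap() being a plain exchange: a failed nfp_net_dp_swap_enable() has already made the candidate live, so calling it a second time swaps the original configuration back, and the rings subsequently freed through dp are whichever set ended up unused. Condensed (a restatement of the flow above, not new logic):

	err = nfp_net_dp_swap_enable(nn, dp);		/* candidate becomes live */
	if (err) {
		nfp_net_clear_config_and_disable(nn);
		err2 = nfp_net_dp_swap_enable(nn, dp);	/* same call restores old dp */
	}
	nfp_net_rx_rings_free(dp);	/* frees the losing set of rings */
	nfp_net_tx_rings_free(dp);
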
@@ -2636,7 +2594,7 @@ static void nfp_net_stat64(struct net_device *netdev,
 	struct nfp_net *nn = netdev_priv(netdev);
 	int r;

-	for (r = 0; r < nn->num_r_vecs; r++) {
+	for (r = 0; r < nn->dp.num_r_vecs; r++) {
 		struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
 		u64 data[3];
 		unsigned int start;
@@ -2683,7 +2641,7 @@ nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
 		return -ENOTSUPP;

 	if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) {
-		if (!nn->bpf_offload_xdp)
+		if (!nn->dp.bpf_offload_xdp)
 			return nfp_net_bpf_offload(nn, tc->cls_bpf);
 		else
 			return -EBUSY;
@@ -2702,7 +2660,7 @@ static int nfp_net_set_features(struct net_device *netdev,

 	/* Assume this is not called with features we have not advertised */

-	new_ctrl = nn->ctrl;
+	new_ctrl = nn->dp.ctrl;

 	if (changed & NETIF_F_RXCSUM) {
 		if (features & NETIF_F_RXCSUM)
@@ -2746,7 +2704,7 @@ static int nfp_net_set_features(struct net_device *netdev,
 			new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
 	}

-	if (changed & NETIF_F_HW_TC && nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
+	if (changed & NETIF_F_HW_TC && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
 		nn_err(nn, "Cannot disable HW TC offload while in use\n");
 		return -EBUSY;
 	}
@@ -2754,16 +2712,16 @@ static int nfp_net_set_features(struct net_device *netdev,
 	nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
 	       netdev->features, features, changed);

-	if (new_ctrl == nn->ctrl)
+	if (new_ctrl == nn->dp.ctrl)
 		return 0;

-	nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->ctrl, new_ctrl);
+	nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
 	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
 	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
 	if (err)
 		return err;

-	nn->ctrl = new_ctrl;
+	nn->dp.ctrl = new_ctrl;

 	return 0;
 }
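nfp_net_set_features() is one of several places that update the cached control word with the same sequence: derive new_ctrl from nn->dp.ctrl, write it to the BAR, post a reconfig, and only cache the value once the firmware has acknowledged it. A minimal sketch of that sequence (NFP_NET_CFG_CTRL_SOMEFLAG stands in for any capability bit):

	/* Sketch of the ctrl-word update sequence used throughout the
	 * driver; NFP_NET_CFG_CTRL_SOMEFLAG is a placeholder bit.
	 */
	u32 new_ctrl = nn->dp.ctrl | NFP_NET_CFG_CTRL_SOMEFLAG;

	if (new_ctrl == nn->dp.ctrl)
		return 0;				/* nothing to do */

	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;				/* cache keeps the old value */

	nn->dp.ctrl = new_ctrl;				/* commit only after FW ack */
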
@@ -2846,7 +2804,7 @@ static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)

 	nn->vxlan_ports[idx] = port;

-	if (!(nn->ctrl & NFP_NET_CFG_CTRL_VXLAN))
+	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN))
 		return;

 	BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
@@ -2925,8 +2883,8 @@ static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
 	if (!nfp_net_ebpf_capable(nn))
 		return -EINVAL;

-	if (nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
-		if (!nn->bpf_offload_xdp)
+	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
+		if (!nn->dp.bpf_offload_xdp)
 			return prog ? -EBUSY : 0;
 		cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
 	} else {
@@ -2939,48 +2897,47 @@ static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
 	/* Stop offload if replace not possible */
 	if (ret && cmd.command == TC_CLSBPF_REPLACE)
 		nfp_net_xdp_offload(nn, NULL);
-	nn->bpf_offload_xdp = prog && !ret;
+	nn->dp.bpf_offload_xdp = prog && !ret;
 	return ret;
 }

 static int nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog)
 {
-	struct nfp_net_ring_set rx = {
-		.n_rings = nn->num_rx_rings,
-		.mtu = nn->netdev->mtu,
-		.dcnt = nn->rxd_cnt,
-	};
-	struct nfp_net_ring_set tx = {
-		.n_rings = nn->num_tx_rings,
-		.dcnt = nn->txd_cnt,
-	};
+	struct bpf_prog *old_prog = nn->dp.xdp_prog;
+	struct nfp_net_dp *dp;
 	int err;

-	if (prog && prog->xdp_adjust_head) {
-		nn_err(nn, "Does not support bpf_xdp_adjust_head()\n");
-		return -EOPNOTSUPP;
-	}
-	if (!prog && !nn->xdp_prog)
+	if (!prog && !nn->dp.xdp_prog)
 		return 0;
-	if (prog && nn->xdp_prog) {
-		prog = xchg(&nn->xdp_prog, prog);
+	if (prog && nn->dp.xdp_prog) {
+		prog = xchg(&nn->dp.xdp_prog, prog);
 		bpf_prog_put(prog);
-		nfp_net_xdp_offload(nn, nn->xdp_prog);
+		nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
 		return 0;
 	}

-	tx.n_rings += prog ? nn->num_rx_rings : -nn->num_rx_rings;
+	dp = nfp_net_clone_dp(nn);
+	if (!dp)
+		return -ENOMEM;
+
+	dp->xdp_prog = prog;
+	dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
+	dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+	if (prog)
+		dp->rx_dma_off = XDP_PACKET_HEADROOM -
+			(nn->dp.rx_offset ?: NFP_NET_MAX_PREPEND);
+	else
+		dp->rx_dma_off = 0;

 	/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
-	err = nfp_net_ring_reconfig(nn, &prog, &rx, &tx);
+	err = nfp_net_ring_reconfig(nn, dp);
 	if (err)
 		return err;

-	/* @prog got swapped and is now the old one */
-	if (prog)
-		bpf_prog_put(prog);
+	if (old_prog)
+		bpf_prog_put(old_prog);

-	nfp_net_xdp_offload(nn, nn->xdp_prog);
+	nfp_net_xdp_offload(nn, nn->dp.xdp_prog);

 	return 0;
 }
@@ -2993,7 +2950,7 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
 	case XDP_SETUP_PROG:
 		return nfp_net_xdp_setup(nn, xdp->prog);
 	case XDP_QUERY_PROG:
-		xdp->prog_attached = !!nn->xdp_prog;
+		xdp->prog_attached = !!nn->dp.xdp_prog;
 		return 0;
 	default:
 		return -EINVAL;
 	}
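nfp_net_xdp_setup() stages three coupled changes on the cloned datapath: an extra TX ring per RX ring for XDP_TX, bidirectional DMA mapping so the program may rewrite packets in place, and a DMA offset that turns the firmware's metadata prepend room into XDP headroom. Worked numbers, assuming XDP_PACKET_HEADROOM of 256 and a dynamic rx_offset of 0, so NFP_NET_MAX_PREPEND (64 in this driver) is used:

	dp->num_tx_rings += nn->dp.num_rx_rings;	/* one XDP_TX ring per RX ring */
	dp->rx_dma_dir = DMA_BIDIRECTIONAL;		/* program may rewrite the frame */
	dp->rx_dma_off = 256 - 64;			/* 192 bytes of extra headroom,
							 * comfortably fits the u8 field */
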
"VF " : "", + nn->dp.num_tx_rings, nn->max_tx_rings, + nn->dp.num_rx_rings, nn->max_rx_rings); nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n", nn->fw_ver.resv, nn->fw_ver.class, nn->fw_ver.major, nn->fw_ver.minor, @@ -3079,22 +3036,24 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev, SET_NETDEV_DEV(netdev, &pdev->dev); nn = netdev_priv(netdev); - nn->netdev = netdev; - nn->dev = &pdev->dev; + nn->dp.netdev = netdev; + nn->dp.dev = &pdev->dev; nn->pdev = pdev; nn->max_tx_rings = max_tx_rings; nn->max_rx_rings = max_rx_rings; - nn->num_tx_rings = min_t(unsigned int, max_tx_rings, num_online_cpus()); - nn->num_rx_rings = min_t(unsigned int, max_rx_rings, + nn->dp.num_tx_rings = min_t(unsigned int, + max_tx_rings, num_online_cpus()); + nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings, netif_get_num_default_rss_queues()); - nn->num_r_vecs = max(nn->num_tx_rings, nn->num_rx_rings); - nn->num_r_vecs = min_t(unsigned int, nn->num_r_vecs, num_online_cpus()); + nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings); + nn->dp.num_r_vecs = min_t(unsigned int, + nn->dp.num_r_vecs, num_online_cpus()); - nn->txd_cnt = NFP_NET_TX_DESCS_DEFAULT; - nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; + nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT; + nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; spin_lock_init(&nn->reconfig_lock); spin_lock_init(&nn->rx_filter_lock); @@ -3114,7 +3073,7 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev, */ void nfp_net_netdev_free(struct nfp_net *nn) { - free_netdev(nn->netdev); + free_netdev(nn->dp.netdev); } /** @@ -3156,7 +3115,7 @@ static void nfp_net_rss_init(struct nfp_net *nn) func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS); if (func_bit == NFP_NET_CFG_RSS_HFUNCS) { - dev_warn(nn->dev, + dev_warn(nn->dp.dev, "Bad RSS config, defaulting to Toeplitz hash\n"); func_bit = ETH_RSS_HASH_TOP_BIT; } @@ -3196,7 +3155,16 @@ int nfp_net_netdev_init(struct net_device *netdev) struct nfp_net *nn = netdev_priv(netdev); int err; - nn->chained_metadata_format = nn->fw_ver.major > 3; + /* XDP calls for 256 byte packet headroom which wouldn't fit in a u8. + * We, however, reuse the metadata prepend space for XDP buffers which + * is at least 1 byte long and as long as XDP headroom doesn't increase + * above 256 the *extra* XDP headroom will fit on 8 bits. 
@@ -3114,7 +3073,7 @@
  */
 void nfp_net_netdev_free(struct nfp_net *nn)
 {
-	free_netdev(nn->netdev);
+	free_netdev(nn->dp.netdev);
 }

 /**
@@ -3156,7 +3115,7 @@ static void nfp_net_rss_init(struct nfp_net *nn)

 	func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
 	if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
-		dev_warn(nn->dev,
+		dev_warn(nn->dp.dev,
 			 "Bad RSS config, defaulting to Toeplitz hash\n");
 		func_bit = ETH_RSS_HASH_TOP_BIT;
 	}
@@ -3196,7 +3155,16 @@ int nfp_net_netdev_init(struct net_device *netdev)
 	struct nfp_net *nn = netdev_priv(netdev);
 	int err;

-	nn->chained_metadata_format = nn->fw_ver.major > 3;
+	/* XDP calls for 256 byte packet headroom which wouldn't fit in a u8.
+	 * We, however, reuse the metadata prepend space for XDP buffers, which
+	 * is at least 1 byte long, and as long as XDP headroom doesn't grow
+	 * above 256 the *extra* XDP headroom will fit in 8 bits.
+	 */
+	BUILD_BUG_ON(XDP_PACKET_HEADROOM > 256);
+
+	nn->dp.chained_metadata_format = nn->fw_ver.major > 3;
+
+	nn->dp.rx_dma_dir = DMA_FROM_DEVICE;

 	/* Get some of the read-only fields from the BAR */
 	nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
@@ -3205,17 +3173,26 @@ int nfp_net_netdev_init(struct net_device *netdev)
 	nfp_net_write_mac_addr(nn);

 	/* Determine RX packet/metadata boundary offset */
-	if (nn->fw_ver.major >= 2)
-		nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
-	else
-		nn->rx_offset = NFP_NET_RX_OFFSET;
+	if (nn->fw_ver.major >= 2) {
+		u32 reg;
+
+		reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
+		if (reg > NFP_NET_MAX_PREPEND) {
+			nn_err(nn, "Invalid rx offset: %d\n", reg);
+			return -EINVAL;
+		}
+		nn->dp.rx_offset = reg;
+	} else {
+		nn->dp.rx_offset = NFP_NET_RX_OFFSET;
+	}

 	/* Set default MTU and Freelist buffer size */
 	if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
 		netdev->mtu = nn->max_mtu;
 	else
 		netdev->mtu = NFP_NET_DEFAULT_MTU;
-	nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, netdev->mtu);
+	nn->dp.mtu = netdev->mtu;
+	nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);

 	/* Advertise/enable offloads based on capabilities
 	 *
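Bounding rx_offset by NFP_NET_MAX_PREPEND keeps the XDP offset computed in nfp_net_xdp_setup() (XDP_PACKET_HEADROOM minus the prepend space) non-negative and small enough for the u8 dp->rx_dma_off. The freelist size now derives purely from datapath fields; roughly, the buffer has to cover the following (a sketch of the budget only, the authoritative version being nfp_net_calc_fl_bufsz() in this driver):

	/* Rough shape of the freelist buffer budget: */
	fl_bufsz = dp->rx_dma_off		/* extra XDP headroom, if any */
		 + dp->rx_offset		/* FW metadata prepend */
		 + ETH_HLEN + VLAN_HLEN * 2	/* L2 framing */
		 + dp->mtu;			/* payload */
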
@@ -3226,31 +3203,31 @@ int nfp_net_netdev_init(struct net_device *netdev)
 	netdev->hw_features = NETIF_F_HIGHDMA;
 	if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) {
 		netdev->hw_features |= NETIF_F_RXCSUM;
-		nn->ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+		nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
 	}
 	if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
 		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-		nn->ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+		nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
 	}
 	if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
 		netdev->hw_features |= NETIF_F_SG;
-		nn->ctrl |= NFP_NET_CFG_CTRL_GATHER;
+		nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
 	}
 	if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) {
 		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
-		nn->ctrl |= NFP_NET_CFG_CTRL_LSO;
+		nn->dp.ctrl |= NFP_NET_CFG_CTRL_LSO;
 	}
 	if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
 		netdev->hw_features |= NETIF_F_RXHASH;
 		nfp_net_rss_init(nn);
-		nn->ctrl |= NFP_NET_CFG_CTRL_RSS;
+		nn->dp.ctrl |= NFP_NET_CFG_CTRL_RSS;
 	}
 	if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
 	    nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
 		if (nn->cap & NFP_NET_CFG_CTRL_LSO)
 			netdev->hw_features |= NETIF_F_GSO_GRE |
 					       NETIF_F_GSO_UDP_TUNNEL;
-		nn->ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
+		nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;

 		netdev->hw_enc_features = netdev->hw_features;
 	}
@@ -3259,11 +3236,11 @@ int nfp_net_netdev_init(struct net_device *netdev)

 	if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
-		nn->ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+		nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
 	}
 	if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
-		nn->ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+		nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
 	}

 	netdev->features = netdev->hw_features;
@@ -3276,14 +3253,14 @@ int nfp_net_netdev_init(struct net_device *netdev)

 	/* Allow L2 Broadcast and Multicast through by default, if supported */
 	if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
-		nn->ctrl |= NFP_NET_CFG_CTRL_L2BC;
+		nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
 	if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
-		nn->ctrl |= NFP_NET_CFG_CTRL_L2MC;
+		nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC;

 	/* Allow IRQ moderation, if supported */
 	if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
 		nfp_net_irqmod_init(nn);
-		nn->ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
+		nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
 	}

 	/* Stash the re-configuration queue away.  First odd queue in TX Bar */
@@ -3322,9 +3299,9 @@ void nfp_net_netdev_clean(struct net_device *netdev)
 {
 	struct nfp_net *nn = netdev_priv(netdev);

-	if (nn->xdp_prog)
-		bpf_prog_put(nn->xdp_prog);
-	if (nn->bpf_offload_xdp)
+	if (nn->dp.xdp_prog)
+		bpf_prog_put(nn->dp.xdp_prog);
+	if (nn->dp.bpf_offload_xdp)
 		nfp_net_xdp_offload(nn, NULL);
-	unregister_netdev(nn->netdev);
+	unregister_netdev(nn->dp.netdev);
 }
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index edfa59e51fdd..74125584260b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -54,7 +54,7 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
 		goto out;
 	nn = r_vec->nfp_net;
 	rx_ring = r_vec->rx_ring;
-	if (!netif_running(nn->netdev))
+	if (!netif_running(nn->dp.netdev))
 		goto out;

 	rxd_cnt = rx_ring->cnt;
@@ -145,7 +145,7 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
 	if (!r_vec->nfp_net || !tx_ring)
 		goto out;
 	nn = r_vec->nfp_net;
-	if (!netif_running(nn->netdev))
+	if (!netif_running(nn->dp.netdev))
 		goto out;

 	txd_cnt = tx_ring->cnt;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index a1bca2dca0a5..ed22a813e579 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -127,9 +127,9 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
 };

 #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
-#define NN_ET_RVEC_STATS_LEN (nn->num_r_vecs * 3)
+#define NN_ET_RVEC_STATS_LEN (nn->dp.num_r_vecs * 3)
 #define NN_ET_RVEC_GATHER_STATS 7
-#define NN_ET_QUEUE_STATS_LEN ((nn->num_tx_rings + nn->num_rx_rings) * 2)
+#define NN_ET_QUEUE_STATS_LEN ((nn->dp.num_tx_rings + nn->dp.num_rx_rings) * 2)
 #define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
 			 NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)

@@ -180,30 +180,22 @@ static void nfp_net_get_ringparam(struct net_device *netdev,
 	ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
 	ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
-	ring->rx_pending = nn->rxd_cnt;
-	ring->tx_pending = nn->txd_cnt;
+	ring->rx_pending = nn->dp.rxd_cnt;
+	ring->tx_pending = nn->dp.txd_cnt;
 }

 static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
 {
-	struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
-	struct nfp_net_ring_set rx = {
-		.n_rings = nn->num_rx_rings,
-		.mtu = nn->netdev->mtu,
-		.dcnt = rxd_cnt,
-	};
-	struct nfp_net_ring_set tx = {
-		.n_rings = nn->num_tx_rings,
-		.dcnt = txd_cnt,
-	};
+	struct nfp_net_dp *dp;

-	if (nn->rxd_cnt != rxd_cnt)
-		reconfig_rx = &rx;
-	if (nn->txd_cnt != txd_cnt)
-		reconfig_tx = &tx;
+	dp = nfp_net_clone_dp(nn);
+	if (!dp)
+		return -ENOMEM;

-	return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
-				     reconfig_rx, reconfig_tx);
+	dp->rxd_cnt = rxd_cnt;
+	dp->txd_cnt = txd_cnt;
+
+	return nfp_net_ring_reconfig(nn, dp);
 }

 static int nfp_net_set_ringparam(struct net_device *netdev,
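Descriptor resizes from ethtool now ride the same clone path: nfp_net_set_ring_size() stages both counts on the copy, and nothing in the live datapath is touched unless the full reconfig succeeds. For instance, an `ethtool -G <dev> rx 2048 tx 1024` request (device name arbitrary) reaches this function, after nfp_net_set_ringparam() has range-checked it, as:

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;
	dp->rxd_cnt = 2048;	/* within NFP_NET_{MIN,MAX}_RX_DESCS */
	dp->txd_cnt = 1024;	/* within NFP_NET_{MIN,MAX}_TX_DESCS */
	return nfp_net_ring_reconfig(nn, dp);
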
@@ -224,11 +216,11 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
 	    txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
 		return -EINVAL;

-	if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
+	if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
 		return 0;

 	nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
-	       nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);
+	       nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);

 	return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
 }
@@ -246,7 +238,7 @@ static void nfp_net_get_strings(struct net_device *netdev,
 			memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
-		for (i = 0; i < nn->num_r_vecs; i++) {
+		for (i = 0; i < nn->dp.num_r_vecs; i++) {
 			sprintf(p, "rvec_%u_rx_pkts", i);
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "rvec_%u_tx_pkts", i);
@@ -268,13 +260,13 @@ static void nfp_net_get_strings(struct net_device *netdev,
 		p += ETH_GSTRING_LEN;
 		strncpy(p, "tx_lso", ETH_GSTRING_LEN);
 		p += ETH_GSTRING_LEN;
-		for (i = 0; i < nn->num_tx_rings; i++) {
+		for (i = 0; i < nn->dp.num_tx_rings; i++) {
 			sprintf(p, "txq_%u_pkts", i);
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "txq_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
 		}
-		for (i = 0; i < nn->num_rx_rings; i++) {
+		for (i = 0; i < nn->dp.num_rx_rings; i++) {
 			sprintf(p, "rxq_%u_pkts", i);
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "rxq_%u_bytes", i);
@@ -307,12 +299,12 @@ static void nfp_net_get_stats(struct net_device *netdev,
 			break;

 		case NFP_NET_DEV_ET_STATS:
-			io_p = nn->ctrl_bar + nfp_net_et_stats[i].off;
+			io_p = nn->dp.ctrl_bar + nfp_net_et_stats[i].off;
 			data[i] = readq(io_p);
 			break;
 		}
 	}
-	for (j = 0; j < nn->num_r_vecs; j++) {
+	for (j = 0; j < nn->dp.num_r_vecs; j++) {
 		unsigned int start;

 		do {
@@ -338,16 +330,16 @@ static void nfp_net_get_stats(struct net_device *netdev,
 	}
 	for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
 		data[i++] = gathered_stats[j];
-	for (j = 0; j < nn->num_tx_rings; j++) {
-		io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
+	for (j = 0; j < nn->dp.num_tx_rings; j++) {
+		io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
 		data[i++] = readq(io_p);
-		io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
+		io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
 		data[i++] = readq(io_p);
 	}
-	for (j = 0; j < nn->num_rx_rings; j++) {
-		io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
+	for (j = 0; j < nn->dp.num_rx_rings; j++) {
+		io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
 		data[i++] = readq(io_p);
-		io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
+		io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
 		data[i++] = readq(io_p);
 	}
 }
@@ -411,7 +403,7 @@ static int nfp_net_get_rxnfc(struct net_device *netdev,

 	switch (cmd->cmd) {
 	case ETHTOOL_GRXRINGS:
-		cmd->data = nn->num_rx_rings;
+		cmd->data = nn->dp.num_rx_rings;
 		return 0;
 	case ETHTOOL_GRXFH:
 		return nfp_net_get_rss_hash_opts(nn, cmd);
@@ -461,7 +453,7 @@ static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
 	if (new_rss_cfg == nn->rss_cfg)
 		return 0;

-	writel(new_rss_cfg, nn->ctrl_bar + NFP_NET_CFG_RSS_CTRL);
+	writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL);
 	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
 	if (err)
 		return err;
@@ -573,7 +565,7 @@ static void nfp_net_get_regs(struct net_device *netdev,
 	regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);

 	for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
-		regs_buf[i] = readl(nn->ctrl_bar + (i * sizeof(u32)));
+		regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32)));
 }

 static int nfp_net_get_coalesce(struct net_device *netdev,
@@ -745,16 +737,16 @@ static void nfp_net_get_channels(struct net_device *netdev,
 	struct nfp_net *nn = netdev_priv(netdev);
 	unsigned int num_tx_rings;

-	num_tx_rings = nn->num_tx_rings;
-	if (nn->xdp_prog)
-		num_tx_rings -= nn->num_rx_rings;
+	num_tx_rings = nn->dp.num_tx_rings;
+	if (nn->dp.xdp_prog)
+		num_tx_rings -= nn->dp.num_rx_rings;

 	channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
 	channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
 	channel->max_combined = min(channel->max_rx, channel->max_tx);
 	channel->max_other = NFP_NET_NON_Q_VECTORS;
-	channel->combined_count = min(nn->num_rx_rings, num_tx_rings);
-	channel->rx_count = nn->num_rx_rings - channel->combined_count;
+	channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings);
+	channel->rx_count = nn->dp.num_rx_rings - channel->combined_count;
 	channel->tx_count = num_tx_rings - channel->combined_count;
 	channel->other_count = NFP_NET_NON_Q_VECTORS;
 }
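With XDP attached, the accounting in nfp_net_get_channels() reports stack rings only, since the per-RX-ring XDP_TX rings are invisible to the user. A worked example with num_rx_rings = 4 and num_tx_rings = 12 (8 stack + 4 XDP):

	/* num_tx_rings (stack)    = 12 - 4    = 8
	 * channel->combined_count = min(4, 8) = 4
	 * channel->rx_count       = 4 - 4     = 0
	 * channel->tx_count       = 8 - 4     = 4
	 */
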
@@ -762,29 +754,19 @@ static void nfp_net_get_channels(struct net_device *netdev,
 static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
 				 unsigned int total_tx)
 {
-	struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
-	struct nfp_net_ring_set rx = {
-		.n_rings = total_rx,
-		.mtu = nn->netdev->mtu,
-		.dcnt = nn->rxd_cnt,
-	};
-	struct nfp_net_ring_set tx = {
-		.n_rings = total_tx,
-		.dcnt = nn->txd_cnt,
-	};
+	struct nfp_net_dp *dp;

-	if (nn->num_rx_rings != total_rx)
-		reconfig_rx = &rx;
-	if (nn->num_stack_tx_rings != total_tx ||
-	    (nn->xdp_prog && reconfig_rx))
-		reconfig_tx = &tx;
+	dp = nfp_net_clone_dp(nn);
+	if (!dp)
+		return -ENOMEM;

-	/* nfp_net_check_config() will catch tx.n_rings > nn->max_tx_rings */
-	if (nn->xdp_prog)
-		tx.n_rings += total_rx;
+	dp->num_rx_rings = total_rx;
+	dp->num_tx_rings = total_tx;
+	/* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */
+	if (dp->xdp_prog)
+		dp->num_tx_rings += total_rx;

-	return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
-				     reconfig_rx, reconfig_tx);
+	return nfp_net_ring_reconfig(nn, dp);
 }

 static int nfp_net_set_channels(struct net_device *netdev,
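Conversely, when the user changes channel counts while XDP is active, nfp_net_set_num_rings() adds the hidden XDP_TX rings back on top of the request and leaves oversubscription to nfp_net_check_config() in the reconfig path. For example, an `ethtool -L <dev> combined 4` request with a program attached stages:

	/* total_rx = 4, total_tx = 4 (user-visible request)
	 * dp->num_tx_rings = 4 + 4 = 8   (4 stack + 4 XDP_TX)
	 * nfp_net_ring_reconfig() then fails cleanly if 8 > nn->max_tx_rings.
	 */
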
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 8a9b3f3b95a8..2025cb7c6d90 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -130,7 +130,7 @@ err_area:
 }

 static void
-nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp,
+nfp_net_get_mac_addr_hwinfo(struct nfp_net_dp *dp, struct nfp_cpp *cpp,
 			    unsigned int id)
 {
 	u8 mac_addr[ETH_ALEN];
@@ -141,22 +141,22 @@ nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp,

 	mac_str = nfp_hwinfo_lookup(cpp, name);
 	if (!mac_str) {
-		dev_warn(nn->dev, "Can't lookup MAC address. Generate\n");
-		eth_hw_addr_random(nn->netdev);
+		dev_warn(dp->dev, "Can't lookup MAC address. Generate\n");
+		eth_hw_addr_random(dp->netdev);
 		return;
 	}

 	if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
 		   &mac_addr[0], &mac_addr[1], &mac_addr[2],
 		   &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
-		dev_warn(nn->dev,
+		dev_warn(dp->dev,
 			 "Can't parse MAC address (%s). Generate.\n", mac_str);
-		eth_hw_addr_random(nn->netdev);
+		eth_hw_addr_random(dp->netdev);
 		return;
 	}

-	ether_addr_copy(nn->netdev->dev_addr, mac_addr);
-	ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+	ether_addr_copy(dp->netdev->dev_addr, mac_addr);
+	ether_addr_copy(dp->netdev->perm_addr, mac_addr);
 }

 /**
@@ -179,12 +179,12 @@ nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_pf *pf, unsigned int id)

 		nn->eth_port = &pf->eth_tbl->ports[i];

-		ether_addr_copy(nn->netdev->dev_addr, mac_addr);
-		ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+		ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+		ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
 		return;
 	}

-	nfp_net_get_mac_addr_hwinfo(nn, pf->cpp, id);
+	nfp_net_get_mac_addr_hwinfo(&nn->dp, pf->cpp, id);
 }

 static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
@@ -306,10 +306,10 @@ nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
 	nn->cpp = pf->cpp;
 	nn->fw_ver = *fw_ver;
-	nn->ctrl_bar = ctrl_bar;
+	nn->dp.ctrl_bar = ctrl_bar;
 	nn->tx_bar = tx_bar;
 	nn->rx_bar = rx_bar;
-	nn->is_vf = 0;
+	nn->dp.is_vf = 0;
 	nn->stride_rx = stride;
 	nn->stride_tx = stride;
@@ -331,7 +331,7 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
 	 */
 	nn->me_freq_mhz = 1200;

-	err = nfp_net_netdev_init(nn->netdev);
+	err = nfp_net_netdev_init(nn->dp.netdev);
 	if (err)
 		return err;

@@ -400,7 +400,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
 	/* Get MSI-X vectors */
 	wanted_irqs = 0;
 	list_for_each_entry(nn, &pf->ports, port_list)
-		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->num_r_vecs;
+		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
 	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
 				  GFP_KERNEL);
 	if (!pf->irq_entries) {
@@ -445,7 +445,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
 err_prev_deinit:
 	list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
 		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
-		nfp_net_netdev_clean(nn->netdev);
+		nfp_net_netdev_clean(nn->dp.netdev);
 	}
 	nfp_net_irqs_disable(pf->pdev);
 err_vec_free:
@@ -571,7 +571,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)

 	list_for_each_entry(nn, &pf->ports, port_list) {
 		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
-		nfp_net_netdev_clean(nn->netdev);
+		nfp_net_netdev_clean(nn->dp.netdev);
 	}

 	nfp_net_pf_free_netdevs(pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
index f6ed1aa9d94b..b5b6f69d1e0f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
@@ -58,7 +58,7 @@ void nfp_net_filter_stats_timer(unsigned long data)

 	spin_lock_bh(&nn->rx_filter_lock);

-	if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
 		mod_timer(&nn->rx_filter_stats_timer,
 			  jiffies + NFP_NET_STAT_POLL_IVL);

@@ -132,7 +132,7 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
 			return NN_ACT_TC_DROP;

 		if (is_tcf_mirred_egress_redirect(a) &&
-		    tcf_mirred_ifindex(a) == nn->netdev->ifindex)
+		    tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex)
 			return NN_ACT_TC_REDIR;
 	}

@@ -160,7 +160,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
 	act = ret;

 	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
-	if (max_mtu < nn->netdev->mtu) {
+	if (max_mtu < nn->dp.netdev->mtu) {
 		nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
 		return -ENOTSUPP;
 	}
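The inline-BPF MTU boundary is read as a count of 64-byte units with 32 bytes reserved by the firmware. A worked reading, with the register value assumed for illustration:

	/* e.g. NFP_NET_CFG_BPF_INL_MTU == 64 (assumed value):
	 *   max_mtu = 64 * 64 - 32 = 4064 bytes
	 * an MTU of 9000 would therefore fail with -ENOTSUPP here.
	 */
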
@@ -168,7 +168,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
 	start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
 	done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

-	*code = dma_zalloc_coherent(nn->dev, code_sz, dma_addr, GFP_KERNEL);
+	*code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
 	if (!*code)
 		return -ENOMEM;

@@ -180,7 +180,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
 	return 0;

 out:
-	dma_free_coherent(nn->dev, code_sz, *code, *dma_addr);
+	dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
 	return ret;
 }

@@ -193,7 +193,7 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
 	u64 bpf_addr = dma_addr;
 	int err;

-	nn->bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
+	nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);

 	if (dense_mode)
 		bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;
@@ -207,13 +207,13 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
 		nn_err(nn, "FW command error while loading BPF: %d\n", err);

 	/* Enable passing packets through BPF function */
-	nn->ctrl |= NFP_NET_CFG_CTRL_BPF;
-	nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
+	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
 	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
 	if (err)
 		nn_err(nn, "FW command error while enabling BPF: %d\n", err);

-	dma_free_coherent(nn->dev, code_sz, code, dma_addr);
+	dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);

 	nfp_net_bpf_stats_reset(nn);
 	mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
@@ -221,16 +221,16 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,

 static int nfp_net_bpf_stop(struct nfp_net *nn)
 {
-	if (!(nn->ctrl & NFP_NET_CFG_CTRL_BPF))
+	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
 		return 0;

 	spin_lock_bh(&nn->rx_filter_lock);
-	nn->ctrl &= ~NFP_NET_CFG_CTRL_BPF;
+	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
 	spin_unlock_bh(&nn->rx_filter_lock);
-	nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

 	del_timer_sync(&nn->rx_filter_stats_timer);
-	nn->bpf_offload_skip_sw = 0;
+	nn->dp.bpf_offload_skip_sw = 0;

 	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
 }

@@ -254,7 +254,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
 		 * frames which didn't have BPF applied in the hardware should
 		 * be fine if software fallback is available, though.
		 */
-		if (nn->bpf_offload_skip_sw)
+		if (nn->dp.bpf_offload_skip_sw)
 			return -EBUSY;

 		err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
@@ -269,7 +269,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
 		return 0;

 	case TC_CLSBPF_ADD:
-		if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+		if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
 			return -EBUSY;

 		err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 39407f7cc586..86e61be6f35c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -84,12 +84,12 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
 	put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]);

 	if (!is_valid_ether_addr(mac_addr)) {
-		eth_hw_addr_random(nn->netdev);
+		eth_hw_addr_random(nn->dp.netdev);
 		return;
 	}

-	ether_addr_copy(nn->netdev->dev_addr, mac_addr);
-	ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+	ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+	ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
 }

 static int nfp_netvf_pci_probe(struct pci_dev *pdev,
@@ -210,8 +210,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
 	vf->nn = nn;

 	nn->fw_ver = fw_ver;
-	nn->ctrl_bar = ctrl_bar;
-	nn->is_vf = 1;
+	nn->dp.ctrl_bar = ctrl_bar;
+	nn->dp.is_vf = 1;
 	nn->stride_tx = stride;
 	nn->stride_rx = stride;
@@ -268,7 +268,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
 	num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
 				      NFP_NET_MIN_PORT_IRQS,
-				      NFP_NET_NON_Q_VECTORS + nn->num_r_vecs);
+				      NFP_NET_NON_Q_VECTORS +
+				      nn->dp.num_r_vecs);
 	if (!num_irqs) {
 		nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
 		err = -EIO;
@@ -282,7 +283,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
 	 */
 	nn->me_freq_mhz = 1200;

-	err = nfp_net_netdev_init(nn->netdev);
+	err = nfp_net_netdev_init(nn->dp.netdev);
 	if (err)
 		goto err_irqs_disable;

@@ -327,7 +328,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
 	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
 	nfp_net_debugfs_dir_clean(&vf->ddir);

-	nfp_net_netdev_clean(nn->netdev);
+	nfp_net_netdev_clean(nn->dp.netdev);

 	nfp_net_irqs_disable(pdev);

@@ -337,7 +338,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
 	} else {
 		iounmap(vf->q_bar);
 	}
-	iounmap(nn->ctrl_bar);
+	iounmap(nn->dp.ctrl_bar);

 	nfp_net_netdev_free(nn);