i40e: add support for XDP_REDIRECT
The driver now acts upon the XDP_REDIRECT return action. Two new ndos
are implemented, ndo_xdp_xmit and ndo_xdp_flush. The XDP_REDIRECT
action enables an XDP program to redirect frames to other netdevs.

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
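For context, XDP_REDIRECT is the return code an XDP program uses to ask the driver to forward the frame to another device. A minimal sketch of such a program is shown below (modern libbpf style; the map name tx_port and program name xdp_redirect_all are illustrative, not part of this patch — user space would populate the devmap with the egress ifindex before attaching):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Devmap holding the target netdev's ifindex at index 0; populated
 * from user space before the program is attached.
 */
struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__type(key, __u32);
	__type(value, __u32);
	__uint(max_entries, 1);
} tx_port SEC(".maps");

SEC("xdp")
int xdp_redirect_all(struct xdp_md *ctx)
{
	/* Returns XDP_REDIRECT on success (handled by the new case in
	 * i40e_run_xdp() below) or XDP_ABORTED on a bad map index.
	 */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";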
commit d9314c474d
parent 8ce29c679a
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -11815,6 +11815,8 @@ static const struct net_device_ops i40e_netdev_ops = {
 	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
 	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
 	.ndo_bpf		= i40e_xdp,
+	.ndo_xdp_xmit		= i40e_xdp_xmit,
+	.ndo_xdp_flush		= i40e_xdp_flush,
 };
 
 /**
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2214,7 +2214,7 @@ static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
 static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
 				    struct xdp_buff *xdp)
 {
-	int result = I40E_XDP_PASS;
+	int err, result = I40E_XDP_PASS;
 	struct i40e_ring *xdp_ring;
 	struct bpf_prog *xdp_prog;
 	u32 act;
@@ -2233,6 +2233,10 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 		result = i40e_xmit_xdp_ring(xdp, xdp_ring);
 		break;
+	case XDP_REDIRECT:
+		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+		result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 	case XDP_ABORTED:
@@ -2268,6 +2272,15 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
 #endif
 }
 
+static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
+{
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.
+	 */
+	wmb();
+	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
+}
+
 /**
  * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
  * @rx_ring: rx descriptor ring to transact packets on
@@ -2402,16 +2415,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 	}
 
 	if (xdp_xmit) {
-		struct i40e_ring *xdp_ring;
+		struct i40e_ring *xdp_ring =
+			rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 
-		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
-
-		/* Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch.
-		 */
-		wmb();
-
-		writel(xdp_ring->next_to_use, xdp_ring->tail);
+		i40e_xdp_ring_update_tail(xdp_ring);
+		xdp_do_flush_map();
 	}
 
 	rx_ring->skb = skb;
@@ -3659,3 +3667,49 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	return i40e_xmit_frame_ring(skb, tx_ring);
 }
+
+/**
+ * i40e_xdp_xmit - Implements ndo_xdp_xmit
+ * @dev: netdev
+ * @xdp: XDP buffer
+ *
+ * Returns Zero if sent, else an error code
+ **/
+int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	unsigned int queue_index = smp_processor_id();
+	struct i40e_vsi *vsi = np->vsi;
+	int err;
+
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		return -ENETDOWN;
+
+	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+		return -ENXIO;
+
+	err = i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[queue_index]);
+	if (err != I40E_XDP_TX)
+		return -ENOSPC;
+
+	return 0;
+}
+
+/**
+ * i40e_xdp_flush - Implements ndo_xdp_flush
+ * @dev: netdev
+ **/
+void i40e_xdp_flush(struct net_device *dev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	unsigned int queue_index = smp_processor_id();
+	struct i40e_vsi *vsi = np->vsi;
+
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		return;
+
+	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+		return;
+
+	i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]);
+}
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -510,6 +510,8 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
 void i40e_detect_recover_hung(struct i40e_vsi *vsi);
 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
 bool __i40e_chk_linearize(struct sk_buff *skb);
+int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp);
+void i40e_xdp_flush(struct net_device *dev);
 
 /**
  * i40e_get_head - Retrieve head from head writeback
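Taken together, the two new ndos are consumed by the core redirect machinery roughly as in the simplified caller-side sketch below (the helper name sketch_xdp_forward is illustrative, not the kernel's exact code). The devmap path additionally batches frames and defers the flush to xdp_do_flush_map() at the end of the napi poll, which is why i40e_clean_rx_irq() above bumps the tail only once per poll:

#include <linux/filter.h>
#include <linux/netdevice.h>

static int sketch_xdp_forward(struct net_device *target,
			      struct xdp_buff *xdp)
{
	int err;

	if (!target->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	/* Queue the frame on the target's per-CPU XDP Tx ring. */
	err = target->netdev_ops->ndo_xdp_xmit(target, xdp);
	if (err)
		return err;	/* -ENETDOWN, -ENXIO or -ENOSPC above */

	/* Kick the hardware; ends up in i40e_xdp_ring_update_tail(). */
	target->netdev_ops->ndo_xdp_flush(target);
	return 0;
}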