Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2018-11-21

This series contains updates to all of the Intel LAN drivers and
documentation.

Shannon Nelson updates the ixgbe kernel documentation to include IPsec
hardware offload.

Joe Perches cleans up whitespace issues in the igb driver.

Jesse updates the netdev kernel documentation for NETIF_F_GSO_UDP_L4 to
align with the actual code.  He also aligns the NAPI polling code across
all of the Intel drivers with Eric Dumazet's recommendation to check the
return value of napi_complete_done() before re-enabling interrupts, so
that interrupts stay masked while the stack is busy-polling.
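
The pattern the drivers converge on looks roughly like this (a sketch
using placeholder foo_* names, not code from any one driver below):

  static int foo_poll(struct napi_struct *napi, int budget)
  {
          struct foo_q_vector *q = container_of(napi, struct foo_q_vector, napi);
          int work_done = foo_clean_rx_irq(q, budget);

          /* budget exhausted: more work pending, stay in polling mode */
          if (work_done == budget)
                  return budget;

          /* napi_complete_done() returns false while the stack is busy-polling
           * this context; only re-enable interrupts when it returns true
           */
          if (likely(napi_complete_done(napi, work_done)))
                  foo_irq_enable(q);

          /* must return strictly less than budget once napi_complete_done()
           * has been called
           */
          return min(work_done, budget - 1);
  }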

Paul E. McKenney replaces synchronize_sched() with synchronize_rcu() for
ixgbe.
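Now that the RCU flavors have been consolidated, synchronize_rcu() also
waits for preempt-disabled regions, so the substitution is mechanical;
roughly, following the ixgbe hunks further below:

          /* wait for pending XDP buffers to drain before tearing the ring
           * down; synchronize_rcu() now also covers the preempt-disabled
           * sections that synchronize_sched() used to wait for
           */
          if (adapter->xdp_ring[0])
                  synchronize_rcu();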

Sasha implements suggestions made by Joe Perches to remove obsolete code
and to use dev_err() for error reporting instead of a driver-private macro.
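In practice (see the igc hunks below) this drops the IGC_ERR() wrapper
around pr_err() and reports against the PCI device instead:

          /* was: IGC_ERR("Wrong DMA configuration, aborting\n"); */
          dev_err(&pdev->dev, "igc: Wrong DMA config\n");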
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committer: David S. Miller
Date:      2018-11-23 11:33:54 -08:00
Commit:    9af8009082

15 changed files with 97 additions and 68 deletions


@@ -501,6 +501,19 @@ NOTE: This feature can be disabled for a specific Virtual Function (VF)::
   ip link set <pf dev> vf <vf id> spoofchk {off|on}
 
+IPsec Offload
+-------------
+The ixgbe driver supports IPsec Hardware Offload. When creating Security
+Associations with "ip xfrm ..." the 'offload' tag option can be used to
+register the IPsec SA with the driver in order to get higher throughput in
+the secure communications.
+
+The offload is also supported for ixgbe's VFs, but the VF must be set as
+'trusted' and the support must be enabled with::
+
+  ethtool --set-priv-flags eth<x> vf-ipsec on
+  ip link set eth<x> vf <y> trust on
+
 Known Issues/Troubleshooting
 ============================


@@ -115,7 +115,7 @@ set, be it TCPv4 (when NETIF_F_TSO is enabled) or TCPv6 (NETIF_F_TSO6).
 * Transmit UDP segmentation offload
 
-NETIF_F_GSO_UDP_GSO_L4 accepts a single UDP header with a payload that exceeds
+NETIF_F_GSO_UDP_L4 accepts a single UDP header with a payload that exceeds
 gso_size. On segmentation, it segments the payload on gso_size boundaries and
 replicates the network and UDP headers (fixing up the last one if less than
 gso_size).


@@ -2225,11 +2225,13 @@ static int e100_poll(struct napi_struct *napi, int budget)
         e100_rx_clean(nic, &work_done, budget);
         e100_tx_clean(nic);
 
-        /* If budget not fully consumed, exit the polling mode */
-        if (work_done < budget) {
-                napi_complete_done(napi, work_done);
+        /* If budget fully consumed, continue polling */
+        if (work_done == budget)
+                return budget;
+
+        /* only re-enable interrupt if stack agrees polling is really done */
+        if (likely(napi_complete_done(napi, work_done)))
                 e100_enable_irq(nic);
-        }
 
         return work_done;
 }


@@ -3803,14 +3803,15 @@ static int e1000_clean(struct napi_struct *napi, int budget)
         adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
 
-        if (!tx_clean_complete)
-                work_done = budget;
+        if (!tx_clean_complete || work_done == budget)
+                return budget;
 
-        /* If budget not fully consumed, exit the polling mode */
-        if (work_done < budget) {
+        /* Exit the polling mode, but don't re-enable interrupts if stack might
+         * poll us due to busy-polling
+         */
+        if (likely(napi_complete_done(napi, work_done))) {
                 if (likely(adapter->itr_setting & 3))
                         e1000_set_itr(adapter);
-                napi_complete_done(napi, work_done);
                 if (!test_bit(__E1000_DOWN, &adapter->flags))
                         e1000_irq_enable(adapter);
         }


@@ -2651,9 +2651,9 @@ err:
 /**
  * e1000e_poll - NAPI Rx polling callback
  * @napi: struct associated with this polling callback
- * @weight: number of packets driver is allowed to process this poll
+ * @budget: number of packets driver is allowed to process this poll
 **/
-static int e1000e_poll(struct napi_struct *napi, int weight)
+static int e1000e_poll(struct napi_struct *napi, int budget)
 {
         struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
                                                      napi);
@@ -2667,16 +2667,17 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
             (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
                 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
 
-        adapter->clean_rx(adapter->rx_ring, &work_done, weight);
+        adapter->clean_rx(adapter->rx_ring, &work_done, budget);
 
-        if (!tx_cleaned)
-                work_done = weight;
+        if (!tx_cleaned || work_done == budget)
+                return budget;
 
-        /* If weight not fully consumed, exit the polling mode */
-        if (work_done < weight) {
+        /* Exit the polling mode, but don't re-enable interrupts if stack might
+         * poll us due to busy-polling
+         */
+        if (likely(napi_complete_done(napi, work_done))) {
                 if (adapter->itr_setting & 3)
                         e1000_set_itr(adapter);
-                napi_complete_done(napi, work_done);
                 if (!test_bit(__E1000_DOWN, &adapter->state)) {
                         if (adapter->msix_entries)
                                 ew32(IMS, adapter->rx_ring->ims_val);


@@ -1465,11 +1465,11 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
         if (!clean_complete)
                 return budget;
 
-        /* all work done, exit the polling mode */
-        napi_complete_done(napi, work_done);
-
-        /* re-enable the q_vector */
-        fm10k_qv_enable(q_vector);
+        /* Exit the polling mode, but don't re-enable interrupts if stack might
+         * poll us due to busy-polling
+         */
+        if (likely(napi_complete_done(napi, work_done)))
+                fm10k_qv_enable(q_vector);
 
         return min(work_done, budget - 1);
 }


@@ -2667,10 +2667,11 @@ tx_only:
         if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
                 q_vector->arm_wb_state = false;
 
-        /* Work is done so exit the polling mode and re-enable the interrupt */
-        napi_complete_done(napi, work_done);
-
-        i40e_update_enable_itr(vsi, q_vector);
+        /* Exit the polling mode, but don't re-enable interrupts if stack might
+         * poll us due to busy-polling
+         */
+        if (likely(napi_complete_done(napi, work_done)))
+                i40e_update_enable_itr(vsi, q_vector);
 
         return min(work_done, budget - 1);
 }


@@ -1761,10 +1761,11 @@ tx_only:
         if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR)
                 q_vector->arm_wb_state = false;
 
-        /* Work is done so exit the polling mode and re-enable the interrupt */
-        napi_complete_done(napi, work_done);
-
-        iavf_update_enable_itr(vsi, q_vector);
+        /* Exit the polling mode, but don't re-enable interrupts if stack might
+         * poll us due to busy-polling
+         */
+        if (likely(napi_complete_done(napi, work_done)))
+                iavf_update_enable_itr(vsi, q_vector);
 
         return min(work_done, budget - 1);
 }


@@ -1103,10 +1103,12 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
         if (!clean_complete)
                 return budget;
 
-        /* Work is done so exit the polling mode and re-enable the interrupt */
-        napi_complete_done(napi, work_done);
-        if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-                ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
+        /* Exit the polling mode, but don't re-enable interrupts if stack might
+         * poll us due to busy-polling
+         */
+        if (likely(napi_complete_done(napi, work_done)))
+                if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+                        ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
 
         return min(work_done, budget - 1);
 }


@@ -1850,13 +1850,12 @@ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
          * configuration' in respect to these parameters.
          */
-        netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d \
-                            idleslope %d sendslope %d hiCredit %d \
-                            locredit %d\n",
-                   (ring->cbs_enable) ? "enabled" : "disabled",
-                   (ring->launchtime_enable) ? "enabled" : "disabled", queue,
-                   ring->idleslope, ring->sendslope, ring->hicredit,
-                   ring->locredit);
+        netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
+                   ring->cbs_enable ? "enabled" : "disabled",
+                   ring->launchtime_enable ? "enabled" : "disabled",
+                   queue,
+                   ring->idleslope, ring->sendslope,
+                   ring->hicredit, ring->locredit);
 }
 
 static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
@@ -7753,11 +7752,13 @@ static int igb_poll(struct napi_struct *napi, int budget)
         if (!clean_complete)
                 return budget;
 
-        /* If not enough Rx work done, exit the polling mode */
-        napi_complete_done(napi, work_done);
-        igb_ring_irq_enable(q_vector);
+        /* Exit the polling mode, but don't re-enable interrupts if stack might
+         * poll us due to busy-polling
+         */
+        if (likely(napi_complete_done(napi, work_done)))
+                igb_ring_irq_enable(q_vector);
 
-        return 0;
+        return min(work_done, budget - 1);
 }
 
 /**


@@ -1186,10 +1186,13 @@ static int igbvf_poll(struct napi_struct *napi, int budget)
         igbvf_clean_rx_irq(adapter, &work_done, budget);
 
-        /* If not enough Rx work done, exit the polling mode */
-        if (work_done < budget) {
-                napi_complete_done(napi, work_done);
+        if (work_done == budget)
+                return budget;
+
+        /* Exit the polling mode, but don't re-enable interrupts if stack might
+         * poll us due to busy-polling
+         */
+        if (likely(napi_complete_done(napi, work_done))) {
                 if (adapter->requested_itr & 3)
                         igbvf_set_itr(adapter);


@@ -11,8 +11,6 @@
 #include <linux/ethtool.h>
 #include <linux/sctp.h>
 
-#define IGC_ERR(args...) pr_err("igc: " args)
-
 #include "igc_hw.h"
 
 /* main */


@@ -2852,11 +2852,13 @@ static int igc_poll(struct napi_struct *napi, int budget)
         if (!clean_complete)
                 return budget;
 
-        /* If not enough Rx work done, exit the polling mode */
-        napi_complete_done(napi, work_done);
-        igc_ring_irq_enable(q_vector);
+        /* Exit the polling mode, but don't re-enable interrupts if stack might
+         * poll us due to busy-polling
+         */
+        if (likely(napi_complete_done(napi, work_done)))
+                igc_ring_irq_enable(q_vector);
 
-        return 0;
+        return min(work_done, budget - 1);
 }
 
 /**
@@ -3533,7 +3535,7 @@ static int igc_probe(struct pci_dev *pdev,
                 err = dma_set_coherent_mask(&pdev->dev,
                                             DMA_BIT_MASK(32));
                 if (err) {
-                        IGC_ERR("Wrong DMA configuration, aborting\n");
+                        dev_err(&pdev->dev, "igc: Wrong DMA config\n");
                         goto err_dma;
                 }
         }


@@ -6077,9 +6077,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
         /* Disable Rx */
         ixgbe_disable_rx(adapter);
 
-        /* synchronize_sched() needed for pending XDP buffers to drain */
+        /* synchronize_rcu() needed for pending XDP buffers to drain */
         if (adapter->xdp_ring[0])
-                synchronize_sched();
+                synchronize_rcu();
 
         ixgbe_irq_disable(adapter);
@@ -10476,7 +10476,7 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
         ixgbe_disable_rxr_hw(adapter, rx_ring);
 
         if (xdp_ring)
-                synchronize_sched();
+                synchronize_rcu();
 
         /* Rx/Tx/XDP Tx share the same napi context. */
         napi_disable(&rx_ring->q_vector->napi);


@@ -1293,16 +1293,20 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
         /* If all work not completed, return budget and keep polling */
         if (!clean_complete)
                 return budget;
 
-        /* all work done, exit the polling mode */
-        napi_complete_done(napi, work_done);
-        if (adapter->rx_itr_setting == 1)
-                ixgbevf_set_itr(q_vector);
-        if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
-            !test_bit(__IXGBEVF_REMOVING, &adapter->state))
-                ixgbevf_irq_enable_queues(adapter,
-                                          BIT(q_vector->v_idx));
-        return 0;
+        /* Exit the polling mode, but don't re-enable interrupts if stack might
+         * poll us due to busy-polling
+         */
+        if (likely(napi_complete_done(napi, work_done))) {
+                if (adapter->rx_itr_setting == 1)
+                        ixgbevf_set_itr(q_vector);
+                if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+                    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
+                        ixgbevf_irq_enable_queues(adapter,
+                                                  BIT(q_vector->v_idx));
+        }
+
+        return min(work_done, budget - 1);
 }
 
 /**