Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2014-07-01

This series contains updates to i40e, i40evf, igb and ixgbe.

Shannon adds the Base Address High and Low to the admin queue structure to simplify the logic in the configuration routines. He also adds code to clear all queues and interrupts, to help clean up after PXE or other early boot activity.

Kevin fixes a mask assignment value, since -1 cannot be used for unsigned integer types.

Mitch fixes an issue where, in some circumstances, the reply from the PF would come back before we were able to properly modify the admin queue pending and required flags. This would mess up the flags and put the driver in an indeterminate state, so fix this by simply setting the flags before sending the request to the admin queue. He also changes the branding string for i40evf to reduce confusion and to match up with our other marketing materials.

Kamil adds a new variable defining the admin send queue (ASQ) command write back timeout, to allow dynamic modification of this timeout.

Anjali fixes a bug in the flow director filter replay logic, so that we correctly call a replay after a sideband reset.

Jesse adds code to initialize all members of the context descriptor to prevent possible stale data.

Christopher fixes i40e to prevent writing to reserved bits, since the queue index is only 0-127.

Jacob removes the unneeded header export.h from the i40e PTP code. He also fixes the ixgbe PTP code where the PPS signal was not correct: it generated a one-half Hz clock signal, i.e. only one level change per second. To generate a full clock, we need two level changes per second.

Todd provides a fix for igb to bring up the link when the PHY has powered up, which was reported by Jeff Westfahl.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit 090cce4263
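A note on Kevin's mask fix (see the i40e_write_dword and i40e_write_qword hunks below): assigning -1 to an unsigned mask only produces all ones through implicit signed-to-unsigned conversion, so the patch spells out the all-ones constant instead. A minimal standalone C sketch of the same pattern, for illustration only and not taken from the driver:

/* Build an all-ones mask for the low "width" bits of a u32 without
 * assigning -1 to an unsigned variable.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t width_mask32(unsigned int width)
{
	/* For width < 32 the shift-and-subtract form works; for the
	 * full width, use the explicit all-ones constant rather than -1.
	 */
	if (width < 32)
		return ((uint32_t)1 << width) - 1;
	return 0xFFFFFFFF;
}

int main(void)
{
	printf("%08x\n", width_mask32(12)); /* 00000fff */
	printf("%08x\n", width_mask32(32)); /* ffffffff */
	return 0;
}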
@@ -55,16 +55,24 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
 		hw->aq.asq.tail = I40E_VF_ATQT1;
 		hw->aq.asq.head = I40E_VF_ATQH1;
 		hw->aq.asq.len = I40E_VF_ATQLEN1;
+		hw->aq.asq.bal = I40E_VF_ATQBAL1;
+		hw->aq.asq.bah = I40E_VF_ATQBAH1;
 		hw->aq.arq.tail = I40E_VF_ARQT1;
 		hw->aq.arq.head = I40E_VF_ARQH1;
 		hw->aq.arq.len = I40E_VF_ARQLEN1;
+		hw->aq.arq.bal = I40E_VF_ARQBAL1;
+		hw->aq.arq.bah = I40E_VF_ARQBAH1;
 	} else {
 		hw->aq.asq.tail = I40E_PF_ATQT;
 		hw->aq.asq.head = I40E_PF_ATQH;
 		hw->aq.asq.len = I40E_PF_ATQLEN;
+		hw->aq.asq.bal = I40E_PF_ATQBAL;
+		hw->aq.asq.bah = I40E_PF_ATQBAH;
 		hw->aq.arq.tail = I40E_PF_ARQT;
 		hw->aq.arq.head = I40E_PF_ARQH;
 		hw->aq.arq.len = I40E_PF_ARQLEN;
+		hw->aq.arq.bal = I40E_PF_ARQBAL;
+		hw->aq.arq.bah = I40E_PF_ARQBAH;
 	}
 }
@@ -300,27 +308,14 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
 	wr32(hw, hw->aq.asq.head, 0);
 	wr32(hw, hw->aq.asq.tail, 0);
 
-	if (hw->mac.type == I40E_MAC_VF) {
-		/* configure the transmit queue */
-		wr32(hw, I40E_VF_ATQBAH1,
-		     upper_32_bits(hw->aq.asq.desc_buf.pa));
-		wr32(hw, I40E_VF_ATQBAL1,
-		     lower_32_bits(hw->aq.asq.desc_buf.pa));
-		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
-					   I40E_VF_ATQLEN1_ATQENABLE_MASK));
-		reg = rd32(hw, I40E_VF_ATQBAL1);
-	} else {
-		/* configure the transmit queue */
-		wr32(hw, I40E_PF_ATQBAH,
-		     upper_32_bits(hw->aq.asq.desc_buf.pa));
-		wr32(hw, I40E_PF_ATQBAL,
-		     lower_32_bits(hw->aq.asq.desc_buf.pa));
-		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
-					   I40E_PF_ATQLEN_ATQENABLE_MASK));
-		reg = rd32(hw, I40E_PF_ATQBAL);
-	}
+	/* set starting point */
+	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+				  I40E_PF_ATQLEN_ATQENABLE_MASK));
+	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
+	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
 
 	/* Check one register to verify that config was applied */
+	reg = rd32(hw, hw->aq.asq.bal);
 	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
 		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
@@ -342,30 +337,17 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
 	wr32(hw, hw->aq.arq.head, 0);
 	wr32(hw, hw->aq.arq.tail, 0);
 
-	if (hw->mac.type == I40E_MAC_VF) {
-		/* configure the receive queue */
-		wr32(hw, I40E_VF_ARQBAH1,
-		     upper_32_bits(hw->aq.arq.desc_buf.pa));
-		wr32(hw, I40E_VF_ARQBAL1,
-		     lower_32_bits(hw->aq.arq.desc_buf.pa));
-		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
-					   I40E_VF_ARQLEN1_ARQENABLE_MASK));
-		reg = rd32(hw, I40E_VF_ARQBAL1);
-	} else {
-		/* configure the receive queue */
-		wr32(hw, I40E_PF_ARQBAH,
-		     upper_32_bits(hw->aq.arq.desc_buf.pa));
-		wr32(hw, I40E_PF_ARQBAL,
-		     lower_32_bits(hw->aq.arq.desc_buf.pa));
-		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
-					   I40E_PF_ARQLEN_ARQENABLE_MASK));
-		reg = rd32(hw, I40E_PF_ARQBAL);
-	}
+	/* set starting point */
+	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+				  I40E_PF_ARQLEN_ARQENABLE_MASK));
+	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
+	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
 
 	/* Update tail in the HW to post pre-allocated buffers */
 	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
 
 	/* Check one register to verify that config was applied */
+	reg = rd32(hw, hw->aq.arq.bal);
 	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
 		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
@@ -507,6 +489,8 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
 	wr32(hw, hw->aq.asq.head, 0);
 	wr32(hw, hw->aq.asq.tail, 0);
 	wr32(hw, hw->aq.asq.len, 0);
+	wr32(hw, hw->aq.asq.bal, 0);
+	wr32(hw, hw->aq.asq.bah, 0);
 
 	/* make sure lock is available */
 	mutex_lock(&hw->aq.asq_mutex);
@@ -538,6 +522,8 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
 	wr32(hw, hw->aq.arq.head, 0);
 	wr32(hw, hw->aq.arq.tail, 0);
 	wr32(hw, hw->aq.arq.len, 0);
+	wr32(hw, hw->aq.arq.bal, 0);
+	wr32(hw, hw->aq.arq.bah, 0);
 
 	/* make sure lock is available */
 	mutex_lock(&hw->aq.arq_mutex);
@@ -585,6 +571,9 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
 	/* Set up register offsets */
 	i40e_adminq_init_regs(hw);
 
+	/* setup ASQ command write back timeout */
+	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+
 	/* allocate the ASQ */
 	ret_code = i40e_init_asq(hw);
 	if (ret_code)
@@ -874,7 +863,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 			/* ugh! delay while spin_lock */
 			udelay(delay_len);
 			total_delay += delay_len;
-		} while (total_delay < I40E_ASQ_CMD_TIMEOUT);
+		} while (total_delay < hw->aq.asq_cmd_timeout);
 	}
 
 	/* if ready, copy the desc back to temp */
@@ -56,6 +56,8 @@ struct i40e_adminq_ring {
 	u32 head;
 	u32 tail;
 	u32 len;
+	u32 bah;
+	u32 bal;
 };
 
 /* ASQ transaction details */
@@ -82,6 +84,7 @@ struct i40e_arq_event_info {
 struct i40e_adminq_info {
 	struct i40e_adminq_ring arq;    /* receive queue */
 	struct i40e_adminq_ring asq;    /* send queue */
+	u32 asq_cmd_timeout;            /* send queue cmd write back timeout*/
 	u16 num_arq_entries;            /* receive queue depth */
 	u16 num_asq_entries;            /* send queue depth */
 	u16 arq_buf_size;               /* receive queue buffer size */
@@ -669,8 +669,10 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
 	u32 reg_block = 0;
 	u32 reg_val;
 
-	if (abs_queue_idx >= 128)
+	if (abs_queue_idx >= 128) {
 		reg_block = abs_queue_idx / 128;
+		abs_queue_idx %= 128;
+	}
 
 	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
 	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
@@ -810,6 +812,99 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
 	return 0;
 }
 
+/**
+ * i40e_clear_hw - clear out any left over hw state
+ * @hw: pointer to the hw struct
+ *
+ * Clear queues and interrupts, typically called at init time,
+ * but after the capabilities have been found so we know how many
+ * queues and msix vectors have been allocated.
+ **/
+void i40e_clear_hw(struct i40e_hw *hw)
+{
+	u32 num_queues, base_queue;
+	u32 num_pf_int;
+	u32 num_vf_int;
+	u32 num_vfs;
+	u32 i, j;
+	u32 val;
+	u32 eol = 0x7ff;
+
+	/* get number of interrupts, queues, and vfs */
+	val = rd32(hw, I40E_GLPCI_CNF2);
+	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
+			I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
+	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
+			I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
+
+	val = rd32(hw, I40E_PFLAN_QALLOC);
+	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
+			I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
+	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
+			I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
+		num_queues = (j - base_queue) + 1;
+	else
+		num_queues = 0;
+
+	val = rd32(hw, I40E_PF_VT_PFALLOC);
+	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
+			I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
+	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
+			I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
+	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
+		num_vfs = (j - i) + 1;
+	else
+		num_vfs = 0;
+
+	/* stop all the interrupts */
+	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+	for (i = 0; i < num_pf_int - 2; i++)
+		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
+
+	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
+	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+	wr32(hw, I40E_PFINT_LNKLST0, val);
+	for (i = 0; i < num_pf_int - 2; i++)
+		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
+	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+	for (i = 0; i < num_vfs; i++)
+		wr32(hw, I40E_VPINT_LNKLST0(i), val);
+	for (i = 0; i < num_vf_int - 2; i++)
+		wr32(hw, I40E_VPINT_LNKLSTN(i), val);
+
+	/* warn the HW of the coming Tx disables */
+	for (i = 0; i < num_queues; i++) {
+		u32 abs_queue_idx = base_queue + i;
+		u32 reg_block = 0;
+
+		if (abs_queue_idx >= 128) {
+			reg_block = abs_queue_idx / 128;
+			abs_queue_idx %= 128;
+		}
+
+		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
+	}
+	udelay(400);
+
+	/* stop all the queues */
+	for (i = 0; i < num_queues; i++) {
+		wr32(hw, I40E_QINT_TQCTL(i), 0);
+		wr32(hw, I40E_QTX_ENA(i), 0);
+		wr32(hw, I40E_QINT_RQCTL(i), 0);
+		wr32(hw, I40E_QRX_ENA(i), 0);
+	}
+
+	/* short wait for all queue disables to settle */
+	udelay(50);
+}
+
 /**
  * i40e_clear_pxe_mode - clear pxe operations mode
  * @hw: pointer to the hw struct
@@ -858,7 +858,7 @@ static void i40e_write_dword(u8 *hmc_bits,
 	if (ce_info->width < 32)
 		mask = ((u32)1 << ce_info->width) - 1;
 	else
-		mask = -1;
+		mask = 0xFFFFFFFF;
 
 	/* don't swizzle the bits until after the mask because the mask bits
 	 * will be in a different bit position on big endian machines
@@ -910,7 +910,7 @@ static void i40e_write_qword(u8 *hmc_bits,
 	if (ce_info->width < 64)
 		mask = ((u64)1 << ce_info->width) - 1;
 	else
-		mask = -1;
+		mask = 0xFFFFFFFFFFFFFFFF;
 
 	/* don't swizzle the bits until after the mask because the mask bits
 	 * will be in a different bit position on big endian machines
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 0
 #define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 17
+#define DRV_VERSION_BUILD 19
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	__stringify(DRV_VERSION_MINOR) "." \
 	__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -8636,6 +8636,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	/* Reset here to make sure all is clean and to define PF 'n' */
+	i40e_clear_hw(hw);
 	err = i40e_pf_reset(hw);
 	if (err) {
 		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
@@ -217,6 +217,7 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
 /* i40e_common */
 i40e_status i40e_init_shared_code(struct i40e_hw *hw);
 i40e_status i40e_pf_reset(struct i40e_hw *hw);
+void i40e_clear_hw(struct i40e_hw *hw);
 void i40e_clear_pxe_mode(struct i40e_hw *hw);
 bool i40e_get_link_status(struct i40e_hw *hw);
 i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
@@ -25,7 +25,6 @@
 ******************************************************************************/
 
 #include "i40e.h"
-#include <linux/export.h>
 #include <linux/ptp_classify.h>
 
 /* The XL710 timesync is very much like Intel's 82599 design when it comes to
@@ -445,14 +445,16 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
 	 */
 	if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
 		/* Turn off ATR first */
-		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
-			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+		    !(pf->auto_disable_flags &
+		      I40E_FLAG_FD_ATR_ENABLED)) {
 			dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
+			pf->auto_disable_flags |=
+						  I40E_FLAG_FD_ATR_ENABLED;
 			pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
-		} else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
-			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+		} else if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+			   !(pf->auto_disable_flags &
+			     I40E_FLAG_FD_SB_ENABLED)) {
 			dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+			pf->auto_disable_flags |=
+							I40E_FLAG_FD_SB_ENABLED;
@@ -1989,6 +1991,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
 	/* cpu_to_le32 and assign to struct fields */
 	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
 	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+	context_desc->rsvd = cpu_to_le16(0);
 	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
 }
@@ -53,16 +53,24 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
 		hw->aq.asq.tail = I40E_VF_ATQT1;
 		hw->aq.asq.head = I40E_VF_ATQH1;
 		hw->aq.asq.len = I40E_VF_ATQLEN1;
+		hw->aq.asq.bal = I40E_VF_ATQBAL1;
+		hw->aq.asq.bah = I40E_VF_ATQBAH1;
 		hw->aq.arq.tail = I40E_VF_ARQT1;
 		hw->aq.arq.head = I40E_VF_ARQH1;
 		hw->aq.arq.len = I40E_VF_ARQLEN1;
+		hw->aq.arq.bal = I40E_VF_ARQBAL1;
+		hw->aq.arq.bah = I40E_VF_ARQBAH1;
 	} else {
 		hw->aq.asq.tail = I40E_PF_ATQT;
 		hw->aq.asq.head = I40E_PF_ATQH;
 		hw->aq.asq.len = I40E_PF_ATQLEN;
+		hw->aq.asq.bal = I40E_PF_ATQBAL;
+		hw->aq.asq.bah = I40E_PF_ATQBAH;
 		hw->aq.arq.tail = I40E_PF_ARQT;
 		hw->aq.arq.head = I40E_PF_ARQH;
 		hw->aq.arq.len = I40E_PF_ARQLEN;
+		hw->aq.arq.bal = I40E_PF_ARQBAL;
+		hw->aq.arq.bah = I40E_PF_ARQBAH;
 	}
 }
@@ -298,27 +306,14 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
 	wr32(hw, hw->aq.asq.head, 0);
 	wr32(hw, hw->aq.asq.tail, 0);
 
-	if (hw->mac.type == I40E_MAC_VF) {
-		/* configure the transmit queue */
-		wr32(hw, I40E_VF_ATQBAH1,
-		     upper_32_bits(hw->aq.asq.desc_buf.pa));
-		wr32(hw, I40E_VF_ATQBAL1,
-		     lower_32_bits(hw->aq.asq.desc_buf.pa));
-		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
-					   I40E_VF_ATQLEN1_ATQENABLE_MASK));
-		reg = rd32(hw, I40E_VF_ATQBAL1);
-	} else {
-		/* configure the transmit queue */
-		wr32(hw, I40E_PF_ATQBAH,
-		     upper_32_bits(hw->aq.asq.desc_buf.pa));
-		wr32(hw, I40E_PF_ATQBAL,
-		     lower_32_bits(hw->aq.asq.desc_buf.pa));
-		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
-					   I40E_PF_ATQLEN_ATQENABLE_MASK));
-		reg = rd32(hw, I40E_PF_ATQBAL);
-	}
+	/* set starting point */
+	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+				  I40E_PF_ATQLEN_ATQENABLE_MASK));
+	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
+	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
 
 	/* Check one register to verify that config was applied */
+	reg = rd32(hw, hw->aq.asq.bal);
 	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
 		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
@@ -340,30 +335,17 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
 	wr32(hw, hw->aq.arq.head, 0);
 	wr32(hw, hw->aq.arq.tail, 0);
 
-	if (hw->mac.type == I40E_MAC_VF) {
-		/* configure the receive queue */
-		wr32(hw, I40E_VF_ARQBAH1,
-		     upper_32_bits(hw->aq.arq.desc_buf.pa));
-		wr32(hw, I40E_VF_ARQBAL1,
-		     lower_32_bits(hw->aq.arq.desc_buf.pa));
-		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
-					   I40E_VF_ARQLEN1_ARQENABLE_MASK));
-		reg = rd32(hw, I40E_VF_ARQBAL1);
-	} else {
-		/* configure the receive queue */
-		wr32(hw, I40E_PF_ARQBAH,
-		     upper_32_bits(hw->aq.arq.desc_buf.pa));
-		wr32(hw, I40E_PF_ARQBAL,
-		     lower_32_bits(hw->aq.arq.desc_buf.pa));
-		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
-					   I40E_PF_ARQLEN_ARQENABLE_MASK));
-		reg = rd32(hw, I40E_PF_ARQBAL);
-	}
+	/* set starting point */
+	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+				  I40E_PF_ARQLEN_ARQENABLE_MASK));
+	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
+	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
 
 	/* Update tail in the HW to post pre-allocated buffers */
 	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
 
 	/* Check one register to verify that config was applied */
+	reg = rd32(hw, hw->aq.arq.bal);
 	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
 		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
@@ -505,6 +487,8 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
 	wr32(hw, hw->aq.asq.head, 0);
 	wr32(hw, hw->aq.asq.tail, 0);
 	wr32(hw, hw->aq.asq.len, 0);
+	wr32(hw, hw->aq.asq.bal, 0);
+	wr32(hw, hw->aq.asq.bah, 0);
 
 	/* make sure lock is available */
 	mutex_lock(&hw->aq.asq_mutex);
@@ -536,6 +520,8 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
 	wr32(hw, hw->aq.arq.head, 0);
 	wr32(hw, hw->aq.arq.tail, 0);
 	wr32(hw, hw->aq.arq.len, 0);
+	wr32(hw, hw->aq.arq.bal, 0);
+	wr32(hw, hw->aq.arq.bah, 0);
 
 	/* make sure lock is available */
 	mutex_lock(&hw->aq.arq_mutex);
@@ -581,6 +567,9 @@ i40e_status i40evf_init_adminq(struct i40e_hw *hw)
 	/* Set up register offsets */
 	i40e_adminq_init_regs(hw);
 
+	/* setup ASQ command write back timeout */
+	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+
 	/* allocate the ASQ */
 	ret_code = i40e_init_asq(hw);
 	if (ret_code)
@@ -828,7 +817,7 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
 			/* ugh! delay while spin_lock */
 			udelay(delay_len);
 			total_delay += delay_len;
-		} while (total_delay < I40E_ASQ_CMD_TIMEOUT);
+		} while (total_delay < hw->aq.asq_cmd_timeout);
 	}
 
 	/* if ready, copy the desc back to temp */
@@ -56,6 +56,8 @@ struct i40e_adminq_ring {
 	u32 head;
 	u32 tail;
 	u32 len;
+	u32 bah;
+	u32 bal;
 };
 
 /* ASQ transaction details */
@@ -82,6 +84,7 @@ struct i40e_arq_event_info {
 struct i40e_adminq_info {
 	struct i40e_adminq_ring arq;    /* receive queue */
 	struct i40e_adminq_ring asq;    /* send queue */
+	u32 asq_cmd_timeout;            /* send queue cmd write back timeout*/
 	u16 num_arq_entries;            /* receive queue depth */
 	u16 num_asq_entries;            /* send queue depth */
 	u16 arq_buf_size;               /* receive queue buffer size */
@@ -1336,6 +1336,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
 	/* cpu_to_le32 and assign to struct fields */
 	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
 	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+	context_desc->rsvd = cpu_to_le16(0);
 	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
 }
@@ -34,9 +34,9 @@ static int i40evf_close(struct net_device *netdev);
 
 char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
-	"Intel(R) XL710 X710 Virtual Function Network Driver";
+	"Intel(R) XL710/X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "0.9.36"
+#define DRV_VERSION "0.9.38"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
 	"Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -248,11 +248,11 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
 		vqpi++;
 	}
 
+	adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
 			   (u8 *)vqci, len);
 	kfree(vqci);
-	adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
 }
 
 /**
@@ -275,10 +275,10 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
 	vqs.vsi_id = adapter->vsi_res->vsi_id;
 	vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
 	vqs.rx_queues = vqs.tx_queues;
-	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
-			   (u8 *)&vqs, sizeof(vqs));
 	adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+			   (u8 *)&vqs, sizeof(vqs));
 }
 
 /**
@@ -301,10 +301,10 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
 	vqs.vsi_id = adapter->vsi_res->vsi_id;
 	vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
 	vqs.rx_queues = vqs.tx_queues;
-	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
-			   (u8 *)&vqs, sizeof(vqs));
 	adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+			   (u8 *)&vqs, sizeof(vqs));
 }
 
 /**
@@ -352,11 +352,11 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
 	vimi->vecmap[v_idx].txq_map = 0;
 	vimi->vecmap[v_idx].rxq_map = 0;
 
+	adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
 			   (u8 *)vimi, len);
 	kfree(vimi);
-	adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
 }
 
 /**
@@ -413,12 +413,11 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
 			f->add = false;
 		}
 	}
+	adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
 			   (u8 *)veal, len);
 	kfree(veal);
-	adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
-
 }
 
 /**
@@ -475,11 +474,11 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
 			kfree(f);
 		}
 	}
+	adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
 			   (u8 *)veal, len);
 	kfree(veal);
-	adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
 }
 
 /**
@@ -536,10 +535,10 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
 			f->add = false;
 		}
 	}
-	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
-	kfree(vvfl);
 	adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+	kfree(vvfl);
 }
 
 /**
@@ -597,10 +596,10 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
 			kfree(f);
 		}
 	}
-	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
-	kfree(vvfl);
 	adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+	kfree(vvfl);
 }
 
 /**
@@ -1630,6 +1630,8 @@ void igb_power_up_link(struct igb_adapter *adapter)
 		igb_power_up_phy_copper(&adapter->hw);
 	else
 		igb_power_up_serdes_link_82575(&adapter->hw);
+
+	igb_setup_link(&adapter->hw);
 }
 
 /**
@@ -98,9 +98,11 @@
 #define IXGBE_OVERFLOW_PERIOD    (HZ * 30)
 #define IXGBE_PTP_TX_TIMEOUT     (HZ * 15)
 
-#ifndef NSECS_PER_SEC
-#define NSECS_PER_SEC 1000000000ULL
-#endif
+/* half of a one second clock period, for use with PPS signal. We have to use
+ * this instead of something pre-defined like NSECS_PER_SEC, in
+ * order to force at least 64bits of precision for shifting
+ */
+#define IXGBE_PTP_PPS_HALF_SECOND 500000000ULL
 
 /**
  * ixgbe_ptp_setup_sdp
@@ -146,8 +148,8 @@ static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter)
 			IXGBE_TSAUXC_SDP0_INT);
 
 	/* clock period (or pulse length) */
-	clktiml = (u32)(NSECS_PER_SEC << shift);
-	clktimh = (u32)((NSECS_PER_SEC << shift) >> 32);
+	clktiml = (u32)(IXGBE_PTP_PPS_HALF_SECOND << shift);
+	clktimh = (u32)((IXGBE_PTP_PPS_HALF_SECOND << shift) >> 32);
 
 	/*
 	 * Account for the cyclecounter wrap-around value by
@@ -158,8 +160,8 @@ static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter)
 	clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
 	ns = timecounter_cyc2time(&adapter->tc, clock_edge);
 
-	div_u64_rem(ns, NSECS_PER_SEC, &rem);
-	clock_edge += ((NSECS_PER_SEC - (u64)rem) << shift);
+	div_u64_rem(ns, IXGBE_PTP_PPS_HALF_SECOND, &rem);
+	clock_edge += ((IXGBE_PTP_PPS_HALF_SECOND - (u64)rem) << shift);
 
 	/* specify the initial clock start time */
 	trgttiml = (u32)clock_edge;
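For context on the three ixgbe hunks above: the SDP pin toggles once per trigger, so a one-second period yields only one level change per second (a half-Hz square wave), while a half-second period gives the two level changes per second a 1 Hz clock needs. A standalone C sketch of the half-second edge alignment, for illustration only, using a hypothetical PPS_HALF_SECOND constant in plain nanoseconds (the driver additionally applies the cyclecounter shift):

/* Round a timestamp up to the next half-second boundary, which is
 * where the next PPS level change should occur.
 */
#include <stdint.h>
#include <stdio.h>

#define PPS_HALF_SECOND 500000000ULL	/* ns; two edges per second */

static uint64_t next_edge_ns(uint64_t now_ns)
{
	uint64_t rem = now_ns % PPS_HALF_SECOND;

	/* if already on a boundary, schedule a full half second ahead */
	return now_ns + (PPS_HALF_SECOND - rem);
}

int main(void)
{
	/* e.g. now = 1.3 s -> next edge at 1.5 s */
	printf("%llu\n", (unsigned long long)next_edge_ns(1300000000ULL));
	return 0;
}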