Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/jkirsher/net-next-2.6

David S. Miller 2011-06-23 23:39:33 -07:00
commit 21e842579a
6 changed files with 868 additions and 474 deletions

drivers/net/ixgbe/ixgbe.h

@@ -482,6 +482,17 @@ struct ixgbe_adapter {
        struct vf_macvlans vf_mvs;
        struct vf_macvlans *mv_list;
        bool antispoofing_enabled;
+
+       struct hlist_head fdir_filter_list;
+       union ixgbe_atr_input fdir_mask;
+       int fdir_filter_count;
+};
+
+struct ixgbe_fdir_filter {
+       struct hlist_node fdir_node;
+       union ixgbe_atr_input filter;
+       u16 sw_idx;
+       u16 action;
 };

 enum ixbge_state_t {
@@ -543,16 +554,22 @@ extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
 extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
 extern int ethtool_ioctl(struct ifreq *ifr);
 extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
-extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
+extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
 extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
                                                  union ixgbe_atr_hash_dword input,
                                                  union ixgbe_atr_hash_dword common,
                                                  u8 queue);
-extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-                                               union ixgbe_atr_input *input,
-                                               struct ixgbe_atr_input_masks *input_masks,
-                                               u16 soft_id, u8 queue);
+extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+                                           union ixgbe_atr_input *input_mask);
+extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+                                                 union ixgbe_atr_input *input,
+                                                 u16 soft_id, u8 queue);
+extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+                                                 union ixgbe_atr_input *input,
+                                                 u16 soft_id);
+extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+                                                 union ixgbe_atr_input *mask);
 extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
                                    struct ixgbe_ring *ring);
 extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
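Taken together, these declarations replace the one-shot ixgbe_fdir_add_perfect_filter_82599() with a three-step flow: program one input mask for the whole port, hash and write each filter, and erase filters by soft_id. Below is a standalone toy model of that call order; the types, mask logic, and printf "register writes" are illustrative stand-ins, not the driver's implementation.

#include <stdio.h>
#include <stdint.h>

struct toy_filter { uint32_t src_ip, dst_ip; uint16_t bkt_hash; };

static struct toy_filter port_mask;     /* one mask per port, like fdir_mask */

static int set_input_mask(const struct toy_filter *mask)
{
        port_mask = *mask;              /* stands in for FDIRM/FDIRTCPM etc. */
        printf("mask programmed\n");
        return 0;
}

static int write_perfect_filter(struct toy_filter *f, unsigned soft_id,
                                unsigned queue)
{
        f->src_ip &= port_mask.src_ip;  /* mask applied before hashing */
        f->dst_ip &= port_mask.dst_ip;
        f->bkt_hash = (uint16_t)((f->src_ip ^ f->dst_ip) & 0x1FFF);
        printf("filter %u -> queue %u, bucket 0x%04X\n",
               soft_id, queue, (unsigned)f->bkt_hash);
        return 0;
}

int main(void)
{
        struct toy_filter mask = { 0xFFFFFFFFu, 0xFFFFFF00u, 0 };
        struct toy_filter f = { 0x0A000001u, 0xC0A80005u, 0 };

        set_input_mask(&mask);          /* once, while the list is empty */
        write_perfect_filter(&f, 0, 3); /* then one write per filter */
        return 0;
}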

drivers/net/ixgbe/ixgbe_82599.c

@@ -1107,42 +1107,86 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
 }

 /**
- * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
+ * ixgbe_set_fdir_rxpba_82599 - Initialize Flow Director Rx packet buffer
  * @hw: pointer to hardware structure
  * @pballoc: which mode to allocate filters with
  **/
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
+static s32 ixgbe_set_fdir_rxpba_82599(struct ixgbe_hw *hw, const u32 pballoc)
 {
-       u32 fdirctrl = 0;
+       u32 fdir_pbsize = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT;
+       u32 current_rxpbsize = 0;
        int i;

-       /* Send interrupt when 64 filters are left */
-       fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
-
-       /* Set the maximum length per hash bucket to 0xA filters */
-       fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
-
+       /* reserve space for Flow Director filters */
        switch (pballoc) {
-       case IXGBE_FDIR_PBALLOC_64K:
-               /* 8k - 1 signature filters */
-               fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
+       case IXGBE_FDIR_PBALLOC_256K:
+               fdir_pbsize -= 256 << IXGBE_RXPBSIZE_SHIFT;
                break;
        case IXGBE_FDIR_PBALLOC_128K:
-               /* 16k - 1 signature filters */
-               fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
+               fdir_pbsize -= 128 << IXGBE_RXPBSIZE_SHIFT;
                break;
-       case IXGBE_FDIR_PBALLOC_256K:
-               /* 32k - 1 signature filters */
-               fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
+       case IXGBE_FDIR_PBALLOC_64K:
+               fdir_pbsize -= 64 << IXGBE_RXPBSIZE_SHIFT;
                break;
+       case IXGBE_FDIR_PBALLOC_NONE:
        default:
-               /* bad value */
-               return IXGBE_ERR_CONFIG;
+               return IXGBE_ERR_PARAM;
        }

-       /* Move the flexible bytes to use the ethertype - shift 6 words */
-       fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
+       /* determine current RX packet buffer size */
+       for (i = 0; i < 8; i++)
current_rxpbsize += IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
/* if there is already room for the filters do nothing */
if (current_rxpbsize <= fdir_pbsize)
return 0;
if (current_rxpbsize > hw->mac.rx_pb_size) {
/*
* if rxpbsize is greater than max then HW max the Rx buffer
* sizes are unconfigured or misconfigured since HW default is
* to give the full buffer to each traffic class resulting in
* the total size being buffer size 8x actual size
*
* This assumes no DCB since the RXPBSIZE registers appear to
* be unconfigured.
*/
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), fdir_pbsize);
for (i = 1; i < 8; i++)
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
} else {
/*
* Since the Rx packet buffer appears to have already been
* configured we need to shrink each packet buffer by enough
* to make room for the filters. As such we take each rxpbsize
* value and multiply it by a fraction representing the size
* needed over the size we currently have.
*
* We need to reduce fdir_pbsize and current_rxpbsize to
* 1/1024 of their original values in order to avoid
* overflowing the u32 being used to store rxpbsize.
*/
fdir_pbsize >>= IXGBE_RXPBSIZE_SHIFT;
current_rxpbsize >>= IXGBE_RXPBSIZE_SHIFT;
for (i = 0; i < 8; i++) {
u32 rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
rxpbsize *= fdir_pbsize;
rxpbsize /= current_rxpbsize;
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
}
}
return 0;
}
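The proportional-shrink arithmetic above is easier to see with concrete numbers. A small userspace sketch follows; values are in KB, i.e. already shifted right by IXGBE_RXPBSIZE_SHIFT, which this sketch assumes is 10.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* eight traffic-class buffers of 64 KB each, as KB counts */
        uint32_t rxpb[8] = { 64, 64, 64, 64, 64, 64, 64, 64 };
        uint32_t current = 0, target = 512 - 128; /* keep 128 KB for filters */
        int i;

        for (i = 0; i < 8; i++)
                current += rxpb[i];

        /* scale each buffer by target/current; working in KB instead of
         * bytes keeps rxpb[i] * target well inside a u32, which is the
         * point of the >> IXGBE_RXPBSIZE_SHIFT step in the driver */
        for (i = 0; i < 8; i++) {
                rxpb[i] = rxpb[i] * target / current;
                printf("pb[%d] = %u KB\n", i, (unsigned)rxpb[i]);
        }
        return 0;
}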
/**
* ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
* @hw: pointer to hardware structure
* @fdirctrl: value to write to flow director control register
**/
static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
int i;
        /* Prime the keys for hashing */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
@@ -1169,8 +1213,38 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
                        break;
                usleep_range(1000, 2000);
        }
        if (i >= IXGBE_FDIR_INIT_DONE_POLL)
-               hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
+               hw_dbg(hw, "Flow Director poll time exceeded!\n");
}
/**
* ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
* @hw: pointer to hardware structure
* @fdirctrl: value to write to flow director control register, initially
* contains just the value of the Rx packet buffer allocation
**/
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
s32 err;
/* Before enabling Flow Director, verify the Rx Packet Buffer size */
err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl);
if (err)
return err;
/*
* Continue setup of fdirctrl register bits:
* Move the flexible bytes to use the ethertype - shift 6 words
* Set the maximum length per hash bucket to 0xA filters
* Send interrupt when 64 filters are left
*/
fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
(0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
(4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
/* write hashes and fdirctrl register, poll for completion */
ixgbe_fdir_enable_82599(hw, fdirctrl);
        return 0;
 }
@@ -1178,170 +1252,40 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
 /**
  * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
  * @hw: pointer to hardware structure
- * @pballoc: which mode to allocate filters with
+ * @fdirctrl: value to write to flow director control register, initially
+ *            contains just the value of the Rx packet buffer allocation
  **/
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
 {
-       u32 fdirctrl = 0;
-       int i;
+       s32 err;

-       /* Send interrupt when 64 filters are left */
-       fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
-
-       /* Initialize the drop queue to Rx queue 127 */
-       fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
-
-       switch (pballoc) {
-       case IXGBE_FDIR_PBALLOC_64K:
-               /* 2k - 1 perfect filters */
-               fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
-               break;
-       case IXGBE_FDIR_PBALLOC_128K:
-               /* 4k - 1 perfect filters */
-               fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
-               break;
-       case IXGBE_FDIR_PBALLOC_256K:
-               /* 8k - 1 perfect filters */
-               fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
-               break;
-       default:
-               /* bad value */
-               return IXGBE_ERR_CONFIG;
-       }
-
-       /* Turn perfect match filtering on */
-       fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
-       fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
-
-       /* Move the flexible bytes to use the ethertype - shift 6 words */
-       fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
-
-       /* Prime the keys for hashing */
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
+       /* Before enabling Flow Director, verify the Rx Packet Buffer size */
+       err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl);
+       if (err)
+               return err;

        /*
-        * Poll init-done after we write the register.  Estimated times:
-        *      10G: PBALLOC = 11b, timing is 60us
-        *       1G: PBALLOC = 11b, timing is 600us
-        *     100M: PBALLOC = 11b, timing is 6ms
-        *
-        * Multiple these timings by 4 if under full Rx load
-        *
-        * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
-        * 1 msec per poll time. If we're at line rate and drop to 100M, then
-        * this might not finish in our poll time, but we can live with that
-        * for now.
+        * Continue setup of fdirctrl register bits:
+        *  Turn perfect match filtering on
+        *  Report hash in RSS field of Rx wb descriptor
+        *  Initialize the drop queue
+        *  Move the flexible bytes to use the ethertype - shift 6 words
+        *  Set the maximum length per hash bucket to 0xA filters
+        *  Send interrupt when 64 (0x4 * 16) filters are left
         */
+       fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
+                   IXGBE_FDIRCTRL_REPORT_STATUS |
+                   (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
+                   (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+                   (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+                   (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

-       /* Set the maximum length per hash bucket to 0xA filters */
-       fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
-
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
-       IXGBE_WRITE_FLUSH(hw);
-       for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
-               if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
-                   IXGBE_FDIRCTRL_INIT_DONE)
-                       break;
-               usleep_range(1000, 2000);
-       }
-       if (i >= IXGBE_FDIR_INIT_DONE_POLL)
-               hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n");
+       /* write hashes and fdirctrl register, poll for completion */
+       ixgbe_fdir_enable_82599(hw, fdirctrl);

        return 0;
 }
-/**
- * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
- * @stream: input bitstream to compute the hash on
- * @key: 32-bit hash key
- **/
-static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
-                                       u32 key)
-{
-       /*
-        * The algorithm is as follows:
-        *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
-        *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
-        *    and A[n] x B[n] is bitwise AND between same length strings
-        *
-        *    K[n] is 16 bits, defined as:
-        *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
-        *       for n modulo 32 < 15, K[n] =
-        *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
-        *
-        *    S[n] is 16 bits, defined as:
-        *       for n >= 15, S[n] = S[n:n - 15]
-        *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
-        *
-        *    To simplify for programming, the algorithm is implemented
-        *    in software this way:
-        *
-        *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
-        *
-        *    for (i = 0; i < 352; i+=32)
-        *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
-        *
-        *    lo_hash_dword[15:0]  ^= Stream[15:0];
-        *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
-        *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
-        *
-        *    hi_hash_dword[31:0]  ^= Stream[351:320];
-        *
-        *    if(key[0])
-        *        hash[15:0] ^= Stream[15:0];
-        *
-        *    for (i = 0; i < 16; i++) {
-        *        if (key[i])
-        *            hash[15:0] ^= lo_hash_dword[(i+15):i];
-        *        if (key[i + 16])
-        *            hash[15:0] ^= hi_hash_dword[(i+15):i];
-        *    }
-        *
-        */
-       __be32 common_hash_dword = 0;
-       u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
-       u32 hash_result = 0;
-       u8 i;
-
-       /* record the flow_vm_vlan bits as they are a key part to the hash */
-       flow_vm_vlan = ntohl(atr_input->dword_stream[0]);
-
-       /* generate common hash dword */
-       for (i = 10; i; i -= 2)
-               common_hash_dword ^= atr_input->dword_stream[i] ^
-                                    atr_input->dword_stream[i - 1];
-
-       hi_hash_dword = ntohl(common_hash_dword);
-
-       /* low dword is word swapped version of common */
-       lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
-
-       /* apply flow ID/VM pool/VLAN ID bits to hash words */
-       hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
-
-       /* Process bits 0 and 16 */
-       if (key & 0x0001) hash_result ^= lo_hash_dword;
-       if (key & 0x00010000) hash_result ^= hi_hash_dword;
-
-       /*
-        * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
-        * delay this because bit 0 of the stream should not be processed
-        * so we do not add the vlan until after bit 0 was processed
-        */
-       lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
-
-       /* process the remaining 30 bits in the key 2 bits at a time */
-       for (i = 15; i; i--) {
-               if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
-               if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
-       }
-
-       return hash_result & IXGBE_ATR_HASH_MASK;
-}
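Both the removed function above and its replacement further down implement the fold described in that comment: two key bits are consumed per iteration, one gating the low hash dword and one gating the high. Below is a compilable distillation of just that loop; the key and mask values are example constants standing in for IXGBE_ATR_BUCKET_HASH_KEY and IXGBE_ATR_HASH_MASK, and the VLAN/bit-0 ordering subtlety is omitted.

#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_KEY  0x3DAD14E2u        /* illustrative key value only */
#define EXAMPLE_MASK 0x7FFFu            /* assumed 15-bit hash mask */

static uint16_t fold_hash(uint32_t key, uint32_t lo_hash_dword,
                          uint32_t hi_hash_dword)
{
        uint32_t hash_result = 0;
        int i;

        /* key bit i gates lo_hash_dword >> i,
         * key bit i + 16 gates hi_hash_dword >> i */
        for (i = 0; i < 16; i++) {
                if (key & (1u << i))
                        hash_result ^= lo_hash_dword >> i;
                if (key & (1u << (i + 16)))
                        hash_result ^= hi_hash_dword >> i;
        }
        return (uint16_t)(hash_result & EXAMPLE_MASK);
}

int main(void)
{
        uint32_t hi = 0xC0A80001u ^ 0x0A000001u;   /* XOR-folded stream */
        uint32_t lo = (hi >> 16) | (hi << 16);     /* word-swapped copy */

        printf("hash = 0x%04X\n", (unsigned)fold_hash(EXAMPLE_KEY, lo, hi));
        return 0;
}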
 /*
  * These defines allow us to quickly generate all of the necessary instructions
  * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
@@ -1476,7 +1420,6 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
         */
        fdirhashcmd = (u64)fdircmd << 32;
        fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
-
        IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);

        hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
@@ -1484,6 +1427,101 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
        return 0;
 }
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
u32 n = (_n); \
if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
bucket_hash ^= lo_hash_dword >> n; \
if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
bucket_hash ^= hi_hash_dword >> n; \
} while (0);
/**
* ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
* @atr_input: input bitstream to compute the hash on
* @input_mask: mask for the input bitstream
*
* This function serves two main purposes. First it applys the input_mask
* to the atr_input resulting in a cleaned up atr_input data stream.
* Secondly it computes the hash and stores it in the bkt_hash field at
* the end of the input byte stream. This way it will be available for
* future use without needing to recompute the hash.
**/
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *input_mask)
{
u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
u32 bucket_hash = 0;
/* Apply masks to input data */
input->dword_stream[0] &= input_mask->dword_stream[0];
input->dword_stream[1] &= input_mask->dword_stream[1];
input->dword_stream[2] &= input_mask->dword_stream[2];
input->dword_stream[3] &= input_mask->dword_stream[3];
input->dword_stream[4] &= input_mask->dword_stream[4];
input->dword_stream[5] &= input_mask->dword_stream[5];
input->dword_stream[6] &= input_mask->dword_stream[6];
input->dword_stream[7] &= input_mask->dword_stream[7];
input->dword_stream[8] &= input_mask->dword_stream[8];
input->dword_stream[9] &= input_mask->dword_stream[9];
input->dword_stream[10] &= input_mask->dword_stream[10];
/* record the flow_vm_vlan bits as they are a key part to the hash */
flow_vm_vlan = ntohl(input->dword_stream[0]);
/* generate common hash dword */
hi_hash_dword = ntohl(input->dword_stream[1] ^
input->dword_stream[2] ^
input->dword_stream[3] ^
input->dword_stream[4] ^
input->dword_stream[5] ^
input->dword_stream[6] ^
input->dword_stream[7] ^
input->dword_stream[8] ^
input->dword_stream[9] ^
input->dword_stream[10]);
/* low dword is word swapped version of common */
lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
/* apply flow ID/VM pool/VLAN ID bits to hash words */
hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
/* Process bits 0 and 16 */
IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
/*
* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
* delay this because bit 0 of the stream should not be processed
* so we do not add the vlan until after bit 0 was processed
*/
lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
/* Process remaining 30 bit of the key */
IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
/*
* Limit hash to 13 bits since max bucket count is 8K.
* Store result at the end of the input stream.
*/
input->formatted.bkt_hash = bucket_hash & 0x1FFF;
}
 /**
  * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
  * @input_mask: mask to be bit swapped
  *
@@ -1493,11 +1531,11 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
  * generate a correctly swapped value we need to bit swap the mask and that
  * is what is accomplished by this function.
  **/
-static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
+static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
 {
-       u32 mask = ntohs(input_masks->dst_port_mask);
+       u32 mask = ntohs(input_mask->formatted.dst_port);

        mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
-       mask |= ntohs(input_masks->src_port_mask);
+       mask |= ntohs(input_mask->formatted.src_port);
        mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
        mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
        mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
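Only the first three stages of the swap ladder fall inside this hunk; a full 16-bit reversal also needs a byte swap, and a 32-bit one a half-word rotate. A self-contained version of the complete ladder, checkable in userspace:

#include <stdio.h>
#include <stdint.h>

/* reverse all 32 bits using the same mask-and-shift ladder */
static uint32_t bitrev32(uint32_t m)
{
        m = ((m & 0x55555555u) << 1) | ((m & 0xAAAAAAAAu) >> 1);
        m = ((m & 0x33333333u) << 2) | ((m & 0xCCCCCCCCu) >> 2);
        m = ((m & 0x0F0F0F0Fu) << 4) | ((m & 0xF0F0F0F0u) >> 4);
        m = ((m & 0x00FF00FFu) << 8) | ((m & 0xFF00FF00u) >> 8);
        return (m << 16) | (m >> 16);
}

int main(void)
{
        /* a port mask in the low bits reverses into the high bits */
        printf("%08X -> %08X\n", 80u, (unsigned)bitrev32(80u));
        return 0;
}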
@@ -1519,52 +1557,14 @@ static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
        IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))

 #define IXGBE_STORE_AS_BE16(_value) \
-       (((u16)(_value) >> 8) | ((u16)(_value) << 8))
+       ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8))
-/**
- * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
- * @hw: pointer to hardware structure
- * @input: input bitstream
- * @input_masks: bitwise masks for relevant fields
- * @soft_id: software index into the silicon hash tables for filter storage
- * @queue: queue index to direct traffic to
- *
- * Note that the caller to this function must lock before calling, since the
- * hardware writes must be protected from one another.
- **/
-s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-                                       union ixgbe_atr_input *input,
-                                       struct ixgbe_atr_input_masks *input_masks,
-                                       u16 soft_id, u8 queue)
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+                                   union ixgbe_atr_input *input_mask)
 {
-       u32 fdirhash;
-       u32 fdircmd;
-       u32 fdirport, fdirtcpm;
-       u32 fdirvlan;
-       /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
-       u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
-                   IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
-
-       /*
-        * Check flow_type formatting, and bail out before we touch the hardware
-        * if there's a configuration issue
-        */
-       switch (input->formatted.flow_type) {
-       case IXGBE_ATR_FLOW_TYPE_IPV4:
-               /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
-               fdirm |= IXGBE_FDIRM_L4P;
-       case IXGBE_ATR_FLOW_TYPE_SCTPV4:
-               if (input_masks->dst_port_mask || input_masks->src_port_mask) {
-                       hw_dbg(hw, " Error on src/dst port mask\n");
-                       return IXGBE_ERR_CONFIG;
-               }
-       case IXGBE_ATR_FLOW_TYPE_TCPV4:
-       case IXGBE_ATR_FLOW_TYPE_UDPV4:
-               break;
-       default:
-               hw_dbg(hw, " Error on flow type input\n");
-               return IXGBE_ERR_CONFIG;
-       }
+       /* mask IPv6 since it is currently not supported */
+       u32 fdirm = IXGBE_FDIRM_DIPv6;
+       u32 fdirtcpm;

        /*
         * Program the relevant mask registers.  If src/dst_port or src/dst_addr
@@ -1576,41 +1576,71 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
         * point in time.
         */
-       /* Program FDIRM */
-       switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
-       case 0xEFFF:
-               /* Unmask VLAN ID - bit 0 and fall through to unmask prio */
-               fdirm &= ~IXGBE_FDIRM_VLANID;
-       case 0xE000:
-               /* Unmask VLAN prio - bit 1 */
-               fdirm &= ~IXGBE_FDIRM_VLANP;
-               break;
-       case 0x0FFF:
-               /* Unmask VLAN ID - bit 0 */
-               fdirm &= ~IXGBE_FDIRM_VLANID;
-               break;
-       case 0x0000:
-               /* do nothing, vlans already masked */
-               break;
-       default:
-               hw_dbg(hw, " Error on VLAN mask\n");
-               return IXGBE_ERR_CONFIG;
-       }
-
-       if (input_masks->flex_mask & 0xFFFF) {
-               if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
-                       hw_dbg(hw, " Error on flexible byte mask\n");
-                       return IXGBE_ERR_CONFIG;
-               }
-               /* Unmask Flex Bytes - bit 4 */
-               fdirm &= ~IXGBE_FDIRM_FLEX;
-       }
+       /* verify bucket hash is cleared on hash generation */
+       if (input_mask->formatted.bkt_hash)
+               hw_dbg(hw, " bucket hash should always be 0 in mask\n");
+
+       /* Program FDIRM and verify partial masks */
+       switch (input_mask->formatted.vm_pool & 0x7F) {
+       case 0x0:
+               fdirm |= IXGBE_FDIRM_POOL;
+       case 0x7F:
+               break;
+       default:
+               hw_dbg(hw, " Error on vm pool mask\n");
+               return IXGBE_ERR_CONFIG;
+       }
+
+       switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
+       case 0x0:
+               fdirm |= IXGBE_FDIRM_L4P;
+               if (input_mask->formatted.dst_port ||
+                   input_mask->formatted.src_port) {
+                       hw_dbg(hw, " Error on src/dst port mask\n");
+                       return IXGBE_ERR_CONFIG;
+               }
+       case IXGBE_ATR_L4TYPE_MASK:
+               break;
+       default:
+               hw_dbg(hw, " Error on flow type mask\n");
+               return IXGBE_ERR_CONFIG;
+       }
+
+       switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
+       case 0x0000:
+               /* mask VLAN ID, fall through to mask VLAN priority */
+               fdirm |= IXGBE_FDIRM_VLANID;
+       case 0x0FFF:
+               /* mask VLAN priority */
+               fdirm |= IXGBE_FDIRM_VLANP;
+               break;
+       case 0xE000:
+               /* mask VLAN ID only, fall through */
+               fdirm |= IXGBE_FDIRM_VLANID;
+       case 0xEFFF:
+               /* no VLAN fields masked */
+               break;
+       default:
+               hw_dbg(hw, " Error on VLAN mask\n");
+               return IXGBE_ERR_CONFIG;
+       }
+
+       switch (input_mask->formatted.flex_bytes & 0xFFFF) {
+       case 0x0000:
+               /* Mask Flex Bytes, fall through */
+               fdirm |= IXGBE_FDIRM_FLEX;
+       case 0xFFFF:
+               break;
+       default:
+               hw_dbg(hw, " Error on flexible byte mask\n");
+               return IXGBE_ERR_CONFIG;
+       }

        /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

        /* store the TCP/UDP port masks, bit reversed from port layout */
-       fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
+       fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

        /* write both the same so that UDP and TCP use the same mask */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
@@ -1618,24 +1648,32 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
        /* store source and destination IP masks (big-enian) */
        IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
-                            ~input_masks->src_ip_mask[0]);
+                            ~input_mask->formatted.src_ip[0]);
        IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
-                            ~input_masks->dst_ip_mask[0]);
+                            ~input_mask->formatted.dst_ip[0]);

-       /* Apply masks to input data */
-       input->formatted.vlan_id &= input_masks->vlan_id_mask;
-       input->formatted.flex_bytes &= input_masks->flex_mask;
-       input->formatted.src_port &= input_masks->src_port_mask;
-       input->formatted.dst_port &= input_masks->dst_port_mask;
-       input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
-       input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
-
-       /* record vlan (little-endian) and flex_bytes(big-endian) */
-       fdirvlan =
-               IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
-       fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
-       fdirvlan |= ntohs(input->formatted.vlan_id);
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+       return 0;
+}
+
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+                                         union ixgbe_atr_input *input,
+                                         u16 soft_id, u8 queue)
+{
+       u32 fdirport, fdirvlan, fdirhash, fdircmd;
/* currently IPv6 is not supported, must be programmed with 0 */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
input->formatted.src_ip[0]);
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
input->formatted.src_ip[1]);
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
input->formatted.src_ip[2]);
/* record the source address (big-endian) */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
/* record the first 32 bits of the destination address (big-endian) */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
        /* record source and destination port (little-endian)*/
        fdirport = ntohs(input->formatted.dst_port);
@@ -1643,29 +1681,80 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
        fdirport |= ntohs(input->formatted.src_port);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);

-       /* record the first 32 bits of the destination address (big-endian) */
-       IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
-
-       /* record the source address (big-endian) */
-       IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+       /* record vlan (little-endian) and flex_bytes(big-endian) */
+       fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
+       fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+       fdirvlan |= ntohs(input->formatted.vlan_id);
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+       /* configure FDIRHASH register */
+       fdirhash = input->formatted.bkt_hash;
+       fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
/*
* flush all previous writes to make certain registers are
* programmed prior to issuing the command
*/
IXGBE_WRITE_FLUSH(hw);
        /* configure FDIRCMD register */
        fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
                  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+       if (queue == IXGBE_FDIR_DROP_QUEUE)
+               fdircmd |= IXGBE_FDIRCMD_DROP;
        fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
        fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
        fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

-       /* we only want the bucket hash so drop the upper 16 bits */
-       fdirhash = ixgbe_atr_compute_hash_82599(input,
-                                               IXGBE_ATR_BUCKET_HASH_KEY);
-       fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

        return 0;
 }
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id)
{
u32 fdirhash;
u32 fdircmd = 0;
u32 retry_count;
s32 err = 0;
/* configure FDIRHASH register */
fdirhash = input->formatted.bkt_hash;
fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
/* flush hash to HW */
IXGBE_WRITE_FLUSH(hw);
/* Query if filter is present */
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
for (retry_count = 10; retry_count; retry_count--) {
/* allow 10us for query to process */
udelay(10);
/* verify query completed successfully */
fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
break;
}
if (!retry_count)
err = IXGBE_ERR_FDIR_REINIT_FAILED;
/* if filter exists in hardware then remove it */
if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
}
return err;
}
 /**
  * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
  * @hw: pointer to hardware structure

drivers/net/ixgbe/ixgbe_dcb_nl.c

@@ -114,11 +114,12 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
        u8 err = 0;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);

+       /* verify there is something to do, if not then exit */
+       if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+               return err;
+
        if (state > 0) {
                /* Turn on DCB */
-               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-                       goto out;
-
                if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
                        e_err(drv, "Enable failed, needs MSI-X\n");
                        err = 1;
@@ -143,9 +144,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
                ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);
        } else {
                /* Turn off DCB */
-               if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
-                       goto out;
-
                adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
                adapter->temp_dcb_cfg.pfc_mode_enable = false;
                adapter->dcb_cfg.pfc_mode_enable = false;
@@ -153,7 +151,8 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
                switch (adapter->hw.mac.type) {
                case ixgbe_mac_82599EB:
                case ixgbe_mac_X540:
-                       adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+                       if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+                               adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
                        break;
                default:
                        break;

drivers/net/ixgbe/ixgbe_ethtool.c

@@ -442,20 +442,67 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
        return 0;
 }
static void ixgbe_do_reset(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
else
ixgbe_reset(adapter);
}
 static u32 ixgbe_get_rx_csum(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);

        return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
 }
static void ixgbe_set_rsc(struct ixgbe_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
set_ring_rsc_enabled(ring);
ixgbe_configure_rscctl(adapter, ring);
} else {
ixgbe_clear_rscctl(adapter, ring);
}
}
}
 static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       bool need_reset = false;

-       if (data)
+       if (data) {
                adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
-       else
+       } else {
                adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
netdev->features &= ~NETIF_F_LRO;
}
switch (adapter->hw.mac.type) {
case ixgbe_mac_X540:
ixgbe_set_rsc(adapter);
break;
case ixgbe_mac_82599EB:
need_reset = true;
break;
default:
break;
}
}
if (need_reset)
ixgbe_do_reset(netdev);
        return 0;
 }
@@ -2234,12 +2281,8 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
         * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
         * also locks in RSC enable/disable which requires reset
         */
-       if (need_reset) {
-               if (netif_running(netdev))
-                       ixgbe_reinit_locked(adapter);
-               else
-                       ixgbe_reset(adapter);
-       }
+       if (need_reset)
+               ixgbe_do_reset(netdev);

        return 0;
 }
@@ -2281,25 +2324,12 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
        } else {
                adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
                switch (adapter->hw.mac.type) {
+               case ixgbe_mac_X540:
+                       ixgbe_set_rsc(adapter);
+                       break;
                case ixgbe_mac_82599EB:
                        need_reset = true;
                        break;
-               case ixgbe_mac_X540: {
-                       int i;
-                       for (i = 0; i < adapter->num_rx_queues; i++) {
-                               struct ixgbe_ring *ring =
-                                       adapter->rx_ring[i];
-                               if (adapter->flags2 &
-                                   IXGBE_FLAG2_RSC_ENABLED) {
-                                       ixgbe_configure_rscctl(adapter,
-                                                              ring);
-                               } else {
-                                       ixgbe_clear_rscctl(adapter,
-                                                          ring);
-                               }
-                       }
-               }
-                       break;
                default:
                        break;
                }
@@ -2310,165 +2340,392 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
         * Check if Flow Director n-tuple support was enabled or disabled.  If
         * the state changed, we need to reset.
         */
-       if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) &&
-           (!(data & ETH_FLAG_NTUPLE))) {
-               /* turn off Flow Director perfect, set hash and reset */
+       if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+               /* turn off ATR, enable perfect filters and reset */
+               if (data & ETH_FLAG_NTUPLE) {
+                       adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+                       adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+                       need_reset = true;
+               }
+       } else if (!(data & ETH_FLAG_NTUPLE)) {
+               /* turn off Flow Director, set ATR and reset */
                adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-               adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+               if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+                   !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+                       adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
                need_reset = true;
-       } else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) &&
-                  (data & ETH_FLAG_NTUPLE)) {
-               /* turn off Flow Director hash, enable perfect and reset */
-               adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-               adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-               need_reset = true;
-       } else {
-               /* no state change */
        }

-       if (need_reset) {
-               if (netif_running(netdev))
-                       ixgbe_reinit_locked(adapter);
-               else
-                       ixgbe_reset(adapter);
-       }
+       if (need_reset)
+               ixgbe_do_reset(netdev);

        return 0;
 }
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
union ixgbe_atr_input *mask = &adapter->fdir_mask;
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
struct hlist_node *node, *node2;
struct ixgbe_fdir_filter *rule = NULL;
/* report total rule count */
cmd->data = (1024 << adapter->fdir_pballoc) - 2;
hlist_for_each_entry_safe(rule, node, node2,
&adapter->fdir_filter_list, fdir_node) {
if (fsp->location <= rule->sw_idx)
break;
}
if (!rule || fsp->location != rule->sw_idx)
return -EINVAL;
/* fill out the flow spec entry */
/* set flow type field */
switch (rule->filter.formatted.flow_type) {
case IXGBE_ATR_FLOW_TYPE_TCPV4:
fsp->flow_type = TCP_V4_FLOW;
break;
case IXGBE_ATR_FLOW_TYPE_UDPV4:
fsp->flow_type = UDP_V4_FLOW;
break;
case IXGBE_ATR_FLOW_TYPE_SCTPV4:
fsp->flow_type = SCTP_V4_FLOW;
break;
case IXGBE_ATR_FLOW_TYPE_IPV4:
fsp->flow_type = IP_USER_FLOW;
fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
fsp->h_u.usr_ip4_spec.proto = 0;
fsp->m_u.usr_ip4_spec.proto = 0;
break;
default:
return -EINVAL;
}
fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
fsp->flow_type |= FLOW_EXT;
/* record action */
if (rule->action == IXGBE_FDIR_DROP_QUEUE)
fsp->ring_cookie = RX_CLS_FLOW_DISC;
else
fsp->ring_cookie = rule->action;
return 0;
}
static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
struct hlist_node *node, *node2;
struct ixgbe_fdir_filter *rule;
int cnt = 0;
/* report total rule count */
cmd->data = (1024 << adapter->fdir_pballoc) - 2;
hlist_for_each_entry_safe(rule, node, node2,
&adapter->fdir_filter_list, fdir_node) {
if (cnt == cmd->rule_cnt)
return -EMSGSIZE;
rule_locs[cnt] = rule->sw_idx;
cnt++;
} }
        return 0;
 }
-static int ixgbe_set_rx_ntuple(struct net_device *dev,
-                              struct ethtool_rx_ntuple *cmd)
+static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+                          void *rule_locs)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
-       struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
-       union ixgbe_atr_input input_struct;
-       struct ixgbe_atr_input_masks input_masks;
-       int target_queue;
+       int ret = -EOPNOTSUPP;
+
+       switch (cmd->cmd) {
+       case ETHTOOL_GRXRINGS:
cmd->data = adapter->num_rx_queues;
ret = 0;
break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = adapter->fdir_filter_count;
ret = 0;
break;
case ETHTOOL_GRXCLSRULE:
ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
ret = ixgbe_get_ethtool_fdir_all(adapter, cmd,
(u32 *)rule_locs);
break;
default:
break;
}
return ret;
}
static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
struct ixgbe_fdir_filter *input,
u16 sw_idx)
{
struct ixgbe_hw *hw = &adapter->hw;
struct hlist_node *node, *node2, *parent;
struct ixgbe_fdir_filter *rule;
int err = -EINVAL;
parent = NULL;
rule = NULL;
hlist_for_each_entry_safe(rule, node, node2,
&adapter->fdir_filter_list, fdir_node) {
/* hash found, or no matching entry */
if (rule->sw_idx >= sw_idx)
break;
parent = node;
}
/* if there is an old rule occupying our place remove it */
if (rule && (rule->sw_idx == sw_idx)) {
if (!input || (rule->filter.formatted.bkt_hash !=
input->filter.formatted.bkt_hash)) {
err = ixgbe_fdir_erase_perfect_filter_82599(hw,
&rule->filter,
sw_idx);
}
hlist_del(&rule->fdir_node);
kfree(rule);
adapter->fdir_filter_count--;
}
/*
* If no input this was a delete, err should be 0 if a rule was
* successfully found and removed from the list else -EINVAL
*/
if (!input)
return err;
/* initialize node and set software index */
INIT_HLIST_NODE(&input->fdir_node);
/* add filter to the list */
if (parent)
hlist_add_after(parent, &input->fdir_node);
else
hlist_add_head(&input->fdir_node,
&adapter->fdir_filter_list);
/* update counts */
adapter->fdir_filter_count++;
return 0;
}
static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
u8 *flow_type)
{
switch (fsp->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
break;
case UDP_V4_FLOW:
*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
break;
case SCTP_V4_FLOW:
*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
break;
case IP_USER_FLOW:
switch (fsp->h_u.usr_ip4_spec.proto) {
case IPPROTO_TCP:
*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
break;
case IPPROTO_UDP:
*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
break;
case IPPROTO_SCTP:
*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
break;
case 0:
if (!fsp->m_u.usr_ip4_spec.proto) {
*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
break;
}
default:
return 0;
}
break;
default:
return 0;
}
return 1;
}
static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fdir_filter *input;
union ixgbe_atr_input mask;
        int err;

-       if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+       if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
                return -EOPNOTSUPP;
        /*
         * Don't allow programming if the action is a queue greater than
-        * the number of online Tx queues.
+        * the number of online Rx queues.
         */
-       if ((fs->action >= adapter->num_tx_queues) ||
-           (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
+       if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+           (fsp->ring_cookie >= adapter->num_rx_queues))
                return -EINVAL;
-       memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
-       memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
+       /* Don't allow indexes to exist outside of available space */
+       if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
e_err(drv, "Location out of range\n");
return -EINVAL;
}
input = kzalloc(sizeof(*input), GFP_ATOMIC);
if (!input)
return -ENOMEM;
memset(&mask, 0, sizeof(union ixgbe_atr_input));
/* set SW index */
input->sw_idx = fsp->location;
        /* record flow type */
-       switch (fs->flow_type) {
-       case IPV4_FLOW:
-               input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
-               break;
-       case TCP_V4_FLOW:
-               input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
-               break;
-       case UDP_V4_FLOW:
-               input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
-               break;
-       case SCTP_V4_FLOW:
-               input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
-               break;
-       default:
-               return -1;
+       if (!ixgbe_flowspec_to_flow_type(fsp,
+                                        &input->filter.formatted.flow_type)) {
+               e_err(drv, "Unrecognized flow type\n");
+               goto err_out;
        }
-       /* copy vlan tag minus the CFI bit */
-       if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
-               input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
-               if (!fs->vlan_tag_mask) {
-                       input_masks.vlan_id_mask = htons(0xEFFF);
-               } else {
-                       switch (~fs->vlan_tag_mask & 0xEFFF) {
-                       /* all of these are valid vlan-mask values */
-                       case 0xEFFF:
-                       case 0xE000:
-                       case 0x0FFF:
-                       case 0x0000:
-                               input_masks.vlan_id_mask =
-                                       htons(~fs->vlan_tag_mask);
-                               break;
-                       /* exit with error if vlan-mask is invalid */
-                       default:
-                               e_err(drv, "Partial VLAN ID or "
-                                     "priority mask in vlan-mask is not "
-                                     "supported by hardware\n");
-                               return -1;
-                       }
-               }
-       }
+       mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+                                  IXGBE_ATR_L4TYPE_MASK;
-       /* make sure we only use the first 2 bytes of user data */
-       if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
-               input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
-               if (!(fs->data_mask & 0xFFFF)) {
-                       input_masks.flex_mask = 0xFFFF;
-               } else if (~fs->data_mask & 0xFFFF) {
-                       e_err(drv, "Partial user-def-mask is not "
-                             "supported by hardware\n");
-                       return -1;
-               }
-       }
+       if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
+               mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
-       /*
-        * Copy input into formatted structures
-        *
-        * These assignments are based on the following logic
-        * If neither input or mask are set assume value is masked out.
-        * If input is set, but mask is not mask should default to accept all.
-        * If input is not set, but mask is set then mask likely results in 0.
-        * If input is set and mask is set then assign both.
-        */
-       if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
-               input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
-               if (!fs->m_u.tcp_ip4_spec.ip4src)
-                       input_masks.src_ip_mask[0] = 0xFFFFFFFF;
-               else
-                       input_masks.src_ip_mask[0] =
-                               ~fs->m_u.tcp_ip4_spec.ip4src;
-       }
-       if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
-               input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
-               if (!fs->m_u.tcp_ip4_spec.ip4dst)
-                       input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
-               else
-                       input_masks.dst_ip_mask[0] =
-                               ~fs->m_u.tcp_ip4_spec.ip4dst;
-       }
-       if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
-               input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
-               if (!fs->m_u.tcp_ip4_spec.psrc)
-                       input_masks.src_port_mask = 0xFFFF;
-               else
-                       input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
-       }
-       if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
-               input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
-               if (!fs->m_u.tcp_ip4_spec.pdst)
-                       input_masks.dst_port_mask = 0xFFFF;
-               else
-                       input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
-       }
+       /* Copy input into formatted structures */
+       input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+       mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
+       input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+       mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
+       input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+       mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+       input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+       mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+
+       if (fsp->flow_type & FLOW_EXT) {
+               input->filter.formatted.vm_pool =
+                               (unsigned char)ntohl(fsp->h_ext.data[1]);
+               mask.formatted.vm_pool =
+                               (unsigned char)ntohl(fsp->m_ext.data[1]);
+               input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
+               mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
+               input->filter.formatted.flex_bytes =
+                                               fsp->h_ext.vlan_etype;
+               mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
+       }
        /* determine if we need to drop or route the packet */
-       if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
-               target_queue = MAX_RX_QUEUES - 1;
+       if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+               input->action = IXGBE_FDIR_DROP_QUEUE;
        else
-               target_queue = fs->action;
+               input->action = fsp->ring_cookie;
        spin_lock(&adapter->fdir_perfect_lock);
-       err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
-                                                 &input_struct,
-                                                 &input_masks, 0,
-                                                 target_queue);
+
+       if (hlist_empty(&adapter->fdir_filter_list)) {
+               /* save mask and program input mask into HW */
+               memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
+               err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
+               if (err) {
+                       e_err(drv, "Error writing mask\n");
+                       goto err_out_w_lock;
+               }
+       } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
+               e_err(drv, "Only one mask supported per port\n");
+               goto err_out_w_lock;
+       }
+
+       /* apply mask and compute/store hash */
+       ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
+
+       /* program filters to filter memory */
+       err = ixgbe_fdir_write_perfect_filter_82599(hw,
+                               &input->filter, input->sw_idx,
+                               (input->action == IXGBE_FDIR_DROP_QUEUE) ?
+                               IXGBE_FDIR_DROP_QUEUE :
+                               adapter->rx_ring[input->action]->reg_idx);
+       if (err)
+               goto err_out_w_lock;
+
+       ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+
        spin_unlock(&adapter->fdir_perfect_lock);

-       return err ? -1 : 0;
+       return err;
err_out_w_lock:
spin_unlock(&adapter->fdir_perfect_lock);
err_out:
kfree(input);
return -EINVAL;
}
static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
int err;
spin_lock(&adapter->fdir_perfect_lock);
err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
spin_unlock(&adapter->fdir_perfect_lock);
return err;
}
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
break;
case ETHTOOL_SRXCLSRLDEL:
ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
break;
default:
break;
}
return ret;
} }
 static const struct ethtool_ops ixgbe_ethtool_ops = {
@@ -2506,7 +2763,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
        .set_coalesce = ixgbe_set_coalesce,
        .get_flags = ethtool_op_get_flags,
        .set_flags = ixgbe_set_flags,
-       .set_rx_ntuple = ixgbe_set_rx_ntuple,
+       .get_rxnfc = ixgbe_get_rxnfc,
+       .set_rxnfc = ixgbe_set_rxnfc,
 };

 void ixgbe_set_ethtool_ops(struct net_device *netdev)
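With get_rxnfc/set_rxnfc wired up in place of the removed set_rx_ntuple hook, these perfect filters are driven from userspace through ethtool's n-tuple interface. For example (interface name assumed), "ethtool -N eth2 flow-type tcp4 src-ip 192.168.0.1 dst-port 80 action 6 loc 1" inserts rule 1, "ethtool -N eth2 delete 1" removes it, and "ethtool -u eth2" lists the configured rules.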

drivers/net/ixgbe/ixgbe_main.c

@@ -54,11 +54,10 @@ char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
                              "Intel(R) 10 Gigabit PCI Express Network Driver";
 #define MAJ 3
-#define MIN 3
+#define MIN 4
 #define BUILD 8
-#define KFIX 2
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
-       __stringify(BUILD) "-k" __stringify(KFIX)
+       __stringify(BUILD) "-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
                                "Copyright (c) 1999-2011 Intel Corporation.";
@@ -1555,9 +1554,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                q_vector->eitr = adapter->rx_eitr_param;

                ixgbe_write_eitr(q_vector);
-               /* If Flow Director is enabled, set interrupt affinity */
-               if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-                   (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+               /* If ATR is enabled, set interrupt affinity */
+               if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
                        /*
                         * Allocate the affinity_hint cpumask, assign the mask
                         * for this vector, and set our affinity_hint for
@@ -2468,8 +2466,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
        default:
                break;
        }
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
-           adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
                mask |= IXGBE_EIMS_FLOW_DIR;

        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
@@ -3743,6 +3740,30 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
        hw->mac.ops.set_rxpba(&adapter->hw, num_tc, hdrm, PBA_STRATEGY_EQUAL);
 }
static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct hlist_node *node, *node2;
struct ixgbe_fdir_filter *filter;
spin_lock(&adapter->fdir_perfect_lock);
if (!hlist_empty(&adapter->fdir_filter_list))
ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
hlist_for_each_entry_safe(filter, node, node2,
&adapter->fdir_filter_list, fdir_node) {
ixgbe_fdir_write_perfect_filter_82599(hw,
&filter->filter,
filter->sw_idx,
(filter->action == IXGBE_FDIR_DROP_QUEUE) ?
IXGBE_FDIR_DROP_QUEUE :
adapter->rx_ring[filter->action]->reg_idx);
}
spin_unlock(&adapter->fdir_perfect_lock);
}
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
@@ -3768,7 +3789,9 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
                                      adapter->atr_sample_rate;
                ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
        } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
-               ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
+               ixgbe_init_fdir_perfect_82599(&adapter->hw,
+                                             adapter->fdir_pballoc);
+               ixgbe_fdir_filter_restore(adapter);
        }
        ixgbe_configure_virtualization(adapter);
@@ -4145,6 +4168,23 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
                ixgbe_clean_tx_ring(adapter->tx_ring[i]);
 }
static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
{
struct hlist_node *node, *node2;
struct ixgbe_fdir_filter *filter;
spin_lock(&adapter->fdir_perfect_lock);
hlist_for_each_entry_safe(filter, node, node2,
&adapter->fdir_filter_list, fdir_node) {
hlist_del(&filter->fdir_node);
kfree(filter);
}
adapter->fdir_filter_count = 0;
spin_unlock(&adapter->fdir_perfect_lock);
}
 void ixgbe_down(struct ixgbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
@@ -4334,15 +4374,13 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
        f_fdir->mask = 0;

        /* Flow Director must have RSS enabled */
-       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
-           ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
-            (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
+       if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+           (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
                adapter->num_tx_queues = f_fdir->indices;
                adapter->num_rx_queues = f_fdir->indices;
                ret = true;
        } else {
                adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-               adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
        }
        return ret;
 }
@@ -4372,12 +4410,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
        if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
                e_info(probe, "FCoE enabled with RSS\n");
-               if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-                   (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+               if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
                        ixgbe_set_fdir_queues(adapter);
                else
                        ixgbe_set_rss_queues(adapter);
        }

        /* adding FCoE rx rings to the end */
        f->mask = adapter->num_rx_queues;
        adapter->num_rx_queues += f->indices;
@@ -4670,9 +4708,8 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
        int i;
        bool ret = false;

-       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
-           ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-            (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
+       if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+           (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
                for (i = 0; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i]->reg_idx = i;
                for (i = 0; i < adapter->num_tx_queues; i++)
@@ -4701,8 +4738,7 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
                return false;

        if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-               if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-                   (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+               if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
                        ixgbe_cache_ring_fdir(adapter);
                else
                        ixgbe_cache_ring_rss(adapter);
@@ -4882,14 +4918,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
                adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
                adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-               if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
-                                     IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+               if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
                        e_err(probe,
-                             "Flow Director is not supported while multiple "
+                             "ATR is not supported while multiple "
                              "queues are disabled.  Disabling Flow Director\n");
                }
                adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-               adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
                adapter->atr_sample_rate = 0;
                if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
                        ixgbe_disable_sriov(adapter);
@@ -5140,7 +5174,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
                adapter->atr_sample_rate = 20;
                adapter->ring_feature[RING_F_FDIR].indices =
                                                         IXGBE_MAX_FDIR_INDICES;
-               adapter->fdir_pballoc = 0;
+               adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
 #ifdef IXGBE_FCOE
                adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
                adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
@@ -5537,6 +5571,8 @@ static int ixgbe_close(struct net_device *netdev)
        ixgbe_down(adapter);
        ixgbe_free_irq(adapter);

+       ixgbe_fdir_filter_exit(adapter);
+
        ixgbe_free_all_tx_resources(adapter);
        ixgbe_free_all_rx_resources(adapter);
@@ -7676,7 +7712,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        /* Inform firmware of driver version */
        if (hw->mac.ops.set_fw_drv_ver)
-               hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD, KFIX);
+               hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD,
+                                          FW_CEM_UNUSED_VER);

        /* add san mac addr to netdev */
        ixgbe_add_sanmac_netdev(netdev);

drivers/net/ixgbe/ixgbe_type.h

@@ -2056,9 +2056,10 @@ enum {
 #define IXGBE_VFLREC(_i)         (0x00700 + (_i * 4))

 enum ixgbe_fdir_pballoc_type {
-       IXGBE_FDIR_PBALLOC_64K  = 0,
-       IXGBE_FDIR_PBALLOC_128K,
-       IXGBE_FDIR_PBALLOC_256K,
+       IXGBE_FDIR_PBALLOC_NONE = 0,
+       IXGBE_FDIR_PBALLOC_64K  = 1,
+       IXGBE_FDIR_PBALLOC_128K = 2,
+       IXGBE_FDIR_PBALLOC_256K = 3,
 };
 #define IXGBE_FDIR_PBALLOC_SIZE_SHIFT           16
@@ -2112,7 +2113,7 @@ enum ixgbe_fdir_pballoc_type {
 #define IXGBE_FDIRCMD_CMD_ADD_FLOW              0x00000001
 #define IXGBE_FDIRCMD_CMD_REMOVE_FLOW           0x00000002
 #define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT        0x00000003
-#define IXGBE_FDIRCMD_CMD_QUERY_REM_HASH        0x00000007
+#define IXGBE_FDIRCMD_FILTER_VALID              0x00000004
 #define IXGBE_FDIRCMD_FILTER_UPDATE             0x00000008
 #define IXGBE_FDIRCMD_IPv6DMATCH                0x00000010
 #define IXGBE_FDIRCMD_L4TYPE_UDP                0x00000020
@@ -2131,6 +2132,8 @@ enum ixgbe_fdir_pballoc_type {
 #define IXGBE_FDIR_INIT_DONE_POLL               10
 #define IXGBE_FDIRCMD_CMD_POLL                  10

+#define IXGBE_FDIR_DROP_QUEUE                   127
+
 /* Manageablility Host Interface defines */
 #define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH  1792 /* Num of bytes in range */
 #define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
@@ -2140,7 +2143,8 @@ enum ixgbe_fdir_pballoc_type {
 #define FW_CEM_HDR_LEN                  0x4
 #define FW_CEM_CMD_DRIVER_INFO          0xDD
 #define FW_CEM_CMD_DRIVER_INFO_LEN      0x5
-#define FW_CEM_CMD_RESERVED             0X0
+#define FW_CEM_CMD_RESERVED             0x0
+#define FW_CEM_UNUSED_VER               0x0
 #define FW_CEM_MAX_RETRIES              3
 #define FW_CEM_RESP_STATUS_SUCCESS      0x1
@@ -2350,7 +2354,7 @@ union ixgbe_atr_input {
         *       src_port  - 2 bytes
         *       dst_port  - 2 bytes
         *       flex_bytes - 2 bytes
-        *       rsvd0     - 2 bytes - space reserved must be 0.
+        *       bkt_hash  - 2 bytes
         */
        struct {
                u8 vm_pool;
@@ -2361,7 +2365,7 @@ union ixgbe_atr_input {
                __be16 src_port;
                __be16 dst_port;
                __be16 flex_bytes;
-               __be16 rsvd0;
+               __be16 bkt_hash;
        } formatted;
        __be32 dword_stream[11];
 };
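The dword_stream overlay is what lets the driver mask and hash the tuple as a flat run of dwords (as in the masking and XOR-fold loops earlier in this commit) while still addressing fields by name. A compilable miniature of the same trick; the 3-dword layout is illustrative, not the driver's 44-byte structure:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

union toy_atr_input {
        struct {
                uint32_t src_ip;
                uint32_t dst_ip;
                uint16_t src_port;
                uint16_t dst_port;
        } formatted;
        uint32_t dword_stream[3];
};

int main(void)
{
        union toy_atr_input in, mask;
        unsigned int i;

        memset(&in, 0, sizeof(in));
        memset(&mask, 0, sizeof(mask));
        in.formatted.src_ip = htonl(0xC0A80001);   /* 192.168.0.1 */
        in.formatted.src_port = htons(1234);
        mask.formatted.src_ip = htonl(0xFFFFFF00); /* /24 mask */

        for (i = 0; i < 3; i++)         /* apply mask dword-at-a-time */
                in.dword_stream[i] &= mask.dword_stream[i];

        printf("masked src_ip = %08X\n", (unsigned)ntohl(in.formatted.src_ip));
        return 0;
}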
@@ -2382,16 +2386,6 @@ union ixgbe_atr_hash_dword {
        __be32 dword;
 };

-struct ixgbe_atr_input_masks {
-       __be16 rsvd0;
-       __be16 vlan_id_mask;
-       __be32 dst_ip_mask[4];
-       __be32 src_ip_mask[4];
-       __be16 src_port_mask;
-       __be16 dst_port_mask;
-       __be16 flex_mask;
-};
-
 enum ixgbe_eeprom_type {
        ixgbe_eeprom_uninitialized = 0,
        ixgbe_eeprom_spi,