Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-03-26

This series contains more updates to the ice driver only.

Jeremiah provides his first patch to the Linux kernel to clean up unnecessary newlines in driver log messages.

Mitch updates the ice driver to use existing status codes in the iavf driver so that when errors occur, it will not report nonsensical results. Adds support for VF admin queue interrupts by programming the VPINT_MBX_CTL register array.

Brett adds a check for a bit that we set while preparing for a reset, to ensure we are prepared to do a proper reset. Also implemented PCI error handling operations. Went through and audited the hot path with pahole and made modifications based on the results, since two structures were taking up more space than necessary due to cache alignment issues. Fixed an issue where, when flow control was disabled, its state was being displayed as "Unknown".

Anirudh fixes adaptive interrupt moderation by adding code that was missed and should have been added in the initial patch introducing that support. Cleaned up a function prototype that was never implemented. Did additional code cleanup by removing unneeded braces and redundant code comments.

Akeem fixes an issue that occurs when the VF attempts to remove the default LAN/MAC address programmed by the administrator, updating the error message to explicitly say that the VF cannot change a MAC programmed by the PF.

Preethi fixes the driver to not fall into the error path when an added filter already exists, but instead continue processing the rest of the function, adding appropriate checks after adding MAC filters.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit eec7e2954d
@@ -294,19 +294,25 @@ struct ice_vsi {
/* struct that defines an interrupt vector */
struct ice_q_vector {
	struct ice_vsi *vsi;
	cpumask_t affinity_mask;
	struct napi_struct napi;
	struct ice_ring_container rx;
	struct ice_ring_container tx;
	struct irq_affinity_notify affinity_notify;

	u16 v_idx;			/* index in the vsi->q_vector array. */
	u8 num_ring_tx;			/* total number of Tx rings in vector */
	u8 num_ring_rx;			/* total number of Rx rings in vector */
	char name[ICE_INT_NAME_STR_LEN];
	u8 num_ring_tx;			/* total number of Tx rings in vector */
	u8 itr_countdown;		/* when 0 should adjust adaptive ITR */
	/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
	 * value to the device
	 */
	u8 intrl;

	struct napi_struct napi;

	struct ice_ring_container rx;
	struct ice_ring_container tx;

	cpumask_t affinity_mask;
	struct irq_affinity_notify affinity_notify;

	char name[ICE_INT_NAME_STR_LEN];
} ____cacheline_internodealigned_in_smp;

enum ice_pf_flags {
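This hunk is the pahole audit from the commit message in action: hot-path scalars move up next to the members they are used with, and cold bookkeeping (affinity data, the IRQ name) drops to the tail so it stops sharing cache lines with per-interrupt state. A small user-space sketch of the idea follows; the field names and sizes are illustrative stand-ins, not the real driver layout.

#include <stdio.h>
#include <stddef.h>

struct vec_cold_layout {		/* hot fields scattered */
	char name[32];			/* cold: only used at request_irq() time */
	unsigned short v_idx;		/* hot */
	char big[96];			/* stand-in for napi/ring containers */
	unsigned char itr_countdown;	/* hot: touched every interrupt */
};

struct vec_hot_layout {			/* hot fields packed up front */
	unsigned short v_idx;
	unsigned char itr_countdown;
	char big[96];
	char name[32];
};

int main(void)
{
	/* pahole reads the same numbers out of DWARF; offsetof() is the
	 * poor man's version. In the second layout both hot members land
	 * in the first 64-byte cache line.
	 */
	printf("cold layout: itr_countdown at byte %zu\n",
	       offsetof(struct vec_cold_layout, itr_countdown));
	printf("hot  layout: itr_countdown at byte %zu\n",
	       offsetof(struct vec_hot_layout, itr_countdown));
	return 0;
}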
@@ -178,6 +178,8 @@
#define VPINT_ALLOC_PCI_LAST_S		12
#define VPINT_ALLOC_PCI_LAST_M		ICE_M(0x7FF, 12)
#define VPINT_ALLOC_PCI_VALID_M		BIT(31)
#define VPINT_MBX_CTL(_VSI)		(0x0016A000 + ((_VSI) * 4))
#define VPINT_MBX_CTL_CAUSE_ENA_M	BIT(30)
#define GLLAN_RCTL_0			0x002941F8
#define QRX_CONTEXT(_i, _QRX)		(0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX)			(0x00120000 + ((_QRX) * 4))
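VPINT_MBX_CTL is a per-VSI register array: one 32-bit register per VSI, 4 bytes apart from base 0x0016A000, with bit 30 as the cause-enable. The address arithmetic, checked standalone (VSI number 5 is an arbitrary example):

#include <stdio.h>
#include <stdint.h>

#define VPINT_MBX_CTL(_VSI)		(0x0016A000 + ((_VSI) * 4))
#define VPINT_MBX_CTL_CAUSE_ENA_M	(1u << 30)

int main(void)
{
	uint32_t addr = VPINT_MBX_CTL(5);
	/* cause enable ORed with MSI-X vector 0, as in ice_ena_vf_mappings() */
	uint32_t val = VPINT_MBX_CTL_CAUSE_ENA_M | 0;

	printf("write 0x%08x to register offset 0x%08x\n",
	       (unsigned)val, (unsigned)addr);
	return 0;
}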
@@ -342,12 +342,12 @@ enum ice_tx_desc_cmd_bits {
	ICE_TX_DESC_CMD_EOP			= 0x0001,
	ICE_TX_DESC_CMD_RS			= 0x0002,
	ICE_TX_DESC_CMD_IL2TAG1			= 0x0008,
	ICE_TX_DESC_CMD_IIPT_IPV6		= 0x0020, /* 2 BITS */
	ICE_TX_DESC_CMD_IIPT_IPV4		= 0x0040, /* 2 BITS */
	ICE_TX_DESC_CMD_IIPT_IPV4_CSUM		= 0x0060, /* 2 BITS */
	ICE_TX_DESC_CMD_L4T_EOFT_TCP		= 0x0100, /* 2 BITS */
	ICE_TX_DESC_CMD_L4T_EOFT_SCTP		= 0x0200, /* 2 BITS */
	ICE_TX_DESC_CMD_L4T_EOFT_UDP		= 0x0300, /* 2 BITS */
	ICE_TX_DESC_CMD_IIPT_IPV6		= 0x0020,
	ICE_TX_DESC_CMD_IIPT_IPV4		= 0x0040,
	ICE_TX_DESC_CMD_IIPT_IPV4_CSUM		= 0x0060,
	ICE_TX_DESC_CMD_L4T_EOFT_TCP		= 0x0100,
	ICE_TX_DESC_CMD_L4T_EOFT_SCTP		= 0x0200,
	ICE_TX_DESC_CMD_L4T_EOFT_UDP		= 0x0300,
};

#define ICE_TXD_QW1_OFFSET_S	16
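The /* 2 BITS */ annotations dropped here were redundant: the values themselves span the whole field, e.g. IIPT_IPV4_CSUM (0x0060) is both bits of the two-bit IIPT field rather than an independent flag, so descriptor commands are built by ORing the enum values. A sketch of that composition; the qword-1 command shift of 4 is an assumption borrowed from other Intel Ethernet descriptor layouts, not taken from this diff.

#include <stdio.h>
#include <stdint.h>

enum tx_desc_cmd_bits {			/* subset of the enum above */
	CMD_EOP			= 0x0001,
	CMD_RS			= 0x0002,
	CMD_IIPT_IPV4_CSUM	= 0x0060,	/* fills the 2-bit IIPT field */
	CMD_L4T_EOFT_TCP	= 0x0100,	/* fills the 2-bit L4T field */
};

#define TXD_QW1_CMD_S	4			/* assumed shift */

int main(void)
{
	/* last fragment of an IPv4+TCP frame with checksum offload,
	 * requesting a descriptor writeback
	 */
	uint64_t cmd = CMD_EOP | CMD_RS | CMD_IIPT_IPV4_CSUM | CMD_L4T_EOFT_TCP;

	printf("qw1 command bits: 0x%llx\n",
	       (unsigned long long)(cmd << TXD_QW1_CMD_S));
	return 0;
}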
@@ -608,11 +608,10 @@ err_scatter:

/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 * @qs_cfg: gathered variables needed for pf->vsi queues assignment
 *
 * This is an internal function for assigning queues from the PF to VSI and
 * initially tries to find contiguous space. If it is not successful to find
 * contiguous space, then it tries with the scatter approach.
 * This function first tries to find contiguous space. If it is not successful,
 * it tries with the scatter approach.
 *
 * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
 */
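A user-space sketch of the contiguous-then-scatter policy the rewritten comment describes, against a toy PF queue map (illustrative only; the driver works on its own bitmap helpers):

#include <stdio.h>
#include <stdbool.h>

#define PF_NUM_Q 16

static bool used[PF_NUM_Q] = {
	[0] = true, [1] = true, [3] = true, [4] = true,	/* fragmented map */
};

static int get_contig(int n, int *out)
{
	for (int start = 0; start + n <= PF_NUM_Q; start++) {
		int len = 0;

		while (len < n && !used[start + len])
			len++;
		if (len == n) {
			for (int i = 0; i < n; i++) {
				used[start + i] = true;
				out[i] = start + i;
			}
			return 0;
		}
	}
	return -1;
}

static int get_scatter(int n, int *out)
{
	int got = 0;

	for (int q = 0; q < PF_NUM_Q && got < n; q++)
		if (!used[q]) {
			used[q] = true;
			out[got++] = q;
		}
	return got == n ? 0 : -1;	/* -ENOMEM in the driver */
}

int main(void)
{
	int qs[4];

	/* contiguous first, scatter as the fallback */
	if (get_contig(4, qs) && get_scatter(4, qs)) {
		puts("no space left");
		return 1;
	}
	for (int i = 0; i < 4; i++)
		printf("vsi queue %d -> pf queue %d\n", i, qs[i]);
	return 0;
}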
@@ -1820,7 +1819,6 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
		rc->target_itr = ITR_TO_REG(rc->itr_setting);
		rc->next_update = jiffies + 1;
		rc->current_itr = rc->target_itr;
		rc->latency_range = ICE_LOW_LATENCY;
		wr32(hw, GLINT_ITR(rc->itr_idx, vector),
		     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
	}
@@ -1835,7 +1833,6 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
		rc->target_itr = ITR_TO_REG(rc->itr_setting);
		rc->next_update = jiffies + 1;
		rc->current_itr = rc->target_itr;
		rc->latency_range = ICE_LOW_LATENCY;
		wr32(hw, GLINT_ITR(rc->itr_idx, vector),
		     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
	}
@@ -70,8 +70,6 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi);

void ice_vsi_free_tx_rings(struct ice_vsi *vsi);

int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);

int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);

#endif /* !_ICE_LIB_H_ */
@@ -260,7 +260,11 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
	/* Add mac addresses in the sync list */
	status = ice_add_mac(hw, &vsi->tmp_sync_list);
	ice_free_fltr_list(dev, &vsi->tmp_sync_list);
	if (status) {
	/* If filter is added successfully or already exists, do not go into
	 * 'if' condition and report it as error. Instead continue processing
	 * rest of the function.
	 */
	if (status && status != ICE_ERR_ALREADY_EXISTS) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, vsi
		 * should go into promiscuous mode. There should be some
@@ -403,6 +407,10 @@ ice_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* already prepared for reset */
	if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
		return;

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);
@@ -486,8 +494,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
	/* return if no valid reset type requested */
	if (reset_type == ICE_RESET_INVAL)
		return;
	if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
		ice_prepare_for_reset(pf);
	ice_prepare_for_reset(pf);

	/* make sure we are ready to rebuild */
	if (ice_check_reset(&pf->hw)) {
@@ -588,6 +595,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
	case ICE_FC_RX_PAUSE:
		fc = "RX";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
@@ -1001,6 +1011,18 @@ static void ice_service_task_stop(struct ice_pf *pf)
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume works (e.g WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(__ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
@@ -2392,6 +2414,136 @@ static void ice_remove(struct pci_dev *pdev)
	pci_disable_pcie_error_reporting(pdev);
}

/**
 * ice_pci_err_detected - warning that PCI error has been detected
 * @pdev: PCI device information struct
 * @err: the type of PCI error
 *
 * Called to warn that something happened on the PCI bus and the error handling
 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
 */
static pci_ers_result_t
ice_pci_err_detected(struct pci_dev *pdev, enum pci_channel_state err)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	if (!pf) {
		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
			__func__, err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (!test_bit(__ICE_SUSPENDED, pf->state)) {
		ice_service_task_stop(pf);

		if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
			set_bit(__ICE_PFR_REQ, pf->state);
			ice_prepare_for_reset(pf);
		}
	}

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ice_pci_err_slot_reset - a PCI slot reset has just happened
 * @pdev: PCI device information struct
 *
 * Called to determine if the driver can recover from the PCI slot reset by
 * using a register read to determine if the device is recoverable.
 */
static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset, error %d\n",
			err);
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		/* Check for life */
		reg = rd32(&pf->hw, GLGEN_RTRIG);
		if (!reg)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err)
		dev_dbg(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
			err);
		/* non-fatal, continue */

	return result;
}

/**
 * ice_pci_err_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error and/or
 * reset recovery have finished
 */
static void ice_pci_err_resume(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	if (!pf) {
		dev_err(&pdev->dev,
			"%s failed, device is unrecoverable\n", __func__);
		return;
	}

	if (test_bit(__ICE_SUSPENDED, pf->state)) {
		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
			__func__);
		return;
	}

	ice_do_reset(pf, ICE_RESET_PFR);
	ice_service_task_restart(pf);
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
}

/**
 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
 * @pdev: PCI device information struct
 */
static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	if (!test_bit(__ICE_SUSPENDED, pf->state)) {
		ice_service_task_stop(pf);

		if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
			set_bit(__ICE_PFR_REQ, pf->state);
			ice_prepare_for_reset(pf);
		}
	}
}

/**
 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
 * @pdev: PCI device information struct
 */
static void ice_pci_err_reset_done(struct pci_dev *pdev)
{
	ice_pci_err_resume(pdev);
}

/* ice_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
@@ -2409,12 +2561,21 @@ static const struct pci_device_id ice_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);

static const struct pci_error_handlers ice_pci_err_handler = {
	.error_detected = ice_pci_err_detected,
	.slot_reset = ice_pci_err_slot_reset,
	.reset_prepare = ice_pci_err_reset_prepare,
	.reset_done = ice_pci_err_reset_done,
	.resume = ice_pci_err_resume
};

static struct pci_driver ice_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ice_pci_tbl,
	.probe = ice_probe,
	.remove = ice_remove,
	.sriov_configure = ice_sriov_configure,
	.err_handler = &ice_pci_err_handler
};
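For context on how these hooks get exercised: the PCI core walks them in a fixed order during AER recovery, and the common non-fatal path per Documentation/PCI/pci-error-recovery.rst is error_detected -> slot_reset -> resume, which matches the hand-off above (stop work and prepare for reset, re-enable the device and check for life, then PF reset and restart). A toy walk of that sequence:

#include <stdio.h>

typedef enum { ERS_NEED_RESET, ERS_RECOVERED, ERS_DISCONNECT } ers_t;

static ers_t demo_error_detected(void)
{
	puts("error_detected: stop service task, prepare for reset");
	return ERS_NEED_RESET;
}

static ers_t demo_slot_reset(void)
{
	puts("slot_reset: re-enable device, check for life");
	return ERS_RECOVERED;
}

static void demo_resume(void)
{
	puts("resume: PF reset, restart service task");
}

int main(void)
{
	if (demo_error_detected() != ERS_NEED_RESET)
		return 1;
	if (demo_slot_reset() != ERS_RECOVERED)
		return 1;
	demo_resume();
	return 0;
}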

/**
@@ -968,9 +968,8 @@ static void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK)) {
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	}
	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
@@ -1097,18 +1096,257 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
	return failure ? budget : (int)total_rx_pkts;
}

static unsigned int ice_itr_divisor(struct ice_port_info *pi)
{
	switch (pi->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_40GB:
		return ICE_ITR_ADAPTIVE_MIN_INC * 1024;
	case ICE_AQ_LINK_SPEED_25GB:
	case ICE_AQ_LINK_SPEED_20GB:
		return ICE_ITR_ADAPTIVE_MIN_INC * 512;
	case ICE_AQ_LINK_SPEED_100MB:
		return ICE_ITR_ADAPTIVE_MIN_INC * 32;
	default:
		return ICE_ITR_ADAPTIVE_MIN_INC * 256;
	}
}

/**
 * ice_update_itr - update the adaptive ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 */
static void
ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
{
	unsigned int avg_wire_size, packets, bytes, itr;
	unsigned long next_update = jiffies;
	bool container_is_rx;

	if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
		return;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	container_is_rx = (&q_vector->rx == rc);
	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = container_is_rx ?
		ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
		ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	packets = rc->total_pkts;
	bytes = rc->total_bytes;

	if (container_is_rx) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
			itr = ICE_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & ICE_ITR_MASK) ==
		    ICE_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
		if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
			itr &= ICE_ITR_ADAPTIVE_LATENCY;
			itr += ICE_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= ICE_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr >>= 1;
		itr &= ICE_ITR_MASK;
		if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
			itr = ICE_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = ICE_ITR_ADAPTIVE_BULK;

adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to fine
	 * tune it just use the formula below to try and dial in an exact value
	 * given the current packet size of the frame.
	 */
	avg_wire_size = bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 60) {
		/* Start at 250k ints/sec */
		avg_wire_size = 4096;
	} else if (avg_wire_size <= 380) {
		/* 250K ints/sec to 60K ints/sec */
		avg_wire_size *= 40;
		avg_wire_size += 1696;
	} else if (avg_wire_size <= 1084) {
		/* 60K ints/sec to 36K ints/sec */
		avg_wire_size *= 15;
		avg_wire_size += 11452;
	} else if (avg_wire_size <= 1980) {
		/* 36K ints/sec to 30K ints/sec */
		avg_wire_size *= 5;
		avg_wire_size += 22420;
	} else {
		/* plateau at a limit of 30K ints/sec */
		avg_wire_size = 32256;
	}

	/* If we are in low latency mode halve our delay which doubles the
	 * rate to somewhere between 100K to 16K ints/sec
	 */
	if (itr & ICE_ITR_ADAPTIVE_LATENCY)
		avg_wire_size >>= 1;

	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
	 */
	itr += DIV_ROUND_UP(avg_wire_size,
			    ice_itr_divisor(q_vector->vsi->port_info)) *
		ICE_ITR_ADAPTIVE_MIN_INC;

	if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
		itr &= ICE_ITR_ADAPTIVE_LATENCY;
		itr += ICE_ITR_ADAPTIVE_MAX_USECS;
	}

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_pkts = 0;
}
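To get a feel for the piecewise bands above, here is a standalone run of the scaling followed by the final divide by the link-speed divisor (ICE_ITR_ADAPTIVE_MIN_INC * 256 at default speeds, per ice_itr_divisor()). The printed step mirrors the driver's DIV_ROUND_UP(...) * MIN_INC; treat the absolute numbers as illustrative, since their units depend on the ITR granularity, and only the shape matters: larger packets yield a larger ITR, i.e. fewer interrupts.

#include <stdio.h>

static unsigned int scale(unsigned int avg_wire_size)
{
	if (avg_wire_size <= 60)
		return 4096;			/* start at the highest rate */
	if (avg_wire_size <= 380)
		return avg_wire_size * 40 + 1696;
	if (avg_wire_size <= 1084)
		return avg_wire_size * 15 + 11452;
	if (avg_wire_size <= 1980)
		return avg_wire_size * 5 + 22420;
	return 32256;				/* plateau */
}

int main(void)
{
	unsigned int divisor = 2 * 256;	/* MIN_INC * 256, default link speed */
	unsigned int sizes[] = { 60, 200, 800, 1500, 9000 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int s = scale(sizes[i]);

		/* DIV_ROUND_UP(s, divisor) * MIN_INC, as in the driver */
		printf("avg %4u bytes -> scaled %5u -> itr step %u\n",
		       sizes[i], s, (s + divisor - 1) / divisor * 2);
	}
	return 0;
}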

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @reg_itr: interrupt throttling value adjusted based on ITR granularity
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The itr value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
	 */
	itr &= ICE_ITR_MASK;

	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(reg_itr << GLINT_DYN_CTL_INTERVAL_S);
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}
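The rewritten ice_buildreg_itr() folds the usec-to-2-usec-unit division into the field shift. The same math standalone; the concrete bit positions (INTENA bit 0, CLEARPBA bit 1, ITR index at bit 3, interval at bit 5) are my reading of the ice register layout and should be treated as assumptions here.

#include <stdio.h>
#include <stdint.h>

#define GLINT_DYN_CTL_INTENA_M		(1u << 0)
#define GLINT_DYN_CTL_CLEARPBA_M	(1u << 1)
#define GLINT_DYN_CTL_ITR_INDX_S	3
#define GLINT_DYN_CTL_INTERVAL_S	5
#define ICE_ITR_GRAN_S			1	/* register counts 2 usec units */
#define ICE_ITR_MASK			0x1FFE

static uint32_t buildreg_itr(uint16_t itr_idx, uint16_t itr_usecs)
{
	uint16_t itr = itr_usecs & ICE_ITR_MASK;	/* keep it even */

	/* usecs / 2 == usecs >> GRAN_S, folded into the field shift */
	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
	       ((uint32_t)itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
	       ((uint32_t)itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}

int main(void)
{
	/* ITR index 0, 50 usecs: 50 / 2 = 25 register units in the field */
	printf("GLINT_DYN_CTL = 0x%08x\n", (unsigned)buildreg_itr(0, 50));
	return 0;
}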

/* The act of updating the ITR will cause it to immediately trigger. In order
 * to prevent this from throwing off adaptive update statistics we defer the
 * update so that it can only happen so often. So after either Tx or Rx are
 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */
#define ITR_COUNTDOWN_START 3

/**
 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
 * @vsi: the VSI associated with the q_vector
@@ -1117,10 +1355,14 @@ static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
static void
ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_ring_container *rc;
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;
	u32 itr_val;

	/* This will do nothing if dynamic updates are not enabled */
	ice_update_itr(q_vector, tx);
	ice_update_itr(q_vector, rx);

	/* This block of logic allows us to get away with only updating
	 * one ITR value with each interrupt. The idea is to perform a
	 * pseudo-lazy update with the following criteria.
@@ -1129,35 +1371,36 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
	 * 2. If we must reduce an ITR that is given highest priority.
	 * 3. We then give priority to increasing ITR based on amount.
	 */
	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
		rc = &q_vector->rx;
	if (rx->target_itr < rx->current_itr) {
		/* Rx ITR needs to be reduced, this is highest priority */
		itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
		rc->current_itr = rc->target_itr;
	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
		rc = &q_vector->tx;
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if ((tx->target_itr < tx->current_itr) ||
		   ((rx->target_itr - rx->current_itr) <
		    (tx->target_itr - tx->current_itr))) {
		/* Tx ITR needs to be reduced, this is second priority
		 * Tx ITR needs to be increased more than Rx, fourth priority
		 */
		itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
		rc->current_itr = rc->target_itr;
	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
		rc = &q_vector->rx;
		itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
		tx->current_itr = tx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if (rx->current_itr != rx->target_itr) {
		/* Rx ITR needs to be increased, third priority */
		itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
		rc->current_itr = rc->target_itr;
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else {
		/* Still have to re-enable the interrupts */
		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
		if (q_vector->itr_countdown)
			q_vector->itr_countdown--;
	}

	if (!test_bit(__ICE_DOWN, vsi->state)) {
		int vector = vsi->hw_base_vector + q_vector->v_idx;

		wr32(hw, GLINT_DYN_CTL(vector), itr_val);
	}
	if (!test_bit(__ICE_DOWN, vsi->state))
		wr32(&vsi->back->hw,
		     GLINT_DYN_CTL(vsi->hw_base_vector + q_vector->v_idx),
		     itr_val);
}
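The priority chain above is easier to follow with numbers. A table-top run with made-up target/current pairs (Rx wants to drop, Tx is settled, so the reduction branch wins):

#include <stdio.h>

int main(void)
{
	unsigned int rx_target = 50, rx_current = 100;	/* Rx wants to drop */
	unsigned int tx_target = 80, tx_current = 80;	/* Tx is settled */

	if (rx_target < rx_current)
		puts("write Rx ITR (reduction, highest priority)");
	else if (tx_target < tx_current ||
		 (rx_target - rx_current) < (tx_target - tx_current))
		puts("write Tx ITR (reduction second, bigger increase fourth)");
	else if (rx_current != rx_target)
		puts("write Rx ITR (increase, third priority)");
	else
		puts("nothing to change, just re-enable the interrupt");
	return 0;
}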

/**
@@ -128,11 +128,17 @@ enum ice_rx_dtype {
#define ICE_ITR_DYNAMIC	0x8000 /* used as flag for itr_setting */
#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC))
#define ITR_TO_REG(setting)	((setting) & ~ICE_ITR_DYNAMIC)
#define ICE_ITR_GRAN_S		1	/* Assume ITR granularity is 2us */
#define ICE_ITR_GRAN_S		1	/* ITR granularity is always 2us */
#define ICE_ITR_GRAN_US		BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK		0x1FFE	/* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	__ALIGN_MASK(setting, ~ICE_ITR_MASK)

#define ICE_ITR_ADAPTIVE_MIN_INC	0x0002
#define ICE_ITR_ADAPTIVE_MIN_USECS	0x0002
#define ICE_ITR_ADAPTIVE_MAX_USECS	0x00FA
#define ICE_ITR_ADAPTIVE_LATENCY	0x8000
#define ICE_ITR_ADAPTIVE_BULK		0x0000

#define ICE_DFLT_INTRL	0
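The granularity defines combine like this: mask the requested usecs to an even value, then shift right by ICE_ITR_GRAN_S to get 2-usec register units. Standalone, with __ALIGN_MASK open-coded so it runs outside the kernel:

#include <stdio.h>

#define ICE_ITR_GRAN_S	1
#define ICE_ITR_MASK	0x1FFEu
/* open-coded __ALIGN_MASK(setting, ~ICE_ITR_MASK) */
#define ITR_REG_ALIGN(x)	(((x) + ~ICE_ITR_MASK) & ICE_ITR_MASK)

int main(void)
{
	unsigned int usecs = 25;	/* odd value from a user request */

	/* 25 -> aligned to 26 usecs -> 13 in 2-usec register units */
	printf("aligned %u usecs, %u register units\n", ITR_REG_ALIGN(usecs),
	       ITR_REG_ALIGN(usecs) >> ICE_ITR_GRAN_S);
	return 0;
}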
/* Legacy or Advanced Mode Queue */
@@ -178,21 +184,13 @@ struct ice_ring {
	u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;

enum ice_latency_range {
	ICE_LOWEST_LATENCY = 0,
	ICE_LOW_LATENCY = 1,
	ICE_BULK_LATENCY = 2,
	ICE_ULTRA_LATENCY = 3,
};

struct ice_ring_container {
	/* head of linked-list of rings */
	struct ice_ring *ring;
	unsigned long next_update;	/* jiffies value of next queue update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_pkts;	/* total packets processed this int */
	enum ice_latency_range latency_range;
	int itr_idx;		/* index in the interrupt vector */
	u16 itr_idx;		/* index in the interrupt vector */
	u16 target_itr;		/* value in usecs divided by the hw->itr_gran */
	u16 current_itr;	/* value in usecs divided by the hw->itr_gran */
	/* high bit set means dynamic ITR, rest is used to store user
@@ -4,6 +4,37 @@
#include "ice.h"
#include "ice_lib.h"

/**
 * ice_err_to_virt_err - translate errors for VF return code
 * @ice_err: error return code
 */
static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
{
	switch (ice_err) {
	case ICE_SUCCESS:
		return VIRTCHNL_STATUS_SUCCESS;
	case ICE_ERR_BAD_PTR:
	case ICE_ERR_INVAL_SIZE:
	case ICE_ERR_DEVICE_NOT_SUPPORTED:
	case ICE_ERR_PARAM:
	case ICE_ERR_CFG:
		return VIRTCHNL_STATUS_ERR_PARAM;
	case ICE_ERR_NO_MEMORY:
		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
	case ICE_ERR_NOT_READY:
	case ICE_ERR_RESET_FAILED:
	case ICE_ERR_FW_API_VER:
	case ICE_ERR_AQ_ERROR:
	case ICE_ERR_AQ_TIMEOUT:
	case ICE_ERR_AQ_FULL:
	case ICE_ERR_AQ_NO_WORK:
	case ICE_ERR_AQ_EMPTY:
		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	default:
		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
	}
}
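A standalone check of the mapping shape in ice_err_to_virt_err(): AQ-flavoured errors collapse onto the admin queue status and anything unlisted onto NOT_SUPPORTED. The enum values below are stand-ins, not the kernel definitions.

#include <stdio.h>

enum ice_status { ICE_SUCCESS, ICE_ERR_PARAM, ICE_ERR_AQ_TIMEOUT, ICE_ERR_HW_TABLE };
enum virt_status { V_OK, V_ERR_PARAM, V_ERR_AQ, V_ERR_NOT_SUP };

static enum virt_status to_virt(enum ice_status e)
{
	switch (e) {
	case ICE_SUCCESS:        return V_OK;
	case ICE_ERR_PARAM:      return V_ERR_PARAM;
	case ICE_ERR_AQ_TIMEOUT: return V_ERR_AQ;
	default:                 return V_ERR_NOT_SUP;	/* e.g. ICE_ERR_HW_TABLE */
	}
}

int main(void)
{
	printf("AQ timeout -> %d, unknown -> %d\n",
	       to_virt(ICE_ERR_AQ_TIMEOUT), to_virt(ICE_ERR_HW_TABLE));
	return 0;
}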

/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
@@ -14,7 +45,7 @@
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
		    enum ice_status v_retval, u8 *msg, u16 msglen)
		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf = pf->vf;
@@ -104,7 +135,8 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
	ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
			 ICE_AQ_LINK_UP);

	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
			      sizeof(pfe), NULL);
}

@@ -572,6 +604,10 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt. We put an explicit 0 here to remind us that
	 * VF admin queue interrupts will go to VF MSI-X vector 0.
	 */
	wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

@@ -1043,7 +1079,7 @@ void ice_vc_notify_reset(struct ice_pf *pf)

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, ICE_SUCCESS,
	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

@@ -1066,8 +1102,9 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf)

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0,
			      (u8 *)&pfe, sizeof(pfe), NULL);
	ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
			      NULL);
}

/**
@@ -1288,8 +1325,8 @@ static void ice_vc_dis_vf(struct ice_vf *vf)
 * send msg to VF
 */
static int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, enum ice_status v_retval,
		      u8 *msg, u16 msglen)
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	enum ice_status aq_ret;
	struct ice_pf *pf;
@@ -1349,8 +1386,8 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;

	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, ICE_SUCCESS,
				     (u8 *)&info,
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
				     sizeof(struct virtchnl_version_info));
}

@@ -1363,15 +1400,15 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
 */
static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_resource *vfres = NULL;
	enum ice_status aq_ret = 0;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int len = 0;
	int ret;

	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

@@ -1379,7 +1416,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)

	vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = ICE_ERR_NO_MEMORY;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
@@ -1393,7 +1430,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

@@ -1447,7 +1484,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)

err:
	/* send the response back to the VF */
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret,
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
				    (u8 *)vfres, len);

	devm_kfree(&pf->pdev->dev, vfres);
@@ -1527,43 +1564,42 @@ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
 */
static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_rss_key *vrk =
		(struct virtchnl_rss_key *)msg;
	struct ice_vsi *vsi = NULL;
	struct ice_pf *pf = vf->pf;
	enum ice_status aq_ret;
	int ret;
	struct ice_vsi *vsi = NULL;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	ret = ice_set_rss(vsi, vrk->key, NULL, 0);
	aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
	if (ice_set_rss(vsi, vrk->key, NULL, 0))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret,
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
				     NULL, 0);
}
@@ -1577,41 +1613,40 @@ error_param:
static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
	struct ice_vsi *vsi = NULL;
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_pf *pf = vf->pf;
	enum ice_status aq_ret;
	int ret;
	struct ice_vsi *vsi = NULL;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	ret = ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE);
	aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
	if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret,
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
				     NULL, 0);
}

@@ -1624,26 +1659,26 @@ error_param:
 */
static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
		(struct virtchnl_queue_select *)msg;
	enum ice_status aq_ret = 0;
	struct ice_pf *pf = vf->pf;
	struct ice_eth_stats stats;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

@@ -1654,7 +1689,7 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
				     (u8 *)&stats, sizeof(stats));
}
@@ -1667,30 +1702,30 @@ error_param:
 */
static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
		(struct virtchnl_queue_select *)msg;
	enum ice_status aq_ret = 0;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!vqs->rx_queues && !vqs->tx_queues) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

@@ -1699,15 +1734,15 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
	 * programmed using ice_vsi_cfg_txqs
	 */
	if (ice_vsi_start_rx_rings(vsi))
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* Set flag to indicate that queues are enabled */
	if (!aq_ret)
	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
		set_bit(ICE_VF_STATE_ENA, vf->vf_states);

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret,
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
				     NULL, 0);
}

@@ -1721,31 +1756,31 @@ error_param:
 */
static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
		(struct virtchnl_queue_select *)msg;
	enum ice_status aq_ret = 0;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
	    !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!vqs->rx_queues && !vqs->tx_queues) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

@@ -1753,23 +1788,23 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
		dev_err(&vsi->back->pdev->dev,
			"Failed to stop tx rings on VSI %d\n",
			vsi->vsi_num);
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	}

	if (ice_vsi_stop_rx_rings(vsi)) {
		dev_err(&vsi->back->pdev->dev,
			"Failed to stop rx rings on VSI %d\n",
			vsi->vsi_num);
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	}

	/* Clear enabled queues flag */
	if (!aq_ret)
	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
		clear_bit(ICE_VF_STATE_ENA, vf->vf_states);

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret,
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
				     NULL, 0);
}
@@ -1782,18 +1817,18 @@ error_param:
 */
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_irq_map_info *irqmap_info =
		(struct virtchnl_irq_map_info *)msg;
	u16 vsi_id, vsi_q_id, vector_id;
	struct virtchnl_vector_map *map;
	struct ice_vsi *vsi = NULL;
	struct ice_pf *pf = vf->pf;
	enum ice_status aq_ret = 0;
	unsigned long qmap;
	int i;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

@@ -1805,13 +1840,13 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
		/* validate msg params */
		if (!(vector_id < pf->hw.func_caps.common_cap
		    .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = ICE_ERR_PARAM;
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		vsi = pf->vsi[vf->lan_vsi_idx];
		if (!vsi) {
			aq_ret = ICE_ERR_PARAM;
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

@@ -1821,7 +1856,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
			struct ice_q_vector *q_vector;

			if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
				aq_ret = ICE_ERR_PARAM;
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}
			q_vector = vsi->q_vectors[i];
@@ -1835,7 +1870,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
			struct ice_q_vector *q_vector;

			if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
				aq_ret = ICE_ERR_PARAM;
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}
			q_vector = vsi->q_vectors[i];
@@ -1849,7 +1884,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
	ice_vsi_cfg_msix(vsi);
error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret,
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
				     NULL, 0);
}
@@ -1862,27 +1897,26 @@ error_param:
 */
static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vsi_queue_config_info *qci =
		(struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *qpi;
	struct ice_pf *pf = vf->pf;
	enum ice_status aq_ret = 0;
	struct ice_vsi *vsi;
	int i;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = ICE_ERR_PARAM;
		goto error_param;
	}

@@ -1890,7 +1924,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
		dev_err(&pf->pdev->dev,
			"VF-%d requesting more than supported number of queues: %d\n",
			vf->vf_id, qci->num_queue_pairs);
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

@@ -1900,7 +1934,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
		    qpi->rxq.vsi_id != qci->vsi_id ||
		    qpi->rxq.queue_id != qpi->txq.queue_id ||
		    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
			aq_ret = ICE_ERR_PARAM;
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}
		/* copy Tx queue info from VF into VSI */
@@ -1910,13 +1944,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
		vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
		vsi->rx_rings[i]->count = qpi->rxq.ring_len;
		if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
			aq_ret = ICE_ERR_PARAM;
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}
		vsi->rx_buf_len = qpi->rxq.databuffer_size;
		if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
		    qpi->rxq.max_pkt_size < 64) {
			aq_ret = ICE_ERR_PARAM;
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}
		vsi->max_frame = qpi->rxq.max_pkt_size;
@@ -1931,14 +1965,12 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
	vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs;
	vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs;

	if (!ice_vsi_cfg_lan_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi))
		aq_ret = 0;
	else
		aq_ret = ICE_ERR_PARAM;
	if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret,
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
				     NULL, 0);
}
@@ -1980,11 +2012,11 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_ether_addr_list *al =
		(struct virtchnl_ether_addr_list *)msg;
	struct ice_pf *pf = vf->pf;
	enum virtchnl_ops vc_op;
	enum ice_status ret = 0;
	LIST_HEAD(mac_list);
	struct ice_vsi *vsi;
	int mac_count = 0;
@@ -1997,7 +2029,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
		ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

@@ -2009,12 +2041,13 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
		/* There is no need to let VF know about not being trusted
		 * to add more MAC addr, so we can just return success message.
		 */
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

@@ -2028,40 +2061,39 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
				 * already added. Just continue.
				 */
				dev_info(&pf->pdev->dev,
					 "mac %pM already set for VF %d\n",
					 "MAC %pM already set for VF %d\n",
					 maddr, vf->vf_id);
				continue;
			} else {
				/* VF can't remove dflt_lan_addr/bcast mac */
				dev_err(&pf->pdev->dev,
					"can't remove mac %pM for VF %d\n",
					"VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n",
					maddr, vf->vf_id);
				ret = ICE_ERR_PARAM;
				goto handle_mac_exit;
				continue;
			}
		}

		/* check for the invalid cases and bail if necessary */
		if (is_zero_ether_addr(maddr)) {
			dev_err(&pf->pdev->dev,
				"invalid mac %pM provided for VF %d\n",
				"invalid MAC %pM provided for VF %d\n",
				maddr, vf->vf_id);
			ret = ICE_ERR_PARAM;
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto handle_mac_exit;
		}

		if (is_unicast_ether_addr(maddr) &&
		    !ice_can_vf_change_mac(vf)) {
			dev_err(&pf->pdev->dev,
				"can't change unicast mac for untrusted VF %d\n",
				"can't change unicast MAC for untrusted VF %d\n",
				vf->vf_id);
			ret = ICE_ERR_PARAM;
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto handle_mac_exit;
		}

		/* get here if maddr is multicast or if VF can change mac */
		if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) {
			ret = ICE_ERR_NO_MEMORY;
			v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
			goto handle_mac_exit;
		}
		mac_count++;
@@ -2069,14 +2101,14 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)

	/* program the updated filter list */
	if (set)
		ret = ice_add_mac(&pf->hw, &mac_list);
		v_ret = ice_err_to_virt_err(ice_add_mac(&pf->hw, &mac_list));
	else
		ret = ice_remove_mac(&pf->hw, &mac_list);
		v_ret = ice_err_to_virt_err(ice_remove_mac(&pf->hw, &mac_list));

	if (ret) {
	if (v_ret) {
		dev_err(&pf->pdev->dev,
			"can't update mac filters for VF %d, error %d\n",
			vf->vf_id, ret);
			"can't update MAC filters for VF %d, error %d\n",
			vf->vf_id, v_ret);
	} else {
		if (set)
			vf->num_mac += mac_count;
@@ -2087,7 +2119,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
handle_mac_exit:
	ice_free_fltr_list(&pf->pdev->dev, &mac_list);
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, vc_op, ret, NULL, 0);
	return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
}

/**
@@ -2126,17 +2158,17 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
 */
static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_res_request *vfres =
		(struct virtchnl_vf_res_request *)msg;
	int req_queues = vfres->num_queue_pairs;
	enum ice_status aq_ret = 0;
	struct ice_pf *pf = vf->pf;
	int max_allowed_vf_queues;
	int tx_rx_queue_left;
	int cur_queues;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

@@ -2171,7 +2203,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
				     aq_ret, (u8 *)vfres, sizeof(*vfres));
				     v_ret, (u8 *)vfres, sizeof(*vfres));
}

/**
@@ -2268,9 +2300,9 @@ error_set_pvid:
 */
static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_filter_list *vfl =
		(struct virtchnl_vlan_filter_list *)msg;
	enum ice_status aq_ret = 0;
	struct ice_pf *pf = vf->pf;
	bool vlan_promisc = false;
	struct ice_vsi *vsi;
@@ -2280,12 +2312,12 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
	int i;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

@@ -2297,12 +2329,13 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
		/* There is no need to let VF know about being not trusted,
		 * so we can just return success message here
		 */
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
			aq_ret = ICE_ERR_PARAM;
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
@@ -2312,12 +2345,12 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (vsi->info.pvid) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

@@ -2325,7 +2358,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
		dev_err(&pf->pdev->dev,
			"%sable VLAN stripping failed for VSI %i\n",
			add_v ? "en" : "dis", vsi->vsi_num);
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

@@ -2338,7 +2371,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
			u16 vid = vfl->vlan_id[i];

			if (ice_vsi_add_vlan(vsi, vid)) {
				aq_ret = ICE_ERR_PARAM;
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

@@ -2347,7 +2380,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
			if (!vlan_promisc) {
				status = ice_cfg_vlan_pruning(vsi, true, false);
				if (status) {
					aq_ret = ICE_ERR_PARAM;
					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
					dev_err(&pf->pdev->dev,
						"Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
						vid, status);
@@ -2360,10 +2393,12 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)

				status = ice_set_vsi_promisc(hw, vsi->idx,
							     promisc_m, vid);
				if (status)
				if (status) {
					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
					dev_err(&pf->pdev->dev,
						"Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
						vid, status);
				}
			}
		}
	} else {
@@ -2374,7 +2409,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
			 * updating VLAN information
			 */
			if (ice_vsi_kill_vlan(vsi, vid)) {
				aq_ret = ICE_ERR_PARAM;
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

@@ -2396,10 +2431,10 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
error_param:
	/* send the response to the VF */
	if (add_v)
		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret,
		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
					     NULL, 0);
	else
		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret,
		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
					     NULL, 0);
}
@@ -2435,22 +2470,22 @@ static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
 */
static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
{
	enum ice_status aq_ret = 0;
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (ice_vsi_manage_vlan_stripping(vsi, true))
		aq_ret = ICE_ERR_AQ_ERROR;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
				     aq_ret, NULL, 0);
				     v_ret, NULL, 0);
}

/**
@@ -2461,27 +2496,27 @@ error_param:
 */
static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
{
	enum ice_status aq_ret = 0;
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = ICE_ERR_PARAM;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (ice_vsi_manage_vlan_stripping(vsi, false))
		aq_ret = ICE_ERR_AQ_ERROR;
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
				     aq_ret, NULL, 0);
				     v_ret, NULL, 0);
}

/**
@@ -2517,7 +2552,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
	/* Perform basic checks on the msg */
	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
	if (err) {
		if (err == VIRTCHNL_ERR_PARAM)
		if (err == VIRTCHNL_STATUS_ERR_PARAM)
			err = -EPERM;
		else
			err = -EINVAL;
@@ -2539,7 +2574,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)

error_handler:
	if (err) {
		ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_PARAM, NULL, 0);
		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
				      NULL, 0);
		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
			vf_id, v_opcode, msglen, err);
		return;
@@ -2602,7 +2638,8 @@ error_handler:
	default:
		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
			v_opcode, vf_id);
		err = ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_NOT_IMPL,
		err = ice_vc_send_msg_to_vf(vf, v_opcode,
					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
					    NULL, 0);
		break;
	}
@@ -2611,7 +2648,7 @@ error_handler:
		 * as it is busy with pending work.
		 */
		dev_info(&pf->pdev->dev,
			 "PF failed to honor VF %d, opcode %d\n, error %d\n",
			 "PF failed to honor VF %d, opcode %d, error %d\n",
			 vf_id, v_opcode, err);
	}
}
@@ -2771,7 +2808,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
	ether_addr_copy(vf->dflt_lan_addr.addr, mac);
	vf->pf_set_mac = true;
	netdev_info(netdev,
		    "mac on VF %d set to %pM\n. VF driver will be reinitialized\n",
		    "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
		    vf_id, mac);

	ice_vc_dis_vf(vf);
@@ -2874,7 +2911,8 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
	ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);

	/* Notify the VF of its new link state */
	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
			      sizeof(pfe), NULL);

	return 0;