Merge branch 'hns3-next'
Huazhong Tan says:

====================
net: hns3: some code optimizations & cleanups & bugfixes

[patch 01/12] fixes a TX timeout issue.
[patch 02/12 - 04/12] add patches related to the TM module.
[patch 05/12] fixes a compile warning.
[patch 06/12] adds Asym Pause support for autoneg.
[patch 07/12] optimizes the error handler for VF reset.
[patch 08/12] deals with the empty interrupt case.
[patch 09/12 - 12/12] add some cleanups & optimizations.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit fcd71efd5e
@@ -28,8 +28,7 @@
 #define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
 #define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
 
-static void hns3_clear_all_ring(struct hnae3_handle *h);
-static void hns3_force_clear_all_ring(struct hnae3_handle *h);
+static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
 static void hns3_remove_hw_addr(struct net_device *netdev);
 
 static const char hns3_driver_name[] = "hns3";
@@ -463,6 +462,20 @@ static int hns3_nic_net_open(struct net_device *netdev)
 	return 0;
 }
 
+static void hns3_reset_tx_queue(struct hnae3_handle *h)
+{
+	struct net_device *ndev = h->kinfo.netdev;
+	struct hns3_nic_priv *priv = netdev_priv(ndev);
+	struct netdev_queue *dev_queue;
+	u32 i;
+
+	for (i = 0; i < h->kinfo.num_tqps; i++) {
+		dev_queue = netdev_get_tx_queue(ndev,
+						priv->ring_data[i].queue_index);
+		netdev_tx_reset_queue(dev_queue);
+	}
+}
+
 static void hns3_nic_net_down(struct net_device *netdev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -493,7 +506,9 @@ static void hns3_nic_net_down(struct net_device *netdev)
 	 * to disable the ring through firmware when downing the netdev.
 	 */
 	if (!hns3_nic_resetting(netdev))
-		hns3_clear_all_ring(priv->ae_handle);
+		hns3_clear_all_ring(priv->ae_handle, false);
+
+	hns3_reset_tx_queue(priv->ae_handle);
 }
 
 static int hns3_nic_net_stop(struct net_device *netdev)
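Patch 01/12's TX timeout fix hinges on the new hns3_reset_tx_queue() helper: clearing the hardware rings alone leaves the stack's byte-queue-limit (BQL) accounting believing bytes are still in flight, so the queue stays stopped and the watchdog fires. A minimal sketch of the BQL bookkeeping that netdev_tx_reset_queue() unwinds; the example_* functions are illustrative, only the netdev_tx_* calls are real kernel API:

/* Sketch: BQL pairing around a HW TX ring. netdev_tx_reset_queue()
 * must be called whenever the ring is purged without completions,
 * otherwise dql still counts the purged bytes as in flight.
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, skb->queue_mapping);

	/* ... post skb to the HW ring ... */
	netdev_tx_sent_queue(txq, skb->len);	/* bytes now "in flight" */
	return NETDEV_TX_OK;
}

static void example_tx_clean(struct net_device *dev, unsigned int idx,
			     unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, idx);

	netdev_tx_completed_queue(txq, pkts, bytes);	/* bytes retired */
}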
@@ -1475,12 +1490,10 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
 			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			rx_bytes += ring->stats.rx_bytes;
 			rx_pkts += ring->stats.rx_pkts;
-			rx_drop += ring->stats.non_vld_descs;
 			rx_drop += ring->stats.l2_err;
-			rx_errors += ring->stats.non_vld_descs;
 			rx_errors += ring->stats.l2_err;
 			rx_errors += ring->stats.l3l4_csum_err;
 			rx_crc_errors += ring->stats.l2_err;
 			rx_crc_errors += ring->stats.l3l4_csum_err;
 			rx_multicast += ring->stats.rx_multicast;
 			rx_length_errors += ring->stats.err_pkt_len;
 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
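For reference, this loop follows the kernel's u64_stats seqcount pattern: the begin/retry pair detects a writer racing with the read and re-snapshots, keeping 64-bit counters consistent on 32-bit machines without locking. A hedged sketch (helper name illustrative):

/* Sketch: lockless 64-bit counter snapshot. If the writer updated
 * the stats between begin and retry, the sequence count changes and
 * the read is redone.
 */
static void example_read_rx_stats(struct hns3_enet_ring *ring,
				  u64 *bytes, u64 *pkts)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		*bytes = ring->stats.rx_bytes;
		*pkts = ring->stats.rx_pkts;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}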
@@ -2754,14 +2767,6 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
 					       vlan_tag);
 	}
 
-	if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.non_vld_descs++;
-		u64_stats_update_end(&ring->syncp);
-
-		return -EINVAL;
-	}
-
 	if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
 				  BIT(HNS3_RXD_L2E_B))))) {
 		u64_stats_update_begin(&ring->syncp);
@@ -3921,7 +3926,7 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
 
 	hns3_del_all_fd_rules(netdev, true);
 
-	hns3_force_clear_all_ring(handle);
+	hns3_clear_all_ring(handle, true);
 
 	hns3_nic_uninit_vector_data(priv);
 
@@ -4090,43 +4095,26 @@ static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
 	}
 }
 
-static void hns3_force_clear_all_ring(struct hnae3_handle *h)
-{
-	struct net_device *ndev = h->kinfo.netdev;
-	struct hns3_nic_priv *priv = netdev_priv(ndev);
-	struct hns3_enet_ring *ring;
-	u32 i;
-
-	for (i = 0; i < h->kinfo.num_tqps; i++) {
-		ring = priv->ring_data[i].ring;
-		hns3_clear_tx_ring(ring);
-
-		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
-		hns3_force_clear_rx_ring(ring);
-	}
-}
-
-static void hns3_clear_all_ring(struct hnae3_handle *h)
+static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
 {
 	struct net_device *ndev = h->kinfo.netdev;
 	struct hns3_nic_priv *priv = netdev_priv(ndev);
 	u32 i;
 
 	for (i = 0; i < h->kinfo.num_tqps; i++) {
-		struct netdev_queue *dev_queue;
 		struct hns3_enet_ring *ring;
 
 		ring = priv->ring_data[i].ring;
 		hns3_clear_tx_ring(ring);
-		dev_queue = netdev_get_tx_queue(ndev,
-						priv->ring_data[i].queue_index);
-		netdev_tx_reset_queue(dev_queue);
 
 		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
 		/* Continue to clear other rings even if clearing some
 		 * rings failed.
 		 */
-		hns3_clear_rx_ring(ring);
+		if (force)
+			hns3_force_clear_rx_ring(ring);
+		else
+			hns3_clear_rx_ring(ring);
 	}
 }
 
@@ -4331,8 +4319,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
 		return 0;
 	}
 
-	hns3_clear_all_ring(handle);
-	hns3_force_clear_all_ring(handle);
+	hns3_clear_all_ring(handle, true);
+	hns3_reset_tx_queue(priv->ae_handle);
 
 	hns3_nic_uninit_vector_data(priv);
 
@@ -384,7 +384,6 @@ struct ring_stats {
 			u64 rx_err_cnt;
 			u64 reuse_pg_cnt;
 			u64 err_pkt_len;
-			u64 non_vld_descs;
 			u64 err_bd_num;
 			u64 l2_err;
 			u64 l3l4_csum_err;
@@ -446,25 +445,6 @@ enum hns3_flow_level_range {
 	HNS3_FLOW_ULTRA = 3,
 };
 
-enum hns3_link_mode_bits {
-	HNS3_LM_FIBRE_BIT = BIT(0),
-	HNS3_LM_AUTONEG_BIT = BIT(1),
-	HNS3_LM_TP_BIT = BIT(2),
-	HNS3_LM_PAUSE_BIT = BIT(3),
-	HNS3_LM_BACKPLANE_BIT = BIT(4),
-	HNS3_LM_10BASET_HALF_BIT = BIT(5),
-	HNS3_LM_10BASET_FULL_BIT = BIT(6),
-	HNS3_LM_100BASET_HALF_BIT = BIT(7),
-	HNS3_LM_100BASET_FULL_BIT = BIT(8),
-	HNS3_LM_1000BASET_FULL_BIT = BIT(9),
-	HNS3_LM_10000BASEKR_FULL_BIT = BIT(10),
-	HNS3_LM_25000BASEKR_FULL_BIT = BIT(11),
-	HNS3_LM_40000BASELR4_FULL_BIT = BIT(12),
-	HNS3_LM_50000BASEKR2_FULL_BIT = BIT(13),
-	HNS3_LM_100000BASEKR4_FULL_BIT = BIT(14),
-	HNS3_LM_COUNT = 15
-};
-
 #define HNS3_INT_GL_MAX			0x1FE0
 #define HNS3_INT_GL_50K			0x0014
 #define HNS3_INT_GL_20K			0x0032
@@ -44,7 +44,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
 	HNS3_TQP_STAT("errors", rx_err_cnt),
 	HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
 	HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
-	HNS3_TQP_STAT("non_vld_descs", non_vld_descs),
 	HNS3_TQP_STAT("err_bd_num", err_bd_num),
 	HNS3_TQP_STAT("l2_err", l2_err),
 	HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
@@ -232,6 +232,7 @@ static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
 int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 {
 	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
+	struct hclge_cmq_ring *csq = &hw->cmq.csq;
 	struct hclge_desc *desc_to_use;
 	bool complete = false;
 	u32 timeout = 0;
@@ -241,8 +242,16 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 
 	spin_lock_bh(&hw->cmq.csq.lock);
 
-	if (num > hclge_ring_space(&hw->cmq.csq) ||
-	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+	if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
 		spin_unlock_bh(&hw->cmq.csq.lock);
 		return -EBUSY;
 	}
+
+	if (num > hclge_ring_space(&hw->cmq.csq)) {
+		/* If CMDQ ring is full, SW HEAD and HW HEAD may be different,
+		 * need update the SW HEAD pointer csq->next_to_clean
+		 */
+		csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
+		spin_unlock_bh(&hw->cmq.csq.lock);
+		return -EBUSY;
+	}
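The new branch distinguishes "commands disabled" from "ring full" because ring space is computed purely from software indices. A sketch of that arithmetic under the usual descriptor-ring convention (helper name illustrative, not the driver's hclge_ring_space()):

/* Free slots in a ring of desc_num descriptors, tracked by a SW
 * tail (next_to_use) and SW head (next_to_clean). If HW has consumed
 * entries but next_to_clean was never advanced, this under-reports
 * space, which is why the driver resyncs next_to_clean from the HW
 * head register before failing with -EBUSY.
 */
static int example_ring_space(int next_to_use, int next_to_clean, int desc_num)
{
	int used = (next_to_use - next_to_clean + desc_num) % desc_num;

	return desc_num - used - 1;	/* one slot kept empty */
}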
@@ -280,7 +289,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 	}
 
 	if (!complete) {
-		retval = -EAGAIN;
+		retval = -EBADE;
 	} else {
 		retval = hclge_cmd_check_retval(hw, desc, num, ntc);
 	}
@@ -884,7 +884,7 @@ struct hclge_serdes_lb_cmd {
 #define HCLGE_TOTAL_PKT_BUF		0x108000 /* 1.03125M bytes */
 #define HCLGE_DEFAULT_DV		0xA000	 /* 40k byte */
 #define HCLGE_DEFAULT_NON_DCB_DV	0x7800	 /* 30K byte */
-#define HCLGE_NON_DCB_ADDITIONAL_BUF	0x200	 /* 512 byte */
+#define HCLGE_NON_DCB_ADDITIONAL_BUF	0x1400	 /* 5120 byte */
 
 #define HCLGE_TYPE_CRQ			0
 #define HCLGE_TYPE_CSQ			1
@@ -325,6 +325,8 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
 	hdev->tm_info.hw_pfc_map = pfc_map;
 	hdev->tm_info.pfc_en = pfc->pfc_en;
 
+	hclge_tm_pfc_info_update(hdev);
+
 	return hclge_pause_setup_hw(hdev, false);
 }
 
@@ -30,6 +30,9 @@
 #define HCLGE_BUF_SIZE_UNIT	256U
 #define HCLGE_BUF_MUL_BY	2
 #define HCLGE_BUF_DIV_BY	2
+#define NEED_RESERVE_TC_NUM	2
+#define BUF_MAX_PERCENT		100
+#define BUF_RESERVE_PERCENT	90
 
 #define HCLGE_RESET_MAX_FAIL_CNT	5
 
@@ -561,8 +564,7 @@ static u8 *hclge_comm_get_strings(u32 stringset,
 		return buff;
 
 	for (i = 0; i < size; i++) {
-		snprintf(buff, ETH_GSTRING_LEN,
-			 strs[i].desc);
+		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
 		buff = buff + ETH_GSTRING_LEN;
 	}
 
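This one-line change is the compile-warning fix from patch 05/12: a non-literal format string trips -Wformat-security and would treat any '%' in the description as a conversion specifier. Side-by-side illustration using the same identifiers:

/* A non-literal format string: the compiler cannot type-check it and
 * any '%' in strs[i].desc would be parsed as a conversion.
 */
snprintf(buff, ETH_GSTRING_LEN, strs[i].desc);		/* -Wformat-security */

/* Literal "%s" format: the description is copied verbatim, warning-free. */
snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);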
@@ -1059,6 +1061,7 @@ static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
 }
 
 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
@@ -1694,10 +1697,14 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
 	}
 
 	if (hnae3_dev_dcb_supported(hdev)) {
+		hi_thrd = shared_buf - hdev->dv_buf_size;
+
+		if (tc_num <= NEED_RESERVE_TC_NUM)
+			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
+				  / BUF_MAX_PERCENT;
+
 		if (tc_num)
-			hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
-		else
-			hi_thrd = shared_buf - hdev->dv_buf_size;
+			hi_thrd = hi_thrd / tc_num;
 
 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
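A worked example of the reordered threshold math, under assumed inputs (the buffer sizes below are assumptions for illustration, not values from the patch):

/* Assumed: shared_buf = 0x20000 (131072), dv_buf_size = 0x7800 (30720),
 * tc_num = 1, aligned_mps = 1536.
 *
 *   hi_thrd = 131072 - 30720                  = 100352
 *   tc_num <= NEED_RESERVE_TC_NUM: * 90 / 100 = 90316
 *   / tc_num (1)                              = 90316
 *   max_t(u32, 90316, 2 * 1536)               = 90316
 *   rounddown(90316, 256)                     = 90112
 */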
@@ -1837,6 +1844,55 @@ static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
 }
 
+static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
+				      struct hclge_pkt_buf_alloc *buf_alloc)
+{
+#define COMPENSATE_BUFFER	0x3C00
+#define COMPENSATE_HALF_MPS_NUM	5
+#define PRIV_WL_GAP	0x1800
+
+	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
+	u32 tc_num = hclge_get_tc_num(hdev);
+	u32 half_mps = hdev->mps >> 1;
+	u32 min_rx_priv;
+	unsigned int i;
+
+	if (tc_num)
+		rx_priv = rx_priv / tc_num;
+
+	if (tc_num <= NEED_RESERVE_TC_NUM)
+		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
+
+	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
+		      COMPENSATE_HALF_MPS_NUM * half_mps;
+	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
+	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
+
+	if (rx_priv < min_rx_priv)
+		return false;
+
+	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+
+		priv->enable = 0;
+		priv->wl.low = 0;
+		priv->wl.high = 0;
+		priv->buf_size = 0;
+
+		if (!(hdev->hw_tc_map & BIT(i)))
+			continue;
+
+		priv->enable = 1;
+		priv->buf_size = rx_priv;
+		priv->wl.high = rx_priv - hdev->dv_buf_size;
+		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
+	}
+
+	buf_alloc->s_buf.buf_size = 0;
+
+	return true;
+}
+
 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
  * @hdev: pointer to struct hclge_dev
  * @buf_alloc: pointer to buffer calculation data
@@ -1856,6 +1912,9 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
 		return 0;
 	}
 
+	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
+		return 0;
+
 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
 		return 0;
 
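The floor the private-buffer-only strategy enforces can be made concrete with a worked example; the mps value is an assumption, the constants come from the function above:

/* Assumed mps = 1536 and the non-DCB dv_buf_size of 0x7800 (30720):
 *   min_rx_priv = dv_buf_size + COMPENSATE_BUFFER
 *               + COMPENSATE_HALF_MPS_NUM * (mps / 2)
 *               = 30720 + 15360 + 5 * 768 = 49920
 *   round_up(49920, 256) = 49920 (already aligned)
 *
 * Each enabled TC then gets rx_priv bytes with watermarks:
 *   wl.high = rx_priv - dv_buf_size
 *   wl.low  = wl.high - PRIV_WL_GAP (0x1800)
 * If the evenly split rx_priv falls below min_rx_priv, the function
 * bails out and the shared-buffer strategies run instead.
 */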
@@ -2724,8 +2783,9 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 
 	/* check for vector0 msix event source */
 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
-		dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
-			msix_src_reg);
+		dev_info(&hdev->pdev->dev, "received event 0x%x\n",
+			 msix_src_reg);
 		*clearval = msix_src_reg;
 		return HCLGE_VECTOR0_EVENT_ERR;
 	}
@@ -2737,8 +2797,11 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 	}
 
 	/* print other vector0 event source */
-	dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
-		cmdq_src_reg, msix_src_reg);
+	dev_info(&hdev->pdev->dev,
+		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
+		 cmdq_src_reg, msix_src_reg);
+
+	*clearval = msix_src_reg;
 
 	return HCLGE_VECTOR0_EVENT_OTHER;
 }
@@ -2817,7 +2880,8 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
 	}
 
 	/* clear the source of interrupt if it is not cause by reset */
-	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
+	if (!clearval ||
+	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
 		hclge_clear_event_cause(hdev, event_cause, clearval);
 		hclge_enable_vector(&hdev->misc_vector, true);
 	}
@@ -224,6 +224,13 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle)
 	linkmode_and(phydev->supported, phydev->supported, mask);
 	linkmode_copy(phydev->advertising, phydev->supported);
 
+	/* supported flag is Pause and Asym Pause, but default advertising
+	 * should be rx on, tx on, so need clear Asym Pause in advertising
+	 * flag
+	 */
+	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+			   phydev->advertising);
+
 	return 0;
 }
 
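The comment in this hunk follows IEEE 802.3 pause resolution: advertising symmetric rx+tx flow control means setting Pause and clearing Asym_Pause. A sketch of the general mapping, mirroring the kernel's linkmode_set_pause() convention (example function name is illustrative):

/* ethtool pause config -> advertised link modes:
 *   Pause      bit = rx_pause
 *   Asym_Pause bit = rx_pause ^ tx_pause
 * so rx on / tx on advertises Pause only, which is why Asym_Pause is
 * cleared from the default advertising mask above.
 */
static void example_pause_to_linkmode(struct phy_device *phydev,
				      bool rx_pause, bool tx_pause)
{
	linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			 phydev->advertising, rx_pause);
	linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			 phydev->advertising, rx_pause ^ tx_pause);
}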
@@ -58,7 +58,8 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
 	u32 tick;
 
 	/* Calc tick */
-	if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
+	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
+	    ir > HCLGE_ETHER_MAX_RATE)
 		return -EINVAL;
 
 	tick = tick_array[shaper_level];
@@ -597,8 +598,10 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
 		hdev->tm_info.prio_tc[i] =
 			(i >= hdev->tm_info.num_tc) ? 0 : i;
 
-	/* DCB is enabled if we have more than 1 TC */
-	if (hdev->tm_info.num_tc > 1)
+	/* DCB is enabled if we have more than 1 TC or pfc_en is
+	 * non-zero.
+	 */
+	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
 		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
 	else
 		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
@@ -1136,6 +1139,9 @@ static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
 	int ret;
 	u8 i;
 
+	if (vport->vport_id >= HNAE3_MAX_TC)
+		return -EINVAL;
+
 	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
 	if (ret)
 		return ret;
@@ -1388,6 +1394,19 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
 	hclge_tm_schd_info_init(hdev);
 }
 
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
+{
+	/* DCB is enabled if we have more than 1 TC or pfc_en is
+	 * non-zero.
+	 */
+	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
+		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
+	else
+		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
+
+	hclge_pfc_info_init(hdev);
+}
+
 int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
 {
 	int ret;
@@ -12,7 +12,7 @@
 
 #define HCLGE_TM_PORT_BASE_MODE_MSK	BIT(0)
 
-#define HCLGE_DEFAULT_PAUSE_TRANS_GAP	0xFF
+#define HCLGE_DEFAULT_PAUSE_TRANS_GAP	0x7F
 #define HCLGE_DEFAULT_PAUSE_TRANS_TIME	0xFFFF
 
 /* SP or DWRR */
@@ -147,6 +147,7 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init);
 int hclge_tm_schd_setup_hw(struct hclge_dev *hdev);
 void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
 void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev);
 int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
 int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
 int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
@@ -188,6 +188,7 @@ void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
 int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
 {
 	struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
 	struct hclgevf_desc *desc_to_use;
 	bool complete = false;
 	u32 timeout = 0;
@@ -199,8 +200,17 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
 
 	spin_lock_bh(&hw->cmq.csq.lock);
 
-	if (num > hclgevf_ring_space(&hw->cmq.csq) ||
-	    test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
 		spin_unlock_bh(&hw->cmq.csq.lock);
 		return -EBUSY;
 	}
+
+	if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+		/* If CMDQ ring is full, SW HEAD and HW HEAD may be different,
+		 * need update the SW HEAD pointer csq->next_to_clean
+		 */
+		csq->next_to_clean = hclgevf_read_dev(hw,
+						      HCLGEVF_NIC_CSQ_HEAD_REG);
+		spin_unlock_bh(&hw->cmq.csq.lock);
+		return -EBUSY;
+	}
|
||||
}
|
||||
|
||||
if (!complete)
|
||||
status = -EAGAIN;
|
||||
status = -EBADE;
|
||||
|
||||
/* Clean the command send queue */
|
||||
handle = hclgevf_cmd_csq_clean(hw);
|
||||
if (handle != num) {
|
||||
if (handle != num)
|
||||
dev_warn(&hdev->pdev->dev,
|
||||
"cleaned %d, need to clean %d\n", handle, num);
|
||||
}
|
||||
|
||||
spin_unlock_bh(&hw->cmq.csq.lock);
|
||||
|
||||
|
@@ -11,6 +11,8 @@
 
 #define HCLGEVF_NAME	"hclgevf"
 
+#define HCLGEVF_RESET_MAX_FAIL_CNT	5
+
 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
 static struct hnae3_ae_algo ae_algovf;
 
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
|
||||
{
|
||||
hdev->rst_stats.rst_fail_cnt++;
|
||||
dev_err(&hdev->pdev->dev, "failed to reset VF(%d)\n",
|
||||
hdev->rst_stats.rst_fail_cnt);
|
||||
|
||||
if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
|
||||
set_bit(hdev->reset_type, &hdev->reset_pending);
|
||||
|
||||
if (hclgevf_is_reset_pending(hdev)) {
|
||||
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
|
||||
hclgevf_reset_task_schedule(hdev);
|
||||
} else {
|
||||
hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
|
||||
HCLGEVF_NIC_CMQ_ENABLE);
|
||||
}
|
||||
}
|
||||
|
||||
static int hclgevf_reset(struct hclgevf_dev *hdev)
|
||||
{
|
||||
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
|
||||
@@ -1537,19 +1557,13 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
 	hdev->last_reset_time = jiffies;
 	ae_dev->reset_type = HNAE3_NONE_RESET;
 	hdev->rst_stats.rst_done_cnt++;
+	hdev->rst_stats.rst_fail_cnt = 0;
 
 	return ret;
 err_reset_lock:
 	rtnl_unlock();
 err_reset:
-	/* When VF reset failed, only the higher level reset asserted by PF
-	 * can restore it, so re-initialize the command queue to receive
-	 * this higher reset event.
-	 */
-	hclgevf_cmd_init(hdev);
-	dev_err(&hdev->pdev->dev, "failed to reset VF\n");
-	if (hclgevf_is_reset_pending(hdev))
-		hclgevf_reset_task_schedule(hdev);
+	hclgevf_reset_err_handle(hdev);
 
 	return ret;
 }
|
@ -226,6 +226,7 @@ struct hclgevf_rst_stats {
|
||||
u32 vf_rst_cnt; /* the number of VF reset */
|
||||
u32 rst_done_cnt; /* the number of reset completed */
|
||||
u32 hw_rst_done_cnt; /* the number of HW reset completed */
|
||||
u32 rst_fail_cnt; /* the number of VF reset fail */
|
||||
};
|
||||
|
||||
struct hclgevf_dev {
|
||||
|