bnxt_en: Remove PCIe non-counters from ethtool statistics

Remove the PCIe non-counters from the ethtool statistics display, as
they are not simple counters but a register dump.  The next few
patches will add logic to detect counter roll-over, and that logic
will not work with these PCIe non-counters.
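
For illustration only (not part of this patch; the helper below and its
name are made up), roll-over handling of the kind planned in the next
patches treats each hardware counter as a value that is masked to its
width and only ever increases, for example:

	#include <stdint.h>

	/*
	 * Hypothetical sketch of counter accumulation with roll-over
	 * detection.  "cur < prev" only means "the counter wrapped" if the
	 * value is monotonically increasing -- which the PCIe register-dump
	 * values (rates, histograms, signal-integrity registers) are not.
	 */
	static uint64_t accumulate_counter(uint64_t total, uint64_t prev,
					   uint64_t cur, uint64_t mask)
	{
		if (cur < prev)		/* counter wrapped since last read */
			total += mask + 1;
		return total + cur - prev;
	}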

A follow-up patch will expose the PCIe information via the ethtool
register dump.
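
As a rough sketch of that direction (illustrative only; the callback
bodies below and the reuse of struct pcie_ctx_hw_stats as the dump
layout are assumptions, not the actual follow-up patch), a driver
exposes a register blob to "ethtool -d" through the get_regs_len() and
get_regs() callbacks in its struct ethtool_ops:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	/*
	 * Hypothetical callbacks: report the size of the dump, then copy the
	 * most recent firmware-provided PCIe snapshot into the buffer that
	 * "ethtool -d" prints.
	 */
	static int bnxt_get_regs_len(struct net_device *dev)
	{
		return sizeof(struct pcie_ctx_hw_stats);
	}

	static void bnxt_get_regs(struct net_device *dev,
				  struct ethtool_regs *regs, void *buf)
	{
		regs->version = 0;	/* lets user space tell layouts apart */
		/* memcpy() the latest PCIe stats snapshot into buf here */
	}

These would then be wired up as .get_regs_len and .get_regs in the
driver's ethtool_ops.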

Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>


@@ -3738,12 +3738,6 @@ static void bnxt_free_port_stats(struct bnxt *bp)
 				  bp->hw_rx_port_stats_ext_map);
 		bp->hw_rx_port_stats_ext = NULL;
 	}
-
-	if (bp->hw_pcie_stats) {
-		dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
-				  bp->hw_pcie_stats, bp->hw_pcie_stats_map);
-		bp->hw_pcie_stats = NULL;
-	}
 }
 
 static void bnxt_free_ring_stats(struct bnxt *bp)
@@ -3826,7 +3820,7 @@ alloc_ext_stats:
 
 alloc_tx_ext_stats:
 	if (bp->hw_tx_port_stats_ext)
-		goto alloc_pcie_stats;
+		return 0;
 
 	if (bp->hwrm_spec_code >= 0x10902 ||
 	    (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
@@ -3837,19 +3831,6 @@ alloc_tx_ext_stats:
 					   GFP_KERNEL);
 	}
 	bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
-
-alloc_pcie_stats:
-	if (bp->hw_pcie_stats ||
-	    !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
-		return 0;
-
-	bp->hw_pcie_stats =
-		dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
-				   &bp->hw_pcie_stats_map, GFP_KERNEL);
-	if (!bp->hw_pcie_stats)
-		return 0;
-
-	bp->flags |= BNXT_FLAG_PCIE_STATS;
 	return 0;
 }
 
@@ -7574,19 +7555,6 @@ qstats_done:
 	return rc;
 }
 
-static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
-{
-	struct hwrm_pcie_qstats_input req = {0};
-
-	if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
-		return 0;
-
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
-	req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
-	req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-}
-
 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
 {
 	if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
@@ -10466,7 +10434,6 @@ static void bnxt_sp_task(struct work_struct *work)
 	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
 		bnxt_hwrm_port_qstats(bp);
 		bnxt_hwrm_port_qstats_ext(bp);
-		bnxt_hwrm_pcie_qstats(bp);
 	}
 
 	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {


@@ -1566,7 +1566,6 @@ struct bnxt {
 	#define BNXT_FLAG_DIM		0x2000000
 	#define BNXT_FLAG_ROCE_MIRROR_CAP	0x4000000
 	#define BNXT_FLAG_PORT_STATS_EXT	0x10000000
-	#define BNXT_FLAG_PCIE_STATS	0x40000000
 
 	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |		\
 					    BNXT_FLAG_RFS |		\
@@ -1737,12 +1736,10 @@ struct bnxt {
 	struct tx_port_stats	*hw_tx_port_stats;
 	struct rx_port_stats_ext	*hw_rx_port_stats_ext;
 	struct tx_port_stats_ext	*hw_tx_port_stats_ext;
-	struct pcie_ctx_hw_stats	*hw_pcie_stats;
 	dma_addr_t		hw_rx_port_stats_map;
 	dma_addr_t		hw_tx_port_stats_map;
 	dma_addr_t		hw_rx_port_stats_ext_map;
 	dma_addr_t		hw_tx_port_stats_ext_map;
-	dma_addr_t		hw_pcie_stats_map;
 	int			hw_port_stats_size;
 	u16			fw_rx_stats_ext_size;
 	u16			fw_tx_stats_ext_size;
@@ -1898,9 +1895,6 @@ struct bnxt {
 #define BNXT_TX_STATS_EXT_OFFSET(counter)		\
 	(offsetof(struct tx_port_stats_ext, counter) / 8)
 
-#define BNXT_PCIE_STATS_OFFSET(counter)			\
-	(offsetof(struct pcie_ctx_hw_stats, counter) / 8)
-
 #define BNXT_HW_FEATURE_VLAN_ALL_RX				\
 	(NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
 #define BNXT_HW_FEATURE_VLAN_ALL_TX				\


@@ -293,9 +293,6 @@ static const char * const bnxt_cmn_sw_stats_str[] = {
 	BNXT_TX_STATS_PRI_ENTRY(counter, 6),		\
 	BNXT_TX_STATS_PRI_ENTRY(counter, 7)
 
-#define BNXT_PCIE_STATS_ENTRY(counter)	\
-	{ BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) }
-
 enum {
 	RX_TOTAL_DISCARDS,
 	TX_TOTAL_DISCARDS,
@@ -454,24 +451,6 @@ static const struct {
 	BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
 };
 
-static const struct {
-	long offset;
-	char string[ETH_GSTRING_LEN];
-} bnxt_pcie_stats_arr[] = {
-	BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity),
-	BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity),
-	BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity),
-	BNXT_PCIE_STATS_ENTRY(pcie_link_integrity),
-	BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate),
-	BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate),
-	BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics),
-	BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics),
-	BNXT_PCIE_STATS_ENTRY(pcie_equalization_time),
-	BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]),
-	BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]),
-	BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram),
-};
-
 #define BNXT_NUM_SW_FUNC_STATS	ARRAY_SIZE(bnxt_sw_func_stats)
 #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
 #define BNXT_NUM_STATS_PRI			\
@@ -479,7 +458,6 @@ static const struct {
 	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +	\
 	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +	\
 	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
-#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
 
 static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
 {
@@ -526,9 +504,6 @@ static int bnxt_get_num_stats(struct bnxt *bp)
 		num_stats += BNXT_NUM_STATS_PRI;
 	}
 
-	if (bp->flags & BNXT_FLAG_PCIE_STATS)
-		num_stats += BNXT_NUM_PCIE_STATS;
-
 	return num_stats;
 }
 
@@ -674,14 +649,6 @@ skip_ring_stats:
 			}
 		}
 	}
-	if (bp->flags & BNXT_FLAG_PCIE_STATS) {
-		__le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats;
-
-		for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) {
-			buf[j] = le64_to_cpu(*(pcie_stats +
-					       bnxt_pcie_stats_arr[i].offset));
-		}
-	}
 }
 
 static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -782,12 +749,6 @@ skip_tpa_stats:
 				}
 			}
 		}
-		if (bp->flags & BNXT_FLAG_PCIE_STATS) {
-			for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) {
-				strcpy(buf, bnxt_pcie_stats_arr[i].string);
-				buf += ETH_GSTRING_LEN;
-			}
-		}
 		break;
 	case ETH_SS_TEST:
 		if (bp->num_tests)