Merge branch 'bnxt_en-dcbnl'
Michael Chan says:

====================
bnxt_en: Add DCBNL support.

This series adds DCBNL operations to support host-based IEEE DCBX.

v2: Updated to the latest firmware interface spec.

David, please consider this series for net-next.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit f83e83037c
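For context (illustrative only, not part of the commit): the series accepts ETS and PFC configuration from user space through the new dcbnl ops and validates it before programming the firmware queues. The stand-alone sketch below mirrors the kind of sanity check the new bnxt_ets_validate() performs; check_ets(), MAX_TCS and the TSA constants are invented for the example, assuming 8 traffic classes and the usual rule that ETS bandwidth shares must not exceed 100%.

/* Stand-alone sketch of the ETS sanity rules applied before programming
 * the NIC; illustrative only, not driver code.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_TCS    8        /* stands in for IEEE_8021QAZ_MAX_TCS */
#define TSA_STRICT 0        /* strict-priority scheduling */
#define TSA_ETS    2        /* weighted ETS scheduling */

struct ets_cfg {
    uint8_t prio_tc[MAX_TCS];   /* priority -> traffic class */
    uint8_t tc_tx_bw[MAX_TCS];  /* per-TC bandwidth share (%) */
    uint8_t tc_tsa[MAX_TCS];    /* per-TC scheduling algorithm */
};

/* Returns 0 if the configuration fits a device with max_tc traffic
 * classes, -1 otherwise.
 */
static int check_ets(const struct ets_cfg *ets, uint8_t max_tc)
{
    int total_ets_bw = 0;

    for (int i = 0; i < MAX_TCS; i++) {
        if (ets->prio_tc[i] >= max_tc)
            return -1;              /* priority maps past the last TC */
        if (ets->tc_tsa[i] == TSA_ETS)
            total_ets_bw += ets->tc_tx_bw[i];
    }
    return total_ets_bw > 100 ? -1 : 0;  /* ETS shares cannot exceed 100% */
}

int main(void)
{
    struct ets_cfg ets = {
        .prio_tc  = { 0, 0, 0, 1, 0, 0, 0, 0 },  /* priority 3 -> TC 1 */
        .tc_tx_bw = { 60, 40 },
        .tc_tsa   = { TSA_ETS, TSA_ETS },
    };

    printf("valid: %s\n", check_ets(&ets, 2) == 0 ? "yes" : "no");
    return 0;
}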
drivers/net/ethernet/broadcom/Kconfig
@@ -203,4 +203,14 @@ config BNXT_SRIOV
	  Virtualization support in the NetXtreme-C/E products. This
	  allows for virtual function acceleration in virtual environments.

+config BNXT_DCB
+	bool "Data Center Bridging (DCB) Support"
+	default n
+	depends on BNXT && DCB
+	---help---
+	  Say Y here if you want to use Data Center Bridging (DCB) in the
+	  driver.
+
+	  If unsure, say N.
+
endif # NET_VENDOR_BROADCOM
drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_BNXT) += bnxt_en.o

-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o
drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -54,6 +54,7 @@
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
+#include "bnxt_dcb.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

@@ -186,11 +187,11 @@ static const u16 bnxt_vf_req_snif[] = {
};

static const u16 bnxt_async_events_arr[] = {
-	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
-	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
-	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
-	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
-	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
+	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
+	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
+	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
+	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
+	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static bool bnxt_vf_pciid(enum board_idx idx)
@@ -1476,8 +1477,8 @@ next_rx_no_prod:
}

#define BNXT_GET_EVENT_PORT(data)	\
-	((data) &			\
-	 HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
+	((data) &			\
+	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)

static int bnxt_async_event_process(struct bnxt *bp,
				    struct hwrm_async_event_cmpl *cmpl)
@@ -1486,7 +1487,7 @@ static int bnxt_async_event_process(struct bnxt *bp,

	/* TODO CHIMP_FW: Define event id's for link change, error etc */
	switch (event_id) {
-	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
+	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		struct bnxt_link_info *link_info = &bp->link_info;

@@ -1502,13 +1503,13 @@ static int bnxt_async_event_process(struct bnxt *bp,
		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
		/* fall thru */
	}
-	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
+	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
		break;
-	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
		break;
-	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
+	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		u16 port_id = BNXT_GET_EVENT_PORT(data1);

@@ -1521,7 +1522,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
		break;
	}
-	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
+	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		if (BNXT_PF(bp))
			goto async_event_process_exit;
		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
@@ -4261,12 +4262,16 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
		goto qportcfg_exit;
	}
	bp->max_tc = resp->max_configurable_queues;
+	bp->max_lltc = resp->max_configurable_lossless_queues;
	if (bp->max_tc > BNXT_MAX_QUEUE)
		bp->max_tc = BNXT_MAX_QUEUE;

	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
		bp->max_tc = 1;

+	if (bp->max_lltc > bp->max_tc)
+		bp->max_lltc = bp->max_tc;
+
	qptr = &resp->queue_id0;
	for (i = 0; i < bp->max_tc; i++) {
		bp->q_info[i].queue_id = *qptr++;
@@ -4993,7 +4998,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
	}
}

-static void bnxt_tx_disable(struct bnxt *bp)
+void bnxt_tx_disable(struct bnxt *bp)
{
	int i;
	struct bnxt_tx_ring_info *txr;
@@ -5011,7 +5016,7 @@ static void bnxt_tx_disable(struct bnxt *bp)
	netif_carrier_off(bp->dev);
}

-static void bnxt_tx_enable(struct bnxt *bp)
+void bnxt_tx_enable(struct bnxt *bp)
{
	int i;
	struct bnxt_tx_ring_info *txr;
@@ -6337,17 +6342,10 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
	return 0;
}

-static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
-			 struct tc_to_netdev *ntc)
+int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
	struct bnxt *bp = netdev_priv(dev);
	bool sh = false;
-	u8 tc;
-
-	if (ntc->type != TC_SETUP_MQPRIO)
-		return -EINVAL;
-
-	tc = ntc->tc;

	if (tc > bp->max_tc) {
		netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
@@ -6390,6 +6388,15 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
	return 0;
}

+static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+			 struct tc_to_netdev *ntc)
+{
+	if (ntc->type != TC_SETUP_MQPRIO)
+		return -EINVAL;
+
+	return bnxt_setup_mq_tc(dev, ntc->tc);
+}
+
#ifdef CONFIG_RFS_ACCEL
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
			    struct bnxt_ntuple_filter *f2)
@@ -6680,6 +6687,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)

	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
+	bnxt_dcb_free(bp);
	pci_iounmap(pdev, bp->bar2);
	pci_iounmap(pdev, bp->bar1);
	pci_iounmap(pdev, bp->bar0);
@@ -6907,6 +6915,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = 9500;

+	bnxt_dcb_init(bp);
+
#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -11,10 +11,10 @@
#define BNXT_H

#define DRV_MODULE_NAME		"bnxt_en"
-#define DRV_MODULE_VERSION	"1.5.0"
+#define DRV_MODULE_VERSION	"1.6.0"

#define DRV_VER_MAJ	1
-#define DRV_VER_MIN	5
+#define DRV_VER_MIN	6
#define DRV_VER_UPD	0

struct tx_bd {
@@ -1010,6 +1010,7 @@ struct bnxt {
	u32			rss_hash_cfg;

	u8			max_tc;
+	u8			max_lltc;	/* lossless TCs */
	struct bnxt_queue_info	q_info[BNXT_MAX_QUEUE];

	unsigned int		current_interval;
@@ -1025,6 +1026,13 @@ struct bnxt {
	struct bnxt_irq		*irq_tbl;
	u8			mac_addr[ETH_ALEN];

+#ifdef CONFIG_BNXT_DCB
+	struct ieee_pfc		*ieee_pfc;
+	struct ieee_ets		*ieee_ets;
+	u8			dcbx_cap;
+	u8			default_pri;
+#endif /* CONFIG_BNXT_DCB */
+
	u32			msg_enable;

	u32			hwrm_spec_code;
@@ -1116,6 +1124,13 @@ struct bnxt {
	u32			lpi_tmr_hi;
};

+#define BNXT_RX_STATS_OFFSET(counter)		\
+	(offsetof(struct rx_port_stats, counter) / 8)
+
+#define BNXT_TX_STATS_OFFSET(counter)			\
+	((offsetof(struct tx_port_stats, counter) +	\
+	  sizeof(struct rx_port_stats) + 512) / 8)
+
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
{
@@ -1220,10 +1235,13 @@ int hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
int bnxt_hwrm_set_coal(struct bnxt *);
int bnxt_hwrm_func_qcaps(struct bnxt *);
+void bnxt_tx_disable(struct bnxt *bp);
+void bnxt_tx_enable(struct bnxt *bp);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_close_nic(struct bnxt *, bool, bool);
+int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
#endif
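For context (illustrative only, not part of the commit): BNXT_RX_STATS_OFFSET()/BNXT_TX_STATS_OFFSET() move into bnxt.h so that both the ethtool statistics code and the new DCB code (the per-priority PFC counters read in ieee_getpfc) can index the 64-bit counters in the DMA'd port-statistics buffer, where the TX block follows the RX block after a 512-byte gap. The stand-alone sketch below shows the same offset arithmetic; the struct layouts and field names are simplified stand-ins, not the real rx_port_stats/tx_port_stats definitions.

/* Illustrative sketch of the word-offset arithmetic behind the
 * BNXT_*_STATS_OFFSET() macros; simplified stand-ins, not driver code.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct rx_stats {                 /* stand-in for struct rx_port_stats */
    uint64_t rx_good_frames;
    uint64_t rx_pfc_ena_frames_pri0;
};

struct tx_stats {                 /* stand-in for struct tx_port_stats */
    uint64_t tx_good_frames;
    uint64_t tx_pfc_ena_frames_pri0;
};

/* Offsets are counted in 64-bit words inside one buffer that holds the
 * RX block, a 512-byte gap, then the TX block.
 */
#define RX_STATS_OFFSET(c) (offsetof(struct rx_stats, c) / 8)
#define TX_STATS_OFFSET(c) \
    ((offsetof(struct tx_stats, c) + sizeof(struct rx_stats) + 512) / 8)

int main(void)
{
    /* One flat buffer standing in for the DMA'd port statistics. */
    uint64_t stats[(sizeof(struct rx_stats) + 512 + sizeof(struct tx_stats)) / 8];

    for (size_t i = 0; i < sizeof(stats) / sizeof(stats[0]); i++)
        stats[i] = (uint64_t)i;   /* fake counter values */

    printf("rx_pfc_pri0 word index: %zu\n", RX_STATS_OFFSET(rx_pfc_ena_frames_pri0));
    printf("tx_pfc_pri0 word index: %zu\n", TX_STATS_OFFSET(tx_pfc_ena_frames_pri0));
    printf("tx_pfc_pri0 value:      %llu\n",
           (unsigned long long)stats[TX_STATS_OFFSET(tx_pfc_ena_frames_pri0)]);
    return 0;
}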
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c (new file, 502 lines)
@@ -0,0 +1,502 @@
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_dcb.h"

#ifdef CONFIG_BNXT_DCB
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_pri2cos_cfg_input req = {0};
	int rc = 0, i;
	u8 *pri2cos;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
	req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
				QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);

	pri2cos = &req.pri0_cos_queue_id;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		req.enables |= cpu_to_le32(
			QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);

		pri2cos[i] = bp->q_info[ets->prio_tc[i]].queue_id;
	}
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}

static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_pri2cos_qcfg_input req = {0};
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		u8 *pri2cos = &resp->pri0_cos_queue_id;
		int i, j;

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
			u8 queue_id = pri2cos[i];

			for (j = 0; j < bp->max_tc; j++) {
				if (bp->q_info[j].queue_id == queue_id) {
					ets->prio_tc[i] = j;
					break;
				}
			}
		}
	}
	return rc;
}

static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
				      u8 max_tc)
{
	struct hwrm_queue_cos2bw_cfg_input req = {0};
	struct bnxt_cos2bw_cfg cos2bw;
	int rc = 0, i;
	void *data;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
	data = &req.unused_0;
	for (i = 0; i < max_tc; i++, data += sizeof(cos2bw) - 4) {
		req.enables |= cpu_to_le32(
			QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);

		memset(&cos2bw, 0, sizeof(cos2bw));
		cos2bw.queue_id = bp->q_info[i].queue_id;
		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
			cos2bw.tsa =
				QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
			cos2bw.pri_lvl = i;
		} else {
			cos2bw.tsa =
				QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS;
			cos2bw.bw_weight = ets->tc_tx_bw[i];
		}
		memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
		if (i == 0) {
			req.queue_id0 = cos2bw.queue_id;
			req.unused_0 = 0;
		}
	}
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}

static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_cos2bw_qcfg_input req = {0};
	struct bnxt_cos2bw_cfg cos2bw;
	void *data;
	int rc, i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return rc;

	data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
	for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
		int j;

		memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
		if (i == 0)
			cos2bw.queue_id = resp->queue_id0;

		for (j = 0; j < bp->max_tc; j++) {
			if (bp->q_info[j].queue_id != cos2bw.queue_id)
				continue;
			if (cos2bw.tsa ==
			    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
				ets->tc_tsa[j] = IEEE_8021QAZ_TSA_STRICT;
			} else {
				ets->tc_tsa[j] = IEEE_8021QAZ_TSA_ETS;
				ets->tc_tx_bw[j] = cos2bw.bw_weight;
			}
		}
	}
	return 0;
}

static int bnxt_hwrm_queue_cfg(struct bnxt *bp, unsigned int lltc_mask)
{
	struct hwrm_queue_cfg_input req = {0};
	int i;

	if (netif_running(bp->dev))
		bnxt_tx_disable(bp);

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_CFG, -1, -1);
	req.flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR);
	req.enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE);

	/* Configure lossless queues to lossy first */
	req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
	for (i = 0; i < bp->max_tc; i++) {
		if (BNXT_LLQ(bp->q_info[i].queue_profile)) {
			req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
			hwrm_send_message(bp, &req, sizeof(req),
					  HWRM_CMD_TIMEOUT);
			bp->q_info[i].queue_profile =
				QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
		}
	}

	/* Now configure desired queues to lossless */
	req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
	for (i = 0; i < bp->max_tc; i++) {
		if (lltc_mask & (1 << i)) {
			req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
			hwrm_send_message(bp, &req, sizeof(req),
					  HWRM_CMD_TIMEOUT);
			bp->q_info[i].queue_profile =
				QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
		}
	}
	if (netif_running(bp->dev))
		bnxt_tx_enable(bp);

	return 0;
}

static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
	struct hwrm_queue_pfcenable_cfg_input req = {0};
	struct ieee_ets *my_ets = bp->ieee_ets;
	unsigned int tc_mask = 0, pri_mask = 0;
	u8 i, pri, lltc_count = 0;
	bool need_q_recfg = false;
	int rc;

	if (!my_ets)
		return -EINVAL;

	for (i = 0; i < bp->max_tc; i++) {
		for (pri = 0; pri < IEEE_8021QAZ_MAX_TCS; pri++) {
			if ((pfc->pfc_en & (1 << pri)) &&
			    (my_ets->prio_tc[pri] == i)) {
				pri_mask |= 1 << pri;
				tc_mask |= 1 << i;
			}
		}
		if (tc_mask & (1 << i))
			lltc_count++;
	}
	if (lltc_count > bp->max_lltc)
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
	req.flags = cpu_to_le32(pri_mask);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return rc;

	for (i = 0; i < bp->max_tc; i++) {
		if (tc_mask & (1 << i)) {
			if (!BNXT_LLQ(bp->q_info[i].queue_profile))
				need_q_recfg = true;
		}
	}

	if (need_q_recfg)
		rc = bnxt_hwrm_queue_cfg(bp, tc_mask);

	return rc;
}

static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
	struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_pfcenable_qcfg_input req = {0};
	u8 pri_mask;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return rc;

	pri_mask = le32_to_cpu(resp->flags);
	pfc->pfc_en = pri_mask;
	return 0;
}

static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
	int total_ets_bw = 0;
	u8 max_tc = 0;
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->prio_tc[i] > bp->max_tc) {
			netdev_err(bp->dev, "priority to TC mapping exceeds TC count %d\n",
				   ets->prio_tc[i]);
			return -EINVAL;
		}
		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];

		if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
			return -EINVAL;

		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			break;
		case IEEE_8021QAZ_TSA_ETS:
			total_ets_bw += ets->tc_tx_bw[i];
			break;
		default:
			return -ENOTSUPP;
		}
	}
	if (total_ets_bw > 100)
		return -EINVAL;

	*tc = max_tc + 1;
	return 0;
}

static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_ets *my_ets = bp->ieee_ets;

	ets->ets_cap = bp->max_tc;

	if (!my_ets) {
		int rc;

		if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
			return 0;

		my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
		if (!my_ets)
			return 0;
		rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
		if (rc)
			return 0;
		rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
		if (rc)
			return 0;
	}

	ets->cbs = my_ets->cbs;
	memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
	memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
	memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
	memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
	return 0;
}

static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_ets *my_ets = bp->ieee_ets;
	u8 max_tc = 0;
	int rc, i;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = bnxt_ets_validate(bp, ets, &max_tc);
	if (!rc) {
		if (!my_ets) {
			my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
			if (!my_ets)
				return -ENOMEM;
			/* initialize PRI2TC mappings to invalid value */
			for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
				my_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS;
			bp->ieee_ets = my_ets;
		}
		rc = bnxt_setup_mq_tc(dev, max_tc);
		if (rc)
			return rc;
		rc = bnxt_hwrm_queue_cos2bw_cfg(bp, ets, max_tc);
		if (rc)
			return rc;
		rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets);
		if (rc)
			return rc;
		memcpy(my_ets, ets, sizeof(*my_ets));
	}
	return rc;
}

static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct bnxt *bp = netdev_priv(dev);
	__le64 *stats = (__le64 *)bp->hw_rx_port_stats;
	struct ieee_pfc *my_pfc = bp->ieee_pfc;
	long rx_off, tx_off;
	int i, rc;

	pfc->pfc_cap = bp->max_lltc;

	if (!my_pfc) {
		if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
			return 0;

		my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
		if (!my_pfc)
			return 0;
		bp->ieee_pfc = my_pfc;
		rc = bnxt_hwrm_queue_pfc_qcfg(bp, my_pfc);
		if (rc)
			return 0;
	}

	pfc->pfc_en = my_pfc->pfc_en;
	pfc->mbc = my_pfc->mbc;
	pfc->delay = my_pfc->delay;

	if (!stats)
		return 0;

	rx_off = BNXT_RX_STATS_OFFSET(rx_pfc_ena_frames_pri0);
	tx_off = BNXT_TX_STATS_OFFSET(tx_pfc_ena_frames_pri0);
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++, rx_off++, tx_off++) {
		pfc->requests[i] = le64_to_cpu(*(stats + tx_off));
		pfc->indications[i] = le64_to_cpu(*(stats + rx_off));
	}

	return 0;
}

static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_pfc *my_pfc = bp->ieee_pfc;
	int rc;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	if (!my_pfc) {
		my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
		if (!my_pfc)
			return -ENOMEM;
		bp->ieee_pfc = my_pfc;
	}
	rc = bnxt_hwrm_queue_pfc_cfg(bp, pfc);
	if (!rc)
		memcpy(my_pfc, pfc, sizeof(*my_pfc));

	return rc;
}

static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = -EINVAL;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = dcb_ieee_setapp(dev, app);
	return rc;
}

static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	rc = dcb_ieee_delapp(dev, app);
	return rc;
}

static u8 bnxt_dcbnl_getdcbx(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->dcbx_cap;
}

static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	struct bnxt *bp = netdev_priv(dev);

	/* only support IEEE */
	if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE))
		return 1;

	if ((mode & DCB_CAP_DCBX_HOST) && BNXT_VF(bp))
		return 1;

	if (mode == bp->dcbx_cap)
		return 0;

	bp->dcbx_cap = mode;
	return 0;
}

static const struct dcbnl_rtnl_ops dcbnl_ops = {
	.ieee_getets	= bnxt_dcbnl_ieee_getets,
	.ieee_setets	= bnxt_dcbnl_ieee_setets,
	.ieee_getpfc	= bnxt_dcbnl_ieee_getpfc,
	.ieee_setpfc	= bnxt_dcbnl_ieee_setpfc,
	.ieee_setapp	= bnxt_dcbnl_ieee_setapp,
	.ieee_delapp	= bnxt_dcbnl_ieee_delapp,
	.getdcbx	= bnxt_dcbnl_getdcbx,
	.setdcbx	= bnxt_dcbnl_setdcbx,
};

void bnxt_dcb_init(struct bnxt *bp)
{
	if (bp->hwrm_spec_code < 0x10501)
		return;

	bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
	if (BNXT_PF(bp))
		bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
	else
		bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
	bp->dev->dcbnl_ops = &dcbnl_ops;
}

void bnxt_dcb_free(struct bnxt *bp)
{
	kfree(bp->ieee_pfc);
	kfree(bp->ieee_ets);
	bp->ieee_pfc = NULL;
	bp->ieee_ets = NULL;
}

#else

void bnxt_dcb_init(struct bnxt *bp)
{
}

void bnxt_dcb_free(struct bnxt *bp)
{
}

#endif
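For context (illustrative only, not part of the commit): the cos2bw helpers above pack one 12-byte record per traffic class into the HWRM message, stepping the data pointer by sizeof(struct bnxt_cos2bw_cfg) - 4 because the host-side struct carries 3 leading pad bytes (to keep min_bw aligned) plus a trailing unused byte that are not part of the wire record. The stand-alone sketch below shows the same pack/unpack pattern; the message buffer and values are invented for illustration.

/* Illustrative sketch of the "sizeof(cfg) - 4" record packing used by the
 * cos2bw helpers; simplified stand-ins, not the real HWRM message layout.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct cos2bw_cfg {              /* mirrors struct bnxt_cos2bw_cfg */
    uint8_t  pad[3];             /* host-side padding only */
    uint8_t  queue_id;
    uint32_t min_bw;
    uint32_t max_bw;
    uint8_t  tsa;
    uint8_t  pri_lvl;
    uint8_t  bw_weight;
    uint8_t  unused;
};

#define REC_SIZE (sizeof(struct cos2bw_cfg) - 4)   /* 12-byte wire record */

int main(void)
{
    uint8_t msg[8 * REC_SIZE] = {0};   /* pretend firmware message body */
    uint8_t *data = msg;
    struct cos2bw_cfg cfg;

    /* Pack two traffic classes back to back, 12 bytes each. */
    for (int i = 0; i < 2; i++, data += REC_SIZE) {
        memset(&cfg, 0, sizeof(cfg));
        cfg.queue_id = (uint8_t)i;
        cfg.bw_weight = (uint8_t)(50 * (i + 1));
        memcpy(data, &cfg.queue_id, REC_SIZE);
    }

    /* Unpack the second record the same way. */
    memcpy(&cfg.queue_id, msg + REC_SIZE, REC_SIZE);
    printf("queue %u weight %u\n", cfg.queue_id, cfg.bw_weight);
    return 0;
}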
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h (new file, 41 lines)
@@ -0,0 +1,41 @@
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#ifndef BNXT_DCB_H
#define BNXT_DCB_H

#include <net/dcbnl.h>

struct bnxt_dcb {
	u8			max_tc;
	struct ieee_pfc		*ieee_pfc;
	struct ieee_ets		*ieee_ets;
	u8			dcbx_cap;
	u8			default_pri;
};

struct bnxt_cos2bw_cfg {
	u8			pad[3];
	u8			queue_id;
	__le32			min_bw;
	__le32			max_bw;
	u8			tsa;
	u8			pri_lvl;
	u8			bw_weight;
	u8			unused;
};

#define BNXT_LLQ(q_profile)	\
	((q_profile) == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS)

#define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL	0x0300

void bnxt_dcb_init(struct bnxt *bp);
void bnxt_dcb_free(struct bnxt *bp);
#endif
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -107,16 +107,9 @@ static int bnxt_set_coalesce(struct net_device *dev,

#define BNXT_NUM_STATS	21

-#define BNXT_RX_STATS_OFFSET(counter)	\
-	(offsetof(struct rx_port_stats, counter) / 8)
-
#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

-#define BNXT_TX_STATS_OFFSET(counter)			\
-	((offsetof(struct tx_port_stats, counter) +	\
-	  sizeof(struct rx_port_stats) + 512) / 8)
-
#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

@@ -150,6 +143,14 @@ static const struct {
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
+	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
+	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
+	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
+	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
+	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
+	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
+	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
+	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
@@ -179,6 +180,14 @@ static const struct {
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
+	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
+	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
+	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
+	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
+	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
+	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
+	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
+	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h: file diff suppressed because it is too large (firmware interface spec update).
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -34,8 +34,7 @@ static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
	/* broadcast this async event to all VFs */
	req.encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
-	async_cmpl->type =
-		cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
+	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	mutex_lock(&bp->hwrm_cmd_lock);
@@ -288,7 +287,7 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
	}
	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
-			HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
+			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	return rc;
}

@@ -578,8 +577,7 @@ void bnxt_sriov_disable(struct bnxt *bp)

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
-			bp, NULL,
-			HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
+			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {