qed: Utilize FW 8.10.3.0

The new QED firmware contains several fixes, including:
- Wrong classification of packets in 4-port devices.
- Anti-spoof interoperability with encapsulated packets.
- Tx-switching of encapsulated packets.
It also slightly improves Tx performance of the device.

In addition, this firmware contains the necessary logic for supporting
iscsi & rdma, for which we plan on pushing protocol drivers in the
imminent future.

Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

This commit is contained in:
parent b87ab6b8e5
commit 351a4dedb3
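Reviewer note (illustrative, not part of the commit): several hunks below replace the old num_active_phys_tcs count with an active_phys_tcs bitmask (0x7 on 4-port devices, 0x9f otherwise) and then derive the traffic-class count by walking that mask. The stand-alone C sketch here mirrors that counting pattern; NUM_OF_PHYS_TCS = 8 and the sample masks come from the patch, while the function name and main() are made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_OF_PHYS_TCS 8 /* driver constant, assumed to be 8 TCs */

    /* Count how many traffic classes are enabled in an active-TC bitmask;
     * this is the same loop shape the patch adds in qed_cmdq_lines_rt_init()
     * and qed_btb_blocks_rt_init().
     */
    static unsigned int count_active_tcs(uint8_t active_phys_tcs)
    {
        unsigned int tc, num_tcs_in_port = 0;

        for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
            if ((active_phys_tcs >> tc) & 0x1)
                num_tcs_in_port++;
        }
        return num_tcs_in_port;
    }

    int main(void)
    {
        printf("mask 0x07 -> %u TCs\n", count_active_tcs(0x7));  /* 3 */
        printf("mask 0x9f -> %u TCs\n", count_active_tcs(0x9f)); /* 6 */
        return 0;
    }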
@@ -244,6 +244,7 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
         qm_info->qm_pq_params[curr_queue].tc_id =
                         p_hwfn->hw_info.non_offload_tc;
         qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+        qm_info->qm_pq_params[curr_queue].rl_valid = 1;
         curr_queue++;
     }
 
@@ -256,7 +257,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
     for (i = 0; i < num_ports; i++) {
         p_qm_port = &qm_info->qm_port_params[i];
         p_qm_port->active = 1;
-        p_qm_port->num_active_phys_tcs = 4;
+        if (num_ports == 4)
+            p_qm_port->active_phys_tcs = 0x7;
+        else
+            p_qm_port->active_phys_tcs = 0x9f;
         p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
         p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
     }
@@ -703,8 +707,31 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
 {
     int rc = 0;
 
-    rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
-              hw_mode);
+    rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
+    if (rc != 0)
+        return rc;
+
+    if (hw_mode & (1 << MODE_MF_SI)) {
+        u8 pf_id = 0;
+
+        if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
+            DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+                   "PF[%08x] is first eth on engine\n", pf_id);
+
+            /* We should have configured BIT for ppfid, i.e., the
+             * relative function number in the port. But there's a
+             * bug in LLH in BB where the ppfid is actually engine
+             * based, so we need to take this into account.
+             */
+            qed_wr(p_hwfn, p_ptt,
+                   NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
+        }
+
+        /* Take the protocol-based hit vector if there is a hit,
+         * otherwise take the other vector.
+         */
+        qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2);
+    }
     return rc;
 }
 
@@ -773,6 +800,21 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
     /* Pure runtime initializations - directly to the HW */
     qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
 
+    if (hw_mode & (1 << MODE_MF_SI)) {
+        u8 pf_id = 0;
+        u32 val;
+
+        if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
+            if (p_hwfn->rel_pf_id == pf_id) {
+                DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+                       "PF[%d] is first ETH on engine\n",
+                       pf_id);
+                val = 1;
+            }
+            qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val);
+        }
+    }
+
     if (b_hw_start) {
         /* enable interrupts */
         qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
@@ -1304,31 +1346,31 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
 
     switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
         NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
-    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
+    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
         p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
         break;
-    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
+    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
         p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
         break;
-    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
+    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
        p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
        break;
-    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
+    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
        p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
        break;
-    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
+    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
        p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
        break;
-    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
+    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
        p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
        break;
-    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
+    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
        p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
        break;
-    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
+    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
        p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
        break;
-    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
+    case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
        p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
        break;
     default:
@@ -1373,7 +1415,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
     case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
         link->speed.forced_speed = 50000;
         break;
-    case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
+    case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
         link->speed.forced_speed = 100000;
         break;
     default:
(File diff suppressed because it is too large.)
@@ -446,7 +446,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
            idx_cmd,
            le32_to_cpu(command->opcode),
            le16_to_cpu(command->opcode_b),
-           le16_to_cpu(command->length),
+           le16_to_cpu(command->length_dw),
            le32_to_cpu(command->src_addr_hi),
            le32_to_cpu(command->src_addr_lo),
            le32_to_cpu(command->dst_addr_hi),
@@ -461,7 +461,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
            idx_cmd,
            le32_to_cpu(command->opcode),
            le16_to_cpu(command->opcode_b),
-           le16_to_cpu(command->length),
+           le16_to_cpu(command->length_dw),
            le32_to_cpu(command->src_addr_hi),
            le32_to_cpu(command->src_addr_lo),
            le32_to_cpu(command->dst_addr_hi),
@@ -645,7 +645,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
         return -EINVAL;
     }
 
-    cmd->length = cpu_to_le16((u16)length);
+    cmd->length_dw = cpu_to_le16((u16)length);
 
     qed_dmae_post_command(p_hwfn, p_ptt);
 
@@ -31,7 +31,6 @@ enum cminterface {
 };
 
 /* general constants */
-#define QM_PQ_ELEMENT_SIZE    4 /* in bytes */
 #define QM_PQ_MEM_4KB(pq_size)    (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
                     QM_PQ_ELEMENT_SIZE, \
                     0x1000) : 0)
@@ -44,28 +43,28 @@ enum cminterface {
 /* other PQ constants */
 #define QM_OTHER_PQS_PER_PF    4
 /* WFQ constants */
-#define QM_WFQ_UPPER_BOUND    6250000
+#define QM_WFQ_UPPER_BOUND    62500000
 #define QM_WFQ_VP_PQ_VOQ_SHIFT    0
 #define QM_WFQ_VP_PQ_PF_SHIFT    5
 #define QM_WFQ_INC_VAL(weight)    ((weight) * 0x9000)
-#define QM_WFQ_MAX_INC_VAL    4375000
-#define QM_WFQ_INIT_CRD(inc_val)    (2 * (inc_val))
+#define QM_WFQ_MAX_INC_VAL    43750000
 /* RL constants */
-#define QM_RL_UPPER_BOUND    6250000
+#define QM_RL_UPPER_BOUND    62500000
 #define QM_RL_PERIOD    5 /* in us */
 #define QM_RL_PERIOD_CLK_25M    (25 * QM_RL_PERIOD)
+#define QM_RL_MAX_INC_VAL    43750000
 #define QM_RL_INC_VAL(rate)    max_t(u32, \
-                (((rate ? rate : 1000000) \
-                * QM_RL_PERIOD) / 8), 1)
-#define QM_RL_MAX_INC_VAL    4375000
+                (u32)(((rate ? rate : \
+                    1000000) * \
+                    QM_RL_PERIOD * \
+                    101) / (8 * 100)), 1)
 /* AFullOprtnstcCrdMask constants */
 #define QM_OPPOR_LINE_VOQ_DEF    1
 #define QM_OPPOR_FW_STOP_DEF    0
 #define QM_OPPOR_PQ_EMPTY_DEF    1
-#define EAGLE_WORKAROUND_TC    7
 /* Command Queue constants */
 #define PBF_CMDQ_PURE_LB_LINES    150
-#define PBF_CMDQ_EAGLE_WORKAROUND_LINES    8
 #define PBF_CMDQ_LINES_RT_OFFSET(voq)    ( \
     PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
     (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
@@ -80,7 +79,6 @@ enum cminterface {
 /* BTB: blocks constants (block size = 256B) */
 #define BTB_JUMBO_PKT_BLOCKS    38
 #define BTB_HEADROOM_BLOCKS    BTB_JUMBO_PKT_BLOCKS
-#define BTB_EAGLE_WORKAROUND_BLOCKS    4
 #define BTB_PURE_LB_FACTOR    10
 #define BTB_PURE_LB_RATIO    7
 /* QM stop command constants */
@@ -107,9 +105,9 @@ enum cminterface {
     cmd ## _ ## field, \
     value)
 /* QM: VOQ macros */
-#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port)    ((port) * \
-                        (max_phy_tcs_pr_port) \
-                        + (tc))
+#define PHYS_VOQ(port, tc, max_phys_tcs_per_port)    ((port) * \
+                        (max_phys_tcs_per_port) + \
+                        (tc))
 #define LB_VOQ(port)    ( \
         MAX_PHYS_VOQS + (port))
 #define VOQ(port, tc, max_phy_tcs_pr_port) \
@@ -120,8 +118,7 @@ enum cminterface {
         : LB_VOQ(port))
 /******************** INTERNAL IMPLEMENTATION *********************/
 /* Prepare PF RL enable/disable runtime init values */
-static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
-                 bool pf_rl_en)
+static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
 {
     STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
     if (pf_rl_en) {
@@ -130,8 +127,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
                  (1 << MAX_NUM_VOQS) - 1);
         /* write RL period */
         STORE_RT_REG(p_hwfn,
-                 QM_REG_RLPFPERIOD_RT_OFFSET,
-                 QM_RL_PERIOD_CLK_25M);
+                 QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
         STORE_RT_REG(p_hwfn,
                  QM_REG_RLPFPERIODTIMER_RT_OFFSET,
                  QM_RL_PERIOD_CLK_25M);
@@ -144,8 +140,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
 }
 
 /* Prepare PF WFQ enable/disable runtime init values */
-static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
-                  bool pf_wfq_en)
+static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
 {
     STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
     /* set credit threshold for QM bypass flow */
@@ -156,8 +151,7 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
 }
 
 /* Prepare VPORT RL enable/disable runtime init values */
-static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
-                bool vport_rl_en)
+static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
 {
     STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
              vport_rl_en ? 1 : 0);
@@ -178,8 +172,7 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
 }
 
 /* Prepare VPORT WFQ enable/disable runtime init values */
-static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
-                 bool vport_wfq_en)
+static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
 {
     STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
              vport_wfq_en ? 1 : 0);
@@ -194,8 +187,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
  * the specified VOQ
  */
 static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
-                       u8 voq,
-                       u16 cmdq_lines)
+                       u8 voq, u16 cmdq_lines)
 {
     u32 qm_line_crd;
 
@@ -221,7 +213,7 @@ static void qed_cmdq_lines_rt_init(
     u8 max_phys_tcs_per_port,
     struct init_qm_port_params port_params[MAX_NUM_PORTS])
 {
-    u8 tc, voq, port_id;
+    u8 tc, voq, port_id, num_tcs_in_port;
 
     /* clear PBF lines for all VOQs */
     for (voq = 0; voq < MAX_NUM_VOQS; voq++)
@@ -229,22 +221,31 @@ static void qed_cmdq_lines_rt_init(
     for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
         if (port_params[port_id].active) {
             u16 phys_lines, phys_lines_per_tc;
-            u8 phys_tcs = port_params[port_id].num_active_phys_tcs;
 
-            /* find #lines to divide between the active
-             * physical TCs.
-             */
+            /* find #lines to divide between active phys TCs */
             phys_lines = port_params[port_id].num_pbf_cmd_lines -
                      PBF_CMDQ_PURE_LB_LINES;
             /* find #lines per active physical TC */
-            phys_lines_per_tc = phys_lines / phys_tcs;
+            num_tcs_in_port = 0;
+            for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+                if (((port_params[port_id].active_phys_tcs >>
+                      tc) & 0x1) == 1)
+                    num_tcs_in_port++;
+            }
+
+            phys_lines_per_tc = phys_lines / num_tcs_in_port;
             /* init registers per active TC */
-            for (tc = 0; tc < phys_tcs; tc++) {
+            for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+                if (((port_params[port_id].active_phys_tcs >>
+                      tc) & 0x1) != 1)
+                    continue;
+
                 voq = PHYS_VOQ(port_id, tc,
                            max_phys_tcs_per_port);
                 qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
                                phys_lines_per_tc);
             }
 
             /* init registers for pure LB TC */
             qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
                            PBF_CMDQ_PURE_LB_LINES);
@@ -259,34 +260,42 @@ static void qed_btb_blocks_rt_init(
     struct init_qm_port_params port_params[MAX_NUM_PORTS])
 {
     u32 usable_blocks, pure_lb_blocks, phys_blocks;
-    u8 tc, voq, port_id;
+    u8 tc, voq, port_id, num_tcs_in_port;
 
     for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
         u32 temp;
-        u8 phys_tcs;
 
         if (!port_params[port_id].active)
             continue;
 
-        phys_tcs = port_params[port_id].num_active_phys_tcs;
-
         /* subtract headroom blocks */
         usable_blocks = port_params[port_id].num_btb_blocks -
                 BTB_HEADROOM_BLOCKS;
 
-        /* find blocks per physical TC. use factor to avoid
-         * floating arithmethic.
-         */
+        /* find blocks per physical TC */
+        num_tcs_in_port = 0;
+        for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+            if (((port_params[port_id].active_phys_tcs >>
+                  tc) & 0x1) == 1)
+                num_tcs_in_port++;
+        }
+
         pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
-                 (phys_tcs * BTB_PURE_LB_FACTOR +
+                 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
                   BTB_PURE_LB_RATIO);
         pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
                        pure_lb_blocks / BTB_PURE_LB_FACTOR);
-        phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs;
+        phys_blocks = (usable_blocks - pure_lb_blocks) /
+                  num_tcs_in_port;
 
         /* init physical TCs */
-        for (tc = 0; tc < phys_tcs; tc++) {
-            voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
+        for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+            if (((port_params[port_id].active_phys_tcs >>
+                  tc) & 0x1) != 1)
+                continue;
+
+            voq = PHYS_VOQ(port_id, tc,
+                       max_phys_tcs_per_port);
             STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
                      phys_blocks);
         }
@@ -360,10 +369,11 @@ static void qed_tx_pq_map_rt_init(
         memset(&tx_pq_map, 0, sizeof(tx_pq_map));
         SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
         SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
-              is_vf_pq ? 1 : 0);
+              p_params->pq_params[i].rl_valid ? 1 : 0);
         SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
         SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
-              is_vf_pq ? p_params->pq_params[i].vport_id : 0);
+              p_params->pq_params[i].rl_valid ?
+              p_params->pq_params[i].vport_id : 0);
         SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
         SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
               p_params->pq_params[i].wrr_group);
@@ -390,25 +400,11 @@ static void qed_tx_pq_map_rt_init(
     /* store Tx PQ VF mask to size select register */
     for (i = 0; i < num_tx_pq_vf_masks; i++) {
         if (tx_pq_vf_mask[i]) {
-            if (is_bb_a0) {
-                u32 curr_mask = 0, addr;
-
-                addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4);
-                if (!p_params->is_first_pf)
-                    curr_mask = qed_rd(p_hwfn, p_ptt,
-                               addr);
-
-                addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
-
-                STORE_RT_REG(p_hwfn, addr,
-                         curr_mask | tx_pq_vf_mask[i]);
-            } else {
-                u32 addr;
-
-                addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
-                STORE_RT_REG(p_hwfn, addr,
-                         tx_pq_vf_mask[i]);
-            }
+            u32 addr;
+
+            addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
+            STORE_RT_REG(p_hwfn, addr,
+                     tx_pq_vf_mask[i]);
         }
     }
 }
@@ -418,8 +414,7 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
                       u8 port_id,
                       u8 pf_id,
                       u32 num_pf_cids,
-                      u32 num_tids,
-                      u32 base_mem_addr_4kb)
+                      u32 num_tids, u32 base_mem_addr_4kb)
 {
     u16 i, pq_id;
 
@@ -465,15 +460,10 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
             (p_params->pf_id % MAX_NUM_PFS_BB);
 
     inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
-    if (inc_val > QM_WFQ_MAX_INC_VAL) {
+    if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
         DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
         return -1;
     }
-    STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
-             inc_val);
-    STORE_RT_REG(p_hwfn,
-             QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
-             QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
 
     for (i = 0; i < num_tx_pqs; i++) {
         u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
@@ -481,19 +471,21 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
 
         OVERWRITE_RT_REG(p_hwfn,
                  crd_reg_offset + voq * MAX_NUM_PFS_BB,
-                 QM_WFQ_INIT_CRD(inc_val) |
                  QM_WFQ_CRD_REG_SIGN_BIT);
     }
 
+    STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+             inc_val);
+    STORE_RT_REG(p_hwfn,
+             QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
+             QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
     return 0;
 }
 
 /* Prepare PF RL runtime init values for the specified PF.
  * Return -1 on error.
  */
-static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
-                 u8 pf_id,
-                 u32 pf_rl)
+static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
 {
     u32 inc_val = QM_RL_INC_VAL(pf_rl);
 
@@ -607,9 +599,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
 
 static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
                 struct qed_ptt *p_ptt,
-                u32 cmd_addr,
-                u32 cmd_data_lsb,
-                u32 cmd_data_msb)
+                u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
 {
     if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
         return false;
@@ -627,9 +617,7 @@ static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
 u32 qed_qm_pf_mem_size(u8 pf_id,
                u32 num_pf_cids,
                u32 num_vf_cids,
-               u32 num_tids,
-               u16 num_pf_pqs,
-               u16 num_vf_pqs)
+               u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
 {
     return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
            QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
@@ -713,8 +701,7 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
 }
 
 int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
-            struct qed_ptt *p_ptt,
-            u8 pf_id, u16 pf_wfq)
+            struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
 {
     u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
 
@@ -728,9 +715,7 @@ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
 }
 
 int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
-           struct qed_ptt *p_ptt,
-           u8 pf_id,
-           u32 pf_rl)
+           struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
 {
     u32 inc_val = QM_RL_INC_VAL(pf_rl);
 
@@ -749,8 +734,7 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
 
 int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
                struct qed_ptt *p_ptt,
-               u16 first_tx_pq_id[NUM_OF_TCS],
-               u16 vport_wfq)
+               u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
 {
     u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
     u8 tc;
@@ -773,9 +757,7 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
 }
 
 int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
-              struct qed_ptt *p_ptt,
-              u8 vport_id,
-              u32 vport_rl)
+              struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
 {
     u32 inc_val = QM_RL_INC_VAL(vport_rl);
 
@@ -795,9 +777,7 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
 bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
               struct qed_ptt *p_ptt,
               bool is_release_cmd,
-              bool is_tx_pq,
-              u16 start_pq,
-              u16 num_pqs)
+              bool is_tx_pq, u16 start_pq, u16 num_pqs)
 {
     u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
     u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
@@ -841,17 +821,15 @@ qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
 #define PRS_ETH_TUNN_FIC_FORMAT    -188897008
 
 void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
-                 struct qed_ptt *p_ptt,
-                 u16 dest_port)
+                 struct qed_ptt *p_ptt, u16 dest_port)
 {
     qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
-    qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
+    qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
     qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
 }
 
 void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
-              struct qed_ptt *p_ptt,
-              bool vxlan_enable)
+              struct qed_ptt *p_ptt, bool vxlan_enable)
 {
     unsigned long reg_val = 0;
     u8 shift;
@@ -908,8 +886,7 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 }
 
 void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
-                  struct qed_ptt *p_ptt,
-                  u16 dest_port)
+                  struct qed_ptt *p_ptt, u16 dest_port)
 {
     qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
     qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
@@ -918,8 +895,7 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
 
 void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
                struct qed_ptt *p_ptt,
-               bool eth_geneve_enable,
-               bool ip_geneve_enable)
+               bool eth_geneve_enable, bool ip_geneve_enable)
 {
     unsigned long reg_val = 0;
     u8 shift;
@@ -543,8 +543,7 @@ void qed_gtt_init(struct qed_hwfn *p_hwfn)
                  pxp_global_win[i]);
 }
 
-int qed_init_fw_data(struct qed_dev *cdev,
-             const u8 *data)
+int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
 {
     struct qed_fw_data *fw = cdev->fw_data;
     struct bin_buffer_hdr *buf_hdr;
@@ -555,7 +554,11 @@ int qed_init_fw_data(struct qed_dev *cdev,
         return -EINVAL;
     }
 
-    buf_hdr = (struct bin_buffer_hdr *)data;
+    /* First Dword contains metadata and should be skipped */
+    buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
+
+    offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset;
+    fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
 
     offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
     fw->init_ops = (union init_op *)(data + offset);
@@ -575,9 +575,12 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
     p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
     DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
 
-    rc = qed_spq_post(p_hwfn, p_ent, NULL);
+    p_ramrod->vf_rx_prod_index = params->vf_qid;
+    if (params->vf_qid)
+        DP_VERBOSE(p_hwfn, QED_MSG_SP,
+               "Queue is meant for VF rxq[%04x]\n", params->vf_qid);
 
-    return rc;
+    return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
 static int
@@ -615,7 +618,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
 
     *pp_prod = (u8 __iomem *)p_hwfn->regview +
            GTT_BAR0_MAP_REG_MSDM_RAM +
-           MSTORM_PRODS_OFFSET(abs_l2_queue);
+           MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
 
     /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
     __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
@@ -759,9 +762,9 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
     struct qed_spq_entry *p_ent = NULL;
     struct qed_sp_init_data init_data;
     struct qed_hw_cid_data *p_tx_cid;
-    u8 abs_vport_id;
+    u16 pq_id, abs_tx_q_id = 0;
     int rc = -EINVAL;
-    u16 pq_id;
+    u8 abs_vport_id;
 
     /* Store information for the stop */
     p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
@@ -772,6 +775,10 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
     if (rc)
         return rc;
 
+    rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
+    if (rc)
+        return rc;
+
     /* Get SPQ entry */
     memset(&init_data, 0, sizeof(init_data));
     init_data.cid = cid;
@@ -791,6 +798,7 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
     p_ramrod->sb_index = p_params->sb_idx;
     p_ramrod->stats_counter_id = stats_id;
 
+    p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);
     p_ramrod->pbl_size = cpu_to_le16(pbl_size);
     DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
 
@@ -1485,51 +1493,51 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
             offsetof(struct public_port, stats),
             sizeof(port_stats));
 
-    p_stats->rx_64_byte_packets += port_stats.pmm.r64;
-    p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127;
-    p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255;
-    p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511;
-    p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023;
-    p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518;
-    p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522;
-    p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047;
-    p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095;
-    p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216;
-    p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383;
-    p_stats->rx_crc_errors += port_stats.pmm.rfcs;
-    p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
-    p_stats->rx_pause_frames += port_stats.pmm.rxpf;
-    p_stats->rx_pfc_frames += port_stats.pmm.rxpp;
-    p_stats->rx_align_errors += port_stats.pmm.raln;
-    p_stats->rx_carrier_errors += port_stats.pmm.rfcr;
-    p_stats->rx_oversize_packets += port_stats.pmm.rovr;
-    p_stats->rx_jabbers += port_stats.pmm.rjbr;
-    p_stats->rx_undersize_packets += port_stats.pmm.rund;
-    p_stats->rx_fragments += port_stats.pmm.rfrg;
-    p_stats->tx_64_byte_packets += port_stats.pmm.t64;
-    p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
-    p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
-    p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
-    p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
-    p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
-    p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
-    p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
-    p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
-    p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
-    p_stats->tx_pause_frames += port_stats.pmm.txpf;
-    p_stats->tx_pfc_frames += port_stats.pmm.txpp;
-    p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
-    p_stats->tx_total_collisions += port_stats.pmm.tncl;
-    p_stats->rx_mac_bytes += port_stats.pmm.rbyte;
-    p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
-    p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
-    p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
-    p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
-    p_stats->tx_mac_bytes += port_stats.pmm.tbyte;
-    p_stats->tx_mac_uc_packets += port_stats.pmm.txuca;
-    p_stats->tx_mac_mc_packets += port_stats.pmm.txmca;
-    p_stats->tx_mac_bc_packets += port_stats.pmm.txbca;
-    p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+    p_stats->rx_64_byte_packets += port_stats.eth.r64;
+    p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
+    p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
+    p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
+    p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+    p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+    p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
+    p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
+    p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
+    p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
+    p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
+    p_stats->rx_crc_errors += port_stats.eth.rfcs;
+    p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
+    p_stats->rx_pause_frames += port_stats.eth.rxpf;
+    p_stats->rx_pfc_frames += port_stats.eth.rxpp;
+    p_stats->rx_align_errors += port_stats.eth.raln;
+    p_stats->rx_carrier_errors += port_stats.eth.rfcr;
+    p_stats->rx_oversize_packets += port_stats.eth.rovr;
+    p_stats->rx_jabbers += port_stats.eth.rjbr;
+    p_stats->rx_undersize_packets += port_stats.eth.rund;
+    p_stats->rx_fragments += port_stats.eth.rfrg;
+    p_stats->tx_64_byte_packets += port_stats.eth.t64;
+    p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
+    p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
+    p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
+    p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+    p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+    p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
+    p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
+    p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
+    p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
+    p_stats->tx_pause_frames += port_stats.eth.txpf;
+    p_stats->tx_pfc_frames += port_stats.eth.txpp;
+    p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
+    p_stats->tx_total_collisions += port_stats.eth.tncl;
+    p_stats->rx_mac_bytes += port_stats.eth.rbyte;
+    p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
+    p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
+    p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
+    p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
+    p_stats->tx_mac_bytes += port_stats.eth.tbyte;
+    p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
+    p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
+    p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
+    p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
     for (j = 0; j < 8; j++) {
         p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
         p_stats->brb_discards += port_stats.brb.brb_discard[j];
@@ -832,7 +832,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
             goto err2;
         }
 
-        data = cdev->firmware->data;
+        /* First Dword used to diffrentiate between various sources */
+        data = cdev->firmware->data + sizeof(u32);
     }
 
     memset(&tunn_info, 0, sizeof(tunn_info));
@@ -991,8 +992,7 @@ static bool qed_can_link_change(struct qed_dev *cdev)
     return true;
 }
 
-static int qed_set_link(struct qed_dev *cdev,
-            struct qed_link_params *params)
+static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
 {
     struct qed_hwfn *hwfn;
     struct qed_mcp_link_params *link_params;
@@ -1032,7 +1032,7 @@ static int qed_set_link(struct qed_dev *cdev,
                 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
         if (params->adv_speeds & 0)
             link_params->speed.advertised_speeds |=
-                NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G;
+                NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
     }
     if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
         link_params->speed.forced_speed = params->forced_speed;
@@ -1053,19 +1053,19 @@ static int qed_set_link(struct qed_dev *cdev,
     if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
         switch (params->loopback_mode) {
         case QED_LINK_LOOPBACK_INT_PHY:
-            link_params->loopback_mode = PMM_LOOPBACK_INT_PHY;
+            link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
             break;
         case QED_LINK_LOOPBACK_EXT_PHY:
-            link_params->loopback_mode = PMM_LOOPBACK_EXT_PHY;
+            link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
            break;
         case QED_LINK_LOOPBACK_EXT:
-            link_params->loopback_mode = PMM_LOOPBACK_EXT;
+            link_params->loopback_mode = ETH_LOOPBACK_EXT;
            break;
         case QED_LINK_LOOPBACK_MAC:
-            link_params->loopback_mode = PMM_LOOPBACK_MAC;
+            link_params->loopback_mode = ETH_LOOPBACK_MAC;
            break;
         default:
-            link_params->loopback_mode = PMM_LOOPBACK_NONE;
+            link_params->loopback_mode = ETH_LOOPBACK_NONE;
            break;
         }
     }
@@ -1157,7 +1157,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
         NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
         if_link->advertised_caps |= 0;
     if (params.speed.advertised_speeds &
-        NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+        NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
         if_link->advertised_caps |= 0;
 
     if (link_caps.speed_capabilities &
@@ -1174,7 +1174,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
         NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
         if_link->supported_caps |= 0;
     if (link_caps.speed_capabilities &
-        NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+        NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
         if_link->supported_caps |= 0;
 
     if (link.link_up)
@@ -531,9 +531,9 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
                               transceiver_data)));
 
     transceiver_state = GET_FIELD(transceiver_state,
-                      PMM_TRANSCEIVER_STATE);
+                      ETH_TRANSCEIVER_STATE);
 
-    if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT)
+    if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
         DP_NOTICE(p_hwfn, "Transceiver is present.\n");
     else
         DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
@@ -668,14 +668,12 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
         qed_link_update(p_hwfn);
 }
 
-int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
-             struct qed_ptt *p_ptt,
-             bool b_up)
+int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
 {
     struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
     struct qed_mcp_mb_params mb_params;
     union drv_union_data union_data;
-    struct pmm_phy_cfg *phy_cfg;
+    struct eth_phy_cfg *phy_cfg;
     int rc = 0;
     u32 cmd;
 
@@ -685,9 +683,9 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
     cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
     if (!params->speed.autoneg)
         phy_cfg->speed = params->speed.forced_speed;
-    phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
-    phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
-    phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
+    phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+    phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+    phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
     phy_cfg->adv_speed = params->speed.advertised_speeds;
     phy_cfg->loopback_mode = params->loopback_mode;
 
@@ -773,6 +771,34 @@ static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
     return size;
 }
 
+int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
+              struct qed_ptt *p_ptt, u8 *p_pf)
+{
+    struct public_func shmem_info;
+    int i;
+
+    /* Find first Ethernet interface in port */
+    for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev);
+         i += p_hwfn->cdev->num_ports_in_engines) {
+        qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+                       MCP_PF_ID_BY_REL(p_hwfn, i));
+
+        if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
+            continue;
+
+        if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
+            FUNC_MF_CFG_PROTOCOL_ETHERNET) {
+            *p_pf = (u8)i;
+            return 0;
+        }
+    }
+
+    DP_NOTICE(p_hwfn,
+          "Failed to find on port an ethernet interface in MF_SI mode\n");
+
+    return -EINVAL;
+}
+
 static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
                   struct qed_ptt *p_ptt)
 {
@@ -457,4 +457,7 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      struct qed_mcp_link_state *p_link,
                      u8 min_bw);
+
+int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
+              struct qed_ptt *p_ptt, u8 *p_pf);
 #endif
@@ -167,6 +167,10 @@
     0x1800004UL
 #define NIG_REG_CM_HDR \
     0x500840UL
+#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR \
+    0x50196cUL
+#define NIG_REG_LLH_CLS_TYPE_DUALMODE \
+    0x501964UL
 #define NCSI_REG_CONFIG \
     0x040200UL
 #define PBF_REG_INIT \
@@ -219,6 +223,8 @@
     0x230000UL
 #define PRS_REG_SOFT_RST \
     0x1f0000UL
+#define PRS_REG_MSG_INFO \
+    0x1f0a1cUL
 #define PSDM_REG_ENABLE_IN1 \
     0xfa0004UL
 #define PSEM_REG_ENABLE_IN \
@@ -460,7 +466,7 @@
 #define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE    (0x1 << 2)
 #define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT    2
 
-#define NIG_REG_VXLAN_PORT    0x50105cUL
+#define NIG_REG_VXLAN_CTRL    0x50105cUL
 #define PBF_REG_VXLAN_PORT    0xd80518UL
 #define PBF_REG_NGE_PORT    0xd8051cUL
 #define PRS_REG_NGE_PORT    0x1f086cUL
@@ -332,7 +332,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
     p_ramrod->path_id = QED_PATH_ID(p_hwfn);
     p_ramrod->dont_log_ramrods = 0;
     p_ramrod->log_type_mask = cpu_to_le16(0xf);
-    p_ramrod->mf_mode = mode;
     switch (mode) {
     case QED_MF_DEFAULT:
     case QED_MF_NPAR:
@@ -368,6 +368,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
         p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
         p_ramrod->num_vfs = (u8) p_iov->total_vfs;
     }
+    p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+    p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
 
     DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
            "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
@@ -47,6 +47,8 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
     p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid);
 
     p_ramrod->personality = PERSONALITY_ETH;
+    p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+    p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
 
     return qed_spq_post(p_hwfn, p_ent, NULL);
 }
@@ -1585,10 +1587,6 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
                  sizeof(struct pfvf_def_resp_tlv), status);
 }
 
-#define TSTORM_QZONE_START    PXP_VF_BAR0_START_SDM_ZONE_A
-#define MSTORM_QZONE_START(dev)    (TSTORM_QZONE_START + \
-                 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
-
 static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt,
                       struct qed_vf_info *vf, u8 status)
@@ -1606,16 +1604,11 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
 
     /* Update the TLV with the response */
     if (status == PFVF_STATUS_SUCCESS) {
-        u16 hw_qid = 0;
-
         req = &mbx->req_virt->start_rxq;
-        qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid,
-                &hw_qid);
-
-        p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) +
-                hw_qid * MSTORM_QZONE_SIZE +
-                offsetof(struct mstorm_eth_queue_zone,
-                     rx_producers);
+        p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
+                offsetof(struct mstorm_vf_zone,
+                     non_trigger.eth_rx_queue_producers) +
+                sizeof(struct eth_rx_prod_data) * req->rx_qid;
     }
 
     qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
@@ -1634,6 +1627,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
     memset(&params, 0, sizeof(params));
     req = &mbx->req_virt->start_rxq;
     params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
+    params.vf_qid = req->rx_qid;
     params.vport_id = vf->vport_id;
     params.sb = req->hw_sb;
     params.sb_idx = req->sb_index;
@@ -910,6 +910,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	memset(first_bd, 0, sizeof(*first_bd));
 	val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
 	first_bd->data.bd_flags.bitfields = val;
+	val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
+	first_bd->data.bitfields |= (val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
 
 	/* Map skb linear data for DMA and set in the first BD */
 	mapping = dma_map_single(&edev->pdev->dev, skb->data,
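The loopback selftest now stamps the frame length into the first BD the same way the regular Tx path does below: the length is masked to 14 bits and shifted into the BD's `bitfields` word. A small standalone illustration using the mask and shift values visible in the eth_common.h hunk of this commit; the frame length here is arbitrary.

    #include <stdio.h>
    #include <stdint.h>

    /* Renamed constants from the eth_common.h hunk in this commit. */
    #define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK  0x3FFF
    #define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2

    int main(void)
    {
            uint16_t bitfields = 0;
            unsigned int skb_len = 1514;    /* arbitrary frame length */
            uint16_t val = skb_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;

            bitfields |= (uint16_t)(val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
            printf("first BD bitfields = 0x%04x (pkt len %u)\n", bitfields, skb_len);
            return 0;
    }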
@@ -577,8 +577,6 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 
 	/* Fill the parsing flags & params according to the requested offload */
 	if (xmit_type & XMIT_L4_CSUM) {
-		u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT;
-
 		/* We don't re-calculate IP checksum as it is already done by
 		 * the upper stack
 		 */
@@ -588,14 +586,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 		if (xmit_type & XMIT_ENC) {
 			first_bd->data.bd_flags.bitfields |=
 				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
-		} else {
-			/* In cases when OS doesn't indicate for inner offloads
-			 * when packet is tunnelled, we need to override the HW
-			 * tunnel configuration so that packets are treated as
-			 * regular non tunnelled packets and no inner offloads
-			 * are done by the hardware.
-			 */
-			first_bd->data.bitfields |= cpu_to_le16(temp);
+			first_bd->data.bitfields |=
+				1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
 		}
 
 		/* If the packet is IPv6 with extension header, indicate that
@@ -653,6 +645,10 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 			tx_data_bd = (struct eth_tx_bd *)third_bd;
 			data_split = true;
 		}
+	} else {
+		first_bd->data.bitfields |=
+			(skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+			ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
 	}
 
 	/* Handle fragmented skb */
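With the new firmware the driver no longer overrides the tunnel configuration for non-tunnelled packets; instead, when the stack asks for checksum offload on an encapsulated frame, it sets the TUNN_FLAG bit so the hardware performs the inner offloads. A compact sketch of that decision; the constant comes from the hunks above, while the xmit-type flag values are simplified stand-ins.

    #include <stdio.h>
    #include <stdint.h>

    #define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0   /* from eth_common.h in this commit */

    /* Simplified stand-ins for the driver's xmit_type bits. */
    #define XMIT_L4_CSUM (1 << 0)
    #define XMIT_ENC     (1 << 1)

    static uint16_t first_bd_bitfields(unsigned int xmit_type)
    {
            uint16_t bitfields = 0;

            /* Flag encapsulated frames so the HW does the inner offloads. */
            if ((xmit_type & XMIT_L4_CSUM) && (xmit_type & XMIT_ENC))
                    bitfields |= 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;

            return bitfields;
    }

    int main(void)
    {
            printf("plain csum: 0x%04x\n", first_bd_bitfields(XMIT_L4_CSUM));
            printf("encap csum: 0x%04x\n", first_bd_bitfields(XMIT_L4_CSUM | XMIT_ENC));
            return 0;
    }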
@@ -13,9 +13,19 @@
 
 #define X_FINAL_CLEANUP_AGG_INT 1
 
+/* Queue Zone sizes in bytes */
+#define TSTORM_QZONE_SIZE 8
+#define MSTORM_QZONE_SIZE 0
+#define USTORM_QZONE_SIZE 8
+#define XSTORM_QZONE_SIZE 8
+#define YSTORM_QZONE_SIZE 0
+#define PSTORM_QZONE_SIZE 0
+
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF 16
+
 #define FW_MAJOR_VERSION	8
-#define FW_MINOR_VERSION	7
-#define FW_REVISION_VERSION	3
+#define FW_MINOR_VERSION	10
+#define FW_REVISION_VERSION	5
 #define FW_ENGINEERING_VERSION	0
 
 /***********************/
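The queue-zone sizes become plain byte counts defined next to the other global HSI constants, and the firmware version the driver is built against moves from 8.7.3.0 to 8.10.5.0. A trivial sketch that assembles the version string and a per-queue TSTORM zone offset from these constants; the zone base used here is a made-up placeholder.

    #include <stdio.h>

    /* Constants from the common_hsi.h hunk above. */
    #define FW_MAJOR_VERSION       8
    #define FW_MINOR_VERSION       10
    #define FW_REVISION_VERSION    5
    #define FW_ENGINEERING_VERSION 0
    #define TSTORM_QZONE_SIZE      8

    #define EXAMPLE_TSTORM_QZONE_START 0x0  /* placeholder base address */

    int main(void)
    {
            unsigned int qid = 4;

            printf("firmware %d.%d.%d.%d\n", FW_MAJOR_VERSION, FW_MINOR_VERSION,
                   FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
            printf("tstorm zone for queue %u starts at 0x%x\n", qid,
                   EXAMPLE_TSTORM_QZONE_START + qid * TSTORM_QZONE_SIZE);
            return 0;
    }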
@@ -97,45 +107,86 @@
 #define DQ_XCM_AGG_VAL_SEL_REG6  7
 
 /* XCM agg val selection */
-#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD2
-#define DQ_XCM_ETH_TX_BD_CONS_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_CORE_TX_BD_CONS_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_ETH_TX_BD_PROD_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_CORE_TX_BD_PROD_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_CORE_SPQ_PROD_CMD \
-	DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_CORE_TX_BD_CONS_CMD	DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_CORE_TX_BD_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_SPQ_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD	DQ_XCM_AGG_VAL_SEL_WORD2
+#define DQ_XCM_ETH_TX_BD_CONS_CMD	DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ETH_TX_BD_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD	DQ_XCM_AGG_VAL_SEL_WORD5
+
+/* UCM agg val selection (HW) */
+#define DQ_UCM_AGG_VAL_SEL_WORD0	0
+#define DQ_UCM_AGG_VAL_SEL_WORD1	1
+#define DQ_UCM_AGG_VAL_SEL_WORD2	2
+#define DQ_UCM_AGG_VAL_SEL_WORD3	3
+#define DQ_UCM_AGG_VAL_SEL_REG0		4
+#define DQ_UCM_AGG_VAL_SEL_REG1		5
+#define DQ_UCM_AGG_VAL_SEL_REG2		6
+#define DQ_UCM_AGG_VAL_SEL_REG3		7
+
+/* UCM agg val selection (FW) */
+#define DQ_UCM_ETH_PMD_TX_CONS_CMD	DQ_UCM_AGG_VAL_SEL_WORD2
+#define DQ_UCM_ETH_PMD_RX_CONS_CMD	DQ_UCM_AGG_VAL_SEL_WORD3
+#define DQ_UCM_ROCE_CQ_CONS_CMD		DQ_UCM_AGG_VAL_SEL_REG0
+#define DQ_UCM_ROCE_CQ_PROD_CMD		DQ_UCM_AGG_VAL_SEL_REG2
+
+/* TCM agg val selection (HW) */
+#define DQ_TCM_AGG_VAL_SEL_WORD0	0
+#define DQ_TCM_AGG_VAL_SEL_WORD1	1
+#define DQ_TCM_AGG_VAL_SEL_WORD2	2
+#define DQ_TCM_AGG_VAL_SEL_WORD3	3
+#define DQ_TCM_AGG_VAL_SEL_REG1		4
+#define DQ_TCM_AGG_VAL_SEL_REG2		5
+#define DQ_TCM_AGG_VAL_SEL_REG6		6
+#define DQ_TCM_AGG_VAL_SEL_REG9		7
+
+/* TCM agg val selection (FW) */
+#define DQ_TCM_L2B_BD_PROD_CMD \
+	DQ_TCM_AGG_VAL_SEL_WORD1
+#define DQ_TCM_ROCE_RQ_PROD_CMD \
+	DQ_TCM_AGG_VAL_SEL_WORD0
 
 /* XCM agg counter flag selection */
 #define DQ_XCM_AGG_FLG_SHIFT_BIT14	0
 #define DQ_XCM_AGG_FLG_SHIFT_BIT15	1
 #define DQ_XCM_AGG_FLG_SHIFT_CF12	2
 #define DQ_XCM_AGG_FLG_SHIFT_CF13	3
 #define DQ_XCM_AGG_FLG_SHIFT_CF18	4
 #define DQ_XCM_AGG_FLG_SHIFT_CF19	5
 #define DQ_XCM_AGG_FLG_SHIFT_CF22	6
 #define DQ_XCM_AGG_FLG_SHIFT_CF23	7
 
 /* XCM agg counter flag selection */
-#define DQ_XCM_ETH_DQ_CF_CMD (1 << \
-			      DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_CORE_DQ_CF_CMD (1 << \
-			       DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_ETH_TERMINATE_CMD (1 << \
-				  DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_CORE_TERMINATE_CMD (1 << \
-				   DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \
-				  DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \
-				   DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_TPH_EN_CMD (1 << \
-			       DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_CORE_DQ_CF_CMD		(1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_TERMINATE_CMD	(1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_SLOW_PATH_CMD	(1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_DQ_CF_CMD		(1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD	(1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD	(1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD		(1 << DQ_XCM_AGG_FLG_SHIFT_CF23)
+
+/* UCM agg counter flag selection (HW) */
+#define DQ_UCM_AGG_FLG_SHIFT_CF0	0
+#define DQ_UCM_AGG_FLG_SHIFT_CF1	1
+#define DQ_UCM_AGG_FLG_SHIFT_CF3	2
+#define DQ_UCM_AGG_FLG_SHIFT_CF4	3
+#define DQ_UCM_AGG_FLG_SHIFT_CF5	4
+#define DQ_UCM_AGG_FLG_SHIFT_CF6	5
+#define DQ_UCM_AGG_FLG_SHIFT_RULE0EN	6
+#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN	7
+
+/* UCM agg counter flag selection (FW) */
+#define DQ_UCM_ETH_PMD_TX_ARM_CMD	(1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ETH_PMD_RX_ARM_CMD	(1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
+
+#define DQ_REGION_SHIFT			(12)
+
+/* DPM */
+#define DQ_DPM_WQE_BUFF_SIZE		(320)
+
+/* Conn type ranges */
+#define DQ_CONN_TYPE_RANGE_SHIFT	(4)
 
 /*****************/
 /* QM CONSTANTS */
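Each *_CMD macro now reads as a one-liner: the value-selection commands name the aggregated word or register a doorbell updates, and the counter-flag commands are single bits that can be OR-ed together. A hedged sketch of combining two of the Ethernet XCM flag commands into one doorbell flags byte; the values are copied from the hunk, but the doorbell write itself is not modelled.

    #include <stdio.h>

    /* Flag-shift and command values from the common_hsi.h hunk above. */
    #define DQ_XCM_AGG_FLG_SHIFT_CF18 4
    #define DQ_XCM_AGG_FLG_SHIFT_CF23 7

    #define DQ_XCM_ETH_DQ_CF_CMD  (1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
    #define DQ_XCM_ETH_TPH_EN_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF23)

    int main(void)
    {
            unsigned char agg_flags = 0;

            /* Request a DQ counter-flag update and TPH enable in one doorbell. */
            agg_flags |= DQ_XCM_ETH_DQ_CF_CMD | DQ_XCM_ETH_TPH_EN_CMD;

            printf("agg_flags = 0x%02x\n", agg_flags);
            return 0;
    }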
@@ -282,8 +333,6 @@
 	(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
 	 PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
 
-#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN	12
-#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER	1024
 
 #define PXP_VF_BAR0_START_IGU	0
 #define PXP_VF_BAR0_IGU_LENGTH	0x3000
@@ -342,6 +391,9 @@
 
 #define PXP_VF_BAR0_GRC_WINDOW_LENGTH	32
 
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN	12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER	1024
+
 /* ILT Records */
 #define PXP_NUM_ILT_RECORDS_BB	7600
 #define PXP_NUM_ILT_RECORDS_K2	11000
@@ -379,6 +431,38 @@ struct async_data {
 	u8 fw_debug_param;
 };
 
+struct coalescing_timeset {
+	u8 value;
+#define COALESCING_TIMESET_TIMESET_MASK		0x7F
+#define COALESCING_TIMESET_TIMESET_SHIFT	0
+#define COALESCING_TIMESET_VALID_MASK		0x1
+#define COALESCING_TIMESET_VALID_SHIFT		7
+};
+
+struct common_prs_pf_msg_info {
+	__le32 value;
+#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_MASK	0x1
+#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_SHIFT	0
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_MASK		0x1
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_SHIFT		1
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_MASK		0x1
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_SHIFT		2
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_MASK		0x1
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_SHIFT		3
+#define COMMON_PRS_PF_MSG_INFO_RESERVED_MASK		0xFFFFFFF
+#define COMMON_PRS_PF_MSG_INFO_RESERVED_SHIFT		4
+};
+
+struct common_queue_zone {
+	__le16 ring_drv_data_consumer;
+	__le16 reserved;
+};
+
+struct eth_rx_prod_data {
+	__le16 bd_prod;
+	__le16 cqe_prod;
+};
+
 struct regpair {
 	__le32 lo;
 	__le32 hi;
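The new `coalescing_timeset` squeezes a 7-bit timeset and a 1-bit valid flag into a single byte through the mask/shift pairs above. A standalone round-trip of that packing; the masks and shifts are copied from the hunk, while the helper macros are local stand-ins for the driver's field accessors.

    #include <stdio.h>
    #include <stdint.h>

    #define COALESCING_TIMESET_TIMESET_MASK  0x7F
    #define COALESCING_TIMESET_TIMESET_SHIFT 0
    #define COALESCING_TIMESET_VALID_MASK    0x1
    #define COALESCING_TIMESET_VALID_SHIFT   7

    /* Local stand-ins for the driver's SET_FIELD/GET_FIELD helpers. */
    #define EX_SET_FIELD(val, name, set) \
            ((val) | (((set) & name##_MASK) << name##_SHIFT))
    #define EX_GET_FIELD(val, name) \
            (((val) >> name##_SHIFT) & name##_MASK)

    int main(void)
    {
            uint8_t value = 0;

            value = EX_SET_FIELD(value, COALESCING_TIMESET_TIMESET, 0x2a);
            value = EX_SET_FIELD(value, COALESCING_TIMESET_VALID, 1);

            printf("raw 0x%02x, timeset %u, valid %u\n", value,
                   (unsigned)EX_GET_FIELD(value, COALESCING_TIMESET_TIMESET),
                   (unsigned)EX_GET_FIELD(value, COALESCING_TIMESET_VALID));
            return 0;
    }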
@@ -388,11 +472,23 @@ struct vf_pf_channel_eqe_data {
 	struct regpair msg_addr;
 };
 
+struct malicious_vf_eqe_data {
+	u8 vf_id;
+	u8 err_id;
+	__le16 reserved[3];
+};
+
+struct initial_cleanup_eqe_data {
+	u8 vf_id;
+	u8 reserved[7];
+};
+
 /* Event Data Union */
 union event_ring_data {
 	u8 bytes[8];
 	struct vf_pf_channel_eqe_data vf_pf_channel;
-	struct async_data async_info;
+	struct malicious_vf_eqe_data malicious_vf;
+	struct initial_cleanup_eqe_data vf_init_cleanup;
 };
 
 /* Event Ring Entry */
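`union event_ring_data` now carries typed payloads for malicious-VF notifications and VF initial-cleanup completions, so a slow-path event handler can pick the member that matches the event it received. A hedged dispatch sketch follows; the union and struct layouts come from the hunk, while the event-type enum and the handler are illustrative only.

    #include <stdio.h>
    #include <stdint.h>

    struct malicious_vf_eqe_data {          /* layout as added in this commit */
            uint8_t  vf_id;
            uint8_t  err_id;
            uint16_t reserved[3];
    };

    struct initial_cleanup_eqe_data {       /* layout as added in this commit */
            uint8_t vf_id;
            uint8_t reserved[7];
    };

    union event_ring_data_example {
            uint8_t bytes[8];
            struct malicious_vf_eqe_data malicious_vf;
            struct initial_cleanup_eqe_data vf_init_cleanup;
    };

    /* Hypothetical event types; the real opcodes live in the qed HSI headers. */
    enum example_eqe_type { EX_EQE_MALICIOUS_VF, EX_EQE_VF_FLR_CLEANUP };

    static void handle_eqe(enum example_eqe_type type,
                           const union event_ring_data_example *data)
    {
            if (type == EX_EQE_MALICIOUS_VF)
                    printf("VF %u flagged malicious, err %u\n",
                           data->malicious_vf.vf_id, data->malicious_vf.err_id);
            else
                    printf("initial cleanup done for VF %u\n",
                           data->vf_init_cleanup.vf_id);
    }

    int main(void)
    {
            union event_ring_data_example data = { .malicious_vf = { 7, 2, {0} } };

            handle_eqe(EX_EQE_MALICIOUS_VF, &data);
            return 0;
    }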
@@ -433,6 +529,16 @@ enum protocol_type {
 	MAX_PROTOCOL_TYPE
 };
 
+struct ustorm_eth_queue_zone {
+	struct coalescing_timeset int_coalescing_timeset;
+	u8 reserved[3];
+};
+
+struct ustorm_queue_zone {
+	struct ustorm_eth_queue_zone eth;
+	struct common_queue_zone common;
+};
+
 /* status block structure */
 struct cau_pi_entry {
 	u32 prod;
@@ -683,19 +789,4 @@ struct status_block {
 #define STATUS_BLOCK_ZERO_PAD3_SHIFT	24
 };
 
-struct tunnel_parsing_flags {
-	u8 flags;
-#define TUNNEL_PARSING_FLAGS_TYPE_MASK			0x3
-#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT			0
-#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK	0x1
-#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT	2
-#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK		0x3
-#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT	3
-#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK	0x1
-#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT	5
-#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK		0x1
-#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT	6
-#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK		0x1
-#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT		7
-};
 #endif /* __COMMON_HSI__ */
@@ -12,6 +12,8 @@
 /********************/
 /* ETH FW CONSTANTS */
 /********************/
+#define ETH_HSI_VER_MAJOR	3
+#define ETH_HSI_VER_MINOR	0
 #define ETH_CACHE_LINE_SIZE	64
 
 #define ETH_MAX_RAMROD_PER_CON	8
@@ -57,19 +59,6 @@
 #define ETH_TPA_CQE_CONT_LEN_LIST_SIZE	6
 #define ETH_TPA_CQE_END_LEN_LIST_SIZE	4
 
-/* Queue Zone sizes */
-#define TSTORM_QZONE_SIZE 0
-#define MSTORM_QZONE_SIZE sizeof(struct mstorm_eth_queue_zone)
-#define USTORM_QZONE_SIZE sizeof(struct ustorm_eth_queue_zone)
-#define XSTORM_QZONE_SIZE 0
-#define YSTORM_QZONE_SIZE sizeof(struct ystorm_eth_queue_zone)
-#define PSTORM_QZONE_SIZE 0
-
-/* Interrupt coalescing TimeSet */
-struct coalescing_timeset {
-	u8 timeset;
-	u8 valid;
-};
-
 struct eth_tx_1st_bd_flags {
 	u8 bitfields;
@@ -97,12 +86,12 @@ struct eth_tx_data_1st_bd {
 	u8 nbds;
 	struct eth_tx_1st_bd_flags bd_flags;
 	__le16 bitfields;
-#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK	0x1
-#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT	0
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK	0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT	0
 #define ETH_TX_DATA_1ST_BD_RESERVED0_MASK	0x1
 #define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT	1
-#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK	0x3FFF
-#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT	2
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK		0x3FFF
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT	2
 };
 
 /* The parsing information data for the second tx bd of a given packet. */
@@ -136,28 +125,51 @@ struct eth_tx_data_2nd_bd {
 #define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT	13
 };
 
+struct eth_fast_path_cqe_fw_debug {
+	u8 reserved0;
+	u8 reserved1;
+	__le16 reserved2;
+};
+
+/* tunneling parsing flags */
+struct eth_tunnel_parsing_flags {
+	u8 flags;
+#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK		0x3
+#define ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT		0
+#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK	0x1
+#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT	2
+#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK	0x3
+#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT	3
+#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK	0x1
+#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT	5
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK	0x1
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT	6
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK	0x1
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT	7
+};
+
 /* Regular ETH Rx FP CQE. */
 struct eth_fast_path_rx_reg_cqe {
 	u8 type;
 	u8 bitfields;
 #define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK	0x7
 #define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT	0
 #define ETH_FAST_PATH_RX_REG_CQE_TC_MASK		0xF
 #define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT		3
 #define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK		0x1
 #define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT	7
 	__le16 pkt_len;
 	struct parsing_and_err_flags pars_flags;
 	__le16 vlan_tag;
 	__le32 rss_hash;
 	__le16 len_on_first_bd;
 	u8 placement_offset;
-	struct tunnel_parsing_flags tunnel_pars_flags;
+	struct eth_tunnel_parsing_flags tunnel_pars_flags;
 	u8 bd_num;
 	u8 reserved[7];
-	u32 fw_debug;
+	struct eth_fast_path_cqe_fw_debug fw_debug;
 	u8 reserved1[3];
 	u8 flags;
 #define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK		0x1
 #define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT		0
 #define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK	0x1
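`eth_tunnel_parsing_flags` folds the tunnel classification of a received frame into one byte: two bits of tunnel type, a tenant-ID-present bit, next-protocol bits, and IPv4 fragment/options bits. A small extraction sketch using the masks and shifts from the hunk; the sample flags byte and the GET helper are illustrative stand-ins.

    #include <stdio.h>
    #include <stdint.h>

    #define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK              0x3
    #define ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT             0
    #define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK  0x1
    #define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
    #define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK     0x3
    #define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT    3

    /* Local stand-in for a GET_FIELD-style accessor. */
    #define EX_GET(flags, name) \
            (((flags) >> ETH_TUNNEL_PARSING_FLAGS_##name##_SHIFT) & \
             ETH_TUNNEL_PARSING_FLAGS_##name##_MASK)

    int main(void)
    {
            uint8_t flags = 0x0d;   /* arbitrary sample CQE flags byte */

            printf("tunnel type %u, tenant id present %u, next protocol %u\n",
                   (unsigned)EX_GET(flags, TYPE),
                   (unsigned)EX_GET(flags, TENNANT_ID_EXIST),
                   (unsigned)EX_GET(flags, NEXT_PROTOCOL));
            return 0;
    }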
@@ -207,11 +219,11 @@ struct eth_fast_path_rx_tpa_start_cqe {
 	__le32 rss_hash;
 	__le16 len_on_first_bd;
 	u8 placement_offset;
-	struct tunnel_parsing_flags tunnel_pars_flags;
+	struct eth_tunnel_parsing_flags tunnel_pars_flags;
 	u8 tpa_agg_index;
 	u8 header_len;
 	__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
-	u32 fw_debug;
+	struct eth_fast_path_cqe_fw_debug fw_debug;
 };
 
 /* The L4 pseudo checksum mode for Ethernet */
@@ -264,12 +276,25 @@ enum eth_rx_cqe_type {
 	MAX_ETH_RX_CQE_TYPE
 };
 
-/* ETH Rx producers data */
-struct eth_rx_prod_data {
-	__le16 bd_prod;
-	__le16 cqe_prod;
-	__le16 reserved;
-	__le16 reserved1;
+enum eth_rx_tunn_type {
+	ETH_RX_NO_TUNN,
+	ETH_RX_TUNN_GENEVE,
+	ETH_RX_TUNN_GRE,
+	ETH_RX_TUNN_VXLAN,
+	MAX_ETH_RX_TUNN_TYPE
+};
+
+/* Aggregation end reason. */
+enum eth_tpa_end_reason {
+	ETH_AGG_END_UNUSED,
+	ETH_AGG_END_SP_UPDATE,
+	ETH_AGG_END_MAX_LEN,
+	ETH_AGG_END_LAST_SEG,
+	ETH_AGG_END_TIMEOUT,
+	ETH_AGG_END_NOT_CONSISTENT,
+	ETH_AGG_END_OUT_OF_ORDER,
+	ETH_AGG_END_NON_TPA_SEG,
+	MAX_ETH_TPA_END_REASON
 };
 
 /* The first tx bd of a given packet */
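The new Rx-side enum names the tunnel encapsulations the firmware can report, and `eth_tpa_end_reason` enumerates why an aggregation was closed. A tiny sketch mapping the Rx tunnel type to a printable name for debug logging; the enum values come from the hunk, the lookup itself is illustrative.

    #include <stdio.h>

    enum eth_rx_tunn_type {         /* as added in this commit */
            ETH_RX_NO_TUNN,
            ETH_RX_TUNN_GENEVE,
            ETH_RX_TUNN_GRE,
            ETH_RX_TUNN_VXLAN,
            MAX_ETH_RX_TUNN_TYPE
    };

    static const char *rx_tunn_name(enum eth_rx_tunn_type t)
    {
            static const char * const names[MAX_ETH_RX_TUNN_TYPE] = {
                    "none", "geneve", "gre", "vxlan"
            };

            return t < MAX_ETH_RX_TUNN_TYPE ? names[t] : "unknown";
    }

    int main(void)
    {
            printf("rx tunnel: %s\n", rx_tunn_name(ETH_RX_TUNN_VXLAN));
            return 0;
    }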
@@ -337,21 +362,18 @@ union eth_tx_bd_types {
 };
 
 /* Mstorm Queue Zone */
-struct mstorm_eth_queue_zone {
-	struct eth_rx_prod_data rx_producers;
-	__le32 reserved[2];
-};
-
-/* Ustorm Queue Zone */
-struct ustorm_eth_queue_zone {
-	struct coalescing_timeset int_coalescing_timeset;
-	__le16 reserved[3];
+enum eth_tx_tunn_type {
+	ETH_TX_TUNN_GENEVE,
+	ETH_TX_TUNN_TTAG,
+	ETH_TX_TUNN_GRE,
+	ETH_TX_TUNN_VXLAN,
+	MAX_ETH_TX_TUNN_TYPE
 };
 
 /* Ystorm Queue Zone */
-struct ystorm_eth_queue_zone {
+struct xstorm_eth_queue_zone {
 	struct coalescing_timeset int_coalescing_timeset;
-	__le16 reserved[3];
+	u8 reserved[7];
 };
 
 /* ETH doorbell data */
@@ -113,6 +113,7 @@ struct qed_queue_start_common_params {
 	u8 vport_id;
 	u16 sb;
 	u16 sb_idx;
+	u16 vf_qid;
 };
 
 struct qed_tunn_params {