// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define CREATE_TRACE_POINTS
#include "hclge_trace.h"

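/* A mailbox response carries its status in an unsigned 16-bit field, so a
 * negative kernel errno is reported to the VF as its absolute value.
 */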
static u16 hclge_errno_to_resp(int errno)
{
	return abs(errno);
}

/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
 * receives a mailbox message from VF.
 * @vport: pointer to struct hclge_vport
 * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
 *		  message
 * @resp_msg: pointer to hclge_respond_to_vf_msg, holding the status and
 *	      data that are returned to the VF
 */
static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
				struct hclge_respond_to_vf_msg *resp_msg)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;
	u16 resp;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	if (resp_msg->len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
		dev_err(&hdev->pdev->dev,
			"PF fail to gen resp to VF len %u exceeds max len %u\n",
			resp_msg->len,
			HCLGE_MBX_MAX_RESP_DATA_SIZE);
		/* If resp_msg->len is too long, set the value to max length
		 * and return the msg to VF
		 */
		resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
	resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;

	resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP;
	resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code;
	resp_pf_to_vf->msg.vf_mbx_msg_subcode = vf_to_pf_req->msg.subcode;
	resp = hclge_errno_to_resp(resp_msg->status);
	if (resp < SHRT_MAX) {
		resp_pf_to_vf->msg.resp_status = resp;
	} else {
		dev_warn(&hdev->pdev->dev,
			 "failed to send response to VF, response status %d is out-of-bound\n",
			 resp);
		resp_pf_to_vf->msg.resp_status = EIO;
	}

	if (resp_msg->len > 0)
		memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data,
		       resp_msg->len);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"failed to send response to VF, status: %d, vfid: %u, code: %u, subcode: %u.\n",
			status, vf_to_pf_req->mbx_src_vfid,
			vf_to_pf_req->msg.code, vf_to_pf_req->msg.subcode);

	return status;
}

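/* hclge_send_mbx_msg: send a PF-initiated mailbox message (e.g. a link
 * status or reset notification) to a VF; unlike hclge_gen_resp_to_vf(), it
 * is not a reply to a pending VF request.
 */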
static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
			      u16 mbx_opcode, u8 dest_vfid)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = dest_vfid;
	resp_pf_to_vf->msg_len = msg_len;
	resp_pf_to_vf->msg.code = mbx_opcode;

	memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);

	trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"failed to send mailbox to VF, status: %d, vfid: %u, opcode: %u\n",
			status, dest_vfid, mbx_opcode);

	return status;
}

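/* hclge_inform_reset_assert_to_vf: tell the VF which reset type to assert,
 * mapping the PF-level reset type to the corresponding VF-level one.
 */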
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u16 reset_type;
	u8 msg_data[2];
	u8 dest_vfid;

	BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);

	dest_vfid = (u8)vport->vport_id;

	if (hdev->reset_type == HNAE3_FUNC_RESET)
		reset_type = HNAE3_VF_PF_FUNC_RESET;
	else if (hdev->reset_type == HNAE3_FLR_RESET)
		reset_type = HNAE3_VF_FULL_RESET;
	else
		reset_type = HNAE3_VF_FUNC_RESET;

	memcpy(&msg_data[0], &reset_type, sizeof(u16));

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}

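/* hclge_free_vector_ring_chain: free every node chained behind @head; the
 * head node itself is owned by the caller (typically on its stack) and is
 * not freed here.
 */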
static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
{
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		kfree_sensitive(chain);
		chain = chain_tmp;
	}
}

/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
 * from mailbox message
 * msg[0]: opcode
 * msg[1]: <not relevant to this function>
 * msg[2]: ring_num
 * msg[3]: first ring type (TX|RX)
 * msg[4]: first tqp id
 * msg[5]: first int_gl idx
 * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
 */
static int hclge_get_ring_chain_from_mbx(
			struct hclge_mbx_vf_to_pf_cmd *req,
			struct hnae3_ring_chain_node *ring_chain,
			struct hclge_vport *vport)
{
	struct hnae3_ring_chain_node *cur_chain, *new_chain;
	int ring_num;
	int i = 0;

	ring_num = req->msg.ring_num;

	if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
		return -ENOMEM;

	hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
		      req->msg.param[i].ring_type);
	ring_chain->tqp_index =
		hclge_get_queue_id(vport->nic.kinfo.tqp
				   [req->msg.param[i].tqp_index]);
	hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
			HNAE3_RING_GL_IDX_S, req->msg.param[i].int_gl_index);

	cur_chain = ring_chain;

	for (i = 1; i < ring_num; i++) {
		new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
		if (!new_chain)
			goto err;

		hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
			      req->msg.param[i].ring_type);

		new_chain->tqp_index =
			hclge_get_queue_id(vport->nic.kinfo.tqp
					   [req->msg.param[i].tqp_index]);

		hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S,
				req->msg.param[i].int_gl_index);

		cur_chain->next = new_chain;
		cur_chain = new_chain;
	}

	return 0;
err:
	hclge_free_vector_ring_chain(ring_chain);
	return -ENOMEM;
}

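/* hclge_map_unmap_ring_to_vf_vector: map (@en true) or unmap (@en false)
 * the rings described in the mailbox message to/from the VF vector given in
 * the request.
 */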
static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
					     struct hclge_mbx_vf_to_pf_cmd *req)
{
	struct hnae3_ring_chain_node ring_chain;
	int vector_id = req->msg.vector_id;
	int ret;

	memset(&ring_chain, 0, sizeof(ring_chain));
	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
	if (ret)
		return ret;

	ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);

	hclge_free_vector_ring_chain(&ring_chain);

	return ret;
}

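/* hclge_set_vf_promisc_mode: apply the promiscuous mode requested by the
 * VF; unicast/multicast promiscuous mode is only honoured for trusted VFs.
 */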
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *req)
{
	bool en_bc = req->msg.en_bc ? true : false;
	bool en_uc = req->msg.en_uc ? true : false;
	bool en_mc = req->msg.en_mc ? true : false;
	struct hnae3_handle *handle = &vport->nic;
	int ret;

	if (!vport->vf_info.trusted) {
		en_uc = false;
		en_mc = false;
	}

	if (req->msg.en_limit_promisc)
		set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags);
	else
		clear_bit(HNAE3_PFLAG_LIMIT_PROMISC,
			  &handle->priv_flags);

	ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc);

	vport->vf_info.promisc_enable = (en_uc || en_mc) ? 1 : 0;

	return ret;
}

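/* hclge_inform_vf_promisc_info: push the vport's current promiscuous
 * setting to its VF.
 */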
void hclge_inform_vf_promisc_info(struct hclge_vport *vport)
{
	u8 dest_vfid = (u8)vport->vport_id;
	u8 msg_data[2];

	memcpy(&msg_data[0], &vport->vf_info.promisc_enable, sizeof(u16));

	hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
			   HCLGE_MBX_PUSH_PROMISC_INFO, dest_vfid);
}

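/* hclge_set_vf_uc_mac_addr: handle a VF request to modify, add or remove a
 * unicast MAC address; the change is recorded in the vport MAC list and
 * applied to hardware later by the service task.
 */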
static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET 6

	const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
		const u8 *old_addr = (const u8 *)
		(&mbx_req->msg.data[HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET]);

		/* If VF MAC has been configured by the host then it
		 * cannot be overridden by the MAC specified by the VM.
		 */
		if (!is_zero_ether_addr(vport->vf_info.mac) &&
		    !ether_addr_equal(mac_addr, vport->vf_info.mac))
			return -EPERM;

		if (!is_valid_ether_addr(mac_addr))
			return -EINVAL;

		spin_lock_bh(&vport->mac_list_lock);
		status = hclge_update_mac_node_for_dev_addr(vport, old_addr,
							    mac_addr);
		spin_unlock_bh(&vport->mac_list_lock);
		hclge_task_schedule(hdev, 0);
	} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) {
		status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
					       HCLGE_MAC_ADDR_UC, mac_addr);
	} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
		status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
					       HCLGE_MAC_ADDR_UC, mac_addr);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set unicast mac addr, unknown subcode %u\n",
			mbx_req->msg.subcode);
		return -EIO;
	}

	return status;
}

static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
	struct hclge_dev *hdev = vport->back;

	if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) {
		hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
				      HCLGE_MAC_ADDR_MC, mac_addr);
	} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
		hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
				      HCLGE_MAC_ADDR_MC, mac_addr);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set mcast mac addr, unknown subcode %u\n",
			mbx_req->msg.subcode);
		return -EIO;
	}

	return 0;
}

int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
				      u16 state, u16 vlan_tag, u16 qos,
				      u16 vlan_proto)
{
#define MSG_DATA_SIZE	8

	u8 msg_data[MSG_DATA_SIZE];

	memcpy(&msg_data[0], &state, sizeof(u16));
	memcpy(&msg_data[2], &vlan_proto, sizeof(u16));
	memcpy(&msg_data[4], &qos, sizeof(u16));
	memcpy(&msg_data[6], &vlan_tag, sizeof(u16));

	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_PUSH_VLAN_INFO, vfid);
}

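/* hclge_set_vf_vlan_cfg: dispatch a VF VLAN request by subcode: VLAN filter
 * add/kill, RX VLAN tag stripping on/off, port based VLAN configuration, or
 * port based VLAN state query.
 */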
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
				 struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				 struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_MBX_VLAN_STATE_OFFSET	0
#define HCLGE_MBX_VLAN_INFO_OFFSET	2

	struct hclge_vf_vlan_cfg *msg_cmd;
	int status = 0;

	msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
	if (msg_cmd->subcode == HCLGE_MBX_VLAN_FILTER) {
		struct hnae3_handle *handle = &vport->nic;
		u16 vlan, proto;
		bool is_kill;

		is_kill = !!msg_cmd->is_kill;
		vlan = msg_cmd->vlan;
		proto = msg_cmd->proto;
		status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
					       vlan, is_kill);
	} else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
		struct hnae3_handle *handle = &vport->nic;
		bool en = msg_cmd->is_kill ? true : false;

		status = hclge_en_hw_strip_rxvtag(handle, en);
	} else if (msg_cmd->subcode == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
		struct hclge_vlan_info *vlan_info;
		u16 *state;

		state = (u16 *)&mbx_req->msg.data[HCLGE_MBX_VLAN_STATE_OFFSET];
		vlan_info = (struct hclge_vlan_info *)
			    &mbx_req->msg.data[HCLGE_MBX_VLAN_INFO_OFFSET];
		status = hclge_update_port_base_vlan_cfg(vport, *state,
							 vlan_info);
	} else if (msg_cmd->subcode == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);

		/* vf does not need to know about the port based VLAN state
		 * on device HNAE3_DEVICE_VERSION_V3. So always return disable
		 * on device HNAE3_DEVICE_VERSION_V3 if vf queries the port
		 * based VLAN state.
		 */
		resp_msg->data[0] =
			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ?
			HNAE3_PORT_BASE_VLAN_DISABLE :
			vport->port_base_vlan_cfg.state;
		resp_msg->len = sizeof(u8);
	}

	return status;
}

static int hclge_set_vf_alive(struct hclge_vport *vport,
			      struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	bool alive = !!mbx_req->msg.data[0];
	int ret = 0;

	if (alive)
		ret = hclge_vport_start(vport);
	else
		hclge_vport_stop(vport);

	return ret;
}

static void hclge_get_vf_tcinfo(struct hclge_vport *vport,
				struct hclge_respond_to_vf_msg *resp_msg)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	unsigned int i;

	for (i = 0; i < kinfo->tc_info.num_tc; i++)
		resp_msg->data[0] |= BIT(i);

	resp_msg->len = sizeof(u8);
}

static void hclge_get_vf_queue_info(struct hclge_vport *vport,
				    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_RSS_INFO_LEN		6
#define HCLGE_TQPS_ALLOC_OFFSET		0
#define HCLGE_TQPS_RSS_SIZE_OFFSET	2
#define HCLGE_TQPS_RX_BUFFER_LEN_OFFSET	4

	struct hclge_dev *hdev = vport->back;

	/* get the queue related info */
	memcpy(&resp_msg->data[HCLGE_TQPS_ALLOC_OFFSET],
	       &vport->alloc_tqps, sizeof(u16));
	memcpy(&resp_msg->data[HCLGE_TQPS_RSS_SIZE_OFFSET],
	       &vport->nic.kinfo.rss_size, sizeof(u16));
	memcpy(&resp_msg->data[HCLGE_TQPS_RX_BUFFER_LEN_OFFSET],
	       &hdev->rx_buf_len, sizeof(u16));
	resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN;
}

static void hclge_get_vf_mac_addr(struct hclge_vport *vport,
				  struct hclge_respond_to_vf_msg *resp_msg)
{
	ether_addr_copy(resp_msg->data, vport->vf_info.mac);
	resp_msg->len = ETH_ALEN;
}

static void hclge_get_vf_queue_depth(struct hclge_vport *vport,
				     struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_DEPTH_INFO_LEN	4
#define HCLGE_TQPS_NUM_TX_DESC_OFFSET	0
#define HCLGE_TQPS_NUM_RX_DESC_OFFSET	2

	struct hclge_dev *hdev = vport->back;

	/* get the queue depth info */
	memcpy(&resp_msg->data[HCLGE_TQPS_NUM_TX_DESC_OFFSET],
	       &hdev->num_tx_desc, sizeof(u16));
	memcpy(&resp_msg->data[HCLGE_TQPS_NUM_RX_DESC_OFFSET],
	       &hdev->num_rx_desc, sizeof(u16));
	resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN;
}

static void hclge_get_vf_media_type(struct hclge_vport *vport,
				    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_VF_MEDIA_TYPE_OFFSET	0
#define HCLGE_VF_MODULE_TYPE_OFFSET	1
#define HCLGE_VF_MEDIA_TYPE_LENGTH	2

	struct hclge_dev *hdev = vport->back;

	resp_msg->data[HCLGE_VF_MEDIA_TYPE_OFFSET] =
		hdev->hw.mac.media_type;
	resp_msg->data[HCLGE_VF_MODULE_TYPE_OFFSET] =
		hdev->hw.mac.module_type;
	resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH;
}

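/* hclge_get_link_info: report link status, speed and duplex to the VF,
 * honouring an administratively forced VF link state if one is set.
 */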
static int hclge_get_link_info(struct hclge_vport *vport,
			       struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_VF_LINK_STATE_UP		1U
#define HCLGE_VF_LINK_STATE_DOWN	0U

	struct hclge_dev *hdev = vport->back;
	u16 link_status;
	u8 msg_data[8];
	u8 dest_vfid;
	u16 duplex;

	/* mac.link can only be 0 or 1 */
	switch (vport->vf_info.link_state) {
	case IFLA_VF_LINK_STATE_ENABLE:
		link_status = HCLGE_VF_LINK_STATE_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		link_status = HCLGE_VF_LINK_STATE_DOWN;
		break;
	case IFLA_VF_LINK_STATE_AUTO:
	default:
		link_status = (u16)hdev->hw.mac.link;
		break;
	}

	duplex = hdev->hw.mac.duplex;
	memcpy(&msg_data[0], &link_status, sizeof(u16));
	memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
	memcpy(&msg_data[6], &duplex, sizeof(u16));
	dest_vfid = mbx_req->mbx_src_vfid;

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
}

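/* hclge_get_link_mode: send the PF's supported or advertised link mode
 * bitmap to the VF, selected by the first data byte of the request.
 */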
static void hclge_get_link_mode(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_SUPPORTED	1
	struct hclge_dev *hdev = vport->back;
	unsigned long advertising;
	unsigned long supported;
	unsigned long send_data;
	u8 msg_data[10];
	u8 dest_vfid;

	advertising = hdev->hw.mac.advertising[0];
	supported = hdev->hw.mac.supported[0];
	dest_vfid = mbx_req->mbx_src_vfid;
	msg_data[0] = mbx_req->msg.data[0];

	send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;

	memcpy(&msg_data[2], &send_data, sizeof(unsigned long));
	hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
			   HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
}

static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	u16 queue_id;

	memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));

	hclge_reset_vf_queue(vport, queue_id);
}

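/* hclge_reset_vf: ask the firmware to function-reset the requesting VF;
 * the VF enters its own reset process before sending this request.
 */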
static int hclge_reset_vf(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
		 vport->vport_id);

	return hclge_func_reset_cmd(hdev, vport->vport_id);
}

static void hclge_vf_keep_alive(struct hclge_vport *vport)
{
	vport->last_active_jiffies = jiffies;
}

static int hclge_set_vf_mtu(struct hclge_vport *vport,
			    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	u32 mtu;

	memcpy(&mtu, mbx_req->msg.data, sizeof(mtu));

	return hclge_set_vport_mtu(vport, mtu);
}

static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				     struct hclge_respond_to_vf_msg *resp_msg)
{
	u16 queue_id, qid_in_pf;

	memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
	qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
	memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
	resp_msg->len = sizeof(qid_in_pf);
}

static void hclge_get_rss_key(struct hclge_vport *vport,
			      struct hclge_mbx_vf_to_pf_cmd *mbx_req,
			      struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RSS_MBX_RESP_LEN	8
	struct hclge_dev *hdev = vport->back;
	u8 index;

	index = mbx_req->msg.data[0];

	memcpy(resp_msg->data,
	       &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
	       HCLGE_RSS_MBX_RESP_LEN);
	resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
}

static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
{
	switch (link_fail_code) {
	case HCLGE_LF_REF_CLOCK_LOST:
		dev_warn(&hdev->pdev->dev, "Reference clock lost!\n");
		break;
	case HCLGE_LF_XSFP_TX_DISABLE:
		dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n");
		break;
	case HCLGE_LF_XSFP_ABSENT:
		dev_warn(&hdev->pdev->dev, "SFP is absent!\n");
		break;
	default:
		break;
	}
}

static void hclge_handle_link_change_event(struct hclge_dev *hdev,
					   struct hclge_mbx_vf_to_pf_cmd *req)
{
	hclge_task_schedule(hdev, 0);

	if (!req->msg.subcode)
		hclge_link_fail_parse(hdev, req->msg.data[0]);
}

static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
	u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);

	return tail == hw->cmq.crq.next_to_use;
}

static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;

	ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET);
	dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n");
	ae_dev->ops->reset_event(hdev->pdev, NULL);
}

static void hclge_handle_vf_tbl(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_vf_vlan_cfg *msg_cmd;

	msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
	if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) {
		hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC);
		hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC);
		hclge_rm_vport_all_vlan_table(vport, true);
	} else {
		dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n",
			 msg_cmd->subcode);
	}
}

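/* hclge_mbx_handler: main PF-side mailbox handler; drains the CRQ,
 * dispatches each VF request by message code and, when the VF asked for a
 * response, replies with the handler's status and any response data.
 */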
void hclge_mbx_handler(struct hclge_dev *hdev)
{
	struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
	struct hclge_respond_to_vf_msg resp_msg;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclge_vport *vport;
	struct hclge_desc *desc;
	bool is_del = false;
	unsigned int flag;
	int ret = 0;

	memset(&resp_msg, 0, sizeof(resp_msg));
	/* handle all the mailbox requests in the queue */
	while (!hclge_cmd_crq_empty(&hdev->hw)) {
		if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
			dev_warn(&hdev->pdev->dev,
				 "command queue needs re-initializing\n");
			return;
		}

		desc = &crq->desc[crq->next_to_use];
		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;

		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
		if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
			dev_warn(&hdev->pdev->dev,
				 "dropped invalid mailbox message, code = %u\n",
				 req->msg.code);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hclge_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		vport = &hdev->vport[req->mbx_src_vfid];

		trace_hclge_pf_mbx_get(hdev, req);

		switch (req->msg.code) {
		case HCLGE_MBX_MAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
								req);
			break;
		case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
								req);
			break;
		case HCLGE_MBX_SET_PROMISC_MODE:
			ret = hclge_set_vf_promisc_mode(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF promisc mode\n",
					ret);
			break;
		case HCLGE_MBX_SET_UNICAST:
			ret = hclge_set_vf_uc_mac_addr(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF UC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_MULTICAST:
			ret = hclge_set_vf_mc_mac_addr(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF MC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_VLAN:
			ret = hclge_set_vf_vlan_cfg(vport, req, &resp_msg);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to config VF's VLAN\n",
					ret);
			break;
		case HCLGE_MBX_SET_ALIVE:
			ret = hclge_set_vf_alive(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to set VF's ALIVE\n",
					ret);
			break;
		case HCLGE_MBX_GET_QINFO:
			hclge_get_vf_queue_info(vport, &resp_msg);
			break;
		case HCLGE_MBX_GET_QDEPTH:
			hclge_get_vf_queue_depth(vport, &resp_msg);
			break;
		case HCLGE_MBX_GET_TCINFO:
			hclge_get_vf_tcinfo(vport, &resp_msg);
			break;
		case HCLGE_MBX_GET_LINK_STATUS:
			ret = hclge_get_link_info(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"failed to inform link stat to VF, ret = %d\n",
					ret);
			break;
		case HCLGE_MBX_QUEUE_RESET:
			hclge_mbx_reset_vf_queue(vport, req);
			break;
		case HCLGE_MBX_RESET:
			ret = hclge_reset_vf(vport);
			break;
		case HCLGE_MBX_KEEP_ALIVE:
			hclge_vf_keep_alive(vport);
			break;
		case HCLGE_MBX_SET_MTU:
			ret = hclge_set_vf_mtu(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF fail(%d) to set mtu\n", ret);
			break;
		case HCLGE_MBX_GET_QID_IN_PF:
			hclge_get_queue_id_in_pf(vport, req, &resp_msg);
			break;
		case HCLGE_MBX_GET_RSS_KEY:
			hclge_get_rss_key(vport, req, &resp_msg);
			break;
		case HCLGE_MBX_GET_LINK_MODE:
			hclge_get_link_mode(vport, req);
			break;
		case HCLGE_MBX_GET_VF_FLR_STATUS:
		case HCLGE_MBX_VF_UNINIT:
			is_del = req->msg.code == HCLGE_MBX_VF_UNINIT;
			hclge_rm_vport_all_mac_table(vport, is_del,
						     HCLGE_MAC_ADDR_UC);
			hclge_rm_vport_all_mac_table(vport, is_del,
						     HCLGE_MAC_ADDR_MC);
			hclge_rm_vport_all_vlan_table(vport, is_del);
			break;
		case HCLGE_MBX_GET_MEDIA_TYPE:
			hclge_get_vf_media_type(vport, &resp_msg);
			break;
		case HCLGE_MBX_PUSH_LINK_STATUS:
			hclge_handle_link_change_event(hdev, req);
			break;
		case HCLGE_MBX_GET_MAC_ADDR:
			hclge_get_vf_mac_addr(vport, &resp_msg);
			break;
		case HCLGE_MBX_NCSI_ERROR:
			hclge_handle_ncsi_error(hdev);
			break;
		case HCLGE_MBX_HANDLE_VF_TBL:
			hclge_handle_vf_tbl(vport, req);
			break;
		default:
			dev_err(&hdev->pdev->dev,
				"un-supported mailbox message, code = %u\n",
				req->msg.code);
			break;
		}

		/* PF driver should not reply IMP */
		if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
		    req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
			resp_msg.status = ret;
			hclge_gen_resp_to_vf(vport, req, &resp_msg);
		}

		crq->desc[crq->next_to_use].flag = 0;
		hclge_mbx_ring_ptr_move_crq(crq);

		/* reinitialize ret after completing the mbx message processing */
		ret = 0;
	}

	/* Write back CMDQ_RQ header pointer, M7 need this pointer */
	hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
}