2019-09-03 22:28:07 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
|
|
|
|
|
2020-11-20 22:50:52 +00:00
|
|
|
#include <linux/ethtool.h>
|
2019-09-18 19:55:11 +00:00
|
|
|
#include <linux/printk.h>
|
|
|
|
#include <linux/dynamic_debug.h>
|
2019-09-03 22:28:07 +00:00
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/etherdevice.h>
|
2020-07-21 20:34:04 +00:00
|
|
|
#include <linux/if_vlan.h>
|
2019-09-03 22:28:21 +00:00
|
|
|
#include <linux/rtnetlink.h>
|
2019-09-03 22:28:07 +00:00
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/pci.h>
|
|
|
|
#include <linux/cpumask.h>
|
2021-07-27 17:43:25 +00:00
|
|
|
#include <linux/crash_dump.h>
|
2019-09-03 22:28:07 +00:00
|
|
|
|
|
|
|
#include "ionic.h"
|
|
|
|
#include "ionic_bus.h"
|
|
|
|
#include "ionic_lif.h"
|
2019-09-03 22:28:17 +00:00
|
|
|
#include "ionic_txrx.h"
|
2019-09-03 22:28:16 +00:00
|
|
|
#include "ionic_ethtool.h"
|
2019-09-03 22:28:07 +00:00
|
|
|
#include "ionic_debugfs.h"
|
|
|
|
|
2020-05-12 00:59:27 +00:00
|
|
|
/* queuetype support level */
|
|
|
|
/* Highest descriptor-format version this driver supports, per queue type;
 * advertised to the device during queue identification.
 */
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,   /* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 0,   /* 0 = Base version with CQ+SG support */
	[IONIC_QTYPE_TXQ]     = 1,   /* 0 = Base version with CQ+SG support
				      * 1 = ... with Tx SG version 1
				      */
};
|
|
|
|
|
2019-09-03 22:28:15 +00:00
|
|
|
static void ionic_link_status_check(struct ionic_lif *lif);
|
2020-03-28 03:14:48 +00:00
|
|
|
static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
|
|
|
|
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
|
|
|
|
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
|
2019-09-03 22:28:14 +00:00
|
|
|
|
2020-08-27 23:00:27 +00:00
|
|
|
static void ionic_txrx_deinit(struct ionic_lif *lif);
|
|
|
|
static int ionic_txrx_init(struct ionic_lif *lif);
|
2020-03-28 03:14:47 +00:00
|
|
|
static int ionic_start_queues(struct ionic_lif *lif);
|
|
|
|
static void ionic_stop_queues(struct ionic_lif *lif);
|
2020-05-12 00:59:27 +00:00
|
|
|
static void ionic_lif_queue_identify(struct ionic_lif *lif);
|
2020-03-28 03:14:47 +00:00
|
|
|
|
2020-09-15 23:59:03 +00:00
|
|
|
/* Deferred work run by the net_dim library after it has picked a new rx
 * moderation profile: convert the profile's usec value to the device's
 * hardware coalesce units and, if it changed, program it into the
 * interrupt control registers for this queue's interrupt.
 */
static void ionic_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder cur_moder;
	struct ionic_qcq *qcq;
	u32 new_coal;

	cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	qcq = container_of(dim, struct ionic_qcq, dim);
	new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
	/* 0 would mean "no coalescing"; clamp to the minimum hw value */
	new_coal = new_coal ? new_coal : 1;

	if (qcq->intr.dim_coal_hw != new_coal) {
		unsigned int qi = qcq->cq.bound_q->index;
		struct ionic_lif *lif = qcq->q.lif;

		/* remember what we set so we can skip redundant updates */
		qcq->intr.dim_coal_hw = new_coal;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[qi]->intr.index,
				     qcq->intr.dim_coal_hw);
	}

	/* tell the dim engine to start a new measurement cycle */
	dim->state = DIM_START_MEASURE;
}
|
|
|
|
|
2019-09-03 22:28:14 +00:00
|
|
|
/* Worker for the lif's deferred-work queue: drain the work list one entry
 * at a time, dispatching each to its handler.  The list lock is held only
 * while dequeuing, never across the handlers, since the handlers may sleep.
 */
static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	do {
		/* pop the next work item, if any, under the list lock */
		spin_lock_bh(&def->lock);
		if (!list_empty(&def->list)) {
			w = list_first_entry(&def->list,
					     struct ionic_deferred_work, list);
			list_del(&w->list);
		}
		spin_unlock_bh(&def->lock);

		if (!w)
			break;

		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		case IONIC_DW_TYPE_LIF_RESET:
			/* fw_status nonzero means the FW came back up */
			if (w->fw_status) {
				ionic_lif_handle_fw_up(lif);
			} else {
				ionic_lif_handle_fw_down(lif);

				/* Fire off another watchdog to see
				 * if the FW is already back rather than
				 * waiting another whole cycle
				 */
				mod_timer(&lif->ionic->watchdog_timer, jiffies + 1);
			}
			break;
		default:
			break;
		}
		kfree(w);
		/* reset so an empty list on the next pass ends the loop */
		w = NULL;
	} while (true);
}
|
|
|
|
|
2020-03-28 03:14:48 +00:00
|
|
|
/* Queue a work item onto the lif's deferred-work list and kick the
 * worker.  Takes ownership of @work; it is freed by the worker with
 * kfree() after the handler runs.
 */
void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
				struct ionic_deferred_work *work)
{
	spin_lock_bh(&def->lock);
	list_add_tail(&work->list, &def->list);
	spin_unlock_bh(&def->lock);
	schedule_work(&def->work);
}
|
|
|
|
|
2019-09-03 22:28:15 +00:00
|
|
|
/* Reconcile the netdev carrier state and queue state with the link
 * status last reported by firmware in lif->info.  Runs only when a
 * check has been requested via IONIC_LIF_F_LINK_CHECK_REQUESTED, and
 * clears that bit when done.  May sleep (takes queue_lock).
 */
static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	/* Don't put carrier back up if we're in a broken state */
	if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) {
		clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
		return;
	}

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	if (link_up) {
		int err = 0;

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			err = ionic_start_queues(lif);
			/* -EBUSY means a queue config change is in flight;
			 * any other failure marks the lif broken
			 */
			if (err && err != -EBUSY) {
				netdev_err(lif->netdev,
					   "Failed to start queues: %d\n", err);
				set_bit(IONIC_LIF_F_BROKEN, lif->state);
				netif_carrier_off(lif->netdev);
			}
			mutex_unlock(&lif->queue_lock);
		}

		/* only raise carrier if the queues came up cleanly */
		if (!err && !netif_carrier_ok(netdev)) {
			ionic_port_identify(lif->ionic);
			netdev_info(netdev, "Link up - %d Gbps\n",
				    le32_to_cpu(lif->info->status.link_speed) / 1000);
			netif_carrier_on(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			netdev_info(netdev, "Link down\n");
			netif_carrier_off(netdev);
		}

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_stop_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	}

	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}
|
|
|
|
|
2020-09-29 20:25:20 +00:00
|
|
|
/* Request a link status check.  If the caller can sleep, the check is
 * run inline; otherwise it is pushed onto the deferred work list.  The
 * LINK_CHECK_REQUESTED bit ensures only one request is outstanding.
 */
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (can_sleep) {
		ionic_link_status_check(lif);
		return;
	}

	/* atomic context: defer the check to the workqueue */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		/* drop the request flag so a later attempt can retry */
		clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
		return;
	}

	work->type = IONIC_DW_TYPE_LINK_STATUS;
	ionic_lif_deferred_enqueue(&lif->deferred, work);
}
|
|
|
|
|
2019-09-03 22:28:09 +00:00
|
|
|
static irqreturn_t ionic_isr(int irq, void *data)
|
|
|
|
{
|
|
|
|
struct napi_struct *napi = data;
|
|
|
|
|
|
|
|
napi_schedule_irqoff(napi);
|
|
|
|
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
|
|
|
|
{
|
|
|
|
struct ionic_intr_info *intr = &qcq->intr;
|
|
|
|
struct device *dev = lif->ionic->dev;
|
|
|
|
struct ionic_queue *q = &qcq->q;
|
|
|
|
const char *name;
|
|
|
|
|
|
|
|
if (lif->registered)
|
|
|
|
name = lif->netdev->name;
|
|
|
|
else
|
|
|
|
name = dev_name(dev);
|
|
|
|
|
|
|
|
snprintf(intr->name, sizeof(intr->name),
|
|
|
|
"%s-%s-%s", IONIC_DRV_NAME, name, q->name);
|
|
|
|
|
|
|
|
return devm_request_irq(dev, intr->vector, ionic_isr,
|
|
|
|
0, intr->name, &qcq->napi);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
|
|
|
|
{
|
|
|
|
struct ionic *ionic = lif->ionic;
|
|
|
|
int index;
|
|
|
|
|
|
|
|
index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
|
|
|
|
if (index == ionic->nintrs) {
|
|
|
|
netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
|
|
|
|
__func__, index, ionic->nintrs);
|
|
|
|
return -ENOSPC;
|
|
|
|
}
|
|
|
|
|
|
|
|
set_bit(index, ionic->intrs);
|
|
|
|
ionic_intr_init(&ionic->idev, intr, index);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-05-12 00:59:33 +00:00
|
|
|
static void ionic_intr_free(struct ionic *ionic, int index)
|
2019-09-03 22:28:09 +00:00
|
|
|
{
|
2020-05-12 00:59:34 +00:00
|
|
|
if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
|
2020-05-12 00:59:33 +00:00
|
|
|
clear_bit(index, ionic->intrs);
|
2019-09-03 22:28:09 +00:00
|
|
|
}
|
|
|
|
|
2019-09-03 22:28:17 +00:00
|
|
|
/* Enable a queue: unmask and wire up its interrupt/napi (if it has one)
 * and then send the Q_CONTROL/ENABLE admin command to the device.
 * Returns the admin command's status.
 */
static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	/* get the interrupt path ready before the device starts posting */
	if (qcq->flags & IONIC_QCQ_F_INTR) {
		irq_set_affinity_hint(qcq->intr.vector,
				      &qcq->intr.affinity_mask);
		napi_enable(&qcq->napi);
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return ionic_adminq_post_wait(lif, &ctx);
}
|
|
|
|
|
2021-10-01 18:05:57 +00:00
|
|
|
/* Disable a queue: quiesce its interrupt/napi path, then send the
 * Q_CONTROL/DISABLE admin command.  @fw_err carries a prior firmware
 * error; if it shows the FW is unreachable, the local teardown is still
 * done but the admin command is skipped and @fw_err is returned.
 */
static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
{
	struct ionic_queue *q;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.oper = IONIC_Q_DISABLE,
		},
	};

	if (!qcq) {
		netdev_err(lif->netdev, "%s: bad qcq\n", __func__);
		return -ENXIO;
	}

	q = &qcq->q;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		struct ionic_dev *idev = &lif->ionic->idev;

		/* stop dim updates, mask the irq, wait out any in-flight
		 * handler, then park napi
		 */
		cancel_work_sync(&qcq->dim.work);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
	}

	/* If there was a previous fw communication error, don't bother with
	 * sending the adminq command and just return the same error value.
	 */
	if (fw_err == -ETIMEDOUT || fw_err == -ENXIO)
		return fw_err;

	ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
	ctx.cmd.q_control.type = q->type;
	ctx.cmd.q_control.index = cpu_to_le32(q->index);
	dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	return ionic_adminq_post_wait(lif, &ctx);
}
|
|
|
|
|
2019-09-03 22:28:09 +00:00
|
|
|
static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
|
|
|
|
{
|
|
|
|
struct ionic_dev *idev = &lif->ionic->idev;
|
|
|
|
|
|
|
|
if (!qcq)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (!(qcq->flags & IONIC_QCQ_F_INITED))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (qcq->flags & IONIC_QCQ_F_INTR) {
|
|
|
|
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
|
|
|
|
IONIC_INTR_MASK_SET);
|
|
|
|
netif_napi_del(&qcq->napi);
|
|
|
|
}
|
|
|
|
|
|
|
|
qcq->flags &= ~IONIC_QCQ_F_INITED;
|
|
|
|
}
|
|
|
|
|
2020-08-27 23:00:29 +00:00
|
|
|
/* Release the qcq's interrupt resources: drop the affinity hint, free
 * the irq, and return the intr index to the device's pool.  No-op for
 * qcqs that don't own an interrupt (non-INTR, or a shared/linked one
 * with vector 0).
 */
static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
		return;

	irq_set_affinity_hint(qcq->intr.vector, NULL);
	devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
	qcq->intr.vector = 0;
	ionic_intr_free(lif->ionic, qcq->intr.index);
	qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}
|
|
|
|
|
2019-09-03 22:28:09 +00:00
|
|
|
/* Free everything a qcq owns: its debugfs entry, the DMA rings (queue,
 * completion queue, SG descriptors), its interrupt, and the info arrays.
 * Pointers are NULLed as they're freed so a repeat call is harmless.
 * The qcq struct itself is freed by the caller.
 */
static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	if (qcq->q_base) {
		dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
		qcq->q_base = NULL;
		qcq->q_base_pa = 0;
	}

	if (qcq->cq_base) {
		dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
		qcq->cq_base = NULL;
		qcq->cq_base_pa = 0;
	}

	if (qcq->sg_base) {
		dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
		qcq->sg_base = NULL;
		qcq->sg_base_pa = 0;
	}

	ionic_qcq_intr_free(lif, qcq);

	if (qcq->cq.info) {
		devm_kfree(dev, qcq->cq.info);
		qcq->cq.info = NULL;
	}
	if (qcq->q.info) {
		devm_kfree(dev, qcq->q.info);
		qcq->q.info = NULL;
	}
}
|
|
|
|
|
|
|
|
/* Tear down all of the lif's queue pairs and related arrays: notifyq,
 * adminq, and the tx/rx qcq pointer and stats arrays.  The adminqcq
 * pointer is detached under adminq_lock so concurrent adminq users see
 * either the live qcq or NULL, never a freed one.
 */
static void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_qcq *adminqcq;
	unsigned long irqflags;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		devm_kfree(dev, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		/* detach under the lock, free outside it */
		spin_lock_irqsave(&lif->adminq_lock, irqflags);
		adminqcq = READ_ONCE(lif->adminqcq);
		lif->adminqcq = NULL;
		spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
		if (adminqcq) {
			ionic_qcq_free(lif, adminqcq);
			devm_kfree(dev, adminqcq);
		}
	}

	if (lif->rxqcqs) {
		devm_kfree(dev, lif->rxqstats);
		lif->rxqstats = NULL;
		devm_kfree(dev, lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->txqcqs) {
		devm_kfree(dev, lif->txqstats);
		lif->txqstats = NULL;
		devm_kfree(dev, lif->txqcqs);
		lif->txqcqs = NULL;
	}
}
|
|
|
|
|
2019-09-03 22:28:11 +00:00
|
|
|
static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
|
|
|
|
struct ionic_qcq *n_qcq)
|
|
|
|
{
|
|
|
|
if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
|
2020-05-12 00:59:33 +00:00
|
|
|
ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
|
2019-09-03 22:28:11 +00:00
|
|
|
n_qcq->flags &= ~IONIC_QCQ_F_INTR;
|
|
|
|
}
|
|
|
|
|
|
|
|
n_qcq->intr.vector = src_qcq->intr.vector;
|
|
|
|
n_qcq->intr.index = src_qcq->intr.index;
|
|
|
|
}
|
|
|
|
|
2020-08-27 23:00:29 +00:00
|
|
|
/* Set up the interrupt for a qcq that wants one: allocate an intr index,
 * look up its MSI vector, register the isr (initially masked), and set a
 * cpu affinity hint on the device's NUMA node.  qcqs without
 * IONIC_QCQ_F_INTR just get the NOT_ASSIGNED marker.  Returns 0 or a
 * negative error, with the intr index released on failure.
 */
static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	int err;

	if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
		qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	err = ionic_intr_alloc(lif, &qcq->intr);
	if (err) {
		netdev_warn(lif->netdev, "no intr for %s: %d\n",
			    qcq->q.name, err);
		goto err_out;
	}

	/* returns the irq vector number for this intr index */
	err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
	if (err < 0) {
		netdev_warn(lif->netdev, "no vector for %s: %d\n",
			    qcq->q.name, err);
		goto err_out_free_intr;
	}
	qcq->intr.vector = err;
	/* keep it masked until ionic_qcq_enable() unmasks it */
	ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
			       IONIC_INTR_MASK_SET);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "irq request failed %d\n", err);
		goto err_out_free_intr;
	}

	/* try to get the irq on the local numa node first */
	qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
					     dev_to_node(lif->ionic->dev));
	if (qcq->intr.cpu != -1)
		cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);

	netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
	return 0;

err_out_free_intr:
	ionic_intr_free(lif->ionic, qcq->intr.index);
err_out:
	return err;
}
|
|
|
|
|
2019-09-03 22:28:09 +00:00
|
|
|
static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
|
|
|
|
unsigned int index,
|
|
|
|
const char *name, unsigned int flags,
|
|
|
|
unsigned int num_descs, unsigned int desc_size,
|
|
|
|
unsigned int cq_desc_size,
|
|
|
|
unsigned int sg_desc_size,
|
|
|
|
unsigned int pid, struct ionic_qcq **qcq)
|
|
|
|
{
|
|
|
|
struct ionic_dev *idev = &lif->ionic->idev;
|
|
|
|
struct device *dev = lif->ionic->dev;
|
|
|
|
void *q_base, *cq_base, *sg_base;
|
|
|
|
dma_addr_t cq_base_pa = 0;
|
|
|
|
dma_addr_t sg_base_pa = 0;
|
|
|
|
dma_addr_t q_base_pa = 0;
|
|
|
|
struct ionic_qcq *new;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
*qcq = NULL;
|
|
|
|
|
|
|
|
new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
|
|
|
|
if (!new) {
|
|
|
|
netdev_err(lif->netdev, "Cannot allocate queue structure\n");
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_out;
|
|
|
|
}
|
|
|
|
|
2021-03-10 19:26:28 +00:00
|
|
|
new->q.dev = dev;
|
2019-09-03 22:28:09 +00:00
|
|
|
new->flags = flags;
|
|
|
|
|
2020-08-10 02:38:07 +00:00
|
|
|
new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
|
2019-09-03 22:28:09 +00:00
|
|
|
GFP_KERNEL);
|
|
|
|
if (!new->q.info) {
|
|
|
|
netdev_err(lif->netdev, "Cannot allocate queue info\n");
|
|
|
|
err = -ENOMEM;
|
2020-08-27 23:00:25 +00:00
|
|
|
goto err_out_free_qcq;
|
2019-09-03 22:28:09 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
new->q.type = type;
|
2021-03-10 19:26:28 +00:00
|
|
|
new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;
|
2019-09-03 22:28:09 +00:00
|
|
|
|
|
|
|
err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
|
|
|
|
desc_size, sg_desc_size, pid);
|
|
|
|
if (err) {
|
|
|
|
netdev_err(lif->netdev, "Cannot initialize queue\n");
|
2020-08-27 23:00:25 +00:00
|
|
|
goto err_out_free_q_info;
|
2019-09-03 22:28:09 +00:00
|
|
|
}
|
|
|
|
|
2020-08-27 23:00:29 +00:00
|
|
|
err = ionic_alloc_qcq_interrupt(lif, new);
|
|
|
|
if (err)
|
|
|
|
goto err_out;
|
2019-09-03 22:28:09 +00:00
|
|
|
|
2020-08-10 02:38:07 +00:00
|
|
|
new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
|
2019-09-03 22:28:09 +00:00
|
|
|
GFP_KERNEL);
|
|
|
|
if (!new->cq.info) {
|
|
|
|
netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
|
|
|
|
err = -ENOMEM;
|
2020-03-28 03:14:44 +00:00
|
|
|
goto err_out_free_irq;
|
2019-09-03 22:28:09 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
|
|
|
|
if (err) {
|
|
|
|
netdev_err(lif->netdev, "Cannot initialize completion queue\n");
|
2020-08-27 23:00:25 +00:00
|
|
|
goto err_out_free_cq_info;
|
2019-09-03 22:28:09 +00:00
|
|
|
}
|
|
|
|
|
2020-10-01 16:22:39 +00:00
|
|
|
if (flags & IONIC_QCQ_F_NOTIFYQ) {
|
|
|
|
int q_size, cq_size;
|
|
|
|
|
|
|
|
/* q & cq need to be contiguous in case of notifyq */
|
|
|
|
q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
|
|
|
|
cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
|
|
|
|
|
|
|
|
new->q_size = PAGE_SIZE + q_size + cq_size;
|
|
|
|
new->q_base = dma_alloc_coherent(dev, new->q_size,
|
|
|
|
&new->q_base_pa, GFP_KERNEL);
|
|
|
|
if (!new->q_base) {
|
|
|
|
netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_out_free_cq_info;
|
|
|
|
}
|
|
|
|
q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
|
|
|
|
q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
|
|
|
|
ionic_q_map(&new->q, q_base, q_base_pa);
|
|
|
|
|
|
|
|
cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
|
|
|
|
cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
|
|
|
|
ionic_cq_map(&new->cq, cq_base, cq_base_pa);
|
|
|
|
ionic_cq_bind(&new->cq, &new->q);
|
|
|
|
} else {
|
|
|
|
new->q_size = PAGE_SIZE + (num_descs * desc_size);
|
|
|
|
new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!new->q_base) {
|
|
|
|
netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_out_free_cq_info;
|
|
|
|
}
|
|
|
|
q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
|
|
|
|
q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
|
|
|
|
ionic_q_map(&new->q, q_base, q_base_pa);
|
|
|
|
|
|
|
|
new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
|
|
|
|
new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!new->cq_base) {
|
|
|
|
netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_out_free_q;
|
|
|
|
}
|
|
|
|
cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
|
|
|
|
cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
|
|
|
|
ionic_cq_map(&new->cq, cq_base, cq_base_pa);
|
|
|
|
ionic_cq_bind(&new->cq, &new->q);
|
2020-08-27 23:00:25 +00:00
|
|
|
}
|
2019-09-03 22:28:09 +00:00
|
|
|
|
|
|
|
if (flags & IONIC_QCQ_F_SG) {
|
2020-08-27 23:00:25 +00:00
|
|
|
new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
|
|
|
|
new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!new->sg_base) {
|
|
|
|
netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_out_free_cq;
|
|
|
|
}
|
|
|
|
sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
|
|
|
|
sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
|
2019-09-03 22:28:09 +00:00
|
|
|
ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
|
|
|
|
}
|
|
|
|
|
2020-09-15 23:59:03 +00:00
|
|
|
INIT_WORK(&new->dim.work, ionic_dim_work);
|
|
|
|
new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
|
|
|
|
|
2019-09-03 22:28:09 +00:00
|
|
|
*qcq = new;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
2020-08-27 23:00:25 +00:00
|
|
|
err_out_free_cq:
|
|
|
|
dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
|
|
|
|
err_out_free_q:
|
|
|
|
dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
|
|
|
|
err_out_free_cq_info:
|
|
|
|
devm_kfree(dev, new->cq.info);
|
2020-03-28 03:14:44 +00:00
|
|
|
err_out_free_irq:
|
2020-08-27 23:00:29 +00:00
|
|
|
if (flags & IONIC_QCQ_F_INTR) {
|
2020-03-28 03:14:44 +00:00
|
|
|
devm_free_irq(dev, new->intr.vector, &new->napi);
|
2020-05-12 00:59:33 +00:00
|
|
|
ionic_intr_free(lif->ionic, new->intr.index);
|
2020-08-27 23:00:29 +00:00
|
|
|
}
|
2020-08-27 23:00:25 +00:00
|
|
|
err_out_free_q_info:
|
|
|
|
devm_kfree(dev, new->q.info);
|
|
|
|
err_out_free_qcq:
|
|
|
|
devm_kfree(dev, new);
|
2019-09-03 22:28:09 +00:00
|
|
|
err_out:
|
|
|
|
dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Allocate the lif's fixed queue pairs and per-queue arrays: the adminq
 * (with its own interrupt), the notifyq if the device provides one
 * (sharing the adminq's interrupt), and the tx/rx qcq pointer and stats
 * arrays.  On any failure everything allocated so far is freed.
 */
static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int flags;
	int err;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0, lif->kern_pid, &lif->adminqcq);
	if (err)
		return err;
	ionic_debugfs_add_qcq(lif, lif->adminqcq);

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0, lif->kern_pid, &lif->notifyqcq);
		if (err)
			goto err_out;
		ionic_debugfs_add_qcq(lif, lif->notifyqcq);

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	err = -ENOMEM;
	lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
				   sizeof(*lif->txqcqs), GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out;
	lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
				   sizeof(*lif->rxqcqs), GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out;

	/* one extra stats slot (count + 1); presumably for an aggregate or
	 * control queue entry -- TODO confirm against the stats code
	 */
	lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1,
				     sizeof(*lif->txqstats), GFP_KERNEL);
	if (!lif->txqstats)
		goto err_out;
	lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1,
				     sizeof(*lif->rxqstats), GFP_KERNEL);
	if (!lif->rxqstats)
		goto err_out;

	return 0;

err_out:
	ionic_qcqs_free(lif);
	return err;
}
|
|
|
|
|
2020-08-27 23:00:27 +00:00
|
|
|
/* Reset a qcq to its pristine state before (re)initializing it on the
 * device: zero the ring indices, reset the completion color, and clear
 * the DMA ring memory.
 *
 * NOTE(review): for qcqs without a cq or sg ring, cq_base/sg_base are
 * NULL with *_size == 0, so these memsets rely on memset(NULL, 0, 0)
 * being a no-op -- confirm all callers pass fully-allocated qcqs.
 */
static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
{
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;
	memset(qcq->q_base, 0, qcq->q_size);
	memset(qcq->cq_base, 0, qcq->cq_size);
	memset(qcq->sg_base, 0, qcq->sg_size);
}
|
|
|
|
|
2019-09-03 22:28:17 +00:00
|
|
|
/* Initialize a tx queue on the device: reset the local ring state, send
 * the Q_INIT admin command describing the rings, record the hardware
 * type/index/doorbell the device assigned, and (in split-interrupt mode)
 * attach the tx napi handler.  Returns 0 or the admin command's error.
 */
static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	unsigned int intr_index;
	int err;

	intr_index = qcq->intr.index;

	ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* capture the identifiers the device assigned to this queue */
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	/* with split tx/rx interrupts, tx gets its own napi context */
	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi,
			       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
|
|
|
|
|
|
|
|
/* Push an Rx queue's configuration to the device with a Q_INIT adminq
 * command, record the hw identifiers the device returns, attach the
 * napi handler, and mark the qcq as initialized.
 *
 * Returns 0 on success or a negative error from the adminq post.
 */
static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			/* Rx queues always use an interrupt and the
			 * scatter-gather descriptor format
			 */
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			/* ring sizes are passed to the device as log2 */
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	/* reset the qcq's internal state before handing it to the device */
	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* capture the device-assigned queue identity for doorbell use */
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	/* with split interrupts Rx gets its own napi handler, otherwise
	 * a combined handler services both Tx and Rx on this interrupt
	 */
	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
			       NAPI_POLL_WEIGHT);
	else
		netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi,
			       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
|
|
|
|
|
2021-04-01 17:56:04 +00:00
|
|
|
/* Create the dedicated Tx queue used for hardware-timestamped packets.
 *
 * Allocates the qcq, links it to the adminq interrupt, and, if the
 * interface is running, initializes and (when the LIF is up) enables it.
 * Returns 0 immediately if the queue already exists; on failure the
 * error unwind releases everything acquired so far.
 */
int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int txq_i, flags;
	struct ionic_qcq *txq;
	u64 features;
	int err;

	/* already created - nothing to do */
	if (lif->hwstamp_txq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_txq_desc);
	/* double-wide completion entries to match IONIC_Q_F_2X_CQ_DESC */
	comp_sz = 2 * sizeof(struct ionic_txq_comp);

	/* choose the SG descriptor format matching the fw's TXQ version */
	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	/* the hwstamp queue sits just past the normal Tx queue range */
	txq_i = lif->ionic->ntxqs_per_lif;
	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &txq);
	if (err)
		goto err_qcq_alloc;

	txq->q.features = features;

	/* this queue shares the adminq's interrupt */
	ionic_link_qcq_interrupts(lif->adminqcq, txq);
	ionic_debugfs_add_qcq(lif, txq);

	lif->hwstamp_txq = txq;

	/* if the interface is live, bring the new queue up now */
	if (netif_running(lif->netdev)) {
		err = ionic_lif_txq_init(lif, txq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			err = ionic_qcq_enable(txq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, txq);
err_qcq_init:
	lif->hwstamp_txq = NULL;
	ionic_debugfs_del_qcq(txq);
	ionic_qcq_free(lif, txq);
	devm_kfree(lif->ionic->dev, txq);
err_qcq_alloc:
	return err;
}
|
|
|
|
|
|
|
|
/* Create the dedicated Rx queue used for hardware-timestamped packets.
 *
 * Mirrors ionic_lif_create_hwstamp_txq(): allocate the qcq, link it to
 * the adminq interrupt, and if the interface is running initialize it,
 * pre-fill its buffers, and enable it when the LIF is up.  Returns 0
 * immediately if the queue already exists.
 */
int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int rxq_i, flags;
	struct ionic_qcq *rxq;
	u64 features;
	int err;

	/* already created - nothing to do */
	if (lif->hwstamp_rxq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_rxq_desc);
	/* double-wide completion entries to match IONIC_Q_F_2X_CQ_DESC */
	comp_sz = 2 * sizeof(struct ionic_rxq_comp);
	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

	/* the hwstamp queue sits just past the normal Rx queue range */
	rxq_i = lif->ionic->nrxqs_per_lif;
	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &rxq);
	if (err)
		goto err_qcq_alloc;

	rxq->q.features = features;

	/* this queue shares the adminq's interrupt */
	ionic_link_qcq_interrupts(lif->adminqcq, rxq);
	ionic_debugfs_add_qcq(lif, rxq);

	lif->hwstamp_rxq = rxq;

	/* if the interface is live, bring the new queue up now */
	if (netif_running(lif->netdev)) {
		err = ionic_lif_rxq_init(lif, rxq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			/* post empty buffers before enabling the queue */
			ionic_rx_fill(&rxq->q);
			err = ionic_qcq_enable(rxq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, rxq);
err_qcq_init:
	lif->hwstamp_rxq = NULL;
	ionic_debugfs_del_qcq(rxq);
	ionic_qcq_free(lif, rxq);
	devm_kfree(lif->ionic->dev, rxq);
err_qcq_alloc:
	return err;
}
|
|
|
|
|
|
|
|
/* Turn hardware timestamping of all Rx traffic on or off.
 *
 * When the interface isn't running we only record the desired queue
 * features; otherwise the Rx queues are reconfigured in place.
 */
int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all)
{
	struct ionic_queue_params qparam;

	ionic_init_queue_params(lif, &qparam);

	qparam.rxq_features = rx_all ?
			      IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP : 0;

	/* if we're not running, just set the values and return */
	if (!netif_running(lif->netdev)) {
		lif->rxq_features = qparam.rxq_features;
		return 0;
	}

	/* live interface: rebuild the Rx queues with the new features */
	return ionic_reconfigure_queues(lif, &qparam);
}
|
|
|
|
|
|
|
|
/* Tell the firmware which Tx timestamping mode to use for this LIF
 * via a LIF_SETATTR adminq command.
 */
int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_TXSTAMP,
		},
	};

	/* pass the requested mode through in little-endian form */
	ctx.cmd.lif_setattr.txstamp_mode = cpu_to_le16(txstamp_mode);

	return ionic_adminq_post_wait(lif, &ctx);
}
|
|
|
|
|
|
|
|
/* Remove the PTP Rx steering filter, both from the driver's local
 * filter list and from the firmware.
 *
 * The filter id must be captured under the rx_filters lock before the
 * local entry is freed; the lock is then dropped before the (sleeping)
 * adminq delete is posted.
 */
static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	u32 filter_id;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	/* find the steering filter; nothing to do if there isn't one */
	f = ionic_rx_filter_rxsteer(lif);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return;
	}

	/* save the id before the entry is freed */
	filter_id = f->filter_id;
	ionic_rx_filter_free(lif, f);

	spin_unlock_bh(&lif->rx_filters.lock);

	netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id);

	/* -EEXIST from the fw is not worth complaining about here */
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id);
}
|
|
|
|
|
|
|
|
/* Add a packet-class steering filter that directs matching (PTP)
 * traffic to the dedicated hwstamp Rx queue.
 *
 * Posts the RX_FILTER_ADD to the firmware first, then records the
 * filter in the driver's list as already-synced.  Returns -EINVAL if
 * the hwstamp Rx queue doesn't exist; an -EEXIST from the firmware is
 * tolerated and the local save still proceeds.
 */
static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS),
			.pkt_class = cpu_to_le64(pkt_class),
		},
	};
	u8 qtype;
	u32 qid;
	int err;

	if (!lif->hwstamp_rxq)
		return -EINVAL;

	/* steer matches to the hwstamp Rx queue */
	qtype = lif->hwstamp_rxq->q.type;
	ctx.cmd.rx_filter_add.qtype = qtype;

	qid = lif->hwstamp_rxq->q.index;
	ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid);

	netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n");
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	/* record the filter locally, marked as already in sync with fw */
	spin_lock_bh(&lif->rx_filters.lock);
	err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED);
	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}
|
|
|
|
|
|
|
|
/* Replace the current PTP Rx steering filter with one for the given
 * packet class; a pkt_class of 0 simply removes any existing filter.
 */
int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	/* always clear out the old filter first */
	ionic_lif_del_hwstamp_rxfilt(lif);

	return pkt_class ? ionic_lif_add_hwstamp_rxfilt(lif, pkt_class) : 0;
}
|
|
|
|
|
2019-09-03 22:28:11 +00:00
|
|
|
static bool ionic_notifyq_service(struct ionic_cq *cq,
|
|
|
|
struct ionic_cq_info *cq_info)
|
|
|
|
{
|
|
|
|
union ionic_notifyq_comp *comp = cq_info->cq_desc;
|
2020-03-28 03:14:48 +00:00
|
|
|
struct ionic_deferred_work *work;
|
2019-09-03 22:28:11 +00:00
|
|
|
struct net_device *netdev;
|
|
|
|
struct ionic_queue *q;
|
|
|
|
struct ionic_lif *lif;
|
|
|
|
u64 eid;
|
|
|
|
|
|
|
|
q = cq->bound_q;
|
|
|
|
lif = q->info[0].cb_arg;
|
|
|
|
netdev = lif->netdev;
|
|
|
|
eid = le64_to_cpu(comp->event.eid);
|
|
|
|
|
|
|
|
/* Have we run out of new completions to process? */
|
2020-07-21 20:34:07 +00:00
|
|
|
if ((s64)(eid - lif->last_eid) <= 0)
|
2019-09-03 22:28:11 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
lif->last_eid = eid;
|
|
|
|
|
|
|
|
dev_dbg(lif->ionic->dev, "notifyq event:\n");
|
|
|
|
dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
|
|
|
|
comp, sizeof(*comp), true);
|
|
|
|
|
|
|
|
switch (le16_to_cpu(comp->event.ecode)) {
|
|
|
|
case IONIC_EVENT_LINK_CHANGE:
|
2021-03-19 00:48:04 +00:00
|
|
|
ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
|
2019-09-03 22:28:11 +00:00
|
|
|
break;
|
|
|
|
case IONIC_EVENT_RESET:
|
2022-01-24 18:53:02 +00:00
|
|
|
if (lif->ionic->idev.fw_status_ready &&
|
|
|
|
!test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
|
|
|
|
!test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
|
|
|
|
work = kzalloc(sizeof(*work), GFP_ATOMIC);
|
|
|
|
if (!work) {
|
|
|
|
netdev_err(lif->netdev, "Reset event dropped\n");
|
|
|
|
clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
|
|
|
|
} else {
|
|
|
|
work->type = IONIC_DW_TYPE_LIF_RESET;
|
|
|
|
ionic_lif_deferred_enqueue(&lif->deferred, work);
|
|
|
|
}
|
2020-03-28 03:14:48 +00:00
|
|
|
}
|
2019-09-03 22:28:11 +00:00
|
|
|
break;
|
|
|
|
default:
|
2020-05-12 00:59:27 +00:00
|
|
|
netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
|
2019-09-03 22:28:11 +00:00
|
|
|
comp->event.ecode, eid);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-03 22:28:09 +00:00
|
|
|
static bool ionic_adminq_service(struct ionic_cq *cq,
|
|
|
|
struct ionic_cq_info *cq_info)
|
|
|
|
{
|
|
|
|
struct ionic_admin_comp *comp = cq_info->cq_desc;
|
|
|
|
|
|
|
|
if (!color_match(comp->color, cq->done_color))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* napi poll handler for the adminq interrupt.
 *
 * This one interrupt services the notifyq, the adminq (under the
 * adminq spinlock), and, when present, the hwstamp Tx/Rx queues.
 * Interrupt credits are returned and the interrupt re-enabled only
 * after napi completes under budget.
 */
static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	struct ionic_dev *idev = &lif->ionic->idev;
	unsigned long irqflags;
	unsigned int flags = 0;
	int rx_work = 0;
	int tx_work = 0;
	int n_work = 0;
	int a_work = 0;
	int work_done;
	int credits;

	if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
		n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
					  ionic_notifyq_service, NULL, NULL);

	/* the adminq is also posted to from process context, so lock it */
	spin_lock_irqsave(&lif->adminq_lock, irqflags);
	if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
		a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
					  ionic_adminq_service, NULL, NULL);
	spin_unlock_irqrestore(&lif->adminq_lock, irqflags);

	/* the hwstamp queues share this interrupt when they exist */
	if (lif->hwstamp_rxq)
		rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget,
					   ionic_rx_service, NULL, NULL);

	if (lif->hwstamp_txq)
		tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget,
					   ionic_tx_service, NULL, NULL);

	/* report the busiest queue's count so napi re-polls if any
	 * of them used the full budget
	 */
	work_done = max(max(n_work, a_work), max(rx_work, tx_work));
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* done polling: unmask the interrupt when crediting */
		flags |= IONIC_INTR_CRED_UNMASK;
		intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		credits = n_work + a_work + rx_work + tx_work;
		ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
	}

	return work_done;
}
|
|
|
|
|
2020-05-12 00:59:35 +00:00
|
|
|
void ionic_get_stats64(struct net_device *netdev,
|
|
|
|
struct rtnl_link_stats64 *ns)
|
2019-09-03 22:28:15 +00:00
|
|
|
{
|
|
|
|
struct ionic_lif *lif = netdev_priv(netdev);
|
|
|
|
struct ionic_lif_stats *ls;
|
|
|
|
|
|
|
|
memset(ns, 0, sizeof(*ns));
|
|
|
|
ls = &lif->info->stats;
|
|
|
|
|
|
|
|
ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
|
|
|
|
le64_to_cpu(ls->rx_mcast_packets) +
|
|
|
|
le64_to_cpu(ls->rx_bcast_packets);
|
|
|
|
|
|
|
|
ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
|
|
|
|
le64_to_cpu(ls->tx_mcast_packets) +
|
|
|
|
le64_to_cpu(ls->tx_bcast_packets);
|
|
|
|
|
|
|
|
ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
|
|
|
|
le64_to_cpu(ls->rx_mcast_bytes) +
|
|
|
|
le64_to_cpu(ls->rx_bcast_bytes);
|
|
|
|
|
|
|
|
ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
|
|
|
|
le64_to_cpu(ls->tx_mcast_bytes) +
|
|
|
|
le64_to_cpu(ls->tx_bcast_bytes);
|
|
|
|
|
|
|
|
ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
|
|
|
|
le64_to_cpu(ls->rx_mcast_drop_packets) +
|
|
|
|
le64_to_cpu(ls->rx_bcast_drop_packets);
|
|
|
|
|
|
|
|
ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
|
|
|
|
le64_to_cpu(ls->tx_mcast_drop_packets) +
|
|
|
|
le64_to_cpu(ls->tx_bcast_drop_packets);
|
|
|
|
|
|
|
|
ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
|
|
|
|
|
|
|
|
ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
|
|
|
|
|
|
|
|
ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
|
|
|
|
le64_to_cpu(ls->rx_queue_disabled) +
|
|
|
|
le64_to_cpu(ls->rx_desc_fetch_error) +
|
|
|
|
le64_to_cpu(ls->rx_desc_data_error);
|
|
|
|
|
|
|
|
ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
|
|
|
|
le64_to_cpu(ls->tx_queue_disabled) +
|
|
|
|
le64_to_cpu(ls->tx_desc_fetch_error) +
|
|
|
|
le64_to_cpu(ls->tx_desc_data_error);
|
|
|
|
|
|
|
|
ns->rx_errors = ns->rx_over_errors +
|
|
|
|
ns->rx_missed_errors;
|
|
|
|
|
|
|
|
ns->tx_errors = ns->tx_aborted_errors;
|
|
|
|
}
|
|
|
|
|
2019-09-03 22:28:14 +00:00
|
|
|
static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
|
|
|
|
{
|
ionic: sync the filters in the work task
In order to separate the atomic needs of __dev_uc_sync()
and __dev_mc_sync() from the safe rx_mode handling, we need
to have the ndo handler manipulate the driver's filter list,
and later have the driver sync the filters to the firmware,
outside of the atomic context.
Here we put __dev_mc_sync() and __dev_uc_sync() back into the
ndo callback to give them their netif_addr_lock context and
have them update the driver's filter list, flagging changes
that should be made to the device filter list. Later, in the
rx_mode handler, we read those hints and sync up the device's
list as needed.
It is possible for multiple add/delete requests to come from
the stack before the rx_mode task processes the list, but the
handling of the sync status flag should keep everything sorted
correctly. For example, if a delete of an existing filter is
followed by another add before the rx_mode task is run, as can
happen when going in and out of a bond, the add will cancel
the delete and no actual changes will be sent to the device.
We also add a check in the watchdog to see if there are any
stray unsync'd filters, possibly left over from a filter
overflow and waiting to get sync'd after some other filter
gets removed to make room.
Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-08-26 01:24:48 +00:00
|
|
|
return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR);
|
2019-09-03 22:28:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
|
|
|
|
{
|
2021-10-08 19:38:01 +00:00
|
|
|
/* Don't delete our own address from the uc list */
|
|
|
|
if (ether_addr_equal(addr, netdev->dev_addr))
|
|
|
|
return 0;
|
|
|
|
|
ionic: sync the filters in the work task
In order to separate the atomic needs of __dev_uc_sync()
and __dev_mc_sync() from the safe rx_mode handling, we need
to have the ndo handler manipulate the driver's filter list,
and later have the driver sync the filters to the firmware,
outside of the atomic context.
Here we put __dev_mc_sync() and __dev_uc_sync() back into the
ndo callback to give them their netif_addr_lock context and
have them update the driver's filter list, flagging changes
that should be made to the device filter list. Later, in the
rx_mode handler, we read those hints and sync up the device's
list as needed.
It is possible for multiple add/delete requests to come from
the stack before the rx_mode task processes the list, but the
handling of the sync status flag should keep everything sorted
correctly. For example, if a delete of an existing filter is
followed by another add before the rx_mode task is run, as can
happen when going in and out of a bond, the add will cancel
the delete and no actual changes will be sent to the device.
We also add a check in the watchdog to see if there are any
stray unsync'd filters, possibly left over from a filter
overflow and waiting to get sync'd after some other filter
gets removed to make room.
Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-08-26 01:24:48 +00:00
|
|
|
return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
|
2020-09-29 20:25:20 +00:00
|
|
|
}
|
|
|
|
|
ionic: sync the filters in the work task
In order to separate the atomic needs of __dev_uc_sync()
and __dev_mc_sync() from the safe rx_mode handling, we need
to have the ndo handler manipulate the driver's filter list,
and later have the driver sync the filters to the firmware,
outside of the atomic context.
Here we put __dev_mc_sync() and __dev_uc_sync() back into the
ndo callback to give them their netif_addr_lock context and
have them update the driver's filter list, flagging changes
that should be made to the device filter list. Later, in the
rx_mode handler, we read those hints and sync up the device's
list as needed.
It is possible for multiple add/delete requests to come from
the stack before the rx_mode task processes the list, but the
handling of the sync status flag should keep everything sorted
correctly. For example, if a delete of an existing filter is
followed by another add before the rx_mode task is run, as can
happen when going in and out of a bond, the add will cancel
the delete and no actual changes will be sent to the device.
We also add a check in the watchdog to see if there are any
stray unsync'd filters, possibly left over from a filter
overflow and waiting to get sync'd after some other filter
gets removed to make room.
Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-08-26 01:24:48 +00:00
|
|
|
/* Sync the Rx mode (unicast/multicast/broadcast/promisc/allmulti) and
 * the driver's filter list to the firmware.
 *
 * Runs under the config_lock in task context; the ndo set_rx_mode
 * callback defers to this via the work task.  If the mac or vlan
 * filter tables have overflowed, PROMISC and ALLMULTI are forced on
 * so no traffic is lost.
 */
void ionic_lif_rx_mode(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	unsigned int nfilters;
	unsigned int nd_flags;
	char buf[128];
	u16 rx_mode;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	mutex_lock(&lif->config_lock);

	/* grab the flags once for local use */
	nd_flags = netdev->flags;

	/* translate netdev flags into the device's rx_mode bits */
	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync the filters */
	ionic_rx_filter_sync(lif);

	/* check for overflow state
	 *    if so, we track that we overflowed and enable NIC PROMISC
	 *    else if the overflow is set and not needed
	 *       we remove our overflow flag and check the netdev flags
	 *       to see if we can disable NIC PROMISC
	 */
	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);

	if (((lif->nucast + lif->nmcast) >= nfilters) ||
	    (lif->max_vlans && lif->nvlans >= lif->max_vlans)) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
	} else {
		if (!(nd_flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
		if (!(nd_flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	/* build a human-readable summary of the mode change for debug */
	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		      lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
	netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);

	/* only bother the firmware if the mode actually changed */
	if (lif->rx_mode != rx_mode) {
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_mode_set = {
				.opcode = IONIC_CMD_RX_MODE_SET,
				.lif_index = cpu_to_le16(lif->index),
			},
		};
		int err;

		ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
		err = ionic_adminq_post_wait(lif, &ctx);
		if (err)
			netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
				    rx_mode, err);
		else
			lif->rx_mode = rx_mode;
	}

	mutex_unlock(&lif->config_lock);
}
|
|
|
|
|
2021-08-26 01:24:47 +00:00
|
|
|
static void ionic_ndo_set_rx_mode(struct net_device *netdev)
|
2021-07-23 18:02:45 +00:00
|
|
|
{
|
|
|
|
struct ionic_lif *lif = netdev_priv(netdev);
|
|
|
|
struct ionic_deferred_work *work;
|
|
|
|
|
ionic: sync the filters in the work task
In order to separate the atomic needs of __dev_uc_sync()
and __dev_mc_sync() from the safe rx_mode handling, we need
to have the ndo handler manipulate the driver's filter list,
and later have the driver sync the filters to the firmware,
outside of the atomic context.
Here we put __dev_mc_sync() and __dev_uc_sync() back into the
ndo callback to give them their netif_addr_lock context and
have them update the driver's filter list, flagging changes
that should be made to the device filter list. Later, in the
rx_mode handler, we read those hints and sync up the device's
list as needed.
It is possible for multiple add/delete requests to come from
the stack before the rx_mode task processes the list, but the
handling of the sync status flag should keep everything sorted
correctly. For example, if a delete of an existing filter is
followed by another add before the rx_mode task is run, as can
happen when going in and out of a bond, the add will cancel
the delete and no actual changes will be sent to the device.
We also add a check in the watchdog to see if there are any
stray unsync'd filters, possibly left over from a filter
overflow and waiting to get sync'd after some other filter
gets removed to make room.
Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-08-26 01:24:48 +00:00
|
|
|
/* Sync the kernel filter list with the driver filter list */
|
|
|
|
__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
|
|
|
|
__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
|
|
|
|
|
|
|
|
/* Shove off the rest of the rxmode work to the work task
|
|
|
|
* which will include syncing the filters to the firmware.
|
|
|
|
*/
|
2021-08-26 01:24:47 +00:00
|
|
|
work = kzalloc(sizeof(*work), GFP_ATOMIC);
|
|
|
|
if (!work) {
|
|
|
|
netdev_err(lif->netdev, "rxmode change dropped\n");
|
|
|
|
return;
|
2020-11-12 18:22:06 +00:00
|
|
|
}
|
2021-08-26 01:24:47 +00:00
|
|
|
work->type = IONIC_DW_TYPE_RX_MODE;
|
|
|
|
netdev_dbg(lif->netdev, "deferred: rx_mode\n");
|
|
|
|
ionic_lif_deferred_enqueue(&lif->deferred, work);
|
2019-09-03 22:28:14 +00:00
|
|
|
}
|
|
|
|
|
2019-09-03 22:28:12 +00:00
|
|
|
/* Translate kernel netdev feature bits into the equivalent IONIC
 * hardware feature bits, returned in little-endian form for the
 * device's LIF_SETATTR command.  Pure bit mapping; no side effects.
 */
static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	static const struct {
		netdev_features_t netdev_flag;
		u64 nic_flag;
	} feature_map[] = {
		{ NETIF_F_HW_VLAN_CTAG_TX,	IONIC_ETH_HW_VLAN_TX_TAG },
		{ NETIF_F_HW_VLAN_CTAG_RX,	IONIC_ETH_HW_VLAN_RX_STRIP },
		{ NETIF_F_HW_VLAN_CTAG_FILTER,	IONIC_ETH_HW_VLAN_RX_FILTER },
		{ NETIF_F_RXHASH,		IONIC_ETH_HW_RX_HASH },
		{ NETIF_F_RXCSUM,		IONIC_ETH_HW_RX_CSUM },
		{ NETIF_F_SG,			IONIC_ETH_HW_TX_SG },
		{ NETIF_F_HW_CSUM,		IONIC_ETH_HW_TX_CSUM },
		{ NETIF_F_TSO,			IONIC_ETH_HW_TSO },
		{ NETIF_F_TSO6,			IONIC_ETH_HW_TSO_IPV6 },
		{ NETIF_F_TSO_ECN,		IONIC_ETH_HW_TSO_ECN },
		{ NETIF_F_GSO_GRE,		IONIC_ETH_HW_TSO_GRE },
		{ NETIF_F_GSO_GRE_CSUM,		IONIC_ETH_HW_TSO_GRE_CSUM },
		{ NETIF_F_GSO_IPXIP4,		IONIC_ETH_HW_TSO_IPXIP4 },
		{ NETIF_F_GSO_IPXIP6,		IONIC_ETH_HW_TSO_IPXIP6 },
		{ NETIF_F_GSO_UDP_TUNNEL,	IONIC_ETH_HW_TSO_UDP },
		{ NETIF_F_GSO_UDP_TUNNEL_CSUM,	IONIC_ETH_HW_TSO_UDP_CSUM },
	};
	u64 wanted = 0;
	int i;

	/* OR-accumulate each requested capability; order is irrelevant */
	for (i = 0; i < ARRAY_SIZE(feature_map); i++)
		if (features & feature_map[i].netdev_flag)
			wanted |= feature_map[i].nic_flag;

	return cpu_to_le64(wanted);
}
|
|
|
|
|
|
|
|
/* Push the requested feature set to the device via LIF_SETATTR and
 * record what the device actually accepted in lif->hw_features.
 *
 * Returns 0 on success or the adminq error code.  On success the
 * cached lif->hw_features reflects the intersection of what was
 * requested and what the completion reported.
 */
static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);

	/* hardware timestamping is always requested when a PHC is present,
	 * independent of the netdev feature bits
	 */
	if (lif->phc)
		ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* cache what the device granted: the AND is done on the raw
	 * little-endian words, then converted once to CPU order
	 */
	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	/* a change in RX hashing capability requires an RSS reconfig */
	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	/* NOTE(review): this masks IONIC_ETH_HW_VLAN_* bits against the
	 * netdev_features_t word; it appears intended only as a heuristic
	 * hint that vlan offload was requested but refused - confirm
	 */
	if ((vlan_flags & features) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	/* log each capability the device accepted */
	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP)
		dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n");

	return 0;
}
|
|
|
|
|
|
|
|
/* Negotiate the initial feature set with the device, then advertise
 * to the netdev stack only what the device actually accepted
 * (as recorded in lif->hw_features by ionic_set_nic_features()).
 *
 * Returns 0 on success or the error from the feature negotiation.
 */
static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN;

	/* RX hashing is only useful with more than one queue */
	if (lif->nxqs > 1)
		features |= NETIF_F_RXHASH;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	/* checksum and TSO capabilities also apply to encapsulated traffic */
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	/* fold the encap features into the general sets, and expose
	 * everything but the VLAN bits to stacked vlan devices
	 */
	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	return 0;
}
|
|
|
|
|
|
|
|
static int ionic_set_features(struct net_device *netdev,
|
|
|
|
netdev_features_t features)
|
|
|
|
{
|
|
|
|
struct ionic_lif *lif = netdev_priv(netdev);
|
|
|
|
int err;
|
|
|
|
|
|
|
|
netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
|
|
|
|
__func__, (u64)lif->netdev->features, (u64)features);
|
|
|
|
|
|
|
|
err = ionic_set_nic_features(lif, features);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ionic_set_mac_address(struct net_device *netdev, void *sa)
|
|
|
|
{
|
2019-09-03 22:28:14 +00:00
|
|
|
struct sockaddr *addr = sa;
|
|
|
|
u8 *mac;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
mac = (u8 *)addr->sa_data;
|
|
|
|
if (ether_addr_equal(netdev->dev_addr, mac))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err = eth_prepare_mac_addr_change(netdev, addr);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
if (!is_zero_ether_addr(netdev->dev_addr)) {
|
|
|
|
netdev_info(netdev, "deleting mac addr %pM\n",
|
|
|
|
netdev->dev_addr);
|
ionic: sync the filters in the work task
In order to separate the atomic needs of __dev_uc_sync()
and __dev_mc_sync() from the safe rx_mode handling, we need
to have the ndo handler manipulate the driver's filter list,
and later have the driver sync the filters to the firmware,
outside of the atomic context.
Here we put __dev_mc_sync() and __dev_uc_sync() back into the
ndo callback to give them their netif_addr_lock context and
have them update the driver's filter list, flagging changes
that should be made to the device filter list. Later, in the
rx_mode handler, we read those hints and sync up the device's
list as needed.
It is possible for multiple add/delete requests to come from
the stack before the rx_mode task processes the list, but the
handling of the sync status flag should keep everything sorted
correctly. For example, if a delete of an existing filter is
followed by another add before the rx_mode task is run, as can
happen when going in and out of a bond, the add will cancel
the delete and no actual changes will be sent to the device.
We also add a check in the watchdog to see if there are any
stray unsync'd filters, possibly left over from a filter
overflow and waiting to get sync'd after some other filter
gets removed to make room.
Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-08-26 01:24:48 +00:00
|
|
|
ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr);
|
2019-09-03 22:28:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
eth_commit_mac_addr_change(netdev, addr);
|
|
|
|
netdev_info(netdev, "updating mac addr %pM\n", mac);
|
|
|
|
|
ionic: sync the filters in the work task
In order to separate the atomic needs of __dev_uc_sync()
and __dev_mc_sync() from the safe rx_mode handling, we need
to have the ndo handler manipulate the driver's filter list,
and later have the driver sync the filters to the firmware,
outside of the atomic context.
Here we put __dev_mc_sync() and __dev_uc_sync() back into the
ndo callback to give them their netif_addr_lock context and
have them update the driver's filter list, flagging changes
that should be made to the device filter list. Later, in the
rx_mode handler, we read those hints and sync up the device's
list as needed.
It is possible for multiple add/delete requests to come from
the stack before the rx_mode task processes the list, but the
handling of the sync status flag should keep everything sorted
correctly. For example, if a delete of an existing filter is
followed by another add before the rx_mode task is run, as can
happen when going in and out of a bond, the add will cancel
the delete and no actual changes will be sent to the device.
We also add a check in the watchdog to see if there are any
stray unsync'd filters, possibly left over from a filter
overflow and waiting to get sync'd after some other filter
gets removed to make room.
Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-08-26 01:24:48 +00:00
|
|
|
return ionic_lif_addr_add(netdev_priv(netdev), mac);
|
2019-09-03 22:28:12 +00:00
|
|
|
}
|
|
|
|
|
2020-08-27 23:00:27 +00:00
|
|
|
/* Quiesce the interface ahead of a queue reconfiguration: detach the
 * netdev so the stack stops submitting, stop the queues, then tear
 * down the tx/rx resources.  Paired with ionic_start_queues_reconfig().
 */
static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
	/* Stop and clean the queues before reconfiguration */
	netif_device_detach(lif->netdev);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
}
|
|
|
|
|
|
|
|
/* Bring the interface back up after a queue reconfiguration: rebuild
 * the tx/rx resources, kick off a link status check, and re-attach the
 * netdev.  Returns the result of the queue re-init; the attach is done
 * regardless so the device stays usable (see comment below).
 */
static int ionic_start_queues_reconfig(struct ionic_lif *lif)
{
	int err;

	/* Re-init the queues after reconfiguration */

	/* The only way txrx_init can fail here is if communication
	 * with FW is suddenly broken.  There's not much we can do
	 * at this point - error messages have already been printed,
	 * so we can continue on and the user can eventually do a
	 * DOWN and UP to try to reset and clear the issue.
	 */
	err = ionic_txrx_init(lif);
	ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
	netif_device_attach(lif->netdev);

	return err;
}
|
|
|
|
|
2019-09-03 22:28:12 +00:00
|
|
|
/* ndo_change_mtu callback: tell the device the new MTU first, then
 * (only if the interface is running) restart the queues around the
 * netdev->mtu update so the rings are rebuilt for the new size.
 *
 * Returns 0 on success, the adminq error if the device rejected the
 * MTU, or the queue-restart error.
 */
static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* if we're not running, nothing more to do */
	if (!netif_running(netdev)) {
		netdev->mtu = new_mtu;
		return 0;
	}

	/* stop/start under queue_lock; mtu is updated while the queues
	 * are down so they come back up sized for the new MTU
	 */
	mutex_lock(&lif->queue_lock);
	ionic_stop_queues_reconfig(lif);
	netdev->mtu = new_mtu;
	err = ionic_start_queues_reconfig(lif);
	mutex_unlock(&lif->queue_lock);

	return err;
}
|
|
|
|
|
2019-09-03 22:28:21 +00:00
|
|
|
static void ionic_tx_timeout_work(struct work_struct *ws)
|
|
|
|
{
|
|
|
|
struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
|
|
|
|
|
2021-03-19 00:48:08 +00:00
|
|
|
if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
|
|
|
|
return;
|
2019-09-03 22:28:21 +00:00
|
|
|
|
2020-08-27 23:00:30 +00:00
|
|
|
/* if we were stopped before this scheduled job was launched,
|
|
|
|
* don't bother the queues as they are already stopped.
|
|
|
|
*/
|
|
|
|
if (!netif_running(lif->netdev))
|
|
|
|
return;
|
|
|
|
|
2021-09-02 16:34:07 +00:00
|
|
|
mutex_lock(&lif->queue_lock);
|
2020-08-27 23:00:30 +00:00
|
|
|
ionic_stop_queues_reconfig(lif);
|
|
|
|
ionic_start_queues_reconfig(lif);
|
2021-09-02 16:34:07 +00:00
|
|
|
mutex_unlock(&lif->queue_lock);
|
2019-09-03 22:28:21 +00:00
|
|
|
}
|
|
|
|
|
netdev: pass the stuck queue to the timeout handler
This allows incrementing the correct timeout statistic without any mess.
Down the road, devices can learn to reset just the specific queue.
The patch was generated with the following script:
use strict;
use warnings;
our $^I = '.bak';
my @work = (
["arch/m68k/emu/nfeth.c", "nfeth_tx_timeout"],
["arch/um/drivers/net_kern.c", "uml_net_tx_timeout"],
["arch/um/drivers/vector_kern.c", "vector_net_tx_timeout"],
["arch/xtensa/platforms/iss/network.c", "iss_net_tx_timeout"],
["drivers/char/pcmcia/synclink_cs.c", "hdlcdev_tx_timeout"],
["drivers/infiniband/ulp/ipoib/ipoib_main.c", "ipoib_timeout"],
["drivers/infiniband/ulp/ipoib/ipoib_main.c", "ipoib_timeout"],
["drivers/message/fusion/mptlan.c", "mpt_lan_tx_timeout"],
["drivers/misc/sgi-xp/xpnet.c", "xpnet_dev_tx_timeout"],
["drivers/net/appletalk/cops.c", "cops_timeout"],
["drivers/net/arcnet/arcdevice.h", "arcnet_timeout"],
["drivers/net/arcnet/arcnet.c", "arcnet_timeout"],
["drivers/net/arcnet/com20020.c", "arcnet_timeout"],
["drivers/net/ethernet/3com/3c509.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c515.c", "corkscrew_timeout"],
["drivers/net/ethernet/3com/3c574_cs.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c589_cs.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c59x.c", "vortex_tx_timeout"],
["drivers/net/ethernet/3com/3c59x.c", "vortex_tx_timeout"],
["drivers/net/ethernet/3com/typhoon.c", "typhoon_tx_timeout"],
["drivers/net/ethernet/8390/8390.h", "ei_tx_timeout"],
["drivers/net/ethernet/8390/8390.h", "eip_tx_timeout"],
["drivers/net/ethernet/8390/8390.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/8390p.c", "eip_tx_timeout"],
["drivers/net/ethernet/8390/ax88796.c", "ax_ei_tx_timeout"],
["drivers/net/ethernet/8390/axnet_cs.c", "axnet_tx_timeout"],
["drivers/net/ethernet/8390/etherh.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/hydra.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/mac8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/mcf8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/lib8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/ne2k-pci.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/pcnet_cs.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/smc-ultra.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/wd.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/zorro8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/adaptec/starfire.c", "tx_timeout"],
["drivers/net/ethernet/agere/et131x.c", "et131x_tx_timeout"],
["drivers/net/ethernet/allwinner/sun4i-emac.c", "emac_timeout"],
["drivers/net/ethernet/alteon/acenic.c", "ace_watchdog"],
["drivers/net/ethernet/amazon/ena/ena_netdev.c", "ena_tx_timeout"],
["drivers/net/ethernet/amd/7990.h", "lance_tx_timeout"],
["drivers/net/ethernet/amd/7990.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/a2065.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/am79c961a.c", "am79c961_timeout"],
["drivers/net/ethernet/amd/amd8111e.c", "amd8111e_tx_timeout"],
["drivers/net/ethernet/amd/ariadne.c", "ariadne_tx_timeout"],
["drivers/net/ethernet/amd/atarilance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/au1000_eth.c", "au1000_tx_timeout"],
["drivers/net/ethernet/amd/declance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/lance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/mvme147.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/ni65.c", "ni65_timeout"],
["drivers/net/ethernet/amd/nmclan_cs.c", "mace_tx_timeout"],
["drivers/net/ethernet/amd/pcnet32.c", "pcnet32_tx_timeout"],
["drivers/net/ethernet/amd/sunlance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/xgbe/xgbe-drv.c", "xgbe_tx_timeout"],
["drivers/net/ethernet/apm/xgene-v2/main.c", "xge_timeout"],
["drivers/net/ethernet/apm/xgene/xgene_enet_main.c", "xgene_enet_timeout"],
["drivers/net/ethernet/apple/macmace.c", "mace_tx_timeout"],
["drivers/net/ethernet/atheros/ag71xx.c", "ag71xx_tx_timeout"],
["drivers/net/ethernet/atheros/alx/main.c", "alx_tx_timeout"],
["drivers/net/ethernet/atheros/atl1c/atl1c_main.c", "atl1c_tx_timeout"],
["drivers/net/ethernet/atheros/atl1e/atl1e_main.c", "atl1e_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl.c", "atlx_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl1.c", "atlx_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl2.c", "atl2_tx_timeout"],
["drivers/net/ethernet/broadcom/b44.c", "b44_tx_timeout"],
["drivers/net/ethernet/broadcom/bcmsysport.c", "bcm_sysport_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2.c", "bnx2_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnxt/bnxt.c", "bnxt_tx_timeout"],
["drivers/net/ethernet/broadcom/genet/bcmgenet.c", "bcmgenet_timeout"],
["drivers/net/ethernet/broadcom/sb1250-mac.c", "sbmac_tx_timeout"],
["drivers/net/ethernet/broadcom/tg3.c", "tg3_tx_timeout"],
["drivers/net/ethernet/calxeda/xgmac.c", "xgmac_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_main.c", "liquidio_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_vf_main.c", "liquidio_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c", "lio_vf_rep_tx_timeout"],
["drivers/net/ethernet/cavium/thunder/nicvf_main.c", "nicvf_tx_timeout"],
["drivers/net/ethernet/cirrus/cs89x0.c", "net_timeout"],
["drivers/net/ethernet/cisco/enic/enic_main.c", "enic_tx_timeout"],
["drivers/net/ethernet/cisco/enic/enic_main.c", "enic_tx_timeout"],
["drivers/net/ethernet/cortina/gemini.c", "gmac_tx_timeout"],
["drivers/net/ethernet/davicom/dm9000.c", "dm9000_timeout"],
["drivers/net/ethernet/dec/tulip/de2104x.c", "de_tx_timeout"],
["drivers/net/ethernet/dec/tulip/tulip_core.c", "tulip_tx_timeout"],
["drivers/net/ethernet/dec/tulip/winbond-840.c", "tx_timeout"],
["drivers/net/ethernet/dlink/dl2k.c", "rio_tx_timeout"],
["drivers/net/ethernet/dlink/sundance.c", "tx_timeout"],
["drivers/net/ethernet/emulex/benet/be_main.c", "be_tx_timeout"],
["drivers/net/ethernet/ethoc.c", "ethoc_tx_timeout"],
["drivers/net/ethernet/faraday/ftgmac100.c", "ftgmac100_tx_timeout"],
["drivers/net/ethernet/fealnx.c", "fealnx_tx_timeout"],
["drivers/net/ethernet/freescale/dpaa/dpaa_eth.c", "dpaa_tx_timeout"],
["drivers/net/ethernet/freescale/fec_main.c", "fec_timeout"],
["drivers/net/ethernet/freescale/fec_mpc52xx.c", "mpc52xx_fec_tx_timeout"],
["drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c", "fs_timeout"],
["drivers/net/ethernet/freescale/gianfar.c", "gfar_timeout"],
["drivers/net/ethernet/freescale/ucc_geth.c", "ucc_geth_timeout"],
["drivers/net/ethernet/fujitsu/fmvj18x_cs.c", "fjn_tx_timeout"],
["drivers/net/ethernet/google/gve/gve_main.c", "gve_tx_timeout"],
["drivers/net/ethernet/hisilicon/hip04_eth.c", "hip04_timeout"],
["drivers/net/ethernet/hisilicon/hix5hd2_gmac.c", "hix5hd2_net_timeout"],
["drivers/net/ethernet/hisilicon/hns/hns_enet.c", "hns_nic_net_timeout"],
["drivers/net/ethernet/hisilicon/hns3/hns3_enet.c", "hns3_nic_net_timeout"],
["drivers/net/ethernet/huawei/hinic/hinic_main.c", "hinic_tx_timeout"],
["drivers/net/ethernet/i825xx/82596.c", "i596_tx_timeout"],
["drivers/net/ethernet/i825xx/ether1.c", "ether1_timeout"],
["drivers/net/ethernet/i825xx/lib82596.c", "i596_tx_timeout"],
["drivers/net/ethernet/i825xx/sun3_82586.c", "sun3_82586_timeout"],
["drivers/net/ethernet/ibm/ehea/ehea_main.c", "ehea_tx_watchdog"],
["drivers/net/ethernet/ibm/emac/core.c", "emac_tx_timeout"],
["drivers/net/ethernet/ibm/emac/core.c", "emac_tx_timeout"],
["drivers/net/ethernet/ibm/ibmvnic.c", "ibmvnic_tx_timeout"],
["drivers/net/ethernet/intel/e100.c", "e100_tx_timeout"],
["drivers/net/ethernet/intel/e1000/e1000_main.c", "e1000_tx_timeout"],
["drivers/net/ethernet/intel/e1000e/netdev.c", "e1000_tx_timeout"],
["drivers/net/ethernet/intel/fm10k/fm10k_netdev.c", "fm10k_tx_timeout"],
["drivers/net/ethernet/intel/i40e/i40e_main.c", "i40e_tx_timeout"],
["drivers/net/ethernet/intel/iavf/iavf_main.c", "iavf_tx_timeout"],
["drivers/net/ethernet/intel/ice/ice_main.c", "ice_tx_timeout"],
["drivers/net/ethernet/intel/ice/ice_main.c", "ice_tx_timeout"],
["drivers/net/ethernet/intel/igb/igb_main.c", "igb_tx_timeout"],
["drivers/net/ethernet/intel/igbvf/netdev.c", "igbvf_tx_timeout"],
["drivers/net/ethernet/intel/ixgb/ixgb_main.c", "ixgb_tx_timeout"],
["drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c", "adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);"],
["drivers/net/ethernet/intel/ixgbe/ixgbe_main.c", "ixgbe_tx_timeout"],
["drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c", "ixgbevf_tx_timeout"],
["drivers/net/ethernet/jme.c", "jme_tx_timeout"],
["drivers/net/ethernet/korina.c", "korina_tx_timeout"],
["drivers/net/ethernet/lantiq_etop.c", "ltq_etop_tx_timeout"],
["drivers/net/ethernet/marvell/mv643xx_eth.c", "mv643xx_eth_tx_timeout"],
["drivers/net/ethernet/marvell/pxa168_eth.c", "pxa168_eth_tx_timeout"],
["drivers/net/ethernet/marvell/skge.c", "skge_tx_timeout"],
["drivers/net/ethernet/marvell/sky2.c", "sky2_tx_timeout"],
["drivers/net/ethernet/marvell/sky2.c", "sky2_tx_timeout"],
["drivers/net/ethernet/mediatek/mtk_eth_soc.c", "mtk_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx4/en_netdev.c", "mlx4_en_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx4/en_netdev.c", "mlx4_en_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx5/core/en_main.c", "mlx5e_tx_timeout"],
["drivers/net/ethernet/micrel/ks8842.c", "ks8842_tx_timeout"],
["drivers/net/ethernet/micrel/ksz884x.c", "netdev_tx_timeout"],
["drivers/net/ethernet/microchip/enc28j60.c", "enc28j60_tx_timeout"],
["drivers/net/ethernet/microchip/encx24j600.c", "encx24j600_tx_timeout"],
["drivers/net/ethernet/natsemi/sonic.h", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/sonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/jazzsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/macsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/natsemi.c", "ns_tx_timeout"],
["drivers/net/ethernet/natsemi/ns83820.c", "ns83820_tx_timeout"],
["drivers/net/ethernet/natsemi/xtsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/neterion/s2io.h", "s2io_tx_watchdog"],
["drivers/net/ethernet/neterion/s2io.c", "s2io_tx_watchdog"],
["drivers/net/ethernet/neterion/vxge/vxge-main.c", "vxge_tx_watchdog"],
["drivers/net/ethernet/netronome/nfp/nfp_net_common.c", "nfp_net_tx_timeout"],
["drivers/net/ethernet/nvidia/forcedeth.c", "nv_tx_timeout"],
["drivers/net/ethernet/nvidia/forcedeth.c", "nv_tx_timeout"],
["drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c", "pch_gbe_tx_timeout"],
["drivers/net/ethernet/packetengines/hamachi.c", "hamachi_tx_timeout"],
["drivers/net/ethernet/packetengines/yellowfin.c", "yellowfin_tx_timeout"],
["drivers/net/ethernet/pensando/ionic/ionic_lif.c", "ionic_tx_timeout"],
["drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c", "netxen_tx_timeout"],
["drivers/net/ethernet/qlogic/qla3xxx.c", "ql3xxx_tx_timeout"],
["drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c", "qlcnic_tx_timeout"],
["drivers/net/ethernet/qualcomm/emac/emac.c", "emac_tx_timeout"],
["drivers/net/ethernet/qualcomm/qca_spi.c", "qcaspi_netdev_tx_timeout"],
["drivers/net/ethernet/qualcomm/qca_uart.c", "qcauart_netdev_tx_timeout"],
["drivers/net/ethernet/rdc/r6040.c", "r6040_tx_timeout"],
["drivers/net/ethernet/realtek/8139cp.c", "cp_tx_timeout"],
["drivers/net/ethernet/realtek/8139too.c", "rtl8139_tx_timeout"],
["drivers/net/ethernet/realtek/atp.c", "tx_timeout"],
["drivers/net/ethernet/realtek/r8169_main.c", "rtl8169_tx_timeout"],
["drivers/net/ethernet/renesas/ravb_main.c", "ravb_tx_timeout"],
["drivers/net/ethernet/renesas/sh_eth.c", "sh_eth_tx_timeout"],
["drivers/net/ethernet/renesas/sh_eth.c", "sh_eth_tx_timeout"],
["drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c", "sxgbe_tx_timeout"],
["drivers/net/ethernet/seeq/ether3.c", "ether3_timeout"],
["drivers/net/ethernet/seeq/sgiseeq.c", "timeout"],
["drivers/net/ethernet/sfc/efx.c", "efx_watchdog"],
["drivers/net/ethernet/sfc/falcon/efx.c", "ef4_watchdog"],
["drivers/net/ethernet/sgi/ioc3-eth.c", "ioc3_timeout"],
["drivers/net/ethernet/sgi/meth.c", "meth_tx_timeout"],
["drivers/net/ethernet/silan/sc92031.c", "sc92031_tx_timeout"],
["drivers/net/ethernet/sis/sis190.c", "sis190_tx_timeout"],
["drivers/net/ethernet/sis/sis900.c", "sis900_tx_timeout"],
["drivers/net/ethernet/smsc/epic100.c", "epic_tx_timeout"],
["drivers/net/ethernet/smsc/smc911x.c", "smc911x_timeout"],
["drivers/net/ethernet/smsc/smc9194.c", "smc_timeout"],
["drivers/net/ethernet/smsc/smc91c92_cs.c", "smc_tx_timeout"],
["drivers/net/ethernet/smsc/smc91x.c", "smc_timeout"],
["drivers/net/ethernet/stmicro/stmmac/stmmac_main.c", "stmmac_tx_timeout"],
["drivers/net/ethernet/sun/cassini.c", "cas_tx_timeout"],
["drivers/net/ethernet/sun/ldmvsw.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/niu.c", "niu_tx_timeout"],
["drivers/net/ethernet/sun/sunbmac.c", "bigmac_tx_timeout"],
["drivers/net/ethernet/sun/sungem.c", "gem_tx_timeout"],
["drivers/net/ethernet/sun/sunhme.c", "happy_meal_tx_timeout"],
["drivers/net/ethernet/sun/sunqe.c", "qe_tx_timeout"],
["drivers/net/ethernet/sun/sunvnet.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/sunvnet_common.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/sunvnet_common.h", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/synopsys/dwc-xlgmac-net.c", "xlgmac_tx_timeout"],
["drivers/net/ethernet/ti/cpmac.c", "cpmac_tx_timeout"],
["drivers/net/ethernet/ti/cpsw.c", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/cpsw_priv.c", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/cpsw_priv.h", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/davinci_emac.c", "emac_dev_tx_timeout"],
["drivers/net/ethernet/ti/netcp_core.c", "netcp_ndo_tx_timeout"],
["drivers/net/ethernet/ti/tlan.c", "tlan_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_net.h", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_net.c", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_wireless.c", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/spider_net.c", "spider_net_tx_timeout"],
["drivers/net/ethernet/toshiba/tc35815.c", "tc35815_tx_timeout"],
["drivers/net/ethernet/via/via-rhine.c", "rhine_tx_timeout"],
["drivers/net/ethernet/wiznet/w5100.c", "w5100_tx_timeout"],
["drivers/net/ethernet/wiznet/w5300.c", "w5300_tx_timeout"],
["drivers/net/ethernet/xilinx/xilinx_emaclite.c", "xemaclite_tx_timeout"],
["drivers/net/ethernet/xircom/xirc2ps_cs.c", "xirc_tx_timeout"],
["drivers/net/fjes/fjes_main.c", "fjes_tx_retry"],
["drivers/net/slip/slip.c", "sl_tx_timeout"],
["include/linux/usb/usbnet.h", "usbnet_tx_timeout"],
["drivers/net/usb/aqc111.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/ax88172a.c", "usbnet_tx_timeout"],
["drivers/net/usb/ax88179_178a.c", "usbnet_tx_timeout"],
["drivers/net/usb/catc.c", "catc_tx_timeout"],
["drivers/net/usb/cdc_mbim.c", "usbnet_tx_timeout"],
["drivers/net/usb/cdc_ncm.c", "usbnet_tx_timeout"],
["drivers/net/usb/dm9601.c", "usbnet_tx_timeout"],
["drivers/net/usb/hso.c", "hso_net_tx_timeout"],
["drivers/net/usb/int51x1.c", "usbnet_tx_timeout"],
["drivers/net/usb/ipheth.c", "ipheth_tx_timeout"],
["drivers/net/usb/kaweth.c", "kaweth_tx_timeout"],
["drivers/net/usb/lan78xx.c", "lan78xx_tx_timeout"],
["drivers/net/usb/mcs7830.c", "usbnet_tx_timeout"],
["drivers/net/usb/pegasus.c", "pegasus_tx_timeout"],
["drivers/net/usb/qmi_wwan.c", "usbnet_tx_timeout"],
["drivers/net/usb/r8152.c", "rtl8152_tx_timeout"],
["drivers/net/usb/rndis_host.c", "usbnet_tx_timeout"],
["drivers/net/usb/rtl8150.c", "rtl8150_tx_timeout"],
["drivers/net/usb/sierra_net.c", "usbnet_tx_timeout"],
["drivers/net/usb/smsc75xx.c", "usbnet_tx_timeout"],
["drivers/net/usb/smsc95xx.c", "usbnet_tx_timeout"],
["drivers/net/usb/sr9700.c", "usbnet_tx_timeout"],
["drivers/net/usb/sr9800.c", "usbnet_tx_timeout"],
["drivers/net/usb/usbnet.c", "usbnet_tx_timeout"],
["drivers/net/vmxnet3/vmxnet3_drv.c", "vmxnet3_tx_timeout"],
["drivers/net/wan/cosa.c", "cosa_net_timeout"],
["drivers/net/wan/farsync.c", "fst_tx_timeout"],
["drivers/net/wan/fsl_ucc_hdlc.c", "uhdlc_tx_timeout"],
["drivers/net/wan/lmc/lmc_main.c", "lmc_driver_timeout"],
["drivers/net/wan/x25_asy.c", "x25_asy_timeout"],
["drivers/net/wimax/i2400m/netdev.c", "i2400m_tx_timeout"],
["drivers/net/wireless/intel/ipw2x00/ipw2100.c", "ipw2100_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/main.c", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/orinoco_usb.c", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/orinoco.h", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_dev.c", "islpci_eth_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_eth.c", "islpci_eth_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_eth.h", "islpci_eth_tx_timeout"],
["drivers/net/wireless/marvell/mwifiex/main.c", "mwifiex_tx_timeout"],
["drivers/net/wireless/quantenna/qtnfmac/core.c", "qtnf_netdev_tx_timeout"],
["drivers/net/wireless/quantenna/qtnfmac/core.h", "qtnf_netdev_tx_timeout"],
["drivers/net/wireless/rndis_wlan.c", "usbnet_tx_timeout"],
["drivers/net/wireless/wl3501_cs.c", "wl3501_tx_timeout"],
["drivers/net/wireless/zydas/zd1201.c", "zd1201_tx_timeout"],
["drivers/s390/net/qeth_core.h", "qeth_tx_timeout"],
["drivers/s390/net/qeth_core_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"],
["drivers/staging/ks7010/ks_wlan_net.c", "ks_wlan_tx_timeout"],
["drivers/staging/qlge/qlge_main.c", "qlge_tx_timeout"],
["drivers/staging/rtl8192e/rtl8192e/rtl_core.c", "_rtl92e_tx_timeout"],
["drivers/staging/rtl8192u/r8192U_core.c", "tx_timeout"],
["drivers/staging/unisys/visornic/visornic_main.c", "visornic_xmit_timeout"],
["drivers/staging/wlan-ng/p80211netdev.c", "p80211knetdev_tx_timeout"],
["drivers/tty/n_gsm.c", "gsm_mux_net_tx_timeout"],
["drivers/tty/synclink.c", "hdlcdev_tx_timeout"],
["drivers/tty/synclink_gt.c", "hdlcdev_tx_timeout"],
["drivers/tty/synclinkmp.c", "hdlcdev_tx_timeout"],
["net/atm/lec.c", "lec_tx_timeout"],
["net/bluetooth/bnep/netdev.c", "bnep_net_timeout"]
);
for my $p (@work) {
my @pair = @$p;
my $file = $pair[0];
my $func = $pair[1];
print STDERR $file , ": ", $func,"\n";
our @ARGV = ($file);
while (<ARGV>) {
if (m/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/) {
print STDERR "found $1+$2 in $file\n";
}
if (s/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/$1, unsigned int txqueue$2/) {
print STDERR "$func found in $file\n";
}
print;
}
}
where the list of files and functions is simply from:
git grep ndo_tx_timeout, with manual addition of headers
in the rare cases where the function is from a header,
then manually changing the few places which actually
call ndo_tx_timeout.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Heiner Kallweit <hkallweit1@gmail.com>
Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Shannon Nelson <snelson@pensando.io>
Reviewed-by: Martin Habets <mhabets@solarflare.com>
changes from v9:
fixup a forward declaration
changes from v9:
more leftovers from v3 change
changes from v8:
fix up a missing direct call to timeout
rebased on net-next
changes from v7:
fixup leftovers from v3 change
changes from v6:
fix typo in rtl driver
changes from v5:
add missing files (allow any net device argument name)
changes from v4:
add a missing driver header
changes from v3:
change queue # to unsigned
Changes from v2:
added headers
Changes from v1:
Fix errors found by kbuild:
generalize the pattern a bit, to pick up
a couple of instances missed by the previous
version.
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-12-10 14:23:51 +00:00
|
|
|
static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
|
2019-09-03 22:28:12 +00:00
|
|
|
{
|
2019-09-03 22:28:21 +00:00
|
|
|
struct ionic_lif *lif = netdev_priv(netdev);
|
|
|
|
|
2021-03-19 00:48:08 +00:00
|
|
|
netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue);
|
2019-09-03 22:28:21 +00:00
|
|
|
schedule_work(&lif->tx_timeout_work);
|
2019-09-03 22:28:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
|
|
|
|
u16 vid)
|
|
|
|
{
|
2019-09-03 22:28:14 +00:00
|
|
|
struct ionic_lif *lif = netdev_priv(netdev);
|
|
|
|
int err;
|
|
|
|
|
ionic: handle vlan id overflow
Add vlans to the existing rx_filter_sync mechanics currently
used for managing mac filters.
Older versions of our firmware had no enforced limits on the
number of vlans that the LIF could request, but requesting large
numbers of vlans caused issues in FW memory management, so an
arbitrary limit was added in the FW. The FW now returns -ENOSPC
when it hits that limit, which the driver needs to handle.
Unfortunately, the FW doesn't advertise the vlan id limit,
as it does with mac filters, so the driver won't know the
limit until it bumps into it. We'll grab the current vlan id
count and use that as the limit from there on and thus prevent
getting any more -ENOSPC errors.
Just as is done for the mac filters, the device puts the device
into promiscuous mode when -ENOSPC is seen for vlan ids, and
the driver will track the vlans that aren't synced to the FW.
When vlans are removed, the driver will retry the un-synced
vlans. If all outstanding vlans are synced, the promiscuous
mode will be disabled.
Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-10-09 18:45:21 +00:00
|
|
|
err = ionic_lif_vlan_add(lif, vid);
|
2019-09-03 22:28:14 +00:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
ionic: handle vlan id overflow
Add vlans to the existing rx_filter_sync mechanics currently
used for managing mac filters.
Older versions of our firmware had no enforced limits on the
number of vlans that the LIF could request, but requesting large
numbers of vlans caused issues in FW memory management, so an
arbitrary limit was added in the FW. The FW now returns -ENOSPC
when it hits that limit, which the driver needs to handle.
Unfortunately, the FW doesn't advertise the vlan id limit,
as it does with mac filters, so the driver won't know the
limit until it bumps into it. We'll grab the current vlan id
count and use that as the limit from there on and thus prevent
getting any more -ENOSPC errors.
Just as is done for the mac filters, the device puts the device
into promiscuous mode when -ENOSPC is seen for vlan ids, and
the driver will track the vlans that aren't synced to the FW.
When vlans are removed, the driver will retry the un-synced
vlans. If all outstanding vlans are synced, the promiscuous
mode will be disabled.
Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-10-09 18:45:21 +00:00
|
|
|
ionic_lif_rx_mode(lif);
|
ionic: sync the filters in the work task
In order to separate the atomic needs of __dev_uc_sync()
and __dev_mc_sync() from the safe rx_mode handling, we need
to have the ndo handler manipulate the driver's filter list,
and later have the driver sync the filters to the firmware,
outside of the atomic context.
Here we put __dev_mc_sync() and __dev_uc_sync() back into the
ndo callback to give them their netif_addr_lock context and
have them update the driver's filter list, flagging changes
that should be made to the device filter list. Later, in the
rx_mode handler, we read those hints and sync up the device's
list as needed.
It is possible for multiple add/delete requests to come from
the stack before the rx_mode task processes the list, but the
handling of the sync status flag should keep everything sorted
correctly. For example, if a delete of an existing filter is
followed by another add before the rx_mode task is run, as can
happen when going in and out of a bond, the add will cancel
the delete and no actual changes will be sent to the device.
We also add a check in the watchdog to see if there are any
stray unsync'd filters, possibly left over from a filter
overflow and waiting to get sync'd after some other filter
gets removed to make room.
Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-08-26 01:24:48 +00:00
|
|
|
|
ionic: handle vlan id overflow
Add vlans to the existing rx_filter_sync mechanics currently
used for managing mac filters.
Older versions of our firmware had no enforced limits on the
number of vlans that the LIF could request, but requesting large
numbers of vlans caused issues in FW memory management, so an
arbitrary limit was added in the FW. The FW now returns -ENOSPC
when it hits that limit, which the driver needs to handle.
Unfortunately, the FW doesn't advertise the vlan id limit,
as it does with mac filters, so the driver won't know the
limit until it bumps into it. We'll grab the current vlan id
count and use that as the limit from there on and thus prevent
getting any more -ENOSPC errors.
Just as is done for the mac filters, the device puts the device
into promiscuous mode when -ENOSPC is seen for vlan ids, and
the driver will track the vlans that aren't synced to the FW.
When vlans are removed, the driver will retry the un-synced
vlans. If all outstanding vlans are synced, the promiscuous
mode will be disabled.
Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-10-09 18:45:21 +00:00
|
|
|
return 0;
|
2019-09-03 22:28:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
|
|
|
|
u16 vid)
|
|
|
|
{
|
2019-09-03 22:28:14 +00:00
|
|
|
struct ionic_lif *lif = netdev_priv(netdev);
|
ionic: handle vlan id overflow
Add vlans to the existing rx_filter_sync mechanics currently
used for managing mac filters.
Older versions of our firmware had no enforced limits on the
number of vlans that the LIF could request, but requesting large
numbers of vlans caused issues in FW memory management, so an
arbitrary limit was added in the FW. The FW now returns -ENOSPC
when it hits that limit, which the driver needs to handle.
Unfortunately, the FW doesn't advertise the vlan id limit,
as it does with mac filters, so the driver won't know the
limit until it bumps into it. We'll grab the current vlan id
count and use that as the limit from there on and thus prevent
getting any more -ENOSPC errors.
Just as is done for the mac filters, the device puts the device
into promiscuous mode when -ENOSPC is seen for vlan ids, and
the driver will track the vlans that aren't synced to the FW.
When vlans are removed, the driver will retry the un-synced
vlans. If all outstanding vlans are synced, the promiscuous
mode will be disabled.
Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-10-09 18:45:21 +00:00
|
|
|
int err;
|
2019-09-03 22:28:14 +00:00
|
|
|
|
ionic: handle vlan id overflow
Add vlans to the existing rx_filter_sync mechanics currently
used for managing mac filters.
Older versions of our firmware had no enforced limits on the
number of vlans that the LIF could request, but requesting large
numbers of vlans caused issues in FW memory management, so an
arbitrary limit was added in the FW. The FW now returns -ENOSPC
when it hits that limit, which the driver needs to handle.
Unfortunately, the FW doesn't advertise the vlan id limit,
as it does with mac filters, so the driver won't know the
limit until it bumps into it. We'll grab the current vlan id
count and use that as the limit from there on and thus prevent
getting any more -ENOSPC errors.
Just as is done for the mac filters, the device puts the device
into promiscuous mode when -ENOSPC is seen for vlan ids, and
the driver will track the vlans that aren't synced to the FW.
When vlans are removed, the driver will retry the un-synced
vlans. If all outstanding vlans are synced, the promiscuous
mode will be disabled.
Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-10-09 18:45:21 +00:00
|
|
|
err = ionic_lif_vlan_del(lif, vid);
|
|
|
|
if (err)
|
|
|
|
return err;
|
2019-09-03 22:28:14 +00:00
|
|
|
|
ionic: handle vlan id overflow
Add vlans to the existing rx_filter_sync mechanics currently
used for managing mac filters.
Older versions of our firmware had no enforced limits on the
number of vlans that the LIF could request, but requesting large
numbers of vlans caused issues in FW memory management, so an
arbitrary limit was added in the FW. The FW now returns -ENOSPC
when it hits that limit, which the driver needs to handle.
Unfortunately, the FW doesn't advertise the vlan id limit,
as it does with mac filters, so the driver won't know the
limit until it bumps into it. We'll grab the current vlan id
count and use that as the limit from there on and thus prevent
getting any more -ENOSPC errors.
Just as is done for the mac filters, the device puts the device
into promiscuous mode when -ENOSPC is seen for vlan ids, and
the driver will track the vlans that aren't synced to the FW.
When vlans are removed, the driver will retry the un-synced
vlans. If all outstanding vlans are synced, the promiscuous
mode will be disabled.
Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-10-09 18:45:21 +00:00
|
|
|
ionic_lif_rx_mode(lif);
|
2019-09-03 22:28:14 +00:00
|
|
|
|
ionic: handle vlan id overflow
Add vlans to the existing rx_filter_sync mechanics currently
used for managing mac filters.
Older versions of our firmware had no enforced limits on the
number of vlans that the LIF could request, but requesting large
numbers of vlans caused issues in FW memory management, so an
arbitrary limit was added in the FW. The FW now returns -ENOSPC
when it hits that limit, which the driver needs to handle.
Unfortunately, the FW doesn't advertise the vlan id limit,
as it does with mac filters, so the driver won't know the
limit until it bumps into it. We'll grab the current vlan id
count and use that as the limit from there on and thus prevent
getting any more -ENOSPC errors.
Just as is done for the mac filters, the device puts the device
into promiscuous mode when -ENOSPC is seen for vlan ids, and
the driver will track the vlans that aren't synced to the FW.
When vlans are removed, the driver will retry the un-synced
vlans. If all outstanding vlans are synced, the promiscuous
mode will be disabled.
Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-10-09 18:45:21 +00:00
|
|
|
return 0;
|
2019-09-03 22:28:12 +00:00
|
|
|
}
|
|
|
|
|
2019-09-03 22:28:20 +00:00
|
|
|
int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
|
|
|
|
const u8 *key, const u32 *indir)
|
|
|
|
{
|
|
|
|
struct ionic_admin_ctx ctx = {
|
|
|
|
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
|
|
|
|
.cmd.lif_setattr = {
|
|
|
|
.opcode = IONIC_CMD_LIF_SETATTR,
|
|
|
|
.attr = IONIC_LIF_ATTR_RSS,
|
|
|
|
.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
|
|
|
|
},
|
|
|
|
};
|
|
|
|
unsigned int i, tbl_sz;
|
|
|
|
|
2020-03-07 01:04:05 +00:00
|
|
|
if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
|
|
|
|
lif->rss_types = types;
|
|
|
|
ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
|
|
|
|
}
|
2019-09-03 22:28:20 +00:00
|
|
|
|
|
|
|
if (key)
|
|
|
|
memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
|
|
|
|
|
|
|
|
if (indir) {
|
|
|
|
tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
|
|
|
|
for (i = 0; i < tbl_sz; i++)
|
|
|
|
lif->rss_ind_tbl[i] = indir[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
|
|
|
|
IONIC_RSS_HASH_KEY_SIZE);
|
|
|
|
|
|
|
|
return ionic_adminq_post_wait(lif, &ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ionic_lif_rss_init(struct ionic_lif *lif)
|
|
|
|
{
|
|
|
|
unsigned int tbl_sz;
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
lif->rss_types = IONIC_RSS_TYPE_IPV4 |
|
|
|
|
IONIC_RSS_TYPE_IPV4_TCP |
|
|
|
|
IONIC_RSS_TYPE_IPV4_UDP |
|
|
|
|
IONIC_RSS_TYPE_IPV6 |
|
|
|
|
IONIC_RSS_TYPE_IPV6_TCP |
|
|
|
|
IONIC_RSS_TYPE_IPV6_UDP;
|
|
|
|
|
|
|
|
/* Fill indirection table with 'default' values */
|
|
|
|
tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
|
|
|
|
for (i = 0; i < tbl_sz; i++)
|
|
|
|
lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
|
|
|
|
|
2019-12-03 22:17:34 +00:00
|
|
|
return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
|
2019-09-03 22:28:20 +00:00
|
|
|
}
|
|
|
|
|
2019-12-03 22:17:34 +00:00
|
|
|
static void ionic_lif_rss_deinit(struct ionic_lif *lif)
|
2019-09-03 22:28:20 +00:00
|
|
|
{
|
2019-12-03 22:17:34 +00:00
|
|
|
int tbl_sz;
|
|
|
|
|
|
|
|
tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
|
|
|
|
memset(lif->rss_ind_tbl, 0, tbl_sz);
|
|
|
|
memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
|
|
|
|
|
|
|
|
ionic_lif_rss_config(lif, 0x0, NULL, NULL);
|
2019-09-03 22:28:20 +00:00
|
|
|
}
|
|
|
|
|
2020-11-12 18:22:03 +00:00
|
|
|
static void ionic_lif_quiesce(struct ionic_lif *lif)
|
|
|
|
{
|
|
|
|
struct ionic_admin_ctx ctx = {
|
|
|
|
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
|
|
|
|
.cmd.lif_setattr = {
|
|
|
|
.opcode = IONIC_CMD_LIF_SETATTR,
|
|
|
|
.index = cpu_to_le16(lif->index),
|
|
|
|
.attr = IONIC_LIF_ATTR_STATE,
|
|
|
|
.state = IONIC_LIF_QUIESCE,
|
|
|
|
},
|
|
|
|
};
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = ionic_adminq_post_wait(lif, &ctx);
|
|
|
|
if (err)
|
2022-01-24 18:53:11 +00:00
|
|
|
netdev_dbg(lif->netdev, "lif quiesce failed %d\n", err);
|
2020-11-12 18:22:03 +00:00
|
|
|
}
|
|
|
|
|
2019-09-03 22:28:17 +00:00
|
|
|
static void ionic_txrx_disable(struct ionic_lif *lif)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
2020-10-01 16:22:43 +00:00
|
|
|
int err = 0;
|
2019-09-03 22:28:17 +00:00
|
|
|
|
2020-03-28 03:14:46 +00:00
|
|
|
if (lif->txqcqs) {
|
2020-10-01 16:22:43 +00:00
|
|
|
for (i = 0; i < lif->nxqs; i++)
|
2021-10-01 18:05:57 +00:00
|
|
|
err = ionic_qcq_disable(lif, lif->txqcqs[i], err);
|
2020-03-28 03:14:46 +00:00
|
|
|
}
|
|
|
|
|
2021-04-01 17:56:06 +00:00
|
|
|
if (lif->hwstamp_txq)
|
2021-10-01 18:05:57 +00:00
|
|
|
err = ionic_qcq_disable(lif, lif->hwstamp_txq, err);
|
2021-04-01 17:56:06 +00:00
|
|
|
|
2020-03-28 03:14:46 +00:00
|
|
|
if (lif->rxqcqs) {
|
2020-10-01 16:22:43 +00:00
|
|
|
for (i = 0; i < lif->nxqs; i++)
|
2021-10-01 18:05:57 +00:00
|
|
|
err = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
|
2019-09-03 22:28:17 +00:00
|
|
|
}
|
2020-11-12 18:22:03 +00:00
|
|
|
|
2021-04-01 17:56:06 +00:00
|
|
|
if (lif->hwstamp_rxq)
|
2021-10-01 18:05:57 +00:00
|
|
|
err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err);
|
2021-04-01 17:56:06 +00:00
|
|
|
|
2020-11-12 18:22:03 +00:00
|
|
|
ionic_lif_quiesce(lif);
|
2019-09-03 22:28:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void ionic_txrx_deinit(struct ionic_lif *lif)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
2020-03-28 03:14:46 +00:00
|
|
|
if (lif->txqcqs) {
|
2020-08-27 23:00:29 +00:00
|
|
|
for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
|
2020-08-27 23:00:23 +00:00
|
|
|
ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
|
|
|
|
ionic_tx_flush(&lif->txqcqs[i]->cq);
|
|
|
|
ionic_tx_empty(&lif->txqcqs[i]->q);
|
2020-03-28 03:14:46 +00:00
|
|
|
}
|
|
|
|
}
|
2019-09-03 22:28:17 +00:00
|
|
|
|
2020-03-28 03:14:46 +00:00
|
|
|
if (lif->rxqcqs) {
|
2020-08-27 23:00:29 +00:00
|
|
|
for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
|
2020-08-27 23:00:23 +00:00
|
|
|
ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
|
|
|
|
ionic_rx_empty(&lif->rxqcqs[i]->q);
|
2020-03-28 03:14:46 +00:00
|
|
|
}
|
2019-09-03 22:28:17 +00:00
|
|
|
}
|
2020-03-28 03:14:47 +00:00
|
|
|
lif->rx_mode = 0;
|
2021-04-01 17:56:07 +00:00
|
|
|
|
|
|
|
if (lif->hwstamp_txq) {
|
|
|
|
ionic_lif_qcq_deinit(lif, lif->hwstamp_txq);
|
|
|
|
ionic_tx_flush(&lif->hwstamp_txq->cq);
|
|
|
|
ionic_tx_empty(&lif->hwstamp_txq->q);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (lif->hwstamp_rxq) {
|
|
|
|
ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq);
|
|
|
|
ionic_rx_empty(&lif->hwstamp_rxq->q);
|
|
|
|
}
|
2019-09-03 22:28:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void ionic_txrx_free(struct ionic_lif *lif)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
2020-03-28 03:14:46 +00:00
|
|
|
if (lif->txqcqs) {
|
2020-08-27 23:00:29 +00:00
|
|
|
for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
|
2020-08-27 23:00:23 +00:00
|
|
|
ionic_qcq_free(lif, lif->txqcqs[i]);
|
2020-08-27 23:00:29 +00:00
|
|
|
devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
|
2020-08-27 23:00:23 +00:00
|
|
|
lif->txqcqs[i] = NULL;
|
2020-03-28 03:14:46 +00:00
|
|
|
}
|
|
|
|
}
|
2019-09-03 22:28:17 +00:00
|
|
|
|
2020-03-28 03:14:46 +00:00
|
|
|
if (lif->rxqcqs) {
|
2020-08-27 23:00:29 +00:00
|
|
|
for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
|
2020-08-27 23:00:23 +00:00
|
|
|
ionic_qcq_free(lif, lif->rxqcqs[i]);
|
2020-08-27 23:00:29 +00:00
|
|
|
devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
|
2020-08-27 23:00:23 +00:00
|
|
|
lif->rxqcqs[i] = NULL;
|
2020-03-28 03:14:46 +00:00
|
|
|
}
|
2019-09-03 22:28:17 +00:00
|
|
|
}
|
2021-04-01 17:56:06 +00:00
|
|
|
|
|
|
|
if (lif->hwstamp_txq) {
|
|
|
|
ionic_qcq_free(lif, lif->hwstamp_txq);
|
|
|
|
devm_kfree(lif->ionic->dev, lif->hwstamp_txq);
|
|
|
|
lif->hwstamp_txq = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (lif->hwstamp_rxq) {
|
|
|
|
ionic_qcq_free(lif, lif->hwstamp_rxq);
|
|
|
|
devm_kfree(lif->ionic->dev, lif->hwstamp_rxq);
|
|
|
|
lif->hwstamp_rxq = NULL;
|
|
|
|
}
|
2019-09-03 22:28:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int ionic_txrx_alloc(struct ionic_lif *lif)
|
|
|
|
{
|
2021-04-07 23:19:54 +00:00
|
|
|
unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
|
|
|
|
unsigned int flags, i;
|
2019-09-03 22:28:17 +00:00
|
|
|
int err = 0;
|
|
|
|
|
2021-04-01 17:56:00 +00:00
|
|
|
num_desc = lif->ntxq_descs;
|
|
|
|
desc_sz = sizeof(struct ionic_txq_desc);
|
|
|
|
comp_sz = sizeof(struct ionic_txq_comp);
|
|
|
|
|
2020-05-12 00:59:27 +00:00
|
|
|
if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
|
|
|
|
lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
|
|
|
|
sizeof(struct ionic_txq_sg_desc_v1))
|
|
|
|
sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
|
|
|
|
else
|
|
|
|
sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
|
|
|
|
|
2019-09-03 22:28:17 +00:00
|
|
|
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
|
2020-07-31 20:15:36 +00:00
|
|
|
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
|
|
|
|
flags |= IONIC_QCQ_F_INTR;
|
2019-09-03 22:28:17 +00:00
|
|
|
for (i = 0; i < lif->nxqs; i++) {
|
|
|
|
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
|
2021-04-01 17:56:00 +00:00
|
|
|
num_desc, desc_sz, comp_sz, sg_desc_sz,
|
2020-08-27 23:00:23 +00:00
|
|
|
lif->kern_pid, &lif->txqcqs[i]);
|
2019-09-03 22:28:17 +00:00
|
|
|
if (err)
|
|
|
|
goto err_out;
|
|
|
|
|
2020-09-15 23:59:03 +00:00
|
|
|
if (flags & IONIC_QCQ_F_INTR) {
|
2020-07-31 20:15:36 +00:00
|
|
|
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
|
2020-08-27 23:00:23 +00:00
|
|
|
lif->txqcqs[i]->intr.index,
|
2020-07-31 20:15:36 +00:00
|
|
|
lif->tx_coalesce_hw);
|
2020-09-15 23:59:03 +00:00
|
|
|
if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
|
|
|
|
lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
|
|
|
|
}
|
2020-07-31 20:15:36 +00:00
|
|
|
|
2020-08-27 23:00:23 +00:00
|
|
|
ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
|
2019-09-03 22:28:17 +00:00
|
|
|
}
|
|
|
|
|
2019-10-24 00:48:59 +00:00
|
|
|
flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
|
2021-04-01 17:56:00 +00:00
|
|
|
|
|
|
|
num_desc = lif->nrxq_descs;
|
|
|
|
desc_sz = sizeof(struct ionic_rxq_desc);
|
|
|
|
comp_sz = sizeof(struct ionic_rxq_comp);
|
|
|
|
sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
|
|
|
|
|
|
|
|
if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC)
|
|
|
|
comp_sz *= 2;
|
|
|
|
|
2019-09-03 22:28:17 +00:00
|
|
|
for (i = 0; i < lif->nxqs; i++) {
|
|
|
|
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
|
2021-04-01 17:56:00 +00:00
|
|
|
num_desc, desc_sz, comp_sz, sg_desc_sz,
|
2020-08-27 23:00:23 +00:00
|
|
|
lif->kern_pid, &lif->rxqcqs[i]);
|
2019-09-03 22:28:17 +00:00
|
|
|
if (err)
|
|
|
|
goto err_out;
|
|
|
|
|
2021-04-01 17:56:00 +00:00
|
|
|
lif->rxqcqs[i]->q.features = lif->rxq_features;
|
|
|
|
|
2019-09-03 22:28:21 +00:00
|
|
|
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
|
2020-08-27 23:00:23 +00:00
|
|
|
lif->rxqcqs[i]->intr.index,
|
2019-10-01 03:03:24 +00:00
|
|
|
lif->rx_coalesce_hw);
|
2020-09-15 23:59:03 +00:00
|
|
|
if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
|
|
|
|
lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;
|
2020-07-31 20:15:36 +00:00
|
|
|
|
|
|
|
if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
|
2020-08-27 23:00:23 +00:00
|
|
|
ionic_link_qcq_interrupts(lif->rxqcqs[i],
|
|
|
|
lif->txqcqs[i]);
|
2020-07-31 20:15:36 +00:00
|
|
|
|
2020-08-27 23:00:23 +00:00
|
|
|
ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
|
2019-09-03 22:28:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_out:
|
|
|
|
ionic_txrx_free(lif);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ionic_txrx_init(struct ionic_lif *lif)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
for (i = 0; i < lif->nxqs; i++) {
|
2020-08-27 23:00:23 +00:00
|
|
|
err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
|
2019-09-03 22:28:17 +00:00
|
|
|
if (err)
|
|
|
|
goto err_out;
|
|
|
|
|
2020-08-27 23:00:23 +00:00
|
|
|
err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
|
2019-09-03 22:28:17 +00:00
|
|
|
if (err) {
|
2020-08-27 23:00:23 +00:00
|
|
|
ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
|
2019-09-03 22:28:17 +00:00
|
|
|
goto err_out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-09-03 22:28:20 +00:00
|
|
|
if (lif->netdev->features & NETIF_F_RXHASH)
|
|
|
|
ionic_lif_rss_init(lif);
|
|
|
|
|
2021-08-26 01:24:47 +00:00
|
|
|
ionic_lif_rx_mode(lif);
|
2019-09-03 22:28:17 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_out:
|
|
|
|
while (i--) {
|
2020-08-27 23:00:23 +00:00
|
|
|
ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
|
|
|
|
ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
|
2019-09-03 22:28:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ionic_txrx_enable(struct ionic_lif *lif)
|
|
|
|
{
|
2020-10-01 16:22:43 +00:00
|
|
|
int derr = 0;
|
2019-09-03 22:28:17 +00:00
|
|
|
int i, err;
|
|
|
|
|
|
|
|
for (i = 0; i < lif->nxqs; i++) {
|
2020-10-01 16:22:42 +00:00
|
|
|
if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
|
|
|
|
dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
|
|
|
|
err = -ENXIO;
|
|
|
|
goto err_out;
|
|
|
|
}
|
|
|
|
|
2020-08-27 23:00:23 +00:00
|
|
|
ionic_rx_fill(&lif->rxqcqs[i]->q);
|
|
|
|
err = ionic_qcq_enable(lif->rxqcqs[i]);
|
2019-09-03 22:28:17 +00:00
|
|
|
if (err)
|
|
|
|
goto err_out;
|
|
|
|
|
2020-08-27 23:00:23 +00:00
|
|
|
err = ionic_qcq_enable(lif->txqcqs[i]);
|
2019-09-03 22:28:17 +00:00
|
|
|
if (err) {
|
2021-10-01 18:05:57 +00:00
|
|
|
derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
|
2019-09-03 22:28:17 +00:00
|
|
|
goto err_out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-01 17:56:07 +00:00
|
|
|
if (lif->hwstamp_rxq) {
|
|
|
|
ionic_rx_fill(&lif->hwstamp_rxq->q);
|
|
|
|
err = ionic_qcq_enable(lif->hwstamp_rxq);
|
|
|
|
if (err)
|
|
|
|
goto err_out_hwstamp_rx;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (lif->hwstamp_txq) {
|
|
|
|
err = ionic_qcq_enable(lif->hwstamp_txq);
|
|
|
|
if (err)
|
|
|
|
goto err_out_hwstamp_tx;
|
|
|
|
}
|
|
|
|
|
2019-09-03 22:28:17 +00:00
|
|
|
return 0;
|
|
|
|
|
2021-04-01 17:56:07 +00:00
|
|
|
err_out_hwstamp_tx:
|
|
|
|
if (lif->hwstamp_rxq)
|
2021-10-01 18:05:57 +00:00
|
|
|
derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr);
|
2021-04-01 17:56:07 +00:00
|
|
|
err_out_hwstamp_rx:
|
|
|
|
i = lif->nxqs;
|
2019-09-03 22:28:17 +00:00
|
|
|
err_out:
|
|
|
|
while (i--) {
|
2021-10-01 18:05:57 +00:00
|
|
|
derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr);
|
|
|
|
derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
|
2019-09-03 22:28:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2020-03-28 03:14:47 +00:00
|
|
|
static int ionic_start_queues(struct ionic_lif *lif)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
2021-03-19 00:48:09 +00:00
|
|
|
if (test_bit(IONIC_LIF_F_BROKEN, lif->state))
|
|
|
|
return -EIO;
|
|
|
|
|
2021-03-19 00:48:08 +00:00
|
|
|
if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
|
|
|
|
return -EBUSY;
|
|
|
|
|
2020-03-28 03:14:47 +00:00
|
|
|
if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err = ionic_txrx_enable(lif);
|
|
|
|
if (err) {
|
|
|
|
clear_bit(IONIC_LIF_F_UP, lif->state);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
netif_tx_wake_all_queues(lif->netdev);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-08-27 23:00:24 +00:00
|
|
|
/* ndo_open callback: allocate and initialize the tx/rx queues, tell the
 * stack the real queue counts, and start the queues if link is already up.
 * All queue setup is done under lif->queue_lock; the error ladder unwinds
 * in reverse order of the setup steps.
 */
static int ionic_open(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	/* If recovering from a broken state, clear the bit and we'll try again */
	if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
		netdev_info(netdev, "clearing broken state\n");

	mutex_lock(&lif->queue_lock);

	/* allocate queue resources, then write them to the device */
	err = ionic_txrx_alloc(lif);
	if (err)
		goto err_unlock;

	err = ionic_txrx_init(lif);
	if (err)
		goto err_txrx_free;

	/* publish the active queue counts to the net stack */
	err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	/* don't start the queues until we have link */
	if (netif_carrier_ok(netdev)) {
		err = ionic_start_queues(lif);
		if (err)
			goto err_txrx_deinit;
	}

	/* If hardware timestamping is enabled, but the queues were freed by
	 * ionic_stop, those need to be reallocated and initialized, too.
	 */
	ionic_lif_hwstamp_recreate_queues(lif);

	mutex_unlock(&lif->queue_lock);

	return 0;

err_txrx_deinit:
	ionic_txrx_deinit(lif);
err_txrx_free:
	ionic_txrx_free(lif);
err_unlock:
	mutex_unlock(&lif->queue_lock);
	return err;
}
|
|
|
|
|
2020-03-28 03:14:47 +00:00
|
|
|
static void ionic_stop_queues(struct ionic_lif *lif)
|
2019-09-03 22:28:12 +00:00
|
|
|
{
|
2020-03-28 03:14:47 +00:00
|
|
|
if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
|
|
|
|
return;
|
2019-09-03 22:28:12 +00:00
|
|
|
|
2020-03-28 03:14:47 +00:00
|
|
|
netif_tx_disable(lif->netdev);
|
2020-06-18 17:29:04 +00:00
|
|
|
ionic_txrx_disable(lif);
|
2020-03-28 03:14:47 +00:00
|
|
|
}
|
2019-09-03 22:28:12 +00:00
|
|
|
|
2020-08-27 23:00:24 +00:00
|
|
|
/* ndo_stop callback: stop the queues and tear down queue resources.
 * Skipped entirely during a FW reset, where the FW-down handler owns
 * the teardown instead.  Always returns 0.
 */
static int ionic_stop(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	/* during FW reset the lif is torn down by the fw_down path */
	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return 0;

	mutex_lock(&lif->queue_lock);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
	ionic_txrx_free(lif);
	mutex_unlock(&lif->queue_lock);

	return 0;
}
|
|
|
|
|
2021-07-27 13:45:13 +00:00
|
|
|
static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
|
2021-04-01 17:56:10 +00:00
|
|
|
{
|
|
|
|
struct ionic_lif *lif = netdev_priv(netdev);
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
case SIOCSHWTSTAMP:
|
|
|
|
return ionic_lif_hwstamp_set(lif, ifr);
|
|
|
|
case SIOCGHWTSTAMP:
|
|
|
|
return ionic_lif_hwstamp_get(lif, ifr);
|
|
|
|
default:
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-24 18:53:06 +00:00
|
|
|
/* Refresh the driver's cached attributes for one VF by querying the
 * device for each attribute in turn.  A per-attribute IONIC_RC_ENOSUPP
 * from the device is tolerated (the cached value is simply left as-is);
 * any other error aborts the refresh and is logged with the name of the
 * attribute that failed.  Caller is expected to hold vf_op_lock.
 */
static int ionic_update_cached_vf_config(struct ionic *ionic, int vf)
{
	struct ionic_vf_getattr_comp comp = { 0 };
	int err;
	u8 attr;

	attr = IONIC_VF_ATTR_VLAN;
	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
	if (err && comp.status != IONIC_RC_ENOSUPP)
		goto err_out;
	if (!err)
		ionic->vfs[vf].vlanid = comp.vlanid;

	attr = IONIC_VF_ATTR_SPOOFCHK;
	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
	if (err && comp.status != IONIC_RC_ENOSUPP)
		goto err_out;
	if (!err)
		ionic->vfs[vf].spoofchk = comp.spoofchk;

	attr = IONIC_VF_ATTR_LINKSTATE;
	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
	if (err && comp.status != IONIC_RC_ENOSUPP)
		goto err_out;
	if (!err) {
		/* translate device link status into the IFLA encoding
		 * that the cached value (and rtnetlink) uses
		 */
		switch (comp.linkstate) {
		case IONIC_VF_LINK_STATUS_UP:
			ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_ENABLE;
			break;
		case IONIC_VF_LINK_STATUS_DOWN:
			ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_DISABLE;
			break;
		case IONIC_VF_LINK_STATUS_AUTO:
			ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_AUTO;
			break;
		default:
			dev_warn(ionic->dev, "Unexpected link state %u\n", comp.linkstate);
			break;
		}
	}

	attr = IONIC_VF_ATTR_RATE;
	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
	if (err && comp.status != IONIC_RC_ENOSUPP)
		goto err_out;
	if (!err)
		ionic->vfs[vf].maxrate = comp.maxrate;

	attr = IONIC_VF_ATTR_TRUST;
	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
	if (err && comp.status != IONIC_RC_ENOSUPP)
		goto err_out;
	if (!err)
		ionic->vfs[vf].trusted = comp.trust;

	attr = IONIC_VF_ATTR_MAC;
	err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
	if (err && comp.status != IONIC_RC_ENOSUPP)
		goto err_out;
	if (!err)
		ether_addr_copy(ionic->vfs[vf].macaddr, comp.macaddr);

err_out:
	if (err)
		dev_err(ionic->dev, "Failed to get %s for VF %d\n",
			ionic_vf_attr_to_str(attr), vf);

	return err;
}
|
|
|
|
|
2020-01-03 17:55:08 +00:00
|
|
|
/* ndo_get_vf_config callback: report a VF's current configuration.
 * The cached attributes are refreshed from the device first, then
 * copied into the ifla_vf_info under the read side of vf_op_lock.
 */
static int ionic_get_vf_config(struct net_device *netdev,
			       int vf, struct ifla_vf_info *ivf)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ivf->vf = vf;
		ivf->qos = 0;	/* qos is not supported */

		/* pull fresh values from the device before reporting */
		ret = ionic_update_cached_vf_config(ionic, vf);
		if (!ret) {
			ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid);
			ivf->spoofchk = ionic->vfs[vf].spoofchk;
			ivf->linkstate = ionic->vfs[vf].linkstate;
			ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate);
			ivf->trusted = ionic->vfs[vf].trusted;
			ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
		}
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}
|
|
|
|
|
|
|
|
/* ndo_get_vf_stats callback: translate the firmware-maintained per-VF
 * LIF stats (little-endian in DMA memory) into the rtnetlink
 * ifla_vf_stats layout.  Dropped counts are summed across unicast,
 * multicast, and broadcast drops.
 */
static int ionic_get_vf_stats(struct net_device *netdev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	struct ionic_lif_stats *vs;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		memset(vf_stats, 0, sizeof(*vf_stats));
		vs = &ionic->vfs[vf].stats;

		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
		vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes);
		vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes);
		vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets);
		vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets);
		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
				       le64_to_cpu(vs->rx_mcast_drop_packets) +
				       le64_to_cpu(vs->rx_bcast_drop_packets);
		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
				       le64_to_cpu(vs->tx_mcast_drop_packets) +
				       le64_to_cpu(vs->tx_bcast_drop_packets);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}
|
|
|
|
|
|
|
|
/* ndo_set_vf_mac callback: program a VF's MAC address in the device
 * and mirror it into the driver's cache on success.  A zero address
 * is allowed (clears the assignment); anything else must be a valid
 * unicast address.
 */
static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_MAC };
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = -EINVAL;

	if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs)
		goto out_unlock;

	ether_addr_copy(vfc.macaddr, mac);
	dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
		__func__, vf, vfc.macaddr);

	ret = ionic_set_vf_config(ionic, vf, &vfc);
	if (!ret)
		ether_addr_copy(ionic->vfs[vf].macaddr, mac);

out_unlock:
	up_write(&ionic->vf_op_lock);
	return ret;
}
|
|
|
|
|
|
|
|
static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
|
|
|
|
u8 qos, __be16 proto)
|
|
|
|
{
|
2022-01-24 18:53:12 +00:00
|
|
|
struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_VLAN };
|
2020-01-03 17:55:08 +00:00
|
|
|
struct ionic_lif *lif = netdev_priv(netdev);
|
|
|
|
struct ionic *ionic = lif->ionic;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* until someday when we support qos */
|
|
|
|
if (qos)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (vlan > 4095)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (proto != htons(ETH_P_8021Q))
|
|
|
|
return -EPROTONOSUPPORT;
|
|
|
|
|
2020-05-12 00:59:29 +00:00
|
|
|
if (!netif_device_present(netdev))
|
|
|
|
return -EBUSY;
|
|
|
|
|
2020-03-04 17:42:10 +00:00
|
|
|
down_write(&ionic->vf_op_lock);
|
2020-01-03 17:55:08 +00:00
|
|
|
|
|
|
|
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
} else {
|
2022-01-24 18:53:12 +00:00
|
|
|
vfc.vlanid = cpu_to_le16(vlan);
|
|
|
|
dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
|
|
|
|
__func__, vf, le16_to_cpu(vfc.vlanid));
|
|
|
|
|
|
|
|
ret = ionic_set_vf_config(ionic, vf, &vfc);
|
2020-01-03 17:55:08 +00:00
|
|
|
if (!ret)
|
2020-10-22 23:55:29 +00:00
|
|
|
ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
|
2020-01-03 17:55:08 +00:00
|
|
|
}
|
|
|
|
|
2020-03-04 17:42:10 +00:00
|
|
|
up_write(&ionic->vf_op_lock);
|
2020-01-03 17:55:08 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ionic_set_vf_rate(struct net_device *netdev, int vf,
|
|
|
|
int tx_min, int tx_max)
|
|
|
|
{
|
2022-01-24 18:53:12 +00:00
|
|
|
struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_RATE };
|
2020-01-03 17:55:08 +00:00
|
|
|
struct ionic_lif *lif = netdev_priv(netdev);
|
|
|
|
struct ionic *ionic = lif->ionic;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* setting the min just seems silly */
|
|
|
|
if (tx_min)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2020-05-12 00:59:29 +00:00
|
|
|
if (!netif_device_present(netdev))
|
|
|
|
return -EBUSY;
|
|
|
|
|
2020-01-03 17:55:08 +00:00
|
|
|
down_write(&ionic->vf_op_lock);
|
|
|
|
|
|
|
|
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
} else {
|
2022-01-24 18:53:12 +00:00
|
|
|
vfc.maxrate = cpu_to_le32(tx_max);
|
|
|
|
dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
|
|
|
|
__func__, vf, le32_to_cpu(vfc.maxrate));
|
|
|
|
|
|
|
|
ret = ionic_set_vf_config(ionic, vf, &vfc);
|
2020-01-03 17:55:08 +00:00
|
|
|
if (!ret)
|
2020-10-22 23:55:29 +00:00
|
|
|
lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
|
2020-01-03 17:55:08 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
up_write(&ionic->vf_op_lock);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ndo_set_vf_spoofchk callback: enable or disable MAC/VLAN spoof
 * checking for a VF and cache the new setting on success.
 */
static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
{
	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_SPOOFCHK };
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs)
		goto out_unlock;

	vfc.spoofchk = set;
	dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
		__func__, vf, vfc.spoofchk);

	ret = ionic_set_vf_config(ionic, vf, &vfc);
	if (!ret)
		ionic->vfs[vf].spoofchk = set;

out_unlock:
	up_write(&ionic->vf_op_lock);
	return ret;
}
|
|
|
|
|
|
|
|
/* ndo_set_vf_trust callback: mark a VF as trusted (or not) in the
 * device and cache the new setting on success.
 */
static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
{
	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_TRUST };
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs)
		goto out_unlock;

	vfc.trust = set;
	dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
		__func__, vf, vfc.trust);

	ret = ionic_set_vf_config(ionic, vf, &vfc);
	if (!ret)
		ionic->vfs[vf].trusted = set;

out_unlock:
	up_write(&ionic->vf_op_lock);
	return ret;
}
|
|
|
|
|
|
|
|
static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
|
|
|
|
{
|
2022-01-24 18:53:12 +00:00
|
|
|
struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_LINKSTATE };
|
2020-01-03 17:55:08 +00:00
|
|
|
struct ionic_lif *lif = netdev_priv(netdev);
|
|
|
|
struct ionic *ionic = lif->ionic;
|
2022-01-24 18:53:12 +00:00
|
|
|
u8 vfls;
|
2020-01-03 17:55:08 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
switch (set) {
|
|
|
|
case IFLA_VF_LINK_STATE_ENABLE:
|
2022-01-24 18:53:12 +00:00
|
|
|
vfls = IONIC_VF_LINK_STATUS_UP;
|
2020-01-03 17:55:08 +00:00
|
|
|
break;
|
|
|
|
case IFLA_VF_LINK_STATE_DISABLE:
|
2022-01-24 18:53:12 +00:00
|
|
|
vfls = IONIC_VF_LINK_STATUS_DOWN;
|
2020-01-03 17:55:08 +00:00
|
|
|
break;
|
|
|
|
case IFLA_VF_LINK_STATE_AUTO:
|
2022-01-24 18:53:12 +00:00
|
|
|
vfls = IONIC_VF_LINK_STATUS_AUTO;
|
2020-01-03 17:55:08 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2020-05-12 00:59:29 +00:00
|
|
|
if (!netif_device_present(netdev))
|
|
|
|
return -EBUSY;
|
|
|
|
|
2020-01-03 17:55:08 +00:00
|
|
|
down_write(&ionic->vf_op_lock);
|
|
|
|
|
|
|
|
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
} else {
|
2022-01-24 18:53:12 +00:00
|
|
|
vfc.linkstate = vfls;
|
|
|
|
dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
|
|
|
|
__func__, vf, vfc.linkstate);
|
|
|
|
|
|
|
|
ret = ionic_set_vf_config(ionic, vf, &vfc);
|
2020-01-03 17:55:08 +00:00
|
|
|
if (!ret)
|
|
|
|
ionic->vfs[vf].linkstate = set;
|
|
|
|
}
|
|
|
|
|
|
|
|
up_write(&ionic->vf_op_lock);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-09-03 22:28:12 +00:00
|
|
|
/* net_device_ops table registered on the lif's netdev in
 * ionic_lif_alloc(); entries not listed here fall back to the
 * stack defaults.
 */
static const struct net_device_ops ionic_netdev_ops = {
	.ndo_open               = ionic_open,
	.ndo_stop               = ionic_stop,
	.ndo_eth_ioctl		= ionic_eth_ioctl,
	.ndo_start_xmit		= ionic_start_xmit,
	.ndo_get_stats64	= ionic_get_stats64,
	.ndo_set_rx_mode	= ionic_ndo_set_rx_mode,
	.ndo_set_features	= ionic_set_features,
	.ndo_set_mac_address	= ionic_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout         = ionic_tx_timeout,
	.ndo_change_mtu         = ionic_change_mtu,
	.ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= ionic_set_vf_vlan,
	.ndo_set_vf_trust	= ionic_set_vf_trust,
	.ndo_set_vf_mac		= ionic_set_vf_mac,
	.ndo_set_vf_rate	= ionic_set_vf_rate,
	.ndo_set_vf_spoofchk	= ionic_set_vf_spoofchk,
	.ndo_get_vf_config	= ionic_get_vf_config,
	.ndo_set_vf_link_state	= ionic_set_vf_link_state,
	.ndo_get_vf_stats       = ionic_get_vf_stats,
};
|
|
|
|
|
2020-08-27 23:00:28 +00:00
|
|
|
/* Exchange the descriptor rings and backing memory between two qcqs,
 * leaving each qcq's interrupt, napi, and flag configuration in place.
 * Used by ionic_reconfigure_queues() to slide newly allocated rings
 * under the existing live queue structures.
 */
static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
{
	/* only swapping the queues, not the napi, flags, or other stuff */
	swap(a->q.features, b->q.features);
	swap(a->q.num_descs, b->q.num_descs);
	swap(a->q.desc_size, b->q.desc_size);
	swap(a->q.base, b->q.base);
	swap(a->q.base_pa, b->q.base_pa);
	swap(a->q.info, b->q.info);
	swap(a->q_base, b->q_base);
	swap(a->q_base_pa, b->q_base_pa);
	swap(a->q_size, b->q_size);

	/* scatter-gather descriptor area */
	swap(a->q.sg_desc_size, b->q.sg_desc_size);
	swap(a->q.sg_base, b->q.sg_base);
	swap(a->q.sg_base_pa, b->q.sg_base_pa);
	swap(a->sg_base, b->sg_base);
	swap(a->sg_base_pa, b->sg_base_pa);
	swap(a->sg_size, b->sg_size);

	/* completion queue */
	swap(a->cq.num_descs, b->cq.num_descs);
	swap(a->cq.desc_size, b->cq.desc_size);
	swap(a->cq.base, b->cq.base);
	swap(a->cq.base_pa, b->cq.base_pa);
	swap(a->cq.info, b->cq.info);
	swap(a->cq_base, b->cq_base);
	swap(a->cq_base_pa, b->cq_base_pa);
	swap(a->cq_size, b->cq_size);

	/* refresh a's debugfs entries to reflect the swapped-in rings */
	ionic_debugfs_del_qcq(a);
	ionic_debugfs_add_qcq(a->q.lif, a);
}
|
|
|
|
|
|
|
|
/* Apply a new queue configuration (queue count, descriptor counts,
 * rx features, and/or split-interrupt layout) with minimal downtime:
 * new rings are allocated first while the old queues keep running,
 * then the queues are stopped just long enough to swap rings in and
 * rework interrupts, and finally the queues are restarted.
 *
 * On any error the queues are still restarted with whatever rings are
 * in place, and the first error code is preserved for the caller.
 * Note the shared tail: the err_out path also runs on success, where
 * it frees the *old* rings left in the temporary arrays by the swap.
 */
int ionic_reconfigure_queues(struct ionic_lif *lif,
			     struct ionic_queue_params *qparam)
{
	unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
	struct ionic_qcq **tx_qcqs = NULL;
	struct ionic_qcq **rx_qcqs = NULL;
	unsigned int flags, i;
	int err = 0;

	/* allocate temporary qcq arrays to hold new queue structs */
	if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
		tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!tx_qcqs) {
			err = -ENOMEM;
			goto err_out;
		}
	}
	if (qparam->nxqs != lif->nxqs ||
	    qparam->nrxq_descs != lif->nrxq_descs ||
	    qparam->rxq_features != lif->rxq_features) {
		rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!rx_qcqs) {
			err = -ENOMEM;
			goto err_out;
		}
	}

	/* allocate new desc_info and rings, but leave the interrupt setup
	 * until later so as to not mess with the still-running queues
	 */
	if (tx_qcqs) {
		num_desc = qparam->ntxq_descs;
		desc_sz = sizeof(struct ionic_txq_desc);
		comp_sz = sizeof(struct ionic_txq_comp);

		/* pick the SG descriptor format matching the device's
		 * advertised tx queue type version
		 */
		if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
		    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
		    sizeof(struct ionic_txq_sg_desc_v1))
			sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
		else
			sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
					      num_desc, desc_sz, comp_sz, sg_desc_sz,
					      lif->kern_pid, &tx_qcqs[i]);
			if (err)
				goto err_out;
		}
	}

	if (rx_qcqs) {
		num_desc = qparam->nrxq_descs;
		desc_sz = sizeof(struct ionic_rxq_desc);
		comp_sz = sizeof(struct ionic_rxq_comp);
		sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

		/* e.g. timestamping needs the wider completion format */
		if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC)
			comp_sz *= 2;

		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
					      num_desc, desc_sz, comp_sz, sg_desc_sz,
					      lif->kern_pid, &rx_qcqs[i]);
			if (err)
				goto err_out;

			rx_qcqs[i]->q.features = qparam->rxq_features;
		}
	}

	/* stop and clean the queues */
	ionic_stop_queues_reconfig(lif);

	if (qparam->nxqs != lif->nxqs) {
		err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
		if (err)
			goto err_out_reinit_unlock;
		err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
		if (err) {
			/* restore the tx count we just changed */
			netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
			goto err_out_reinit_unlock;
		}
	}

	/* swap new desc_info and rings, keeping existing interrupt config */
	if (tx_qcqs) {
		lif->ntxq_descs = qparam->ntxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
	}

	if (rx_qcqs) {
		lif->nrxq_descs = qparam->nrxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
	}

	/* if we need to change the interrupt layout, this is the time */
	if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
	    qparam->nxqs != lif->nxqs) {
		if (qparam->intr_split) {
			set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
		} else {
			/* shared interrupt: tx follows rx coalescing */
			clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
			lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
			lif->tx_coalesce_hw = lif->rx_coalesce_hw;
		}

		/* clear existing interrupt assignments */
		for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
			ionic_qcq_intr_free(lif, lif->txqcqs[i]);
			ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
		}

		/* re-assign the interrupts */
		for (i = 0; i < qparam->nxqs; i++) {
			lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
			err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->rxqcqs[i]->intr.index,
					     lif->rx_coalesce_hw);

			if (qparam->intr_split) {
				lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
				err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
				ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
						     lif->txqcqs[i]->intr.index,
						     lif->tx_coalesce_hw);
				if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
					lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
			} else {
				/* shared mode: tx rides the rx interrupt */
				lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
				ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
			}
		}
	}

	/* now we can rework the debugfs mappings */
	if (tx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			ionic_debugfs_del_qcq(lif->txqcqs[i]);
			ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
		}
	}

	if (rx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			ionic_debugfs_del_qcq(lif->rxqcqs[i]);
			ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
		}
	}

	swap(lif->nxqs, qparam->nxqs);
	swap(lif->rxq_features, qparam->rxq_features);

err_out_reinit_unlock:
	/* re-init the queues, but don't lose an error code */
	if (err)
		ionic_start_queues_reconfig(lif);
	else
		err = ionic_start_queues_reconfig(lif);

err_out:
	/* free old allocs without cleaning intr */
	for (i = 0; i < qparam->nxqs; i++) {
		if (tx_qcqs && tx_qcqs[i]) {
			tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, tx_qcqs[i]);
			devm_kfree(lif->ionic->dev, tx_qcqs[i]);
			tx_qcqs[i] = NULL;
		}
		if (rx_qcqs && rx_qcqs[i]) {
			rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, rx_qcqs[i]);
			devm_kfree(lif->ionic->dev, rx_qcqs[i]);
			rx_qcqs[i] = NULL;
		}
	}

	/* free q array */
	if (rx_qcqs) {
		devm_kfree(lif->ionic->dev, rx_qcqs);
		rx_qcqs = NULL;
	}
	if (tx_qcqs) {
		devm_kfree(lif->ionic->dev, tx_qcqs);
		tx_qcqs = NULL;
	}

	/* clean the unused dma and info allocations when new set is smaller
	 * than the full array, but leave the qcq shells in place
	 */
	for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
		lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
		ionic_qcq_free(lif, lif->txqcqs[i]);

		lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
		ionic_qcq_free(lif, lif->rxqcqs[i]);
	}

	if (err)
		netdev_info(lif->netdev, "%s: failed %d\n", __func__, err);

	return err;
}
|
|
|
|
|
2020-08-27 23:00:22 +00:00
|
|
|
/* Allocate and set up the LIF: the netdev (with the lif as its private
 * data), identity info, locks, DMA-coherent info and RSS regions, the
 * control and tx/rx qcq arrays, and PHC support.  Errors unwind in
 * reverse allocation order via the goto ladder at the bottom.
 * Returns 0 on success or a negative errno.
 */
int ionic_lif_alloc(struct ionic *ionic)
{
	struct device *dev = ionic->dev;
	union ionic_lif_identity *lid;
	struct net_device *netdev;
	struct ionic_lif *lif;
	int tbl_sz;
	int err;

	lid = kzalloc(sizeof(*lid), GFP_KERNEL);
	if (!lid)
		return -ENOMEM;

	/* NOTE(review): ntxqs_per_lif is passed for both the tx and rx
	 * queue counts — presumably they are always equal here; confirm
	 * before relying on separate nrxqs_per_lif sizing
	 */
	netdev = alloc_etherdev_mqs(sizeof(*lif),
				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
	if (!netdev) {
		dev_err(dev, "Cannot allocate netdev, aborting\n");
		err = -ENOMEM;
		goto err_out_free_lid;
	}

	SET_NETDEV_DEV(netdev, dev);

	lif = netdev_priv(netdev);
	lif->netdev = netdev;
	ionic->lif = lif;
	netdev->netdev_ops = &ionic_netdev_ops;
	ionic_ethtool_set_ops(netdev);

	netdev->watchdog_timeo = 2 * HZ;
	netif_carrier_off(netdev);

	lif->identity = lid;
	lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
	err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
	if (err) {
		dev_err(ionic->dev, "Cannot identify type %d: %d\n",
			lif->lif_type, err);
		goto err_out_free_netdev;
	}
	/* MTU limits come from the device's identity data */
	lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
				     le32_to_cpu(lif->identity->eth.min_frame_size));
	lif->netdev->max_mtu =
		le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;

	lif->neqs = ionic->neqs_per_lif;
	lif->nxqs = ionic->ntxqs_per_lif;

	lif->ionic = ionic;
	lif->index = 0;

	/* use minimal rings when running as a kdump capture kernel
	 * to keep the memory footprint small
	 */
	if (is_kdump_kernel()) {
		lif->ntxq_descs = IONIC_MIN_TXRX_DESC;
		lif->nrxq_descs = IONIC_MIN_TXRX_DESC;
	} else {
		lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
		lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
	}

	/* Convert the default coalesce value to actual hw resolution */
	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
						    lif->rx_coalesce_usecs);
	lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
	lif->tx_coalesce_hw = lif->rx_coalesce_hw;
	set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
	set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);

	snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);

	mutex_init(&lif->queue_lock);
	mutex_init(&lif->config_lock);

	spin_lock_init(&lif->adminq_lock);

	spin_lock_init(&lif->deferred.lock);
	INIT_LIST_HEAD(&lif->deferred.list);
	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);

	/* allocate lif info */
	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
	lif->info = dma_alloc_coherent(dev, lif->info_sz,
				       &lif->info_pa, GFP_KERNEL);
	if (!lif->info) {
		dev_err(dev, "Failed to allocate lif info, aborting\n");
		err = -ENOMEM;
		goto err_out_free_mutex;
	}

	ionic_debugfs_add_lif(lif);

	/* allocate control queues and txrx queue arrays */
	ionic_lif_queue_identify(lif);
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out_free_lif_info;

	/* allocate rss indirection table */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
					      &lif->rss_ind_tbl_pa,
					      GFP_KERNEL);

	if (!lif->rss_ind_tbl) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
		goto err_out_free_qcqs;
	}
	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_alloc_phc(lif);

	return 0;

err_out_free_qcqs:
	ionic_qcqs_free(lif);
err_out_free_lif_info:
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;
err_out_free_mutex:
	mutex_destroy(&lif->config_lock);
	mutex_destroy(&lif->queue_lock);
err_out_free_netdev:
	free_netdev(lif->netdev);
	lif = NULL;
err_out_free_lid:
	kfree(lid);

	return err;
}
|
|
|
|
|
|
|
|
/* Ask the device to reset this lif's state.
 *
 * Issued over the dev_cmd channel (not the adminq) so it works even
 * when the lif's own queues are not usable; dev_cmd_lock serializes
 * access to the shared device command registers.
 */
static void ionic_lif_reset(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_reset(idev, lif->index);
	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
}
|
|
|
|
|
2020-03-28 03:14:48 +00:00
|
|
|
/* Tear down the lif after firmware has gone away.
 *
 * Sets IONIC_LIF_F_FW_RESET (and bails if it was already set, so the
 * teardown only runs once per FW outage), detaches the netdev, then
 * under queue_lock stops and frees all queue resources.  The matching
 * rebuild happens in ionic_lif_handle_fw_up().
 */
static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;

	/* only run the teardown once per FW-down event */
	if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Down: Stopping LIFs\n");

	netif_device_detach(lif->netdev);

	/* hold queue_lock across the whole teardown so queue users
	 * can't race with the frees below
	 */
	mutex_lock(&lif->queue_lock);
	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
		dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
		ionic_stop_queues(lif);
	}

	if (netif_running(lif->netdev)) {
		ionic_txrx_deinit(lif);
		ionic_txrx_free(lif);
	}
	ionic_lif_deinit(lif);
	ionic_reset(ionic);
	ionic_qcqs_free(lif);

	mutex_unlock(&lif->queue_lock);

	clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
	dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}
|
|
|
|
|
|
|
|
/* Rebuild the lif after firmware has come back.
 *
 * Counterpart of ionic_lif_handle_fw_down(): re-identifies the device
 * and port, re-allocates and re-inits the control and (if the netdev
 * was running) data queues, replays rx filters and hwstamp queues, then
 * clears IONIC_LIF_F_FW_RESET and re-attaches the netdev.  On any
 * failure it unwinds whatever was built and logs the error.
 */
static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;
	int err;

	/* nothing to do unless fw_down ran first */
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Up: restarting LIFs\n");

	/* refresh device/port identity before touching queues */
	ionic_init_devinfo(ionic);
	err = ionic_identify(ionic);
	if (err)
		goto err_out;
	err = ionic_port_identify(ionic);
	if (err)
		goto err_out;
	err = ionic_port_init(ionic);
	if (err)
		goto err_out;

	/* hold queue_lock across the rebuild, mirroring the teardown */
	mutex_lock(&lif->queue_lock);

	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_unlock;

	err = ionic_lif_init(lif);
	if (err)
		goto err_qcqs_free;

	if (lif->registered)
		ionic_lif_set_netdev_info(lif);

	ionic_rx_filter_replay(lif);

	if (netif_running(lif->netdev)) {
		err = ionic_txrx_alloc(lif);
		if (err)
			goto err_lifs_deinit;

		err = ionic_txrx_init(lif);
		if (err)
			goto err_txrx_free;
	}

	mutex_unlock(&lif->queue_lock);

	clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
	ionic_link_status_check_request(lif, CAN_SLEEP);
	netif_device_attach(lif->netdev);
	dev_info(ionic->dev, "FW Up: LIFs restarted\n");

	/* restore the hardware timestamping queues */
	ionic_lif_hwstamp_replay(lif);

	return;

err_txrx_free:
	ionic_txrx_free(lif);
err_lifs_deinit:
	ionic_lif_deinit(lif);
err_qcqs_free:
	ionic_qcqs_free(lif);
err_unlock:
	mutex_unlock(&lif->queue_lock);
err_out:
	dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}
|
|
|
|
|
2020-08-27 23:00:22 +00:00
|
|
|
/* Final teardown of the lif's allocations: PHC, RSS indirection table,
 * queue structures, lif info DMA region, doorbell mapping, locks, and
 * finally the netdev (whose private area holds the lif itself).
 */
void ionic_lif_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;

	ionic_lif_free_phc(lif);

	/* free rss indirection table */
	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
			  lif->rss_ind_tbl_pa);
	lif->rss_ind_tbl = NULL;
	lif->rss_ind_tbl_pa = 0;

	/* free queues */
	ionic_qcqs_free(lif);
	/* don't bother resetting the lif if the FW is already gone */
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		ionic_lif_reset(lif);

	/* free lif info */
	kfree(lif->identity);
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;

	/* unmap doorbell page */
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;

	mutex_destroy(&lif->config_lock);
	mutex_destroy(&lif->queue_lock);

	/* free netdev & lif */
	ionic_debugfs_del_lif(lif);
	free_netdev(lif->netdev);
}
|
|
|
|
|
2020-08-27 23:00:22 +00:00
|
|
|
/* Undo ionic_lif_init(): stop deferred work, drop filters/RSS state,
 * quiesce the control queues, and reset the lif in the device.
 *
 * When called during a FW reset the device is unreachable, so the
 * work-cancel and filter/RSS cleanup that would talk to it is skipped.
 */
void ionic_lif_deinit(struct ionic_lif *lif)
{
	/* no-op unless init completed; clears INITED atomically */
	if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
		return;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		cancel_work_sync(&lif->deferred.work);
		cancel_work_sync(&lif->tx_timeout_work);
		ionic_rx_filters_deinit(lif);
		if (lif->netdev->features & NETIF_F_RXHASH)
			ionic_lif_rss_deinit(lif);
	}

	/* adminq napi serves the notifyq too, so disable it before
	 * tearing down either queue
	 */
	napi_disable(&lif->adminqcq->napi);
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
	ionic_lif_qcq_deinit(lif, lif->adminqcq);

	ionic_lif_reset(lif);
}
|
|
|
|
|
2019-09-03 22:28:09 +00:00
|
|
|
/* Initialize the AdminQ in the device, then hook up its napi handler
 * and unmask its interrupt so admin commands can start flowing.
 *
 * Return: 0 on success, negative errno from the device command.
 */
static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_q_init_comp comp;
	struct ionic_dev *idev;
	struct ionic_qcq *qcq;
	struct ionic_queue *q;
	int err;

	idev = &lif->ionic->idev;
	qcq = lif->adminqcq;
	q = &qcq->q;

	/* must use the dev_cmd channel - the adminq isn't usable yet */
	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err) {
		netdev_err(lif->netdev, "adminq init failed %d\n", err);
		return err;
	}

	/* record the hw identifiers needed to ring the doorbell */
	q->hw_type = comp.hw_type;
	q->hw_index = le32_to_cpu(comp.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);

	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
		       NAPI_POLL_WEIGHT);

	napi_enable(&qcq->napi);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
|
|
|
|
|
2019-09-03 22:28:11 +00:00
|
|
|
/* Initialize the NotifyQ via an adminq command (the adminq must already
 * be up).  The notifyq shares the adminq's interrupt, so no separate
 * napi context is created here.
 *
 * Return: 0 on success, negative errno from the admin command.
 */
static int ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_qcq *qcq = lif->notifyqcq;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	int err;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_ENA),
			/* ride on the adminq's interrupt */
			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
		}
	};

	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* restart event id tracking for the new queue */
	lif->last_eid = 0;
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);

	/* preset the callback info */
	q->info[0].cb_arg = lif;

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
|
|
|
|
|
2019-09-03 22:28:14 +00:00
|
|
|
/* Reconcile the netdev MAC address with the device's station MAC.
 *
 * Reads the device's MAC via LIF_GETATTR.  If the netdev already has a
 * non-zero MAC (e.g. set by userspace, or surviving a fw-upgrade reset)
 * that address is kept and (re)added to the filter list; otherwise the
 * device MAC is committed to the netdev.  Either way the resulting
 * netdev MAC ends up in the device filter list.
 *
 * Return: 0 on success (including a harmless bad-MAC from the NIC),
 * negative errno if the GETATTR command itself fails.
 */
static int ionic_station_set(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	struct sockaddr addr;
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;
	netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
		   ctx.comp.lif_getattr.mac);
	/* device has no MAC for us - nothing to set up */
	if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
		return 0;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		/* If the netdev mac is non-zero and doesn't match the default
		 * device address, it was set by something earlier and we're
		 * likely here again after a fw-upgrade reset. We need to be
		 * sure the netdev mac is in our filter list.
		 */
		if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
				      netdev->dev_addr))
			ionic_lif_addr_add(lif, netdev->dev_addr);
	} else {
		/* Update the netdev mac with the device's mac */
		memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
		addr.sa_family = AF_INET;
		err = eth_prepare_mac_addr_change(netdev, &addr);
		if (err) {
			/* non-fatal: keep running with the zero/old MAC */
			netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
				    addr.sa_data, err);
			return 0;
		}

		eth_commit_mac_addr_change(netdev, &addr);
	}

	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
		   netdev->dev_addr);
	ionic_lif_addr_add(lif, netdev->dev_addr);

	return 0;
}
|
|
|
|
|
2020-08-27 23:00:22 +00:00
|
|
|
/* Bring the lif up in the device: LIF_INIT dev command, doorbell page
 * mapping, adminq/notifyq init, nic features, rx filters, and station
 * MAC.  Sets IONIC_LIF_F_INITED on success; on failure unwinds in
 * reverse order via the goto chain.
 *
 * Return: 0 on success, negative errno on failure.
 */
int ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	struct ionic_lif_init_comp comp;
	int dbpage_num;
	int err;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err)
		return err;

	lif->hw_index = le16_to_cpu(comp.hw_index);

	/* now that we have the hw_index we can figure out our doorbell page */
	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
	if (!lif->dbid_count) {
		dev_err(dev, "No doorbell pages, aborting\n");
		return -EINVAL;
	}

	lif->kern_pid = 0;
	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
	if (!lif->kern_dbpage) {
		dev_err(dev, "Cannot map dbpage, aborting\n");
		return -ENOMEM;
	}

	err = ionic_lif_adminq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	if (lif->ionic->nnqs_per_lif) {
		err = ionic_lif_notifyq_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_init_nic_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	/* filters survive a FW reset and are replayed by the caller,
	 * so only build them fresh on a normal init
	 */
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		err = ionic_rx_filters_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_station_set(lif);
	if (err)
		goto err_out_notifyq_deinit;

	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;

	set_bit(IONIC_LIF_F_INITED, lif->state);

	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);

	return 0;

err_out_notifyq_deinit:
	napi_disable(&lif->adminqcq->napi);
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
err_out_adminq_deinit:
	ionic_lif_qcq_deinit(lif, lif->adminqcq);
	ionic_lif_reset(lif);
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;

	return err;
}
|
|
|
|
|
2019-09-03 22:28:18 +00:00
|
|
|
/* Intentionally empty: no deferred notifier work is needed today, but
 * the work struct must exist for ionic_lif_register() to INIT_WORK()
 * and ionic_lif_unregister() to cancel_work_sync().
 */
static void ionic_lif_notify_work(struct work_struct *ws)
{
}
|
|
|
|
|
|
|
|
static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
|
|
|
|
{
|
|
|
|
struct ionic_admin_ctx ctx = {
|
|
|
|
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
|
|
|
|
.cmd.lif_setattr = {
|
|
|
|
.opcode = IONIC_CMD_LIF_SETATTR,
|
|
|
|
.index = cpu_to_le16(lif->index),
|
|
|
|
.attr = IONIC_LIF_ATTR_NAME,
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
|
|
|
|
sizeof(ctx.cmd.lif_setattr.name));
|
|
|
|
|
|
|
|
ionic_adminq_post_wait(lif, &ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
|
|
|
|
{
|
|
|
|
if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return netdev_priv(netdev);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ionic_lif_notify(struct notifier_block *nb,
|
|
|
|
unsigned long event, void *info)
|
|
|
|
{
|
|
|
|
struct net_device *ndev = netdev_notifier_info_to_dev(info);
|
|
|
|
struct ionic *ionic = container_of(nb, struct ionic, nb);
|
|
|
|
struct ionic_lif *lif = ionic_netdev_lif(ndev);
|
|
|
|
|
|
|
|
if (!lif || lif->ionic != ionic)
|
|
|
|
return NOTIFY_DONE;
|
|
|
|
|
|
|
|
switch (event) {
|
|
|
|
case NETDEV_CHANGENAME:
|
|
|
|
ionic_lif_set_netdev_info(lif);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NOTIFY_DONE;
|
|
|
|
}
|
|
|
|
|
2020-08-27 23:00:22 +00:00
|
|
|
/* Register the lif with the kernel: PHC, netdevice notifier, and the
 * netdev itself, then kick a link check and push the netdev name to
 * the device.  A notifier registration failure is non-fatal; a netdev
 * registration failure unwinds the PHC and returns the error.
 */
int ionic_lif_register(struct ionic_lif *lif)
{
	int err;

	ionic_lif_register_phc(lif);

	INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);

	lif->ionic->nb.notifier_call = ionic_lif_notify;

	err = register_netdevice_notifier(&lif->ionic->nb);
	if (err)
		/* non-fatal: NULL notifier_call marks it unregistered */
		lif->ionic->nb.notifier_call = NULL;

	/* only register LIF0 for now */
	err = register_netdev(lif->netdev);
	if (err) {
		dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
		ionic_lif_unregister_phc(lif);
		return err;
	}

	ionic_link_status_check_request(lif, CAN_SLEEP);
	lif->registered = true;
	ionic_lif_set_netdev_info(lif);

	return 0;
}
|
|
|
|
|
2020-08-27 23:00:22 +00:00
|
|
|
/* Undo ionic_lif_register(): drop the netdevice notifier (if it was
 * successfully registered), unregister the netdev (only if it actually
 * reached NETREG_REGISTERED), and tear down the PHC.
 */
void ionic_lif_unregister(struct ionic_lif *lif)
{
	if (lif->ionic->nb.notifier_call) {
		unregister_netdevice_notifier(&lif->ionic->nb);
		cancel_work_sync(&lif->ionic->nb_work);
		lif->ionic->nb.notifier_call = NULL;
	}

	if (lif->netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(lif->netdev);

	ionic_lif_unregister_phc(lif);

	lif->registered = false;
}
|
|
|
|
|
2020-05-12 00:59:27 +00:00
|
|
|
/* Query the device for per-queue-type capabilities (version, features,
 * descriptor sizes, SG limits) and cache them in lif->qtype_info[].
 *
 * Error policy: -EINVAL means the device doesn't know that qtype, so
 * skip it and keep going; -EIO or anything else means the identify
 * mechanism itself isn't working (e.g. older FW), so give up and leave
 * the remaining qtype_info entries zeroed from the memset.
 */
static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
	union ionic_q_identity __iomem *q_ident;
	struct ionic *ionic = lif->ionic;
	struct ionic_dev *idev;
	int qtype;
	int err;

	idev = &lif->ionic->idev;
	/* results come back through the dev_cmd data window in BAR space */
	q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;

	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];

		/* filter out the ones we know about */
		switch (qtype) {
		case IONIC_QTYPE_ADMINQ:
		case IONIC_QTYPE_NOTIFYQ:
		case IONIC_QTYPE_RXQ:
		case IONIC_QTYPE_TXQ:
			break;
		default:
			continue;
		}

		memset(qti, 0, sizeof(*qti));

		mutex_lock(&ionic->dev_cmd_lock);
		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
					     ionic_qtype_versions[qtype]);
		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
		if (!err) {
			/* read*() accessors - this is device memory */
			qti->version   = readb(&q_ident->version);
			qti->supported = readb(&q_ident->supported);
			qti->features  = readq(&q_ident->features);
			qti->desc_sz   = readw(&q_ident->desc_sz);
			qti->comp_sz   = readw(&q_ident->comp_sz);
			qti->sg_desc_sz   = readw(&q_ident->sg_desc_sz);
			qti->max_sg_elems = readw(&q_ident->max_sg_elems);
			qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
		}
		mutex_unlock(&ionic->dev_cmd_lock);

		if (err == -EINVAL) {
			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
			continue;
		} else if (err == -EIO) {
			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
			return;
		} else if (err) {
			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
				qtype, err);
			return;
		}

		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
			qtype, qti->version);
		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
			qtype, qti->supported);
		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
			qtype, qti->features);
		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
			qtype, qti->desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
			qtype, qti->comp_sz);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
			qtype, qti->sg_desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
			qtype, qti->max_sg_elems);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
			qtype, qti->sg_desc_stride);
	}
}
|
|
|
|
|
2019-09-03 22:28:07 +00:00
|
|
|
/* Fetch the lif identity data from the firmware into @lid.
 *
 * @ionic:    the device instance
 * @lif_type: which lif flavor to identify
 * @lid:      caller-provided buffer for the identity data
 *
 * Return: 0 on success, negative errno from the device command.
 */
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
		       union ionic_lif_identity *lid)
{
	struct ionic_dev *idev = &ionic->idev;
	size_t sz;
	int err;

	/* copy no more than fits in the dev_cmd data window */
	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err)
		return err;	/* kernel style: no parens around return value */

	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
		le64_to_cpu(lid->capabilities));

	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
		le32_to_cpu(lid->eth.max_ucast_filters));
	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
		le32_to_cpu(lid->eth.max_mcast_filters));
	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
		le64_to_cpu(lid->eth.config.features));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
		le32_to_cpu(lid->eth.config.mtu));

	return 0;
}
|
|
|
|
|
2020-08-27 23:00:22 +00:00
|
|
|
/* Work out how many queues and interrupt vectors this device gets.
 *
 * Starts from the FW-advertised limits (clamped hard for kdump), sets
 * aside a queue pair for hardware timestamping if that feature is on,
 * then tries to allocate 1 (adminq/notifyq) + nxqs (TxRx pairs) + neqs
 * (RDMA EQs) vectors from the OS.  If the OS can't supply that many,
 * the counts are halved - notifyqs first, then EQs, then TxRx pairs -
 * and the allocation is retried, down to a floor of adminq + one pair.
 *
 * Return: 0 on success, -ENOSPC if even the minimum can't be met,
 * or another negative errno from the vector allocation.
 */
int ionic_lif_size(struct ionic *ionic)
{
	struct ionic_identity *ident = &ionic->ident;
	unsigned int nintrs, dev_nintrs;
	union ionic_lif_config *lc;
	unsigned int ntxqs_per_lif;
	unsigned int nrxqs_per_lif;
	unsigned int neqs_per_lif;
	unsigned int nnqs_per_lif;
	unsigned int nxqs, neqs;
	unsigned int min_intrs;
	int err;

	/* retrieve basic values from FW */
	lc = &ident->lif.eth.config;
	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);

	/* limit values to play nice with kdump */
	if (is_kdump_kernel()) {
		dev_nintrs = 2;
		neqs_per_lif = 0;
		nnqs_per_lif = 0;
		ntxqs_per_lif = 1;
		nrxqs_per_lif = 1;
	}

	/* reserve last queue id for hardware timestamping */
	if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) {
		if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) {
			/* not enough queues to spare one - drop the feature */
			lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP);
		} else {
			ntxqs_per_lif -= 1;
			nrxqs_per_lif -= 1;
		}
	}

	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
	nxqs = min(nxqs, num_online_cpus());
	neqs = min(neqs_per_lif, num_online_cpus());

try_again:
	/* interrupt usage:
	 * 1 for master lif adminq/notifyq
	 * 1 for each CPU for master lif TxRx queue pairs
	 * whatever's left is for RDMA queues
	 */
	nintrs = 1 + nxqs + neqs;
	min_intrs = 2; /* adminq + 1 TxRx queue pair */

	if (nintrs > dev_nintrs)
		goto try_fewer;

	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
	if (err < 0 && err != -ENOSPC) {
		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
		return err;
	}
	if (err == -ENOSPC)
		goto try_fewer;

	/* got some vectors but not the full count - release and retry */
	if (err != nintrs) {
		ionic_bus_free_irq_vectors(ionic);
		goto try_fewer;
	}

	ionic->nnqs_per_lif = nnqs_per_lif;
	ionic->neqs_per_lif = neqs;
	ionic->ntxqs_per_lif = nxqs;
	ionic->nrxqs_per_lif = nxqs;
	ionic->nintrs = nintrs;

	ionic_debugfs_add_sizes(ionic);

	return 0;

try_fewer:
	/* shed capacity in priority order: notifyqs, then EQs, then
	 * TxRx pairs, halving each time
	 */
	if (nnqs_per_lif > 1) {
		nnqs_per_lif >>= 1;
		goto try_again;
	}
	if (neqs > 1) {
		neqs >>= 1;
		goto try_again;
	}
	if (nxqs > 1) {
		nxqs >>= 1;
		goto try_again;
	}
	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
	return -ENOSPC;
}
|