net: mana: Add support for jumbo frame

During probe, get the hardware-allowed max MTU by querying the device
configuration. Users can select an MTU up to the device limit.

When XDP is in use, limit MTU settings so the buffer size fits within
one page. Conversely, when the MTU is already set to a value too large
for a one-page buffer, XDP is not allowed to run.

Also, to prevent a failed MTU change from leaving the NIC in a bad
state, pre-allocate all buffers before starting the change. In a
low-memory condition, the change then returns an error up front,
without affecting the NIC.

Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Haiyang Zhang, 2023-04-12 14:16:03 -07:00 (committed by David S. Miller)
Commit: 80f6215b45, parent: 2fbbd712ba
4 changed files with 233 additions and 24 deletions
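
A note on the one-page XDP constraint referenced in the message: with XDP
attached, the RX buffer must fit the XDP headroom, the frame, and the
skb_shared_info tail in a single page. The driver expresses that budget
through MANA_XDP_MTU_MAX, which an earlier patch in this series defines in
mana.h roughly as sketched below (shown here for context; not part of this
diff):

/* One-page RX buffer budget with XDP attached (sketch):
 *
 *   XDP_PACKET_HEADROOM + mtu + MANA_RXBUF_PAD <= PAGE_SIZE
 *
 * with MANA_RXBUF_PAD covering the Ethernet header plus the aligned
 * skb_shared_info tail:
 */
#define MANA_RXBUF_PAD   (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
                          ETH_HLEN)
#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)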

drivers/net/ethernet/microsoft/mana/mana_bpf.c

@@ -133,12 +133,6 @@ out:
 	return act;
 }
 
-static unsigned int mana_xdp_fraglen(unsigned int len)
-{
-	return SKB_DATA_ALIGN(len) +
-	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-}
-
 struct bpf_prog *mana_xdp_get(struct mana_port_context *apc)
 {
 	ASSERT_RTNL();
@@ -179,17 +173,18 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
 {
 	struct mana_port_context *apc = netdev_priv(ndev);
 	struct bpf_prog *old_prog;
-	int buf_max;
+	struct gdma_context *gc;
+
+	gc = apc->ac->gdma_dev->gdma_context;
 
 	old_prog = mana_xdp_get(apc);
 
 	if (!old_prog && !prog)
 		return 0;
 
-	buf_max = XDP_PACKET_HEADROOM + mana_xdp_fraglen(ndev->mtu + ETH_HLEN);
-	if (prog && buf_max > PAGE_SIZE) {
-		netdev_err(ndev, "XDP: mtu:%u too large, buf_max:%u\n",
-			   ndev->mtu, buf_max);
+	if (prog && ndev->mtu > MANA_XDP_MTU_MAX) {
+		netdev_err(ndev, "XDP: mtu:%u too large, mtu_max:%lu\n",
+			   ndev->mtu, MANA_XDP_MTU_MAX);
 		NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");
 
 		return -EOPNOTSUPP;
@@ -206,6 +201,11 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
 	if (apc->port_is_up)
 		mana_chn_setxdp(apc, prog);
 
+	if (prog)
+		ndev->max_mtu = MANA_XDP_MTU_MAX;
+	else
+		ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
+
 	return 0;
 }
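
Updating ndev->max_mtu when a program is attached or detached is what makes
the XDP/MTU interlock stick: the networking core validates a requested MTU
against min_mtu/max_mtu before the driver's ndo_change_mtu is invoked, so an
oversized MTU is refused while XDP is loaded. Roughly (an abridged sketch of
the core-side check in net/core/dev.c; not part of this diff):

	/* abridged: the core's MTU validation runs before ndo_change_mtu */
	if (new_mtu < dev->min_mtu || new_mtu > dev->max_mtu)
		return -EINVAL;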

drivers/net/ethernet/microsoft/mana/mana_en.c

@@ -427,6 +427,192 @@ static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	return txq;
 }
 
+/* Release pre-allocated RX buffers */
+static void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
+{
+	struct device *dev;
+	int i;
+
+	dev = mpc->ac->gdma_dev->gdma_context->dev;
+
+	if (!mpc->rxbufs_pre)
+		goto out1;
+
+	if (!mpc->das_pre)
+		goto out2;
+
+	while (mpc->rxbpre_total) {
+		i = --mpc->rxbpre_total;
+		dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize,
+				 DMA_FROM_DEVICE);
+		put_page(virt_to_head_page(mpc->rxbufs_pre[i]));
+	}
+
+	kfree(mpc->das_pre);
+	mpc->das_pre = NULL;
+
+out2:
+	kfree(mpc->rxbufs_pre);
+	mpc->rxbufs_pre = NULL;
+
+out1:
+	mpc->rxbpre_datasize = 0;
+	mpc->rxbpre_alloc_size = 0;
+	mpc->rxbpre_headroom = 0;
+}
+
+/* Get a buffer from the pre-allocated RX buffers */
+static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
+{
+	struct net_device *ndev = rxq->ndev;
+	struct mana_port_context *mpc;
+	void *va;
+
+	mpc = netdev_priv(ndev);
+
+	if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) {
+		netdev_err(ndev, "No RX pre-allocated bufs\n");
+		return NULL;
+	}
+
+	/* Check sizes to catch unexpected coding error */
+	if (mpc->rxbpre_datasize != rxq->datasize) {
+		netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n",
+			   mpc->rxbpre_datasize, rxq->datasize);
+		return NULL;
+	}
+
+	if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
+		netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n",
+			   mpc->rxbpre_alloc_size, rxq->alloc_size);
+		return NULL;
+	}
+
+	if (mpc->rxbpre_headroom != rxq->headroom) {
+		netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n",
+			   mpc->rxbpre_headroom, rxq->headroom);
+		return NULL;
+	}
+
+	mpc->rxbpre_total--;
+
+	*da = mpc->das_pre[mpc->rxbpre_total];
+	va = mpc->rxbufs_pre[mpc->rxbpre_total];
+	mpc->rxbufs_pre[mpc->rxbpre_total] = NULL;
+
+	/* Deallocate the array after all buffers are gone */
+	if (!mpc->rxbpre_total)
+		mana_pre_dealloc_rxbufs(mpc);
+
+	return va;
+}
+
+/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
+static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
+			       u32 *headroom)
+{
+	if (mtu > MANA_XDP_MTU_MAX)
+		*headroom = 0; /* no support for XDP */
+	else
+		*headroom = XDP_PACKET_HEADROOM;
+
+	*alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
+
+	*datasize = ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN);
+}
+
+static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
+{
+	struct device *dev;
+	struct page *page;
+	dma_addr_t da;
+	int num_rxb;
+	void *va;
+	int i;
+
+	mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
+			   &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);
+
+	dev = mpc->ac->gdma_dev->gdma_context->dev;
+
+	num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE;
+
+	WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
+	mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
+	if (!mpc->rxbufs_pre)
+		goto error;
+
+	mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL);
+	if (!mpc->das_pre)
+		goto error;
+
+	mpc->rxbpre_total = 0;
+
+	for (i = 0; i < num_rxb; i++) {
+		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
+			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
+			if (!va)
+				goto error;
+		} else {
+			page = dev_alloc_page();
+			if (!page)
+				goto error;
+
+			va = page_to_virt(page);
+		}
+
+		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
+				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev, da)) {
+			put_page(virt_to_head_page(va));
+			goto error;
+		}
+
+		mpc->rxbufs_pre[i] = va;
+		mpc->das_pre[i] = da;
+		mpc->rxbpre_total = i + 1;
+	}
+
+	return 0;
+
+error:
+	mana_pre_dealloc_rxbufs(mpc);
+	return -ENOMEM;
+}
+
+static int mana_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	struct mana_port_context *mpc = netdev_priv(ndev);
+	unsigned int old_mtu = ndev->mtu;
+	int err;
+
+	/* Pre-allocate buffers to prevent failure in mana_attach later */
+	err = mana_pre_alloc_rxbufs(mpc, new_mtu);
+	if (err) {
+		netdev_err(ndev, "Insufficient memory for new MTU\n");
+		return err;
+	}
+
+	err = mana_detach(ndev, false);
+	if (err) {
+		netdev_err(ndev, "mana_detach failed: %d\n", err);
+		goto out;
+	}
+
+	ndev->mtu = new_mtu;
+
+	err = mana_attach(ndev);
+	if (err) {
+		netdev_err(ndev, "mana_attach failed: %d\n", err);
+		ndev->mtu = old_mtu;
+	}
+
+out:
+	mana_pre_dealloc_rxbufs(mpc);
+	return err;
+}
+
 static const struct net_device_ops mana_devops = {
 	.ndo_open		= mana_open,
 	.ndo_stop		= mana_close,
@@ -436,6 +622,7 @@ static const struct net_device_ops mana_devops = {
 	.ndo_get_stats64	= mana_get_stats64,
 	.ndo_bpf		= mana_bpf,
 	.ndo_xdp_xmit		= mana_xdp_xmit,
+	.ndo_change_mtu		= mana_change_mtu,
 };
 
 static void mana_cleanup_port_context(struct mana_port_context *apc)
@@ -625,6 +812,9 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
 
 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
 			     sizeof(req), sizeof(resp));
+
+	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+
 	req.proto_major_ver = proto_major_ver;
 	req.proto_minor_ver = proto_minor_ver;
 	req.proto_micro_ver = proto_micro_ver;
@@ -647,6 +837,11 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
 
 	*max_num_vports = resp.max_num_vports;
 
+	if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
+		gc->adapter_mtu = resp.adapter_mtu;
+	else
+		gc->adapter_mtu = ETH_FRAME_LEN;
+
 	return 0;
 }
@@ -1712,10 +1907,14 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
 static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
 			    struct mana_rxq *rxq, struct device *dev)
 {
+	struct mana_port_context *mpc = netdev_priv(rxq->ndev);
 	dma_addr_t da;
 	void *va;
 
-	va = mana_get_rxfrag(rxq, dev, &da, false);
+	if (mpc->rxbufs_pre)
+		va = mana_get_rxbuf_pre(rxq, &da);
+	else
+		va = mana_get_rxfrag(rxq, dev, &da, false);
 
 	if (!va)
 		return -ENOMEM;
@@ -1797,7 +1996,6 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 	struct gdma_dev *gd = apc->ac->gdma_dev;
 	struct mana_obj_spec wq_spec;
 	struct mana_obj_spec cq_spec;
-	unsigned int mtu = ndev->mtu;
 	struct gdma_queue_spec spec;
 	struct mana_cq *cq = NULL;
 	struct gdma_context *gc;
@@ -1817,15 +2015,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 	rxq->rxq_idx = rxq_idx;
 	rxq->rxobj = INVALID_MANA_HANDLE;
 
-	rxq->datasize = ALIGN(mtu + ETH_HLEN, 64);
-
-	if (mtu > MANA_XDP_MTU_MAX) {
-		rxq->alloc_size = mtu + MANA_RXBUF_PAD;
-		rxq->headroom = 0;
-	} else {
-		rxq->alloc_size = mtu + MANA_RXBUF_PAD + XDP_PACKET_HEADROOM;
-		rxq->headroom = XDP_PACKET_HEADROOM;
-	}
+	mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
+			   &rxq->headroom);
 
 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
 	if (err)
@@ -2238,8 +2429,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
 	ndev->netdev_ops = &mana_devops;
 	ndev->ethtool_ops = &mana_ethtool_ops;
 	ndev->mtu = ETH_DATA_LEN;
-	ndev->max_mtu = ndev->mtu;
-	ndev->min_mtu = ndev->mtu;
+	ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
+	ndev->min_mtu = ETH_MIN_MTU;
 	ndev->needed_headroom = MANA_HEADROOM;
 	ndev->dev_port = port_idx;
 	SET_NETDEV_DEV(ndev, gc->dev);
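
A worked example of the new sizing helper (illustrative, assuming 4 KiB
pages): for new_mtu = 9000, mana_get_rxbuf_cfg() yields:

	/* Illustrative values for new_mtu = 9000, PAGE_SIZE = 4096:
	 *
	 *   headroom   = 0;                      9000 > MANA_XDP_MTU_MAX
	 *   alloc_size = 9000 + MANA_RXBUF_PAD;  exceeds PAGE_SIZE, so
	 *                                        mana_pre_alloc_rxbufs() takes
	 *                                        the netdev_alloc_frag() path
	 *   datasize   = ALIGN(9014, 64) = 9024; 9000 + ETH_HLEN, aligned to
	 *                                        MANA_RX_DATA_ALIGN
	 */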

include/net/mana/gdma.h

@@ -145,6 +145,7 @@ struct gdma_general_req {
 }; /* HW DATA */
 
 #define GDMA_MESSAGE_V1 1
+#define GDMA_MESSAGE_V2 2
 
 struct gdma_general_resp {
 	struct gdma_resp_hdr hdr;
@@ -354,6 +355,9 @@ struct gdma_context {
 	struct gdma_resource	msix_resource;
 	struct gdma_irq_context	*irq_contexts;
 
+	/* L2 MTU */
+	u16 adapter_mtu;
+
 	/* This maps a CQ index to the queue structure. */
 	unsigned int		max_num_cqs;
 	struct gdma_queue	**cq_table;
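
The fallback keeps legacy behavior intact: a PF that only speaks
GDMA_MESSAGE_V1 gets adapter_mtu = ETH_FRAME_LEN, so the probe-time
computation works out to the classic default:

	/* V1 fallback arithmetic (for reference):
	 *   max_mtu = adapter_mtu - ETH_HLEN
	 *           = ETH_FRAME_LEN (1514) - 14 = 1500 (ETH_DATA_LEN)
	 */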

include/net/mana/mana.h

@@ -37,6 +37,7 @@ enum TRI_STATE {
 #define COMP_ENTRY_SIZE 64
 
 #define RX_BUFFERS_PER_QUEUE 512
+#define MANA_RX_DATA_ALIGN 64
 
 #define MAX_SEND_BUFFERS_PER_QUEUE 256
@@ -390,6 +391,14 @@ struct mana_port_context {
 	/* This points to an array of num_queues of RQ pointers. */
 	struct mana_rxq **rxqs;
 
+	/* pre-allocated rx buffer array */
+	void **rxbufs_pre;
+	dma_addr_t *das_pre;
+	int rxbpre_total;
+	u32 rxbpre_datasize;
+	u32 rxbpre_alloc_size;
+	u32 rxbpre_headroom;
+
 	struct bpf_prog *bpf_prog;
 
 	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
@@ -489,6 +498,11 @@ struct mana_query_device_cfg_resp {
 	u16 max_num_vports;
 	u16 reserved;
 	u32 max_num_eqs;
+
+	/* response v2: */
+	u16 adapter_mtu;
+	u16 reserved2;
+	u32 reserved3;
 }; /* HW DATA */
 
 /* Query vPort Configuration */
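
Taken together, the runtime behavior works out as follows (summary, not code
from this patch):

	/* - no XDP attached: max_mtu = adapter_mtu - ETH_HLEN, so e.g.
	 *   "ip link set eth0 mtu 9000" succeeds if the device reports a
	 *   large enough adapter_mtu;
	 * - XDP attached: max_mtu is clamped to MANA_XDP_MTU_MAX, so the
	 *   core rejects an oversized MTU before ndo_change_mtu runs;
	 * - MTU already above MANA_XDP_MTU_MAX: mana_xdp_set() refuses the
	 *   program with -EOPNOTSUPP.
	 */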