/* SPDX-License-Identifier: GPL-2.0-only */
/*******************************************************************************
  Copyright (C) 2007-2009 STMicroelectronics Ltd

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#ifndef __STMMAC_H__
#define __STMMAC_H__

#define STMMAC_RESOURCE_NAME   "stmmaceth"

#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/if_vlan.h>
#include <linux/stmmac.h>
#include <linux/phylink.h>
#include <linux/pci.h>
#include "common.h"
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/reset.h>
#include <net/page_pool.h>
#include <uapi/linux/bpf.h>

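/* Probe-time resources (register base, MAC address, IRQ lines) collected by
 * the platform/PCI glue and handed to stmmac_dvr_probe().
 */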
struct stmmac_resources {
        void __iomem *addr;
        u8 mac[ETH_ALEN];
        int wol_irq;
        int lpi_irq;
        int irq;
        int sfty_ce_irq;
        int sfty_ue_irq;
        int rx_irq[MTL_MAX_RX_QUEUES];
        int tx_irq[MTL_MAX_TX_QUEUES];
};

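/* Origin of a TX buffer: regular skb from the stack, XDP_TX, XDP redirect
 * (ndo_xdp_xmit), or AF_XDP (XSK) zero-copy transmit.
 */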
enum stmmac_txbuf_type {
        STMMAC_TXBUF_T_SKB,
        STMMAC_TXBUF_T_XDP_TX,
        STMMAC_TXBUF_T_XDP_NDO,
        STMMAC_TXBUF_T_XSK_TX,
};

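/* Per-descriptor TX bookkeeping used when unmapping and reclaiming buffers
 * on transmit completion (stmmac_tx_clean()).
 */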
struct stmmac_tx_info {
        dma_addr_t buf;
        bool map_as_page;
        unsigned len;
        bool last_segment;
        bool is_jumbo;
        enum stmmac_txbuf_type buf_type;
};

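/* TBS (Time Based Scheduling) per-queue flags: capability available in
 * hardware vs. currently enabled.
 */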
#define STMMAC_TBS_AVAIL        BIT(0)
#define STMMAC_TBS_EN           BIT(1)

/* Frequently used values are kept adjacent for cache effect */
struct stmmac_tx_queue {
        u32 tx_count_frames;
        int tbs;
        struct hrtimer txtimer;
        u32 queue_index;
        struct stmmac_priv *priv_data;
        struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
        struct dma_edesc *dma_entx;
        struct dma_desc *dma_tx;
        union {
                struct sk_buff **tx_skbuff;
                struct xdp_frame **xdpf;
        };
        struct stmmac_tx_info *tx_skbuff_dma;
        struct xsk_buff_pool *xsk_pool;
        u32 xsk_frames_done;
        unsigned int cur_tx;
        unsigned int dirty_tx;
        dma_addr_t dma_tx_phy;
        dma_addr_t tx_tail_addr;
        u32 mss;
};

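/* RX buffer: either a page_pool page (with its DMA address and offset) or,
 * in AF_XDP zero-copy mode, an xdp_buff taken from the XSK pool. sec_page
 * and sec_addr hold the optional secondary (split-header) buffer.
 */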
struct stmmac_rx_buffer {
        union {
                struct {
                        struct page *page;
                        dma_addr_t addr;
                        __u32 page_offset;
                };
                struct xdp_buff *xdp;
        };
        struct page *sec_page;
        dma_addr_t sec_addr;
};

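/* Per-queue RX state; buffers come from page_pool or, when an XSK pool is
 * attached to the queue, from AF_XDP zero-copy.
 */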
struct stmmac_rx_queue {
        u32 rx_count_frames;
        u32 queue_index;
        struct xdp_rxq_info xdp_rxq;
        struct xsk_buff_pool *xsk_pool;
        struct page_pool *page_pool;
        struct stmmac_rx_buffer *buf_pool;
        struct stmmac_priv *priv_data;
        struct dma_extended_desc *dma_erx;
        struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
        unsigned int cur_rx;
        unsigned int dirty_rx;
        unsigned int buf_alloc_num;
        u32 rx_zeroc_thresh;
        dma_addr_t dma_rx_phy;
        u32 rx_tail_addr;
        unsigned int state_saved;
        struct {
                struct sk_buff *skb;
                unsigned int len;
                unsigned int error;
        } state;
};

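/* Per-DMA-channel NAPI contexts; rxtx_napi is used instead of the separate
 * rx_napi/tx_napi instances while the channel is bound to an XSK pool.
 */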
struct stmmac_channel {
        struct napi_struct rx_napi ____cacheline_aligned_in_smp;
        struct napi_struct tx_napi ____cacheline_aligned_in_smp;
        struct napi_struct rxtx_napi ____cacheline_aligned_in_smp;
        struct stmmac_priv *priv_data;
        spinlock_t lock;
        u32 index;
};

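/* Entry in the MAC's RX parser table as programmed by the TC offload code
 * (see the "TC Handling" fields in struct stmmac_priv).
 */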
struct stmmac_tc_entry {
        bool in_use;
        bool in_hw;
        bool is_last;
        bool is_frag;
        void *frag_ptr;
        unsigned int table_pos;
        u32 handle;
        u32 prio;
        struct {
                u32 match_data;
                u32 match_en;
                u8 af:1;
                u8 rf:1;
                u8 im:1;
                u8 nc:1;
                u8 res1:4;
                u8 frame_offset;
                u8 ok_index;
                u8 dma_ch_no;
                u32 res2;
        } __packed val;
};

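/* Pulse Per Second (PPS) output configuration; up to STMMAC_PPS_MAX outputs. */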
#define STMMAC_PPS_MAX          4
struct stmmac_pps_cfg {
        bool available;
        struct timespec64 start;
        struct timespec64 period;
};

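/* Receive Side Scaling state: hash key and indirection table. */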
struct stmmac_rss {
        int enable;
        u8 key[STMMAC_RSS_HASH_KEY_SIZE];
        u32 table[STMMAC_RSS_MAX_TABLE_SIZE];
};

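/* tc-flower flow entry (cookie plus action); STMMAC_FLOW_ACTION_DROP marks
 * drop rules.
 */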
#define STMMAC_FLOW_ACTION_DROP         BIT(0)
struct stmmac_flow_entry {
        unsigned long cookie;
        unsigned long action;
        u8 ip_proto;
        int in_use;
        int idx;
        int is_l4;
};

/* Rx Frame Steering */
enum stmmac_rfs_type {
        STMMAC_RFS_T_VLAN,
        STMMAC_RFS_T_LLDP,
        STMMAC_RFS_T_1588,
        STMMAC_RFS_T_MAX,
};

struct stmmac_rfs_entry {
        unsigned long cookie;
        u16 etype;
        int in_use;
        int type;
        int tc;
};

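/* Driver private data, one instance per stmmac net_device. */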
struct stmmac_priv {
        /* Frequently used values are kept adjacent for cache effect */
        u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
        u32 tx_coal_timer[MTL_MAX_TX_QUEUES];
        u32 rx_coal_frames[MTL_MAX_TX_QUEUES];

        int hwts_tx_en;
        bool tx_path_in_lpi_mode;
        bool tso;
        int sph;
        int sph_cap;
        u32 sarc_type;

        unsigned int dma_buf_sz;
        unsigned int rx_copybreak;
        u32 rx_riwt[MTL_MAX_TX_QUEUES];
        int hwts_rx_en;

        void __iomem *ioaddr;
        struct net_device *dev;
        struct device *device;
        struct mac_device_info *hw;
        int (*hwif_quirks)(struct stmmac_priv *priv);
        struct mutex lock;

        /* RX Queue */
        struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
        unsigned int dma_rx_size;

        /* TX Queue */
        struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
        unsigned int dma_tx_size;

        /* Generic channel for NAPI */
        struct stmmac_channel channel[STMMAC_CH_MAX];

        int speed;
        unsigned int flow_ctrl;
        unsigned int pause;
        struct mii_bus *mii;

        struct phylink_config phylink_config;
        struct phylink *phylink;

        struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp;
        struct stmmac_safety_stats sstats;
        struct plat_stmmacenet_data *plat;
        struct dma_features dma_cap;
        struct stmmac_counters mmc;
        int hw_cap_support;
        int synopsys_id;
        u32 msg_enable;
        int wolopts;
        int wol_irq;
        int clk_csr;
        struct timer_list eee_ctrl_timer;
        int lpi_irq;
        int eee_enabled;
        int eee_active;
        int tx_lpi_timer;
        int tx_lpi_enabled;
        int eee_tw_timer;
        bool eee_sw_timer_en;
        unsigned int mode;
        unsigned int chain_mode;
        int extend_desc;
        struct hwtstamp_config tstamp_config;
        struct ptp_clock *ptp_clock;
        struct ptp_clock_info ptp_clock_ops;
        unsigned int default_addend;
        u32 sub_second_inc;
        u32 systime_flags;
        u32 adv_ts;
        int use_riwt;
        int irq_wake;
        rwlock_t ptp_lock;
        /* Protects auxiliary snapshot registers from concurrent access. */
        struct mutex aux_ts_lock;

        void __iomem *mmcaddr;
        void __iomem *ptpaddr;
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        int sfty_ce_irq;
        int sfty_ue_irq;
        int rx_irq[MTL_MAX_RX_QUEUES];
        int tx_irq[MTL_MAX_TX_QUEUES];
        /* irq name */
        char int_name_mac[IFNAMSIZ + 9];
        char int_name_wol[IFNAMSIZ + 9];
        char int_name_lpi[IFNAMSIZ + 9];
        char int_name_sfty_ce[IFNAMSIZ + 10];
        char int_name_sfty_ue[IFNAMSIZ + 10];
        char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14];
        char int_name_tx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 18];

#ifdef CONFIG_DEBUG_FS
        struct dentry *dbgfs_dir;
#endif

        unsigned long state;
        struct workqueue_struct *wq;
        struct work_struct service_task;

        /* Workqueue for handling FPE hand-shaking */
        unsigned long fpe_task_state;
        struct workqueue_struct *fpe_wq;
        struct work_struct fpe_task;
        char wq_name[IFNAMSIZ + 4];

        /* TC Handling */
        unsigned int tc_entries_max;
        unsigned int tc_off_max;
        struct stmmac_tc_entry *tc_entries;
        unsigned int flow_entries_max;
        struct stmmac_flow_entry *flow_entries;
        unsigned int rfs_entries_max[STMMAC_RFS_T_MAX];
        unsigned int rfs_entries_cnt[STMMAC_RFS_T_MAX];
        unsigned int rfs_entries_total;
        struct stmmac_rfs_entry *rfs_entries;

        /* Pulse Per Second output */
        struct stmmac_pps_cfg pps[STMMAC_PPS_MAX];

        /* Receive Side Scaling */
        struct stmmac_rss rss;

        /* XDP BPF Program */
        unsigned long *af_xdp_zc_qps;
        struct bpf_prog *xdp_prog;
};

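/* Bits used in priv->state by the reset/service task machinery. */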
enum stmmac_state {
        STMMAC_DOWN,
        STMMAC_RESET_REQUESTED,
        STMMAC_RESETING,
        STMMAC_SERVICE_SCHED,
};

int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
int stmmac_xpcs_setup(struct mii_bus *mii);
void stmmac_set_ethtool_ops(struct net_device *netdev);

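/* Called from ndo_open(); keeps the PTP hardware clock counter running across
 * SIOCSHWTSTAMP changes so an already synchronized timebase is not reset.
 */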
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags);
void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_xdp_open(struct net_device *dev);
void stmmac_xdp_release(struct net_device *dev);
int stmmac_resume(struct device *dev);
int stmmac_suspend(struct device *dev);
int stmmac_dvr_remove(struct device *dev);
int stmmac_dvr_probe(struct device *device,
                     struct plat_stmmacenet_data *plat_dat,
                     struct stmmac_resources *res);
void stmmac_disable_eee_mode(struct stmmac_priv *priv);
bool stmmac_eee_init(struct stmmac_priv *priv);
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable);

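/* XDP helpers: while an XDP program is attached, RX buffers reserve
 * XDP_PACKET_HEADROOM bytes of headroom; otherwise no offset is used.
 */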
static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
{
        return !!priv->xdp_prog;
}

static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
{
        if (stmmac_xdp_is_enabled(priv))
                return XDP_PACKET_HEADROOM;

        return 0;
}

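/* Per-queue stop/restart helpers used when attaching or detaching an AF_XDP
 * (XSK) buffer pool; stmmac_xsk_wakeup() asks user space to refill the queue.
 */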
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue);
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags);
struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
                                           ktime_t current_time,
                                           u64 cycle_time);

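/* Ethtool selftest hooks; with CONFIG_STMMAC_SELFTESTS disabled the stubs are
 * no-ops and stmmac_selftest_get_count() returns -EOPNOTSUPP.
 */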
#if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
void stmmac_selftest_run(struct net_device *dev,
                         struct ethtool_test *etest, u64 *buf);
void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data);
int stmmac_selftest_get_count(struct stmmac_priv *priv);
#else
static inline void stmmac_selftest_run(struct net_device *dev,
                                       struct ethtool_test *etest, u64 *buf)
{
        /* Not enabled */
}
static inline void stmmac_selftest_get_strings(struct stmmac_priv *priv,
                                               u8 *data)
{
        /* Not enabled */
}
static inline int stmmac_selftest_get_count(struct stmmac_priv *priv)
{
        return -EOPNOTSUPP;
}
#endif /* CONFIG_STMMAC_SELFTESTS */

#endif /* __STMMAC_H__ */