Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller:

 1) Off by one in mt76 airtime calculation, from Dan Carpenter.

 2) Fix TLV fragment allocation loop condition in iwlwifi, from Luca Coelho.

 3) Don't confirm neigh entries when doing ipsec pmtu updates, from Xu Wang.

 4) More checks to make sure we only send TSO packets to lan78xx chips that they can actually handle. From James Hughes.

 5) Fix ip_tunnel namespace move, from William Dauchy.

 6) Fix unintended packet reordering due to cooperation between listification done by GRO and non-GRO paths. From Maxim Mikityanskiy.

 7) Add Jakub Kicinski formally as networking co-maintainer.

 8) Info leak in airo ioctls, from Michael Ellerman.

 9) IFLA_MTU attribute needs validation during rtnl_create_link(), from Eric Dumazet.

10) Use after free during reload in mlxsw, from Ido Schimmel.

11) Dangling pointers are possible in tp->highest_sack, fix from Eric Dumazet.

12) Missing *pos++ in various networking seq_next handlers, from Vasily Averin.

13) CHELSIO_GET_MEM operation needs CAP_NET_ADMIN check, from Michael Ellerman.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (109 commits)
  firestream: fix memory leaks
  net: cxgb3_main: Add CAP_NET_ADMIN check to CHELSIO_GET_MEM
  net: bcmgenet: Use netif_tx_napi_add() for TX NAPI
  tipc: change maintainer email address
  net: stmmac: platform: fix probe for ACPI devices
  net/mlx5e: kTLS, Do not send decrypted-marked SKBs via non-accel path
  net/mlx5e: kTLS, Remove redundant posts in TX resync flow
  net/mlx5e: kTLS, Fix corner-case checks in TX resync flow
  net/mlx5e: Clear VF config when switching modes
  net/mlx5: DR, use non preemptible call to get the current cpu number
  net/mlx5: E-Switch, Prevent ingress rate configuration of uplink rep
  net/mlx5: DR, Enable counter on non-fwd-dest objects
  net/mlx5: Update the list of the PCI supported devices
  net/mlx5: Fix lowest FDB pool size
  net: Fix skb->csum update in inet_proto_csum_replace16().
  netfilter: nf_tables: autoload modules from the abort path
  netfilter: nf_tables: add __nft_chain_type_get()
  netfilter: nf_tables_offload: fix check the chain offload flag
  netfilter: conntrack: sctp: use distinct states for new SCTP connections
  ipv6_route_seq_next should increase position index
  ...
commit 84809aaf78
@@ -403,6 +403,19 @@ PROPERTIES
The settings and programming routines for internal/external
MDIO are different. Must be included for internal MDIO.

- fsl,erratum-a011043
  Usage: optional
  Value type: <boolean>
  Definition: Indicates the presence of the A011043 erratum
  describing that the MDIO_CFG[MDIO_RD_ER] bit may be falsely
  set when reading internal PCS registers. MDIO reads to
  internal PCS registers may result in having the
  MDIO_CFG[MDIO_RD_ER] bit set, even when there is no error and
  read data (MDIO_DATA[MDIO_DATA]) is correct.
  Software may get false read error when reading internal
  PCS registers through MDIO. As a workaround, all internal
  MDIO accesses should ignore the MDIO_CFG[MDIO_RD_ER] bit.

For internal PHY device on internal mdio bus, a PHY node should be created.
See the definition of the PHY node in booting-without-of.txt for an
example of how to define a PHY (Internal PHY has no interrupt line).

@@ -6197,6 +6197,7 @@ ETHERNET PHY LIBRARY
M: Andrew Lunn <andrew@lunn.ch>
M: Florian Fainelli <f.fainelli@gmail.com>
M: Heiner Kallweit <hkallweit1@gmail.com>
R: Russell King <linux@armlinux.org.uk>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/sysfs-class-net-phydev

@@ -8569,7 +8570,7 @@ S: Maintained
F: drivers/platform/x86/intel-vbtn.c

INTEL WIRELESS 3945ABG/BG, 4965AGN (iwlegacy)
M: Stanislaw Gruszka <sgruszka@redhat.com>
M: Stanislaw Gruszka <stf_xl@wp.pl>
L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/net/wireless/intel/iwlegacy/

@@ -11499,6 +11500,7 @@ F: drivers/net/dsa/

NETWORKING [GENERAL]
M: "David S. Miller" <davem@davemloft.net>
M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
W: http://www.linuxfoundation.org/en/Net
Q: http://patchwork.ozlabs.org/project/netdev/list/

@@ -13820,7 +13822,7 @@ S: Maintained
F: arch/mips/ralink

RALINK RT2X00 WIRELESS LAN DRIVER
M: Stanislaw Gruszka <sgruszka@redhat.com>
M: Stanislaw Gruszka <stf_xl@wp.pl>
M: Helmut Schaa <helmut.schaa@googlemail.com>
L: linux-wireless@vger.kernel.org
S: Maintained

@@ -16601,7 +16603,7 @@ F: kernel/time/ntp.c
F: tools/testing/selftests/timers/

TIPC NETWORK LAYER
M: Jon Maloy <jon.maloy@ericsson.com>
M: Jon Maloy <jmaloy@redhat.com>
M: Ying Xue <ying.xue@windriver.com>
L: netdev@vger.kernel.org (core kernel code)
L: tipc-discussion@lists.sourceforge.net (user apps, general discussion)

@@ -63,6 +63,7 @@ fman@400000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xe1000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy0: ethernet-phy@0 {
		reg = <0x0>;

@@ -60,6 +60,7 @@ fman@400000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xf1000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy6: ethernet-phy@0 {
		reg = <0x0>;

@@ -63,6 +63,7 @@ fman@400000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xe3000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy1: ethernet-phy@0 {
		reg = <0x0>;

@@ -60,6 +60,7 @@ fman@400000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xf3000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy7: ethernet-phy@0 {
		reg = <0x0>;

@@ -59,6 +59,7 @@ fman@400000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xe1000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy0: ethernet-phy@0 {
		reg = <0x0>;

@@ -59,6 +59,7 @@ fman@400000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xe3000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy1: ethernet-phy@0 {
		reg = <0x0>;

@@ -59,6 +59,7 @@ fman@400000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xe5000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy2: ethernet-phy@0 {
		reg = <0x0>;

@@ -59,6 +59,7 @@ fman@400000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xe7000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy3: ethernet-phy@0 {
		reg = <0x0>;

@@ -59,6 +59,7 @@ fman@400000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xe9000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy4: ethernet-phy@0 {
		reg = <0x0>;

@@ -59,6 +59,7 @@ fman@400000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xeb000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy5: ethernet-phy@0 {
		reg = <0x0>;

@@ -60,6 +60,7 @@ fman@500000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xf1000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy14: ethernet-phy@0 {
		reg = <0x0>;

@@ -60,6 +60,7 @@ fman@500000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xf3000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy15: ethernet-phy@0 {
		reg = <0x0>;

@@ -59,6 +59,7 @@ fman@500000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xe1000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy8: ethernet-phy@0 {
		reg = <0x0>;

@@ -59,6 +59,7 @@ fman@500000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xe3000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy9: ethernet-phy@0 {
		reg = <0x0>;

@@ -59,6 +59,7 @@ fman@500000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xe5000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy10: ethernet-phy@0 {
		reg = <0x0>;

@@ -59,6 +59,7 @@ fman@500000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xe7000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy11: ethernet-phy@0 {
		reg = <0x0>;

@@ -59,6 +59,7 @@ fman@500000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xe9000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy12: ethernet-phy@0 {
		reg = <0x0>;

@@ -59,6 +59,7 @@ fman@500000 {
	#size-cells = <0>;
	compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
	reg = <0xeb000 0x1000>;
	fsl,erratum-a011043; /* must ignore read errors */

	pcsphy13: ethernet-phy@0 {
		reg = <0x0>;

@@ -912,6 +912,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
}
if (!to) {
printk ("No more free channels for FS50..\n");
kfree(vcc);
return -EBUSY;
}
vcc->channo = dev->channo;

@@ -922,6 +923,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
printk ("Channel is in use for FS155.\n");
kfree(vcc);
return -EBUSY;
}
}

@@ -935,6 +937,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
tc, sizeof (struct fs_transmit_config));
if (!tc) {
fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
kfree(vcc);
return -ENOMEM;
}

@@ -344,9 +344,16 @@ static void slcan_transmit(struct work_struct *work)
*/
static void slcan_write_wakeup(struct tty_struct *tty)
{
struct slcan *sl = tty->disc_data;
struct slcan *sl;

rcu_read_lock();
sl = rcu_dereference(tty->disc_data);
if (!sl)
goto out;

schedule_work(&sl->tx_work);
out:
rcu_read_unlock();
}

/* Send a can_frame to a TTY queue. */

@@ -644,10 +651,11 @@ static void slcan_close(struct tty_struct *tty)
return;

spin_lock_bh(&sl->lock);
tty->disc_data = NULL;
rcu_assign_pointer(tty->disc_data, NULL);
sl->tty = NULL;
spin_unlock_bh(&sl->lock);

synchronize_rcu();
flush_work(&sl->tx_work);

/* Flush network side */

@@ -2164,8 +2164,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
DMA_END_ADDR);

/* Initialize Tx NAPI */
netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
NAPI_POLL_WEIGHT);
netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
NAPI_POLL_WEIGHT);
}

/* Initialize a RDMA ring */

@@ -2448,6 +2448,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)

if (!is_offload(adapter))
return -EOPNOTSUPP;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (!(adapter->flags & FULL_INIT_DONE))
return -EIO; /* need the memory controllers */
if (copy_from_user(&t, useraddr, sizeof(t)))

@@ -70,8 +70,7 @@ static void *seq_tab_start(struct seq_file *seq, loff_t *pos)
static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = seq_tab_get_idx(seq->private, *pos + 1);
if (v)
++*pos;
++(*pos);
return v;
}

@@ -678,8 +678,7 @@ static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = l2t_get_idx(seq, *pos);
if (v)
++*pos;
++(*pos);
return v;
}

@@ -110,7 +110,7 @@ do { \
/* Interface Mode Register (IF_MODE) */

#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */
#define IF_MODE_10G 0x00000000 /* 30-31 10G interface */
#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
#define IF_MODE_RGMII 0x00000004
#define IF_MODE_RGMII_AUTO 0x00008000

@@ -440,7 +440,7 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
tmp = 0;
switch (phy_if) {
case PHY_INTERFACE_MODE_XGMII:
tmp |= IF_MODE_XGMII;
tmp |= IF_MODE_10G;
break;
default:
tmp |= IF_MODE_GMII;

@@ -49,6 +49,7 @@ struct tgec_mdio_controller {
struct mdio_fsl_priv {
struct tgec_mdio_controller __iomem *mdio_base;
bool is_little_endian;
bool has_a011043;
};

static u32 xgmac_read32(void __iomem *regs,

@@ -226,7 +227,8 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return ret;

/* Return all Fs if nothing was there */
if (xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) {
if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
!priv->has_a011043) {
dev_err(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);

@@ -274,6 +276,9 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
priv->is_little_endian = of_property_read_bool(pdev->dev.of_node,
"little-endian");

priv->has_a011043 = of_property_read_bool(pdev->dev.of_node,
"fsl,erratum-a011043");

ret = of_mdiobus_register(bus, np);
if (ret) {
dev_err(&pdev->dev, "cannot register MDIO bus\n");

@@ -1113,7 +1113,7 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
*/
pba_size--;
if (pba_num_size < (((u32)pba_size * 2) + 1)) {
hw_dbg(hw, "Buffer to small for PBA data.\n");
hw_dbg(hw, "Buffer too small for PBA data.\n");
return I40E_ERR_PARAM;
}

@@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,

struct tx_sync_info {
u64 rcd_sn;
s32 sync_len;
u32 sync_len;
int nr_frags;
skb_frag_t frags[MAX_SKB_FRAGS];
};

@@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval {

static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
u32 tcp_seq, struct tx_sync_info *info)
u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
struct tls_record_info *record;
int remaining, i = 0;
unsigned long flags;
bool ends_before;

spin_lock_irqsave(&tx_ctx->lock, flags);
record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

@@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
goto out;
}

if (unlikely(tcp_seq < tls_record_start_seq(record))) {
ret = tls_record_is_start_marker(record) ?
MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
/* There are the following cases:
* 1. packet ends before start marker: bypass offload.
* 2. packet starts before start marker and ends after it: drop,
* not supported, breaks contract with kernel.
* 3. packet ends before tls record info starts: drop,
* this packet was already acknowledged and its record info
* was released.
*/
ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));

if (unlikely(tls_record_is_start_marker(record))) {
ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
goto out;
} else if (ends_before) {
ret = MLX5E_KTLS_SYNC_FAIL;
goto out;
}

@@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
u8 num_wqebbs;
int i = 0;

ret = tx_sync_info_get(priv_tx, seq, &info);
ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
stats->tls_skip_no_sync_data++;

@@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
goto err_out;
}

if (unlikely(info.sync_len < 0)) {
if (likely(datalen <= -info.sync_len))
return MLX5E_KTLS_SYNC_DONE;

stats->tls_drop_bypass_req++;
goto err_out;
}

stats->tls_ooo++;

tx_post_resync_params(sq, priv_tx, info.rcd_sn);

@@ -378,8 +383,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
if (unlikely(contig_wqebbs_room < num_wqebbs))
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);

tx_post_resync_params(sq, priv_tx, info.rcd_sn);

for (; i < info.nr_frags; i++) {
unsigned int orig_fsz, frag_offset = 0, n = 0;
skb_frag_t *f = &info.frags[i];

@@ -455,12 +458,18 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
enum mlx5e_ktls_sync_retval ret =
mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);

if (likely(ret == MLX5E_KTLS_SYNC_DONE))
switch (ret) {
case MLX5E_KTLS_SYNC_DONE:
*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
else if (ret == MLX5E_KTLS_SYNC_FAIL)
break;
case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
if (likely(!skb->decrypted))
goto out;
WARN_ON_ONCE(1);
/* fall-through */
default: /* MLX5E_KTLS_SYNC_FAIL */
goto err_out;
else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
goto out;
}
}

priv_tx->expected_seq = seq + datalen;

@@ -4036,6 +4036,13 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
u32 rate_mbps;
int err;

vport_num = rpriv->rep->vport;
if (vport_num >= MLX5_VPORT_ECPF) {
NL_SET_ERR_MSG_MOD(extack,
"Ingress rate limit is supported only for Eswitch ports connected to VFs");
return -EOPNOTSUPP;
}

esw = priv->mdev->priv.eswitch;
/* rate is given in bytes/sec.
* First convert to bits/sec and then round to the nearest mbit/secs.

@@ -4044,8 +4051,6 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
* 1 mbit/sec.
*/
rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
vport_num = rpriv->rep->vport;

err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
if (err)
NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");

@@ -1928,8 +1928,10 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
struct mlx5_vport *vport;
int i;

mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
memset(&vport->info, 0, sizeof(vport->info));
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
}
}

/* Public E-Switch API */

@@ -866,7 +866,7 @@ out:
*/
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
64 * 1024, 4 * 1024 };
64 * 1024, 128 };

static int
get_sz_from_pool(struct mlx5_eswitch *esw)

@@ -1377,7 +1377,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
return -EINVAL;
}

mlx5_eswitch_disable(esw, false);
mlx5_eswitch_disable(esw, true);
mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
if (err) {

@@ -2220,7 +2220,8 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type

int esw_offloads_enable(struct mlx5_eswitch *esw)
{
int err;
struct mlx5_vport *vport;
int err, i;

if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))

@@ -2237,6 +2238,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_vport_metadata;

/* Representor will control the vport link state */
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
if (err)
goto err_vports;

@@ -2266,7 +2271,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
{
int err, err1;

mlx5_eswitch_disable(esw, false);
mlx5_eswitch_disable(esw, true);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");

@@ -1563,6 +1563,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */
{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
{ PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
{ PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */

@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/smp.h>
#include "dr_types.h"

#define QUEUE_SIZE 128

@@ -729,7 +730,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
if (!in)
goto err_cqwq;

vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
if (err) {
kvfree(in);

@@ -352,26 +352,16 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
enum mlx5_flow_destination_type type = dst->dest_attr.type;
u32 id;

if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
err = -ENOSPC;
goto free_actions;
}

switch (type) {
case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
id = dst->dest_attr.counter_id;
if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;

tmp_action =
mlx5dr_action_create_flow_counter(id);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
actions[num_actions++] = tmp_action;
break;
switch (type) {
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
tmp_action = create_ft_action(dev, dst);
if (!tmp_action) {

@@ -397,6 +387,32 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}

if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
list_for_each_entry(dst, &fte->node.children, node.list) {
u32 id;

if (dst->dest_attr.type !=
MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;

if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
err = -ENOSPC;
goto free_actions;
}

id = dst->dest_attr.counter_id;
tmp_action =
mlx5dr_action_create_flow_counter(id);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}

fs_dr_actions[fs_dr_num_actions++] = tmp_action;
actions[num_actions++] = tmp_action;
}
}

params.match_sz = match_sz;
params.match_buf = (u64 *)fte->val;

@@ -8,6 +8,7 @@
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>

@@ -25,6 +26,7 @@ struct mlxsw_sp_acl {
struct mlxsw_sp_fid *dummy_fid;
struct rhashtable ruleset_ht;
struct list_head rules;
struct mutex rules_lock; /* Protects rules list */
struct {
struct delayed_work dw;
unsigned long interval; /* ms */

@@ -701,7 +703,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
goto err_ruleset_block_bind;
}

mutex_lock(&mlxsw_sp->acl->rules_lock);
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
mutex_unlock(&mlxsw_sp->acl->rules_lock);
block->rule_count++;
block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
return 0;

@@ -723,7 +727,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,

block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
ruleset->ht_key.block->rule_count--;
mutex_lock(&mlxsw_sp->acl->rules_lock);
list_del(&rule->list);
mutex_unlock(&mlxsw_sp->acl->rules_lock);
if (!ruleset->ht_key.chain_index &&
mlxsw_sp_acl_ruleset_is_singular(ruleset))
mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,

@@ -783,19 +789,18 @@ static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
struct mlxsw_sp_acl_rule *rule;
int err;

/* Protect internal structures from changes */
rtnl_lock();
mutex_lock(&acl->rules_lock);
list_for_each_entry(rule, &acl->rules, list) {
err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
rule);
if (err)
goto err_rule_update;
}
rtnl_unlock();
mutex_unlock(&acl->rules_lock);
return 0;

err_rule_update:
rtnl_unlock();
mutex_unlock(&acl->rules_lock);
return err;
}

@@ -880,6 +885,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
acl->dummy_fid = fid;

INIT_LIST_HEAD(&acl->rules);
mutex_init(&acl->rules_lock);
err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
if (err)
goto err_acl_ops_init;

@@ -892,6 +898,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
return 0;

err_acl_ops_init:
mutex_destroy(&acl->rules_lock);
mlxsw_sp_fid_put(fid);
err_fid_get:
rhashtable_destroy(&acl->ruleset_ht);

@@ -908,6 +915,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)

cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
mutex_destroy(&acl->rules_lock);
WARN_ON(!list_empty(&acl->rules));
mlxsw_sp_fid_put(acl->dummy_fid);
rhashtable_destroy(&acl->ruleset_ht);

@@ -64,6 +64,8 @@ static int sonic_open(struct net_device *dev)

netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);

spin_lock_init(&lp->lock);

for (i = 0; i < SONIC_NUM_RRS; i++) {
struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
if (skb == NULL) {

@@ -114,6 +116,24 @@ static int sonic_open(struct net_device *dev)
return 0;
}

/* Wait for the SONIC to become idle. */
static void sonic_quiesce(struct net_device *dev, u16 mask)
{
struct sonic_local * __maybe_unused lp = netdev_priv(dev);
int i;
u16 bits;

for (i = 0; i < 1000; ++i) {
bits = SONIC_READ(SONIC_CMD) & mask;
if (!bits)
return;
if (irqs_disabled() || in_interrupt())
udelay(20);
else
usleep_range(100, 200);
}
WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
}

/*
* Close the SONIC device

@@ -130,6 +150,9 @@ static int sonic_close(struct net_device *dev)
/*
* stop the SONIC, disable interrupts
*/
SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
sonic_quiesce(dev, SONIC_CR_ALL);

SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

@@ -169,6 +192,9 @@ static void sonic_tx_timeout(struct net_device *dev)
* put the Sonic into software-reset mode and
* disable all interrupts before releasing DMA buffers
*/
SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
sonic_quiesce(dev, SONIC_CR_ALL);

SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

@@ -206,8 +232,6 @@ static void sonic_tx_timeout(struct net_device *dev)
* wake the tx queue
* Concurrently with all of this, the SONIC is potentially writing to
* the status flags of the TDs.
* Until some mutual exclusion is added, this code will not work with SMP. However,
* MIPS Jazz machines and m68k Macs were all uni-processor machines.
*/

static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)

@@ -215,7 +239,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
struct sonic_local *lp = netdev_priv(dev);
dma_addr_t laddr;
int length;
int entry = lp->next_tx;
int entry;
unsigned long flags;

netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);

@@ -237,6 +262,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}

spin_lock_irqsave(&lp->lock, flags);

entry = lp->next_tx;

sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */

@@ -246,10 +275,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
sonic_tda_put(dev, entry, SONIC_TD_LINK,
sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);

/*
* Must set tx_skb[entry] only after clearing status, and
* before clearing EOL and before stopping queue
*/
wmb();
lp->tx_len[entry] = length;
lp->tx_laddr[entry] = laddr;

@@ -272,6 +297,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)

SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);

spin_unlock_irqrestore(&lp->lock, flags);

return NETDEV_TX_OK;
}

@@ -284,15 +311,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct sonic_local *lp = netdev_priv(dev);
int status;
unsigned long flags;

/* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
* with sonic_send_packet() so that the two functions can share state.
* Secondly, it makes sonic_interrupt() re-entrant, as that is required
* by macsonic which must use two IRQs with different priority levels.
*/
spin_lock_irqsave(&lp->lock, flags);

status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
if (!status) {
spin_unlock_irqrestore(&lp->lock, flags);

if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
return IRQ_NONE;
}

do {
SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */

if (status & SONIC_INT_PKTRX) {
netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
sonic_rx(dev); /* got packet(s) */
SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
}

if (status & SONIC_INT_TXDN) {

@@ -300,11 +340,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
int td_status;
int freed_some = 0;

/* At this point, cur_tx is the index of a TD that is one of:
* unallocated/freed (status set & tx_skb[entry] clear)
* allocated and sent (status set & tx_skb[entry] set )
* allocated and not yet sent (status clear & tx_skb[entry] set )
* still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
/* The state of a Transmit Descriptor may be inferred
* from { tx_skb[entry], td_status } as follows.
* { clear, clear } => the TD has never been used
* { set, clear } => the TD was handed to SONIC
* { set, set } => the TD was handed back
* { clear, set } => the TD is available for re-use
*/

netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);

@@ -313,18 +354,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
break;

if (td_status & 0x0001) {
if (td_status & SONIC_TCR_PTX) {
lp->stats.tx_packets++;
lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
} else {
lp->stats.tx_errors++;
if (td_status & 0x0642)
if (td_status & (SONIC_TCR_EXD |
SONIC_TCR_EXC | SONIC_TCR_BCM))
lp->stats.tx_aborted_errors++;
if (td_status & 0x0180)
if (td_status &
(SONIC_TCR_NCRS | SONIC_TCR_CRLS))
lp->stats.tx_carrier_errors++;
if (td_status & 0x0020)
if (td_status & SONIC_TCR_OWC)
lp->stats.tx_window_errors++;
if (td_status & 0x0004)
if (td_status & SONIC_TCR_FU)
lp->stats.tx_fifo_errors++;
}

@@ -346,7 +388,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
if (freed_some || lp->tx_skb[entry] == NULL)
netif_wake_queue(dev); /* The ring is no longer full */
lp->cur_tx = entry;
SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
}

/*

@@ -355,42 +396,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
if (status & SONIC_INT_RFO) {
netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
__func__);
lp->stats.rx_fifo_errors++;
SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
}
if (status & SONIC_INT_RDE) {
netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
__func__);
lp->stats.rx_dropped++;
SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
}
if (status & SONIC_INT_RBAE) {
netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
__func__);
lp->stats.rx_dropped++;
SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
}

/* counter overruns; all counters are 16bit wide */
if (status & SONIC_INT_FAE) {
if (status & SONIC_INT_FAE)
lp->stats.rx_frame_errors += 65536;
SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
}
if (status & SONIC_INT_CRC) {
if (status & SONIC_INT_CRC)
lp->stats.rx_crc_errors += 65536;
SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
}
if (status & SONIC_INT_MP) {
if (status & SONIC_INT_MP)
lp->stats.rx_missed_errors += 65536;
SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
}

/* transmit error */
if (status & SONIC_INT_TXER) {
if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
__func__);
SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
u16 tcr = SONIC_READ(SONIC_TCR);

netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
__func__, tcr);

if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
SONIC_TCR_FU | SONIC_TCR_BCM)) {
/* Aborted transmission. Try again. */
netif_stop_queue(dev);
SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
}
}

/* bus retry */

@@ -400,107 +436,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
/* ... to help debug DMA problems causing endless interrupts. */
/* Bounce the eth interface to turn on the interrupt again. */
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
}

/* load CAM done */
if (status & SONIC_INT_LCD)
SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
} while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
} while (status);

spin_unlock_irqrestore(&lp->lock, flags);

return IRQ_HANDLED;
}

/* Return the array index corresponding to a given Receive Buffer pointer. */
static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
unsigned int last)
{
unsigned int i = last;

do {
i = (i + 1) & SONIC_RRS_MASK;
if (addr == lp->rx_laddr[i])
return i;
} while (i != last);

return -ENOENT;
}

/* Allocate and map a new skb to be used as a receive buffer. */
static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
struct sk_buff **new_skb, dma_addr_t *new_addr)
{
*new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
if (!*new_skb)
return false;

if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
skb_reserve(*new_skb, 2);

*new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
SONIC_RBSIZE, DMA_FROM_DEVICE);
if (!*new_addr) {
dev_kfree_skb(*new_skb);
*new_skb = NULL;
return false;
}

return true;
}

/* Place a new receive resource in the Receive Resource Area and update RWP. */
static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
dma_addr_t old_addr, dma_addr_t new_addr)
{
unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
u32 buf;

/* The resources in the range [RRP, RWP) belong to the SONIC. This loop
* scans the other resources in the RRA, those in the range [RWP, RRP).
*/
do {
buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);

if (buf == old_addr)
break;

entry = (entry + 1) & SONIC_RRS_MASK;
} while (entry != end);

WARN_ONCE(buf != old_addr, "failed to find resource!\n");

sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);

entry = (entry + 1) & SONIC_RRS_MASK;

SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
}

/*
* We have a good packet(s), pass it/them up the network stack.
*/
static void sonic_rx(struct net_device *dev)
{
struct sonic_local *lp = netdev_priv(dev);
int status;
int entry = lp->cur_rx;
int prev_entry = lp->eol_rx;
bool rbe = false;

while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
struct sk_buff *used_skb;
struct sk_buff *new_skb;
dma_addr_t new_laddr;
u16 bufadr_l;
u16 bufadr_h;
int pkt_len;
u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);

status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
if (status & SONIC_RCR_PRX) {
/* Malloc up new buffer. */
new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
if (new_skb == NULL) {
lp->stats.rx_dropped++;
break;
}
/* provide 16 byte IP header alignment unless DMA requires otherwise */
if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
skb_reserve(new_skb, 2);
/* If the RD has LPKT set, the chip has finished with the RB */
if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
struct sk_buff *new_skb;
dma_addr_t new_laddr;
u32 addr = (sonic_rda_get(dev, entry,
SONIC_RD_PKTPTR_H) << 16) |
sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
int i = index_from_addr(lp, addr, entry);

new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
SONIC_RBSIZE, DMA_FROM_DEVICE);
if (!new_laddr) {
dev_kfree_skb(new_skb);
printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
lp->stats.rx_dropped++;
if (i < 0) {
WARN_ONCE(1, "failed to find buffer!\n");
break;
}

/* now we have a new skb to replace it, pass the used one up the stack */
dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
used_skb = lp->rx_skb[entry];
pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
skb_trim(used_skb, pkt_len);
used_skb->protocol = eth_type_trans(used_skb, dev);
netif_rx(used_skb);
lp->stats.rx_packets++;
lp->stats.rx_bytes += pkt_len;
if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
struct sk_buff *used_skb = lp->rx_skb[i];
int pkt_len;

/* and insert the new skb */
lp->rx_laddr[entry] = new_laddr;
lp->rx_skb[entry] = new_skb;
/* Pass the used buffer up the stack */
dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
DMA_FROM_DEVICE);

bufadr_l = (unsigned long)new_laddr & 0xffff;
bufadr_h = (unsigned long)new_laddr >> 16;
sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
} else {
/* This should only happen, if we enable accepting broken packets. */
lp->stats.rx_errors++;
if (status & SONIC_RCR_FAER)
lp->stats.rx_frame_errors++;
if (status & SONIC_RCR_CRCR)
lp->stats.rx_crc_errors++;
}
if (status & SONIC_RCR_LPKT) {
/*
* this was the last packet out of the current receive buffer
* give the buffer back to the SONIC
pkt_len = sonic_rda_get(dev, entry,
SONIC_RD_PKTLEN);
skb_trim(used_skb, pkt_len);
used_skb->protocol = eth_type_trans(used_skb,
dev);
netif_rx(used_skb);
lp->stats.rx_packets++;
lp->stats.rx_bytes += pkt_len;

lp->rx_skb[i] = new_skb;
lp->rx_laddr[i] = new_laddr;
} else {
/* Failed to obtain a new buffer so re-use it */
new_laddr = addr;
lp->stats.rx_dropped++;
}
/* If RBE is already asserted when RWP advances then
* it's safe to clear RBE after processing this packet.
*/
lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n",
__func__);
SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
}
} else
printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
dev->name);
rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
sonic_update_rra(dev, lp, addr, new_laddr);
}
/*
* give back the descriptor
*/
sonic_rda_put(dev, entry, SONIC_RD_LINK,
sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
lp->eol_rx = entry;
lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;

prev_entry = entry;
entry = (entry + 1) & SONIC_RDS_MASK;
}

lp->cur_rx = entry;

if (prev_entry != lp->eol_rx) {
/* Advance the EOL flag to put descriptors back into service */
sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
lp->eol_rx = prev_entry;
}

if (rbe)
SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
/*
* If any worth-while packets have been received, netif_rx()
* has done a mark_bh(NET_BH) for us and will work on them

@@ -550,6 +643,8 @@ static void sonic_multicast_list(struct net_device *dev)
(netdev_mc_count(dev) > 15)) {
rcr |= SONIC_RCR_AMC;
} else {
unsigned long flags;

netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
netdev_mc_count(dev));
sonic_set_cam_enable(dev, 1); /* always enable our own address */

@@ -563,9 +658,14 @@ static void sonic_multicast_list(struct net_device *dev)
i++;
}
SONIC_WRITE(SONIC_CDC, 16);
/* issue Load CAM command */
SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);

/* LCAM and TXP commands can't be used simultaneously */
spin_lock_irqsave(&lp->lock, flags);
sonic_quiesce(dev, SONIC_CR_TXP);
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
sonic_quiesce(dev, SONIC_CR_LCAM);
spin_unlock_irqrestore(&lp->lock, flags);
}
}

@@ -580,7 +680,6 @@ static void sonic_multicast_list(struct net_device *dev)
*/
static int sonic_init(struct net_device *dev)
{
unsigned int cmd;
struct sonic_local *lp = netdev_priv(dev);
int i;

@@ -592,12 +691,16 @@ static int sonic_init(struct net_device *dev)
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

/* While in reset mode, clear CAM Enable register */
SONIC_WRITE(SONIC_CE, 0);

/*
* clear software reset flag, disable receiver, clear and
* enable interrupts, then completely initialize the SONIC
*/
SONIC_WRITE(SONIC_CMD, 0);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
sonic_quiesce(dev, SONIC_CR_ALL);

/*
* initialize the receive resource area

@@ -615,15 +718,10 @@ static int sonic_init(struct net_device *dev)
}

/* initialize all RRA registers */
lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;

SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
SONIC_WRITE(SONIC_REA, lp->rra_end);
SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));

@@ -631,14 +729,7 @@ static int sonic_init(struct net_device *dev)
netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);

SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
i = 0;
while (i++ < 100) {
if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
break;
}

netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__,
SONIC_READ(SONIC_CMD), i);
sonic_quiesce(dev, SONIC_CR_RRRA);

/*
* Initialize the receive descriptors so that they

@@ -713,28 +804,17 @@ static int sonic_init(struct net_device *dev)
* load the CAM
*/
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);

i = 0;
while (i++ < 100) {
if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
break;
}
netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__,
SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
sonic_quiesce(dev, SONIC_CR_LCAM);

/*
* enable receiver, disable loopback
* and enable all interrupts
*/
SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);

cmd = SONIC_READ(SONIC_CMD);
if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);

netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
SONIC_READ(SONIC_CMD));

@@ -110,6 +110,9 @@
#define SONIC_CR_TXP 0x0002
#define SONIC_CR_HTX 0x0001

#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \
SONIC_CR_RXEN | SONIC_CR_TXP)

/*
* SONIC data configuration bits
*/

@@ -175,6 +178,7 @@
#define SONIC_TCR_NCRS 0x0100
#define SONIC_TCR_CRLS 0x0080
#define SONIC_TCR_EXC 0x0040
#define SONIC_TCR_OWC 0x0020
#define SONIC_TCR_PMB 0x0008
#define SONIC_TCR_FU 0x0004
#define SONIC_TCR_BCM 0x0002

@@ -274,8 +278,9 @@
#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
#define SONIC_NUM_TDS 16 /* number of transmit descriptors */

#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
#define SONIC_RRS_MASK (SONIC_NUM_RRS - 1)
#define SONIC_RDS_MASK (SONIC_NUM_RDS - 1)
#define SONIC_TDS_MASK (SONIC_NUM_TDS - 1)

#define SONIC_RBSIZE 1520 /* size of one resource buffer */

@@ -312,8 +317,6 @@ struct sonic_local {
u32 rda_laddr; /* logical DMA address of RDA */
dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */
dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */
unsigned int rra_end;
unsigned int cur_rwp;
unsigned int cur_rx;
unsigned int cur_tx; /* first unacked transmit packet */
unsigned int eol_rx;

@@ -322,6 +325,7 @@ struct sonic_local {
int msg_enable;
struct device *device; /* generic device */
struct net_device_stats stats;
spinlock_t lock;
};

#define TX_TIMEOUT (3 * HZ)

@@ -344,30 +348,30 @@ static void sonic_msg_init(struct net_device *dev);
as far as we can tell. */
/* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put()
is a much better name. */
static inline void sonic_buf_put(void* base, int bitmode,
static inline void sonic_buf_put(u16 *base, int bitmode,
int offset, __u16 val)
{
if (bitmode)
#ifdef __BIG_ENDIAN
((__u16 *) base + (offset*2))[1] = val;
__raw_writew(val, base + (offset * 2) + 1);
#else
((__u16 *) base + (offset*2))[0] = val;
__raw_writew(val, base + (offset * 2) + 0);
#endif
else
((__u16 *) base)[offset] = val;
__raw_writew(val, base + (offset * 1) + 0);
}

static inline __u16 sonic_buf_get(void* base, int bitmode,
static inline __u16 sonic_buf_get(u16 *base, int bitmode,
int offset)
{
if (bitmode)
#ifdef __BIG_ENDIAN
return ((volatile __u16 *) base + (offset*2))[1];
return __raw_readw(base + (offset * 2) + 1);
#else
return ((volatile __u16 *) base + (offset*2))[0];
return __raw_readw(base + (offset * 2) + 0);
#endif
else
return ((volatile __u16 *) base)[offset];
return __raw_readw(base + (offset * 1) + 0);
}

/* Inlines that you should actually use for reading/writing DMA buffers */

@@ -447,6 +451,22 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
(entry * SIZEOF_SONIC_RR) + offset);
}

static inline u16 sonic_rr_addr(struct net_device *dev, int entry)
{
struct sonic_local *lp = netdev_priv(dev);

return lp->rra_laddr +
entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
}

static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr)
{
struct sonic_local *lp = netdev_priv(dev);

return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR *
SONIC_BUS_SCALE(lp->dma_bitmode));
}

static const char version[] =
"sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";

@@ -2043,6 +2043,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
break;
}
entry += p_hdr->size;
cond_resched();
}
p_dev->ahw->reset.seq_index = index;
}

@@ -703,6 +703,7 @@ static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
addr += 16;
reg_read -= 16;
ret += 16;
cond_resched();
}
out:
mutex_unlock(&adapter->ahw->mem_lock);

@@ -1383,6 +1384,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
buf_offset += entry->hdr.cap_size;
entry_offset += entry->hdr.offset;
buffer = fw_dump->data + buf_offset;
cond_resched();
}

fw_dump->clr = 1;

@@ -412,9 +412,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
*mac = NULL;
}

rc = of_get_phy_mode(np, &plat->phy_interface);
if (rc)
return ERR_PTR(rc);
plat->phy_interface = device_get_phy_mode(&pdev->dev);
if (plat->phy_interface < 0)
return ERR_PTR(plat->phy_interface);

plat->interface = stmmac_of_get_mac_mode(np);
if (plat->interface < 0)

@@ -804,19 +804,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
 		return NULL;
 	}

-	if (sock->sk->sk_protocol != IPPROTO_UDP) {
+	sk = sock->sk;
+	if (sk->sk_protocol != IPPROTO_UDP ||
+	    sk->sk_type != SOCK_DGRAM ||
+	    (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
 		pr_debug("socket fd=%d not UDP\n", fd);
 		sk = ERR_PTR(-EINVAL);
 		goto out_sock;
 	}

-	lock_sock(sock->sk);
-	if (sock->sk->sk_user_data) {
+	lock_sock(sk);
+	if (sk->sk_user_data) {
 		sk = ERR_PTR(-EBUSY);
 		goto out_rel_sock;
 	}

-	sk = sock->sk;
 	sock_hold(sk);

 	tuncfg.sk_user_data = gtp;
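The gtp change tightens validation of a user-supplied file descriptor: protocol alone is not enough, since a raw or crafted socket can report IPPROTO_UDP without being a UDP datagram socket. A condensed sketch of the pattern, with the error paths elided (not the full function):

    /* never trust an fd from userspace to be the socket you expect */
    sock = sockfd_lookup(fd, &err);
    if (!sock)
            return NULL;
    sk = sock->sk;
    if (sk->sk_protocol != IPPROTO_UDP ||
        sk->sk_type != SOCK_DGRAM ||
        (sk->sk_family != AF_INET && sk->sk_family != AF_INET6))
            return ERR_PTR(-EINVAL);   /* reject before attaching state */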
@@ -452,9 +452,16 @@ static void slip_transmit(struct work_struct *work)
  */
 static void slip_write_wakeup(struct tty_struct *tty)
 {
-	struct slip *sl = tty->disc_data;
+	struct slip *sl;
+
+	rcu_read_lock();
+	sl = rcu_dereference(tty->disc_data);
+	if (!sl)
+		goto out;

 	schedule_work(&sl->tx_work);
+out:
+	rcu_read_unlock();
 }

 static void sl_tx_timeout(struct net_device *dev)
@@ -882,10 +889,11 @@ static void slip_close(struct tty_struct *tty)
 		return;

 	spin_lock_bh(&sl->lock);
-	tty->disc_data = NULL;
+	rcu_assign_pointer(tty->disc_data, NULL);
 	sl->tty = NULL;
 	spin_unlock_bh(&sl->lock);

+	synchronize_rcu();
 	flush_work(&sl->tx_work);

 	/* VSV = very important to remove timers */
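Together the two slip hunks form the standard RCU publish/retract lifecycle: the close path unpublishes the pointer, waits for all readers, then tears down; the wakeup path only dereferences under rcu_read_lock() and tolerates NULL. A condensed sketch of both sides, assuming the fields above:

    /* reader (write-wakeup path) */
    rcu_read_lock();
    sl = rcu_dereference(tty->disc_data);
    if (sl)
            schedule_work(&sl->tx_work);
    rcu_read_unlock();

    /* writer (close path) */
    rcu_assign_pointer(tty->disc_data, NULL);
    synchronize_rcu();          /* every reader that saw sl has finished */
    flush_work(&sl->tx_work);   /* safe: the work can no longer be re-queued */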
@@ -1936,6 +1936,10 @@ drop:
 		if (ret != XDP_PASS) {
 			rcu_read_unlock();
 			local_bh_enable();
+			if (frags) {
+				tfile->napi.skb = NULL;
+				mutex_unlock(&tfile->napi_mutex);
+			}
 			return total_len;
 		}
 	}
@@ -20,6 +20,7 @@
 #include <linux/mdio.h>
 #include <linux/phy.h>
 #include <net/ip6_checksum.h>
+#include <net/vxlan.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/irq.h>
@@ -3668,6 +3669,19 @@ static void lan78xx_tx_timeout(struct net_device *net)
 	tasklet_schedule(&dev->bh);
 }

+static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
+						struct net_device *netdev,
+						netdev_features_t features)
+{
+	if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
+		features &= ~NETIF_F_GSO_MASK;
+
+	features = vlan_features_check(skb, features);
+	features = vxlan_features_check(skb, features);
+
+	return features;
+}
+
 static const struct net_device_ops lan78xx_netdev_ops = {
 	.ndo_open		= lan78xx_open,
 	.ndo_stop		= lan78xx_stop,
@@ -3681,6 +3695,7 @@ static const struct net_device_ops lan78xx_netdev_ops = {
 	.ndo_set_features	= lan78xx_set_features,
 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
+	.ndo_features_check	= lan78xx_features_check,
 };

 static void lan78xx_stat_monitor(struct timer_list *t)
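The two lan78xx hunks implement per-packet feature trimming: frames too large for the chip's single-packet buffer get their GSO bits cleared, forcing software segmentation before the frame reaches the hardware. A driver-agnostic sketch of the same .ndo_features_check idea, for a hypothetical device with a hard frame-size limit (MY_DEV_MAX_FRAME is a made-up constant):

    static netdev_features_t my_features_check(struct sk_buff *skb,
                                               struct net_device *dev,
                                               netdev_features_t features)
    {
            if (skb->len > MY_DEV_MAX_FRAME)       /* hypothetical limit */
                    features &= ~NETIF_F_GSO_MASK; /* fall back to SW segmentation */
            return vlan_features_check(skb, features);
    }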
@@ -31,7 +31,7 @@
 #define NETNEXT_VERSION		"11"

 /* Information for net */
-#define NET_VERSION		"10"
+#define NET_VERSION		"11"

 #define DRIVER_VERSION		"v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -68,6 +68,7 @@
 #define PLA_LED_FEATURE		0xdd92
 #define PLA_PHYAR		0xde00
 #define PLA_BOOT_CTRL		0xe004
+#define PLA_LWAKE_CTRL_REG	0xe007
 #define PLA_GPHY_INTR_IMR	0xe022
 #define PLA_EEE_CR		0xe040
 #define PLA_EEEP_CR		0xe080
@@ -95,6 +96,7 @@
 #define PLA_TALLYCNT		0xe890
 #define PLA_SFF_STS_7		0xe8de
 #define PLA_PHYSTATUS		0xe908
+#define PLA_CONFIG6		0xe90a /* CONFIG6 */
 #define PLA_BP_BA		0xfc26
 #define PLA_BP_0		0xfc28
 #define PLA_BP_1		0xfc2a
@@ -107,6 +109,7 @@
 #define PLA_BP_EN		0xfc38

 #define USB_USB2PHY		0xb41e
+#define USB_SSPHYLINK1		0xb426
 #define USB_SSPHYLINK2		0xb428
 #define USB_U2P3_CTRL		0xb460
 #define USB_CSR_DUMMY1		0xb464
@@ -300,6 +303,9 @@
 #define LINK_ON_WAKE_EN		0x0010
 #define LINK_OFF_WAKE_EN	0x0008

+/* PLA_CONFIG6 */
+#define LANWAKE_CLR_EN		BIT(0)
+
 /* PLA_CONFIG5 */
 #define BWF_EN			0x0040
 #define MWF_EN			0x0020
@@ -312,6 +318,7 @@
 /* PLA_PHY_PWR */
 #define TX_10M_IDLE_EN		0x0080
 #define PFM_PWM_SWITCH		0x0040
+#define TEST_IO_OFF		BIT(4)

 /* PLA_MAC_PWR_CTRL */
 #define D3_CLK_GATED_EN		0x00004000
@@ -324,6 +331,7 @@
 #define MAC_CLK_SPDWN_EN	BIT(15)

 /* PLA_MAC_PWR_CTRL3 */
+#define PLA_MCU_SPDWN_EN	BIT(14)
 #define PKT_AVAIL_SPDWN_EN	0x0100
 #define SUSPEND_SPDWN_EN	0x0004
 #define U1U2_SPDWN_EN		0x0002
@@ -354,6 +362,9 @@
 /* PLA_BOOT_CTRL */
 #define AUTOLOAD_DONE		0x0002

+/* PLA_LWAKE_CTRL_REG */
+#define LANWAKE_PIN		BIT(7)
+
 /* PLA_SUSPEND_FLAG */
 #define LINK_CHG_EVENT		BIT(0)

@@ -365,13 +376,18 @@
 #define DEBUG_LTSSM		0x0082

 /* PLA_EXTRA_STATUS */
+#define CUR_LINK_OK		BIT(15)
 #define U3P3_CHECK_EN		BIT(7)	/* RTL_VER_05 only */
 #define LINK_CHANGE_FLAG	BIT(8)
+#define POLL_LINK_CHG		BIT(0)

 /* USB_USB2PHY */
 #define USB2PHY_SUSPEND		0x0001
 #define USB2PHY_L1		0x0002

+/* USB_SSPHYLINK1 */
+#define DELAY_PHY_PWR_CHG	BIT(1)
+
 /* USB_SSPHYLINK2 */
 #define pwd_dn_scale_mask	0x3ffe
 #define pwd_dn_scale(x)		((x) << 1)
@@ -2863,6 +2879,17 @@ static int rtl8153_enable(struct r8152 *tp)
 	r8153_set_rx_early_timeout(tp);
 	r8153_set_rx_early_size(tp);

+	if (tp->version == RTL_VER_09) {
+		u32 ocp_data;
+
+		ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
+		ocp_data &= ~FC_PATCH_TASK;
+		ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+		usleep_range(1000, 2000);
+		ocp_data |= FC_PATCH_TASK;
+		ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+	}
+
 	return rtl_enable(tp);
 }
@@ -3376,8 +3403,8 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable)
 		r8153b_ups_en(tp, false);
 		r8153_queue_wake(tp, false);
 		rtl_runtime_suspend_enable(tp, false);
-		r8153_u2p3en(tp, true);
-		r8153b_u1u2en(tp, true);
+		if (tp->udev->speed != USB_SPEED_HIGH)
+			r8153b_u1u2en(tp, true);
 	}
 }
@@ -4675,7 +4702,6 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)

 	r8153_aldps_en(tp, true);
 	r8152b_enable_fc(tp);
-	r8153_u2p3en(tp, true);

 	set_bit(PHY_RESET, &tp->flags);
 }
@@ -4954,6 +4980,8 @@ static void rtl8152_down(struct r8152 *tp)

 static void rtl8153_up(struct r8152 *tp)
 {
+	u32 ocp_data;
+
 	if (test_bit(RTL8152_UNPLUG, &tp->flags))
 		return;

@@ -4961,6 +4989,19 @@ static void rtl8153_up(struct r8152 *tp)
 	r8153_u2p3en(tp, false);
 	r8153_aldps_en(tp, false);
 	r8153_first_init(tp);

+	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+	ocp_data |= LANWAKE_CLR_EN;
+	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
+	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
+	ocp_data &= ~LANWAKE_PIN;
+	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1);
+	ocp_data &= ~DELAY_PHY_PWR_CHG;
+	ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1, ocp_data);
+
 	r8153_aldps_en(tp, true);

 	switch (tp->version) {
@@ -4979,11 +5020,17 @@ static void rtl8153_up(struct r8152 *tp)

 static void rtl8153_down(struct r8152 *tp)
 {
+	u32 ocp_data;
+
 	if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
 		rtl_drop_queued_tx(tp);
 		return;
 	}

+	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+	ocp_data &= ~LANWAKE_CLR_EN;
+	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
 	r8153_u1u2en(tp, false);
 	r8153_u2p3en(tp, false);
 	r8153_power_cut_en(tp, false);
@@ -4994,6 +5041,8 @@ static void rtl8153_down(struct r8152 *tp)

 static void rtl8153b_up(struct r8152 *tp)
 {
+	u32 ocp_data;
+
 	if (test_bit(RTL8152_UNPLUG, &tp->flags))
 		return;

@@ -5004,18 +5053,29 @@ static void rtl8153b_up(struct r8152 *tp)
 	r8153_first_init(tp);
 	ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B);

+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+	ocp_data &= ~PLA_MCU_SPDWN_EN;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
 	r8153_aldps_en(tp, true);
-	r8153_u2p3en(tp, true);
-	r8153b_u1u2en(tp, true);
+
+	if (tp->udev->speed != USB_SPEED_HIGH)
+		r8153b_u1u2en(tp, true);
 }

 static void rtl8153b_down(struct r8152 *tp)
 {
+	u32 ocp_data;
+
 	if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
 		rtl_drop_queued_tx(tp);
 		return;
 	}

+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+	ocp_data |= PLA_MCU_SPDWN_EN;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
 	r8153b_u1u2en(tp, false);
 	r8153_u2p3en(tp, false);
 	r8153b_power_cut_en(tp, false);
@@ -5387,6 +5447,16 @@ static void r8153_init(struct r8152 *tp)
 		else
 			ocp_data |= DYNAMIC_BURST;
 		ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
+
+		r8153_queue_wake(tp, false);
+
+		ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+		if (rtl8152_get_speed(tp) & LINK_STATUS)
+			ocp_data |= CUR_LINK_OK;
+		else
+			ocp_data &= ~CUR_LINK_OK;
+		ocp_data |= POLL_LINK_CHG;
+		ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
 	}

 	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2);
@@ -5416,10 +5486,19 @@ static void r8153_init(struct r8152 *tp)
 	ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001);

 	r8153_power_cut_en(tp, false);
 	rtl_runtime_suspend_enable(tp, false);
 	r8153_u1u2en(tp, true);
+	r8153_mac_clk_spd(tp, false);
 	usb_enable_lpm(tp->udev);

+	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+	ocp_data |= LANWAKE_CLR_EN;
+	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
+	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
+	ocp_data &= ~LANWAKE_PIN;
+	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
+
 	/* rx aggregation */
 	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
 	ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
@@ -5484,7 +5563,17 @@ static void r8153b_init(struct r8152 *tp)
 	r8153b_ups_en(tp, false);
 	r8153_queue_wake(tp, false);
 	rtl_runtime_suspend_enable(tp, false);
-	r8153b_u1u2en(tp, true);
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+	if (rtl8152_get_speed(tp) & LINK_STATUS)
+		ocp_data |= CUR_LINK_OK;
+	else
+		ocp_data &= ~CUR_LINK_OK;
+	ocp_data |= POLL_LINK_CHG;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
+
+	if (tp->udev->speed != USB_SPEED_HIGH)
+		r8153b_u1u2en(tp, true);
 	usb_enable_lpm(tp->udev);

 	/* MAC clock speed down */
@@ -5492,6 +5581,19 @@ static void r8153b_init(struct r8152 *tp)
 	ocp_data |= MAC_CLK_SPDWN_EN;
 	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data);

+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+	ocp_data &= ~PLA_MCU_SPDWN_EN;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
+	if (tp->version == RTL_VER_09) {
+		/* Disable Test IO for 32QFN */
+		if (ocp_read_byte(tp, MCU_TYPE_PLA, 0xdc00) & BIT(5)) {
+			ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
+			ocp_data |= TEST_IO_OFF;
+			ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
+		}
+	}
+
 	set_bit(GREEN_ETHERNET, &tp->flags);

 	/* rx aggregation */
@@ -6707,6 +6809,11 @@ static int rtl8152_probe(struct usb_interface *intf,

 	intf->needs_remote_wakeup = 1;

+	if (!rtl_can_wakeup(tp))
+		__rtl_set_wol(tp, 0);
+	else
+		tp->saved_wolopts = __rtl_get_wol(tp);
+
 	tp->rtl_ops.init(tp);
 #if IS_BUILTIN(CONFIG_USB_RTL8152)
 	/* Retry in case request_firmware() is not ready yet. */
@@ -6724,10 +6831,6 @@ static int rtl8152_probe(struct usb_interface *intf,
 		goto out1;
 	}

-	if (!rtl_can_wakeup(tp))
-		__rtl_set_wol(tp, 0);
-
-	tp->saved_wolopts = __rtl_get_wol(tp);
 	if (tp->saved_wolopts)
 		device_set_wakeup_enable(&udev->dev, true);
 	else
@@ -7790,16 +7790,8 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
 	case AIROGVLIST:    ridcode = RID_APLIST;       break;
 	case AIROGDRVNAM:   ridcode = RID_DRVNAME;      break;
 	case AIROGEHTENC:   ridcode = RID_ETHERENCAP;   break;
-	case AIROGWEPKTMP:  ridcode = RID_WEP_TEMP;
-		/* Only super-user can read WEP keys */
-		if (!capable(CAP_NET_ADMIN))
-			return -EPERM;
-		break;
-	case AIROGWEPKNV:   ridcode = RID_WEP_PERM;
-		/* Only super-user can read WEP keys */
-		if (!capable(CAP_NET_ADMIN))
-			return -EPERM;
-		break;
+	case AIROGWEPKTMP:  ridcode = RID_WEP_TEMP;     break;
+	case AIROGWEPKNV:   ridcode = RID_WEP_PERM;     break;
 	case AIROGSTAT:     ridcode = RID_STATUS;       break;
 	case AIROGSTATSD32: ridcode = RID_STATSDELTA;   break;
 	case AIROGSTATSC32: ridcode = RID_STATS;        break;
@@ -7813,7 +7805,13 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
 		return -EINVAL;
 	}

+	if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) {
+		/* Only super-user can read WEP keys */
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+	}
+
 	if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL)
 		return -ENOMEM;

 	PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1);
@@ -267,7 +267,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct iwl_station_priv *sta_priv = NULL;
 	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	struct iwl_device_cmd *dev_cmd;
+	struct iwl_device_tx_cmd *dev_cmd;
 	struct iwl_tx_cmd *tx_cmd;
 	__le16 fc;
 	u8 hdr_len;
@@ -348,7 +348,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
 	if (unlikely(!dev_cmd))
 		goto drop_unlock_priv;

-	memset(dev_cmd, 0, sizeof(*dev_cmd));
 	dev_cmd->hdr.cmd = REPLY_TX;
 	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;

@@ -357,8 +357,8 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
 {
 	union acpi_object *wifi_pkg, *data;
 	bool enabled;
-	int i, n_profiles, tbl_rev;
-	int ret = 0;
+	int i, n_profiles, tbl_rev, pos;
+	int ret = 0;

 	data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
 	if (IS_ERR(data))
@@ -390,10 +390,10 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
 		goto out_free;
 	}

-	for (i = 0; i < n_profiles; i++) {
-		/* the tables start at element 3 */
-		int pos = 3;
+	/* the tables start at element 3 */
+	pos = 3;

+	for (i = 0; i < n_profiles; i++) {
 		/* The EWRD profiles officially go from 2 to 4, but we
 		 * save them in sar_profiles[1-3] (because we don't
 		 * have profile 0). So in the array we start from 1.
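The EWRD fix is a classic loop-invariant cursor bug: the old code re-declared pos inside the loop, so the cursor was reset to 3 on every pass and each profile re-read the first profile's table. A minimal standalone sketch of the bug class (read_one_profile is a hypothetical helper):

    #include <stdio.h>

    static int read_one_profile(int pos, int i)
    {
        printf("profile %d read at element %d\n", i, pos);
        return 5;                        /* pretend each profile spans 5 elements */
    }

    int main(void)
    {
        int pos = 3;                     /* tables start at element 3 */
        for (int i = 0; i < 3; i++) {
            /* old code effectively re-ran "int pos = 3;" right here */
            pos += read_one_profile(pos, i);
        }
        return 0;
    }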
@@ -2669,12 +2669,7 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
 {
 	int ret = 0;

-	/* if the FW crashed or not debug monitor cfg was given, there is
-	 * no point in changing the recording state
-	 */
-	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) ||
-	    (!fwrt->trans->dbg.dest_tlv &&
-	     fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID))
+	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
 		return 0;

 	if (fw_has_capa(&fwrt->fw->ucode_capa,
@@ -379,7 +379,7 @@ enum {


 /* CSR GIO */
-#define CSR_GIO_REG_VAL_L0S_ENABLED	(0x00000002)
+#define CSR_GIO_REG_VAL_L0S_DISABLED	(0x00000002)

 /*
  * UCODE-DRIVER GP (general purpose) mailbox register 1
@@ -480,7 +480,14 @@ static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
 	if (!frag || frag->size || !pages)
 		return -EIO;

-	while (pages) {
+	/*
+	 * We try to allocate as many pages as we can, starting with
+	 * the requested amount and going down until we can allocate
+	 * something. Because of DIV_ROUND_UP(), pages will never go
+	 * down to 0 and stop the loop, so stop when pages reaches 1,
+	 * which is too small anyway.
+	 */
+	while (pages > 1) {
 		block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
 					   &physical,
 					   GFP_KERNEL | __GFP_NOWARN);
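A hedged sketch of the shrink-and-retry allocation loop this bound protects. The halving step assumes DIV_ROUND_UP as named in the comment above; since DIV_ROUND_UP(1, 2) == 1, a "while (pages)" condition could never terminate on repeated failure, which is exactly what the new "pages > 1" bound fixes:

    while (pages > 1) {
            block = dma_alloc_coherent(dev, pages * PAGE_SIZE, &phys,
                                       GFP_KERNEL | __GFP_NOWARN);
            if (block)
                    break;                       /* got a contiguous block */
            pages = DIV_ROUND_UP(pages, 2);      /* retry with half the size */
    }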
@@ -1817,9 +1817,6 @@ MODULE_PARM_DESC(antenna_coupling,
 module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444);
 MODULE_PARM_DESC(nvm_file, "NVM file name");

-module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, bool, 0444);
-MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
-
 module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
 MODULE_PARM_DESC(uapsd_disable,
 		 "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
@@ -115,7 +115,6 @@ enum iwl_uapsd_disable {
 * @nvm_file: specifies a external NVM file
 * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
 *	IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
- * @lar_disable: disable LAR (regulatory), default = 0
 * @fw_monitor: allow to use firmware monitor
 * @disable_11ac: disable VHT capabilities, default = false.
 * @remove_when_gone: remove an inaccessible device from the PCIe bus.
@@ -136,7 +135,6 @@ struct iwl_mod_params {
 	int antenna_coupling;
 	char *nvm_file;
 	u32 uapsd_disable;
-	bool lar_disable;
 	bool fw_monitor;
 	bool disable_11ac;
 	/**
@@ -224,6 +224,34 @@ enum iwl_nvm_channel_flags {
 	NVM_CHANNEL_DC_HIGH		= BIT(12),
 };

+/**
+ * enum iwl_reg_capa_flags - global flags applied for the whole regulatory
+ * domain.
+ * @REG_CAPA_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the
+ *	2.4Ghz band is allowed.
+ * @REG_CAPA_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the
+ *	5Ghz band is allowed.
+ * @REG_CAPA_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed
+ *	for this regulatory domain (valid only in 5Ghz).
+ * @REG_CAPA_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed
+ *	for this regulatory domain (valid only in 5Ghz).
+ * @REG_CAPA_MCS_8_ALLOWED: 11ac with MCS 8 is allowed.
+ * @REG_CAPA_MCS_9_ALLOWED: 11ac with MCS 9 is allowed.
+ * @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40Mhz is forbidden
+ *	for this regulatory domain (valid only in 5Ghz).
+ * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed.
+ */
+enum iwl_reg_capa_flags {
+	REG_CAPA_BF_CCD_LOW_BAND	= BIT(0),
+	REG_CAPA_BF_CCD_HIGH_BAND	= BIT(1),
+	REG_CAPA_160MHZ_ALLOWED		= BIT(2),
+	REG_CAPA_80MHZ_ALLOWED		= BIT(3),
+	REG_CAPA_MCS_8_ALLOWED		= BIT(4),
+	REG_CAPA_MCS_9_ALLOWED		= BIT(5),
+	REG_CAPA_40MHZ_FORBIDDEN	= BIT(7),
+	REG_CAPA_DC_HIGH_ENABLED	= BIT(9),
+};
+
 static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
 					       int chan, u32 flags)
 {
@@ -939,10 +967,11 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,

 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+		   const struct iwl_fw *fw,
 		   const __be16 *nvm_hw, const __le16 *nvm_sw,
 		   const __le16 *nvm_calib, const __le16 *regulatory,
 		   const __le16 *mac_override, const __le16 *phy_sku,
-		   u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
+		   u8 tx_chains, u8 rx_chains)
 {
 	struct iwl_nvm_data *data;
 	bool lar_enabled;
@@ -1022,7 +1051,8 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 		return NULL;
 	}

-	if (lar_fw_supported && lar_enabled)
+	if (lar_enabled &&
+	    fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT))
 		sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;

 	if (iwl_nvm_no_wide_in_5ghz(trans, cfg, nvm_hw))
@@ -1038,6 +1068,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);

 static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
 				       int ch_idx, u16 nvm_flags,
+				       u16 cap_flags,
 				       const struct iwl_cfg *cfg)
 {
 	u32 flags = NL80211_RRF_NO_HT40;
@@ -1076,13 +1107,27 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
 	    (flags & NL80211_RRF_NO_IR))
 		flags |= NL80211_RRF_GO_CONCURRENT;

+	/*
+	 * cap_flags is per regulatory domain so apply it for every channel
+	 */
+	if (ch_idx >= NUM_2GHZ_CHANNELS) {
+		if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN)
+			flags |= NL80211_RRF_NO_HT40;
+
+		if (!(cap_flags & REG_CAPA_80MHZ_ALLOWED))
+			flags |= NL80211_RRF_NO_80MHZ;
+
+		if (!(cap_flags & REG_CAPA_160MHZ_ALLOWED))
+			flags |= NL80211_RRF_NO_160MHZ;
+	}
+
 	return flags;
 }

 struct ieee80211_regdomain *
 iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 		       int num_of_ch, __le32 *channels, u16 fw_mcc,
-		       u16 geo_info)
+		       u16 geo_info, u16 cap)
 {
 	int ch_idx;
 	u16 ch_flags;
@@ -1140,7 +1185,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 		}

 		reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
-							     ch_flags, cfg);
+							     ch_flags, cap,
+							     cfg);

 		/* we can't continue the same rule */
 		if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
@@ -1405,9 +1451,6 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
 		.id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
 	};
 	int ret;
-	bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
-				fw_has_capa(&fw->ucode_capa,
-					    IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 	bool empty_otp;
 	u32 mac_flags;
 	u32 sbands_flags = 0;
@@ -1485,7 +1528,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
 	nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
 	nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);

-	if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
+	if (le32_to_cpu(rsp->regulatory.lar_enabled) &&
+	    fw_has_capa(&fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) {
 		nvm->lar_enabled = true;
 		sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
 	}
@@ -7,7 +7,7 @@
 *
 * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018        Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018        Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -85,10 +85,11 @@ enum iwl_nvm_sbands_flags {
  */
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+		   const struct iwl_fw *fw,
 		   const __be16 *nvm_hw, const __le16 *nvm_sw,
 		   const __le16 *nvm_calib, const __le16 *regulatory,
 		   const __le16 *mac_override, const __le16 *phy_sku,
-		   u8 tx_chains, u8 rx_chains, bool lar_fw_supported);
+		   u8 tx_chains, u8 rx_chains);

 /**
  * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
@@ -103,7 +104,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 struct ieee80211_regdomain *
 iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 		       int num_of_ch, __le32 *channels, u16 fw_mcc,
-		       u16 geo_info);
+		       u16 geo_info, u16 cap);

 /**
  * struct iwl_nvm_section - describes an NVM section in memory.
@@ -66,7 +66,9 @@

 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
 				  struct device *dev,
-				  const struct iwl_trans_ops *ops)
+				  const struct iwl_trans_ops *ops,
+				  unsigned int cmd_pool_size,
+				  unsigned int cmd_pool_align)
 {
 	struct iwl_trans *trans;
 #ifdef CONFIG_LOCKDEP
@@ -90,10 +92,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
 		 "iwl_cmd_pool:%s", dev_name(trans->dev));
 	trans->dev_cmd_pool =
 		kmem_cache_create(trans->dev_cmd_pool_name,
-				  sizeof(struct iwl_device_cmd),
-				  sizeof(void *),
-				  SLAB_HWCACHE_ALIGN,
-				  NULL);
+				  cmd_pool_size, cmd_pool_align,
+				  SLAB_HWCACHE_ALIGN, NULL);
 	if (!trans->dev_cmd_pool)
 		return NULL;

@@ -193,6 +193,18 @@ struct iwl_device_cmd {
 	};
 } __packed;

+/**
+ * struct iwl_device_tx_cmd - buffer for TX command
+ * @hdr: the header
+ * @payload: the payload placeholder
+ *
+ * The actual structure is sized dynamically according to need.
+ */
+struct iwl_device_tx_cmd {
+	struct iwl_cmd_header hdr;
+	u8 payload[];
+} __packed;
+
 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

 /*
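With a flexible array member, sizeof() no longer describes a full command; capacity becomes a property of the kmem_cache pool that hands the objects out (see the cmd_pool_size/cmd_pool_align parameters added above). A tiny standalone illustration of that point:

    #include <stdio.h>

    struct tx_cmd {
        unsigned char cmd;
        unsigned char payload[];   /* flexible array member: no size in the type */
    };

    int main(void)
    {
        /* sizeof() ignores the payload; real capacity comes from the allocator */
        printf("sizeof(struct tx_cmd) = %zu\n", sizeof(struct tx_cmd));
        return 0;
    }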
@@ -544,7 +556,7 @@ struct iwl_trans_ops {
 	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

 	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
-		  struct iwl_device_cmd *dev_cmd, int queue);
+		  struct iwl_device_tx_cmd *dev_cmd, int queue);
 	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
 			struct sk_buff_head *skbs);

@@ -948,22 +960,22 @@ iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
 	return trans->ops->dump_data(trans, dump_mask);
 }

-static inline struct iwl_device_cmd *
+static inline struct iwl_device_tx_cmd *
 iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
 {
-	return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
+	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
 }

 int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

 static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
-					 struct iwl_device_cmd *dev_cmd)
+					 struct iwl_device_tx_cmd *dev_cmd)
 {
 	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
 }

 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
-			       struct iwl_device_cmd *dev_cmd, int queue)
+			       struct iwl_device_tx_cmd *dev_cmd, int queue)
 {
 	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
 		return -EIO;
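Switching to kmem_cache_zalloc() moves zeroing into the allocator, which is what lets the per-call memset()s elsewhere in this series (iwlagn_tx_skb, iwl_mvm_set_tx_params) be dropped. A sketch of the before/after:

    /* new: the pool returns a zeroed object of the pool's size */
    cmd = kmem_cache_zalloc(pool, GFP_ATOMIC);

    /* old pattern the series removes:
     *   cmd = kmem_cache_alloc(pool, GFP_ATOMIC);
     *   memset(cmd, 0, sizeof(*cmd));   // easy to get the size wrong
     */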
@@ -1271,7 +1283,9 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
 *****************************************************/
 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
 				  struct device *dev,
-				  const struct iwl_trans_ops *ops);
+				  const struct iwl_trans_ops *ops,
+				  unsigned int cmd_pool_size,
+				  unsigned int cmd_pool_align);
 void iwl_trans_free(struct iwl_trans *trans);

 /*****************************************************
@@ -154,5 +154,6 @@
 #define IWL_MVM_D3_DEBUG		false
 #define IWL_MVM_USE_TWT			false
 #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
+#define IWL_MVM_USE_NSSN_SYNC		0

 #endif /* __MVM_CONSTANTS_H */
@@ -841,9 +841,13 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
 		return 0;
 	}

+	if (!mvm->fwrt.ppag_table.enabled) {
+		IWL_DEBUG_RADIO(mvm,
+				"PPAG not enabled, command not sent.\n");
+		return 0;
+	}
+
 	IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
-	IWL_DEBUG_RADIO(mvm, "PPAG is %s\n",
-			mvm->fwrt.ppag_table.enabled ? "enabled" : "disabled");

 	for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
 		for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
@@ -256,7 +256,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
 				      __le32_to_cpu(resp->n_channels),
 				      resp->channels,
 				      __le16_to_cpu(resp->mcc),
-				      __le16_to_cpu(resp->geo_info));
+				      __le16_to_cpu(resp->geo_info),
+				      __le16_to_cpu(resp->cap));
 	/* Store the return source id */
 	src_id = resp->source_id;
 	kfree(resp);
@@ -754,6 +755,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	return ret;
 }

+static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+			   struct ieee80211_sta *sta)
+{
+	if (likely(sta)) {
+		if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0))
+			return;
+	} else {
+		if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0))
+			return;
+	}
+
+	ieee80211_free_txskb(mvm->hw, skb);
+}
+
 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
 			   struct ieee80211_tx_control *control,
 			   struct sk_buff *skb)
@@ -797,14 +812,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
 		}
 	}

-	if (sta) {
-		if (iwl_mvm_tx_skb(mvm, skb, sta))
-			goto drop;
-		return;
-	}
-
-	if (iwl_mvm_tx_skb_non_sta(mvm, skb))
-		goto drop;
+	iwl_mvm_tx_skb(mvm, skb, sta);
 	return;
 drop:
 	ieee80211_free_txskb(hw, skb);
@@ -854,10 +862,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
 			break;
 		}

-		if (!txq->sta)
-			iwl_mvm_tx_skb_non_sta(mvm, skb);
-		else
-			iwl_mvm_tx_skb(mvm, skb, txq->sta);
+		iwl_mvm_tx_skb(mvm, skb, txq->sta);
 		}
 	} while (atomic_dec_return(&mvmtxq->tx_request));
 	rcu_read_unlock();
@@ -4771,6 +4776,125 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
 	return ret;
 }

+static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
+{
+	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+	case RATE_MCS_CHAN_WIDTH_20:
+		rinfo->bw = RATE_INFO_BW_20;
+		break;
+	case RATE_MCS_CHAN_WIDTH_40:
+		rinfo->bw = RATE_INFO_BW_40;
+		break;
+	case RATE_MCS_CHAN_WIDTH_80:
+		rinfo->bw = RATE_INFO_BW_80;
+		break;
+	case RATE_MCS_CHAN_WIDTH_160:
+		rinfo->bw = RATE_INFO_BW_160;
+		break;
+	}
+
+	if (rate_n_flags & RATE_MCS_HT_MSK) {
+		rinfo->flags |= RATE_INFO_FLAGS_MCS;
+		rinfo->mcs = u32_get_bits(rate_n_flags, RATE_HT_MCS_INDEX_MSK);
+		rinfo->nss = u32_get_bits(rate_n_flags,
+					  RATE_HT_MCS_NSS_MSK) + 1;
+		if (rate_n_flags & RATE_MCS_SGI_MSK)
+			rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+		rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+		rinfo->mcs = u32_get_bits(rate_n_flags,
+					  RATE_VHT_MCS_RATE_CODE_MSK);
+		rinfo->nss = u32_get_bits(rate_n_flags,
+					  RATE_VHT_MCS_NSS_MSK) + 1;
+		if (rate_n_flags & RATE_MCS_SGI_MSK)
+			rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+	} else if (rate_n_flags & RATE_MCS_HE_MSK) {
+		u32 gi_ltf = u32_get_bits(rate_n_flags,
+					  RATE_MCS_HE_GI_LTF_MSK);
+
+		rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
+		rinfo->mcs = u32_get_bits(rate_n_flags,
+					  RATE_VHT_MCS_RATE_CODE_MSK);
+		rinfo->nss = u32_get_bits(rate_n_flags,
+					  RATE_VHT_MCS_NSS_MSK) + 1;
+
+		if (rate_n_flags & RATE_MCS_HE_106T_MSK) {
+			rinfo->bw = RATE_INFO_BW_HE_RU;
+			rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+		}
+
+		switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) {
+		case RATE_MCS_HE_TYPE_SU:
+		case RATE_MCS_HE_TYPE_EXT_SU:
+			if (gi_ltf == 0 || gi_ltf == 1)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+			else if (gi_ltf == 2)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+			else if (rate_n_flags & RATE_MCS_SGI_MSK)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+			else
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+			break;
+		case RATE_MCS_HE_TYPE_MU:
+			if (gi_ltf == 0 || gi_ltf == 1)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+			else if (gi_ltf == 2)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+			else
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+			break;
+		case RATE_MCS_HE_TYPE_TRIG:
+			if (gi_ltf == 0 || gi_ltf == 1)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+			else
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+			break;
+		}
+
+		if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
+			rinfo->he_dcm = 1;
+	} else {
+		switch (u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK)) {
+		case IWL_RATE_1M_PLCP:
+			rinfo->legacy = 10;
+			break;
+		case IWL_RATE_2M_PLCP:
+			rinfo->legacy = 20;
+			break;
+		case IWL_RATE_5M_PLCP:
+			rinfo->legacy = 55;
+			break;
+		case IWL_RATE_11M_PLCP:
+			rinfo->legacy = 110;
+			break;
+		case IWL_RATE_6M_PLCP:
+			rinfo->legacy = 60;
+			break;
+		case IWL_RATE_9M_PLCP:
+			rinfo->legacy = 90;
+			break;
+		case IWL_RATE_12M_PLCP:
+			rinfo->legacy = 120;
+			break;
+		case IWL_RATE_18M_PLCP:
+			rinfo->legacy = 180;
+			break;
+		case IWL_RATE_24M_PLCP:
+			rinfo->legacy = 240;
+			break;
+		case IWL_RATE_36M_PLCP:
+			rinfo->legacy = 360;
+			break;
+		case IWL_RATE_48M_PLCP:
+			rinfo->legacy = 480;
+			break;
+		case IWL_RATE_54M_PLCP:
+			rinfo->legacy = 540;
+			break;
+		}
+	}
+}
+
 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
 				       struct ieee80211_vif *vif,
 				       struct ieee80211_sta *sta,
@@ -4785,6 +4909,13 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
 	}

+	if (iwl_mvm_has_tlc_offload(mvm)) {
+		struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+
+		iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate);
+		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+	}
+
 	/* if beacon filtering isn't on mac80211 does it anyway */
 	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
 		return;
@@ -1298,9 +1298,6 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
 	bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
 				   IWL_UCODE_TLV_CAPA_LAR_SUPPORT);

-	if (iwlwifi_mod_params.lar_disable)
-		return false;
-
 	/*
 	 * Enable LAR only if it is supported by the FW (TLV) &&
 	 * enabled in the NVM
@@ -1508,8 +1505,8 @@ int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
 int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
 					     u16 len, const void *data,
 					     u32 *status);
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
-		   struct ieee80211_sta *sta);
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+		       struct ieee80211_sta *sta);
 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
 void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 			struct iwl_tx_cmd *tx_cmd,
@@ -277,11 +277,10 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
 	struct iwl_nvm_section *sections = mvm->nvm_sections;
 	const __be16 *hw;
 	const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
-	bool lar_enabled;
 	int regulatory_type;

 	/* Checking for required sections */
-	if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
+	if (mvm->trans->cfg->nvm_type == IWL_NVM) {
 		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
 		    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
 			IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
@@ -327,14 +326,9 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
 		(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
 		(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;

-	lar_enabled = !iwlwifi_mod_params.lar_disable &&
-		      fw_has_capa(&mvm->fw->ucode_capa,
-				  IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
-
-	return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib,
+	return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib,
 				  regulatory, mac_override, phy_sku,
-				  mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
-				  lar_enabled);
+				  mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant);
 }

 /* Loads the NVM data stored in mvm->nvm_sections into the NIC */
@@ -514,14 +514,17 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)

 static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
 {
-	struct iwl_mvm_rss_sync_notif notif = {
-		.metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
-		.metadata.sync = 0,
-		.nssn_sync.baid = baid,
-		.nssn_sync.nssn = nssn,
-	};
+	if (IWL_MVM_USE_NSSN_SYNC) {
+		struct iwl_mvm_rss_sync_notif notif = {
+			.metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
+			.metadata.sync = 0,
+			.nssn_sync.baid = baid,
+			.nssn_sync.nssn = nssn,
+		};

-	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
+		iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif,
+						sizeof(notif));
+	}
 }

 #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
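Gating on the IWL_MVM_USE_NSSN_SYNC constant (defined as 0 above) keeps the disabled block compiled and type-checked while the optimizer discards it, unlike an #ifdef which hides the code from the compiler entirely. A minimal sketch of the idiom (do_feature is hypothetical):

    #define USE_FEATURE 0

    if (USE_FEATURE) {          /* always parsed and type-checked */
            do_feature();       /* dead code, elided when the constant is 0 */
    }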
@@ -1213,7 +1213,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
 		cmd_size = sizeof(struct iwl_scan_config_v2);
 	else
 		cmd_size = sizeof(struct iwl_scan_config_v1);
-	cmd_size += num_channels;
+	cmd_size += mvm->fw->ucode_capa.n_scan_channels;

 	cfg = kzalloc(cmd_size, GFP_KERNEL);
 	if (!cfg)
@@ -490,13 +490,13 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
 /*
  * Allocates and sets the Tx cmd the driver data pointers in the skb
  */
-static struct iwl_device_cmd *
+static struct iwl_device_tx_cmd *
 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 		      struct ieee80211_tx_info *info, int hdrlen,
 		      struct ieee80211_sta *sta, u8 sta_id)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct iwl_device_cmd *dev_cmd;
+	struct iwl_device_tx_cmd *dev_cmd;
 	struct iwl_tx_cmd *tx_cmd;

 	dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
@@ -504,11 +504,6 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 	if (unlikely(!dev_cmd))
 		return NULL;

-	/* Make sure we zero enough of dev_cmd */
-	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
-	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
-
-	memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
 	dev_cmd->hdr.cmd = TX_CMD;

 	if (iwl_mvm_has_new_tx_api(mvm)) {
@@ -597,7 +592,7 @@ out:
 }

 static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
-				       struct iwl_device_cmd *cmd)
+				       struct iwl_device_tx_cmd *cmd)
 {
 	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);

@@ -716,7 +711,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info info;
-	struct iwl_device_cmd *dev_cmd;
+	struct iwl_device_tx_cmd *dev_cmd;
 	u8 sta_id;
 	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 	__le16 fc = hdr->frame_control;
@@ -1078,7 +1073,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct iwl_mvm_sta *mvmsta;
-	struct iwl_device_cmd *dev_cmd;
+	struct iwl_device_tx_cmd *dev_cmd;
 	__le16 fc;
 	u16 seq_number = 0;
 	u8 tid = IWL_MAX_TID_COUNT;
@@ -1154,7 +1149,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 	if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
 		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
 		spin_unlock(&mvmsta->lock);
-		return 0;
+		return -1;
 	}

 	if (!iwl_mvm_has_new_tx_api(mvm)) {
@@ -1206,8 +1201,8 @@ drop:
 	return -1;
 }

-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
-		   struct ieee80211_sta *sta)
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+		       struct ieee80211_sta *sta)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct ieee80211_tx_info info;
@@ -57,6 +57,42 @@
 #include "internal.h"
 #include "iwl-prph.h"

+static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+						     size_t size,
+						     dma_addr_t *phys,
+						     int depth)
+{
+	void *result;
+
+	if (WARN(depth > 2,
+		 "failed to allocate DMA memory not crossing 2^32 boundary"))
+		return NULL;
+
+	result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);
+
+	if (!result)
+		return NULL;
+
+	if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) {
+		void *old = result;
+		dma_addr_t oldphys = *phys;
+
+		result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size,
+								phys,
+								depth + 1);
+		dma_free_coherent(trans->dev, size, old, oldphys);
+	}
+
+	return result;
+}
+
+static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+						   size_t size,
+						   dma_addr_t *phys)
+{
+	return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
+}
+
 void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
 {
 	struct iwl_self_init_dram *dram = &trans->init_dram;
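The recursion above uses a hold-and-retry trick: the boundary-crossing block stays allocated while the replacement is requested, so the allocator cannot hand back the same region, and the old block is freed only afterwards. A userspace analogy (alloc_dma/free_dma/crosses_4g are hypothetical stand-ins):

    void *buf = alloc_dma(size, &phys);               /* hypothetical API */
    if (buf && crosses_4g(phys, size)) {
            void *old = buf;
            uint64_t oldphys = phys;
            buf = alloc_dma(size, &phys);             /* old still held here */
            free_dma(old, oldphys, size);             /* release the bad block */
    }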
@@ -161,14 +197,17 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
 	struct iwl_context_info *ctxt_info;
 	struct iwl_context_info_rbd_cfg *rx_cfg;
 	u32 control_flags = 0, rb_size;
+	dma_addr_t phys;
 	int ret;

-	ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
-				       &trans_pcie->ctxt_info_dma_addr,
-				       GFP_KERNEL);
+	ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans,
+							  sizeof(*ctxt_info),
+							  &phys);
 	if (!ctxt_info)
 		return -ENOMEM;

+	trans_pcie->ctxt_info_dma_addr = phys;
+
 	ctxt_info->version.version = 0;
 	ctxt_info->version.mac_id =
 		cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
@@ -305,7 +305,7 @@ struct iwl_cmd_meta {
 #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

 struct iwl_pcie_txq_entry {
-	struct iwl_device_cmd *cmd;
+	void *cmd;
 	struct sk_buff *skb;
 	/* buffer to free after command completes */
 	const void *free_buf;
@@ -672,6 +672,16 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
 /*****************************************************
 * TX / HCMD
 ******************************************************/
+/*
+ * We need this inline in case dma_addr_t is only 32-bits - since the
+ * hardware is always 64-bit, the issue can still occur in that case,
+ * so use u64 for 'phys' here to force the addition in 64-bit.
+ */
+static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
+{
+	return upper_32_bits(phys) != upper_32_bits(phys + len);
+}
+
 int iwl_pcie_tx_init(struct iwl_trans *trans);
 int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
 			  int queue_size);
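A worked example of the boundary test above, as a standalone sketch: a buffer crosses the 4 GiB line exactly when the upper 32 bits of its start and end addresses differ.

    #include <stdint.h>
    #include <stdio.h>

    static int crosses_4g(uint64_t phys, uint16_t len)
    {
        /* same comparison as the kernel inline, spelled out with shifts */
        return (phys >> 32) != ((phys + len) >> 32);
    }

    int main(void)
    {
        /* 0xFFFFFFFA + 12 = 0x1_0000_0006: upper halves differ, crosses */
        printf("%d\n", crosses_4g(0xFFFFFFFAull, 12));  /* prints 1 */
        /* entirely above the boundary: no crossing */
        printf("%d\n", crosses_4g(0x100000000ull, 12)); /* prints 0 */
        return 0;
    }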
@@ -688,7 +698,7 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
 void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
 				  struct iwl_txq *txq);
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
-		      struct iwl_device_cmd *dev_cmd, int txq_id);
+		      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
@@ -1082,7 +1092,8 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans);
 void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
 			    struct sk_buff *skb);
 #ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+				      struct sk_buff *skb);
 #endif

 /* common functions that are used by gen3 transport */
@@ -1106,7 +1117,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 				 unsigned int timeout);
 void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
-			   struct iwl_device_cmd *dev_cmd, int txq_id);
+			   struct iwl_device_tx_cmd *dev_cmd, int txq_id);
 int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
 				  struct iwl_host_cmd *cmd);
 void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
@@ -1529,13 +1529,13 @@ out:

 	napi = &rxq->napi;
 	if (napi->poll) {
-		napi_gro_flush(napi, false);
-
 		if (napi->rx_count) {
 			netif_receive_skb_list(&napi->rx_list);
 			INIT_LIST_HEAD(&napi->rx_list);
 			napi->rx_count = 0;
 		}
+
+		napi_gro_flush(napi, false);
 	}

 	iwl_pcie_rxq_restock(trans, rxq);
@@ -79,6 +79,7 @@
 #include "iwl-agn-hw.h"
 #include "fw/error-dump.h"
 #include "fw/dbg.h"
+#include "fw/api/tx.h"
 #include "internal.h"
 #include "iwl-fh.h"

@@ -301,18 +302,13 @@ void iwl_pcie_apm_config(struct iwl_trans *trans)
 	u16 cap;

 	/*
-	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
-	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
-	 * If so (likely), disable L0S, so device moves directly L0->L1;
-	 *    costs negligible amount of power savings.
-	 * If not (unlikely), enable L0S, so there is at least some
-	 *    power savings, even without L1.
+	 * L0S states have been found to be unstable with our devices
+	 * and in newer hardware they are not officially supported at
+	 * all, so we must always set the L0S_DISABLED bit.
 	 */
+	iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
+
 	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
-	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
-		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-	else
-		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
 	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

 	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
@@ -3460,19 +3456,34 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 {
 	struct iwl_trans_pcie *trans_pcie;
 	struct iwl_trans *trans;
-	int ret, addr_size;
+	int ret, addr_size, txcmd_size, txcmd_align;
+	const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
+
+	if (!cfg_trans->gen2) {
+		ops = &trans_ops_pcie;
+		txcmd_size = sizeof(struct iwl_tx_cmd);
+		txcmd_align = sizeof(void *);
+	} else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
+		txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
+		txcmd_align = 64;
+	} else {
+		txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
+		txcmd_align = 128;
+	}
+
+	txcmd_size += sizeof(struct iwl_cmd_header);
+	txcmd_size += 36; /* biggest possible 802.11 header */
+
+	/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
+	if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
+		return ERR_PTR(-EINVAL);

 	ret = pcim_enable_device(pdev);
 	if (ret)
 		return ERR_PTR(ret);

-	if (cfg_trans->gen2)
-		trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
-					&pdev->dev, &trans_ops_pcie_gen2);
-	else
-		trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
-					&pdev->dev, &trans_ops_pcie);
-
+	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
+				txcmd_size, txcmd_align);
 	if (!trans)
 		return ERR_PTR(-ENOMEM);

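The WARN_ON above leans on a standard invariant: an object aligned to A bytes that is no larger than A bytes can never cross an A-byte boundary, because it lives entirely inside one aligned A-byte chunk. A small standalone check of that claim:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t align = 128, size = 100;   /* size <= align, as required */

        /* aligned start addresses are multiples of align */
        for (uint64_t base = 0; base < 4096; base += align) {
            /* first and last byte land in the same align-sized chunk */
            assert((base / align) == ((base + size - 1) / align));
        }
        return 0;
    }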
@@ -221,6 +221,17 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
 	int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
 	struct iwl_tfh_tb *tb;

+	/*
+	 * Only WARN here so we know about the issue, but we mess up our
+	 * unmap path because not every place currently checks for errors
+	 * returned from this function - it can only return an error if
+	 * there's no more space, and so when we know there is enough we
+	 * don't always check ...
+	 */
+	WARN(iwl_pcie_crosses_4g_boundary(addr, len),
+	     "possible DMA problem with iova:0x%llx, len:%d\n",
+	     (unsigned long long)addr, len);
+
 	if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
 		return -EINVAL;
 	tb = &tfd->tbs[idx];
@@ -240,13 +251,114 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
 	return idx;
 }

+static struct page *get_workaround_page(struct iwl_trans *trans,
+					struct sk_buff *skb)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct page **page_ptr;
+	struct page *ret;
+
+	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+	ret = alloc_page(GFP_ATOMIC);
+	if (!ret)
+		return NULL;
+
+	/* set the chaining pointer to the previous page if there */
+	*(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+	*page_ptr = ret;
+
+	return ret;
+}
+
+/*
+ * Add a TB and if needed apply the FH HW bug workaround;
+ * meta != NULL indicates that it's a page mapping and we
+ * need to dma_unmap_page() and set the meta->tbs bit in
+ * this case.
+ */
+static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans,
+					struct sk_buff *skb,
+					struct iwl_tfh_tfd *tfd,
+					dma_addr_t phys, void *virt,
+					u16 len, struct iwl_cmd_meta *meta)
+{
+	dma_addr_t oldphys = phys;
+	struct page *page;
+	int ret;
+
+	if (unlikely(dma_mapping_error(trans->dev, phys)))
+		return -ENOMEM;
+
+	if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) {
+		ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+
+		if (ret < 0)
+			goto unmap;
+
+		if (meta)
+			meta->tbs |= BIT(ret);
+
+		ret = 0;
+		goto trace;
+	}
+
+	/*
+	 * Work around a hardware bug. If (as expressed in the
+	 * condition above) the TB ends on a 32-bit boundary,
+	 * then the next TB may be accessed with the wrong
+	 * address.
+	 * To work around it, copy the data elsewhere and make
+	 * a new mapping for it so the device will not fail.
+	 */
+
+	if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
+		ret = -ENOBUFS;
+		goto unmap;
+	}
+
+	page = get_workaround_page(trans, skb);
+	if (!page) {
+		ret = -ENOMEM;
+		goto unmap;
+	}
+
+	memcpy(page_address(page), virt, len);
+
+	phys = dma_map_single(trans->dev, page_address(page), len,
+			      DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(trans->dev, phys)))
+		return -ENOMEM;
+	ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+	if (ret < 0) {
+		/* unmap the new allocation as single */
+		oldphys = phys;
+		meta = NULL;
+		goto unmap;
+	}
+	IWL_WARN(trans,
+		 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+		 len, (unsigned long long)oldphys, (unsigned long long)phys);
+
+	ret = 0;
+unmap:
+	if (meta)
+		dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
+	else
+		dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
+trace:
+	trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
+
+	return ret;
+}
+
 static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 				     struct sk_buff *skb,
 				     struct iwl_tfh_tfd *tfd, int start_len,
-				     u8 hdr_len, struct iwl_device_cmd *dev_cmd)
+				     u8 hdr_len,
+				     struct iwl_device_tx_cmd *dev_cmd)
 {
 #ifdef CONFIG_INET
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
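get_workaround_page() chains pages through their last pointer-sized slot, anchored in skb->cb, so every workaround page can be found and freed together with the skb. A standalone sketch of the same chaining (malloc stands in for alloc_page):

    #include <stdlib.h>

    #define PAGE_SZ 4096

    static void *chain_page(void **head)
    {
        char *page = malloc(PAGE_SZ);            /* stand-in for alloc_page() */
        if (!page)
            return NULL;
        /* last pointer-sized slot links to the previously attached page */
        *(void **)(page + PAGE_SZ - sizeof(void *)) = *head;
        *head = page;
        return page;
    }

    int main(void)
    {
        void *head = NULL;
        chain_page(&head);
        chain_page(&head);
        while (head) {                           /* walk and free, newest first */
            void *next = *(void **)((char *)head + PAGE_SZ - sizeof(void *));
            free(head);
            head = next;
        }
        return 0;
    }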
@@ -254,7 +366,6 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 	u16 length, amsdu_pad;
 	u8 *start_hdr;
 	struct iwl_tso_hdr_page *hdr_page;
-	struct page **page_ptr;
 	struct tso_t tso;

 	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
@@ -270,14 +381,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));

 	/* Our device supports 9 segments at most, it will fit in 1 page */
-	hdr_page = get_page_hdr(trans, hdr_room);
+	hdr_page = get_page_hdr(trans, hdr_room, skb);
 	if (!hdr_page)
 		return -ENOMEM;

-	get_page(hdr_page->page);
 	start_hdr = hdr_page->pos;
-	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
-	*page_ptr = hdr_page->page;

 	/*
 	 * Pull the ieee80211 header to be able to use TSO core,
@@ -332,6 +440,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 				dev_kfree_skb(csum_skb);
 				goto out_err;
 			}
+			/*
+			 * No need for _with_wa, this is from the TSO page and
+			 * we leave some space at the end of it so can't hit
+			 * the buggy scenario.
+			 */
 			iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
 			trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
 						tb_phys, tb_len);
@@ -343,16 +456,18 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,

 		/* put the payload */
 		while (data_left) {
+			int ret;
+
 			tb_len = min_t(unsigned int, tso.size, data_left);
 			tb_phys = dma_map_single(trans->dev, tso.data,
 						 tb_len, DMA_TO_DEVICE);
-			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+			ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd,
+							   tb_phys, tso.data,
+							   tb_len, NULL);
+			if (ret) {
 				dev_kfree_skb(csum_skb);
 				goto out_err;
 			}
-			iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
-			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
-						tb_phys, tb_len);

 			data_left -= tb_len;
 			tso_build_data(skb, &tso, tb_len);
@ -372,7 +487,7 @@ out_err:
|
||||
static struct
|
||||
iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
|
||||
struct iwl_txq *txq,
|
||||
struct iwl_device_cmd *dev_cmd,
|
||||
struct iwl_device_tx_cmd *dev_cmd,
|
||||
struct sk_buff *skb,
|
||||
struct iwl_cmd_meta *out_meta,
|
||||
int hdr_len,
|
||||
@ -386,6 +501,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
|
||||
|
||||
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
|
||||
|
||||
/*
|
||||
* No need for _with_wa, the first TB allocation is aligned up
|
||||
* to a 64-byte boundary and thus can't be at the end or cross
|
||||
* a page boundary (much less a 2^32 boundary).
|
||||
*/
|
||||
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
|
||||
|
||||
/*
|
||||
@ -404,6 +524,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
|
||||
tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
|
||||
goto out_err;
|
||||
/*
|
||||
* No need for _with_wa(), we ensure (via alignment) that the data
|
||||
* here can never cross or end at a page boundary.
|
||||
*/
|
||||
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
|
||||
|
||||
if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
|
||||
@ -430,24 +554,19 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
|
||||
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
||||
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
|
||||
dma_addr_t tb_phys;
|
||||
int tb_idx;
|
||||
unsigned int fragsz = skb_frag_size(frag);
|
||||
int ret;
|
||||
|
||||
if (!skb_frag_size(frag))
|
||||
if (!fragsz)
|
||||
continue;
|
||||
|
||||
tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
|
||||
skb_frag_size(frag), DMA_TO_DEVICE);
|
||||
|
||||
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
|
||||
return -ENOMEM;
|
||||
tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
|
||||
skb_frag_size(frag));
|
||||
trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
|
||||
tb_phys, skb_frag_size(frag));
|
||||
if (tb_idx < 0)
|
||||
return tb_idx;
|
||||
|
||||
out_meta->tbs |= BIT(tb_idx);
|
||||
fragsz, DMA_TO_DEVICE);
|
||||
ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
|
||||
skb_frag_address(frag),
|
||||
fragsz, out_meta);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -456,7 +575,7 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
|
||||
static struct
|
||||
iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
|
||||
struct iwl_txq *txq,
|
||||
struct iwl_device_cmd *dev_cmd,
|
||||
struct iwl_device_tx_cmd *dev_cmd,
|
||||
struct sk_buff *skb,
|
||||
struct iwl_cmd_meta *out_meta,
|
||||
int hdr_len,
|
||||
@ -475,6 +594,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
|
||||
/* The first TB points to bi-directional DMA data */
|
||||
memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
|
||||
|
||||
/*
|
||||
* No need for _with_wa, the first TB allocation is aligned up
|
||||
* to a 64-byte boundary and thus can't be at the end or cross
|
||||
* a page boundary (much less a 2^32 boundary).
|
||||
*/
|
||||
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
|
||||
|
||||
/*
|
||||
@ -496,6 +620,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
|
||||
tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
|
||||
goto out_err;
|
||||
/*
|
||||
* No need for _with_wa(), we ensure (via alignment) that the data
|
||||
* here can never cross or end at a page boundary.
|
||||
*/
|
||||
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
|
||||
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
|
||||
IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
|
||||
@ -504,26 +632,30 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
|
||||
tb2_len = skb_headlen(skb) - hdr_len;
|
||||
|
||||
if (tb2_len > 0) {
|
||||
int ret;
|
||||
|
||||
tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
|
||||
tb2_len, DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
|
||||
ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
|
||||
skb->data + hdr_len, tb2_len,
|
||||
NULL);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
|
||||
trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
|
||||
tb_phys, tb2_len);
|
||||
}
|
||||
|
||||
if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
|
||||
goto out_err;
|
||||
|
||||
skb_walk_frags(skb, frag) {
|
||||
int ret;
|
||||
|
||||
tb_phys = dma_map_single(trans->dev, frag->data,
|
||||
skb_headlen(frag), DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
|
||||
ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
|
||||
frag->data,
|
||||
skb_headlen(frag), NULL);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag));
|
||||
trace_iwlwifi_dev_tx_tb(trans->dev, skb, frag->data,
|
||||
tb_phys, skb_headlen(frag));
|
||||
if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta))
|
||||
goto out_err;
|
||||
}
|
||||
@ -538,7 +670,7 @@ out_err:
|
||||
static
|
||||
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
|
||||
struct iwl_txq *txq,
|
||||
struct iwl_device_cmd *dev_cmd,
|
||||
struct iwl_device_tx_cmd *dev_cmd,
|
||||
struct sk_buff *skb,
|
||||
struct iwl_cmd_meta *out_meta)
|
||||
{
|
||||
@ -578,7 +710,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
|
||||
}
|
||||
|
||||
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
|
||||
struct iwl_device_cmd *dev_cmd, int txq_id)
|
||||
struct iwl_device_tx_cmd *dev_cmd, int txq_id)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_cmd_meta *out_meta;
|
||||
@ -603,7 +735,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
|
||||
|
||||
/* don't put the packet on the ring, if there is no room */
|
||||
if (unlikely(iwl_queue_space(trans, txq) < 3)) {
|
||||
struct iwl_device_cmd **dev_cmd_ptr;
|
||||
struct iwl_device_tx_cmd **dev_cmd_ptr;
|
||||
|
||||
dev_cmd_ptr = (void *)((u8 *)skb->cb +
|
||||
trans_pcie->dev_cmd_offs);
|
||||
|
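The hunk above is the core of the DMA-bug workaround: a transfer buffer (TB) whose mapping ends on or crosses a 2^32 boundary can make the hardware fetch the next TB from a wrong address, so such buffers are bounced through a freshly allocated page. A minimal sketch of the idea follows; the two helpers below are invented for illustration (only iwl_pcie_gen2_set_tb_with_wa() and get_workaround_page() are real driver symbols):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical helper: would [phys, phys + len) end on or cross a 4 GiB
 * line? This is the condition the workaround is defending against.
 */
static bool tb_crosses_4g(dma_addr_t phys, unsigned int len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}

/* Hypothetical helper: bounce the payload through a new page so the
 * device never sees an offending TB (error handling trimmed).
 */
static dma_addr_t tb_bounce_map(struct device *dev, void *virt,
				unsigned int len)
{
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return DMA_MAPPING_ERROR;
	memcpy(page_address(page), virt, len);
	return dma_map_single(dev, page_address(page), len, DMA_TO_DEVICE);
}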
@@ -213,8 +213,8 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 	u8 sec_ctl = 0;
 	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
 	__le16 bc_ent;
-	struct iwl_tx_cmd *tx_cmd =
-		(void *)txq->entries[txq->write_ptr].cmd->payload;
+	struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
 	u8 sta_id = tx_cmd->sta_id;
 
 	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
@@ -257,8 +257,8 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
 	int read_ptr = txq->read_ptr;
 	u8 sta_id = 0;
 	__le16 bc_ent;
-	struct iwl_tx_cmd *tx_cmd =
-		(void *)txq->entries[read_ptr].cmd->payload;
+	struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
 
 	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
 
@@ -624,12 +624,18 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
 			    struct sk_buff *skb)
 {
 	struct page **page_ptr;
+	struct page *next;
 
 	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+	next = *page_ptr;
+	*page_ptr = NULL;
 
-	if (*page_ptr) {
-		__free_page(*page_ptr);
-		*page_ptr = NULL;
+	while (next) {
+		struct page *tmp = next;
+
+		next = *(void **)(page_address(next) + PAGE_SIZE -
+				  sizeof(void *));
+		__free_page(tmp);
 	}
 }
@@ -1196,7 +1202,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 	while (!skb_queue_empty(&overflow_skbs)) {
 		struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
-		struct iwl_device_cmd *dev_cmd_ptr;
+		struct iwl_device_tx_cmd *dev_cmd_ptr;
 
 		dev_cmd_ptr = *(void **)((u8 *)skb->cb +
 					 trans_pcie->dev_cmd_offs);
@@ -2052,17 +2058,34 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 }
 
 #ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+				      struct sk_buff *skb)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
+	struct page **page_ptr;
+
+	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+	if (WARN_ON(*page_ptr))
+		return NULL;
 
 	if (!p->page)
 		goto alloc;
 
-	/* enough room on this page */
-	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
-		return p;
+	/*
+	 * Check if there's enough room on this page
+	 *
+	 * Note that we put a page chaining pointer *last* in the
+	 * page - we need it somewhere, and if it's there then we
+	 * avoid DMA mapping the last bits of the page which may
+	 * trigger the 32-bit boundary hardware bug.
+	 *
+	 * (see also get_workaround_page() in tx-gen2.c)
+	 */
+	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
+			   sizeof(void *))
+		goto out;
 
 	/* We don't have enough room on this page, get a new one. */
 	__free_page(p->page);
@@ -2072,6 +2095,11 @@ alloc:
 	if (!p->page)
 		return NULL;
 	p->pos = page_address(p->page);
+	/* set the chaining pointer to NULL */
+	*(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+out:
+	*page_ptr = p->page;
+	get_page(p->page);
 	return p;
 }
@@ -2097,7 +2125,8 @@ static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 				   struct iwl_txq *txq, u8 hdr_len,
 				   struct iwl_cmd_meta *out_meta,
-				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+				   struct iwl_device_tx_cmd *dev_cmd,
+				   u16 tb1_len)
 {
 	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
 	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
@@ -2107,7 +2136,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 	u16 length, iv_len, amsdu_pad;
 	u8 *start_hdr;
 	struct iwl_tso_hdr_page *hdr_page;
-	struct page **page_ptr;
 	struct tso_t tso;
 
 	/* if the packet is protected, then it must be CCMP or GCMP */
@@ -2130,14 +2158,11 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
 
 	/* Our device supports 9 segments at most, it will fit in 1 page */
-	hdr_page = get_page_hdr(trans, hdr_room);
+	hdr_page = get_page_hdr(trans, hdr_room, skb);
 	if (!hdr_page)
 		return -ENOMEM;
 
-	get_page(hdr_page->page);
 	start_hdr = hdr_page->pos;
-	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
-	*page_ptr = hdr_page->page;
 	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
 	hdr_page->pos += iv_len;
 
@@ -2279,7 +2304,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 				   struct iwl_txq *txq, u8 hdr_len,
 				   struct iwl_cmd_meta *out_meta,
-				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+				   struct iwl_device_tx_cmd *dev_cmd,
+				   u16 tb1_len)
 {
 	/* No A-MSDU without CONFIG_INET */
 	WARN_ON(1);
@@ -2289,7 +2315,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 #endif /* CONFIG_INET */
 
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
-		      struct iwl_device_cmd *dev_cmd, int txq_id)
+		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct ieee80211_hdr *hdr;
@@ -2346,7 +2372,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	/* don't put the packet on the ring, if there is no room */
 	if (unlikely(iwl_queue_space(trans, txq) < 3)) {
-		struct iwl_device_cmd **dev_cmd_ptr;
+		struct iwl_device_tx_cmd **dev_cmd_ptr;
 
 		dev_cmd_ptr = (void *)((u8 *)skb->cb +
 				       trans_pcie->dev_cmd_offs);
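get_page_hdr() now hides all the per-skb page accounting, and pages are chained through a pointer stored in the last sizeof(void *) bytes of each page. That both gives iwl_pcie_free_tso_page() a list to walk and keeps those bytes out of any DMA mapping. A condensed sketch of the chaining scheme, with hypothetical names:

#include <linux/gfp.h>
#include <linux/mm.h>

/* The chaining pointer occupies the last sizeof(void *) bytes of a page. */
static struct page **page_chain_slot(struct page *page)
{
	return page_address(page) + PAGE_SIZE - sizeof(void *);
}

static void page_chain_add(struct page **head, struct page *page)
{
	*page_chain_slot(page) = *head;
	*head = page;
}

static void page_chain_free(struct page *head)
{
	while (head) {
		struct page *next = *page_chain_slot(head);

		__free_page(head);
		head = next;
	}
}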
@@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates)
 	int hw, ap, ap_max = ie[1];
 	u8 hw_rate;
 
+	if (ap_max > MAX_RATES) {
+		lbs_deb_assoc("invalid rates\n");
+		return tlv;
+	}
 	/* Advance past IE header */
 	ie += 2;
 
@@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
 	struct cmd_ds_802_11_ad_hoc_join cmd;
 	u8 preamble = RADIO_PREAMBLE_SHORT;
 	int ret = 0;
+	int hw, i;
+	u8 rates_max;
+	u8 *rates;
 
 	/* TODO: set preamble based on scan result */
 	ret = lbs_set_radio(priv, preamble, 1);
@@ -1775,9 +1782,12 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
 	if (!rates_eid) {
 		lbs_add_rates(cmd.bss.rates);
 	} else {
-		int hw, i;
-		u8 rates_max = rates_eid[1];
-		u8 *rates = cmd.bss.rates;
+		rates_max = rates_eid[1];
+		if (rates_max > MAX_RATES) {
+			lbs_deb_join("invalid rates");
+			goto out;
+		}
+		rates = cmd.bss.rates;
 		for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
 			u8 hw_rate = lbs_rates[hw].bitrate / 5;
 			for (i = 0; i < rates_max; i++) {
@@ -242,7 +242,7 @@ u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
 			return 0;
 
 		sband = dev->hw->wiphy->bands[status->band];
-		if (!sband || status->rate_idx > sband->n_bitrates)
+		if (!sband || status->rate_idx >= sband->n_bitrates)
 			return 0;
 
 		rate = &sband->bitrates[status->rate_idx];

@@ -378,7 +378,8 @@ void mt76_unregister_device(struct mt76_dev *dev)
 {
 	struct ieee80211_hw *hw = dev->hw;
 
-	mt76_led_cleanup(dev);
+	if (IS_ENABLED(CONFIG_MT76_LEDS))
+		mt76_led_cleanup(dev);
 	mt76_tx_status_check(dev, NULL, true);
 	ieee80211_unregister_hw(hw);
 }
@@ -3698,6 +3698,8 @@ int dev_set_alias(struct net_device *, const char *, size_t);
 int dev_get_alias(const struct net_device *, char *, size_t);
 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
 int __dev_set_mtu(struct net_device *, int);
+int dev_validate_mtu(struct net_device *dev, int mtu,
+		     struct netlink_ext_ack *extack);
 int dev_set_mtu_ext(struct net_device *dev, int mtu,
 		    struct netlink_ext_ack *extack);
 int dev_set_mtu(struct net_device *, int);
@@ -426,13 +426,6 @@ ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
 	       sizeof(*addr));
 }
 
-/* Calculate the bytes required to store the inclusive range of a-b */
-static inline int
-bitmap_bytes(u32 a, u32 b)
-{
-	return 4 * ((((b - a + 8) / 8) + 3) / 4);
-}
-
 /* How often should the gc be run by default */
 #define IPSET_GC_TIME (3 * 60)
@@ -31,7 +31,7 @@ struct nfnetlink_subsystem {
 	const struct nfnl_callback *cb;	/* callback for individual types */
 	struct module *owner;
 	int (*commit)(struct net *net, struct sk_buff *skb);
-	int (*abort)(struct net *net, struct sk_buff *skb);
+	int (*abort)(struct net *net, struct sk_buff *skb, bool autoload);
 	void (*cleanup)(struct net *net);
 	bool (*valid_genid)(struct net *net, u32 genid);
 };
@@ -7,6 +7,7 @@
 struct netns_nftables {
 	struct list_head	tables;
 	struct list_head	commit_list;
+	struct list_head	module_list;
 	struct mutex		commit_mutex;
 	unsigned int		base_seq;
 	u8			gencursor;
@@ -134,8 +134,7 @@ static void vcc_seq_stop(struct seq_file *seq, void *v)
 static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	v = vcc_walk(seq, 1);
-	if (v)
-		(*pos)++;
+	(*pos)++;
 	return v;
 }
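This is one of four matching seq_file fixes in this pull (vcc_seq_next here, plus neigh_stat_seq_next, rt_cpu_seq_next and ipv6_route_seq_next further down): a ->next handler must advance *pos unconditionally, even on the call that returns NULL, otherwise the seq_file read offset stalls and the last record can be emitted twice. The general shape, as a sketch:

#include <linux/seq_file.h>

static void *my_walk(struct seq_file *seq, void *v);	/* subsystem iterator */

/* Correct ->next: the position always moves, whether or not another
 * record exists; returning NULL merely ends this read.
 */
static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return my_walk(seq, v);
}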
@@ -62,7 +62,7 @@ static int cfusbl_transmit(struct cflayer *layr, struct cfpkt *pkt)
 	hpad = (info->hdr_len + CFUSB_PAD_DESCR_SZ) & (CFUSB_ALIGNMENT - 1);
 
 	if (skb_headroom(skb) < ETH_HLEN + CFUSB_PAD_DESCR_SZ + hpad) {
-		pr_warn("Headroom to small\n");
+		pr_warn("Headroom too small\n");
 		kfree_skb(skb);
 		return -EIO;
 	}
@@ -5491,9 +5491,29 @@ static void flush_all_backlogs(void)
 	put_online_cpus();
 }
 
+/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
+static void gro_normal_list(struct napi_struct *napi)
+{
+	if (!napi->rx_count)
+		return;
+	netif_receive_skb_list_internal(&napi->rx_list);
+	INIT_LIST_HEAD(&napi->rx_list);
+	napi->rx_count = 0;
+}
+
+/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
+ * pass the whole batch up to the stack.
+ */
+static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
+{
+	list_add_tail(&skb->list, &napi->rx_list);
+	if (++napi->rx_count >= gro_normal_batch)
+		gro_normal_list(napi);
+}
+
 INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
 INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
-static int napi_gro_complete(struct sk_buff *skb)
+static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct packet_offload *ptype;
 	__be16 type = skb->protocol;
@@ -5526,7 +5546,8 @@ static int napi_gro_complete(struct sk_buff *skb)
 	}
 
 out:
-	return netif_receive_skb_internal(skb);
+	gro_normal_one(napi, skb);
+	return NET_RX_SUCCESS;
 }
 
 static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
@@ -5539,7 +5560,7 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
 		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
 			return;
 		skb_list_del_init(skb);
-		napi_gro_complete(skb);
+		napi_gro_complete(napi, skb);
 		napi->gro_hash[index].count--;
 	}
 
@@ -5641,7 +5662,7 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
 	}
 }
 
-static void gro_flush_oldest(struct list_head *head)
+static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
 {
 	struct sk_buff *oldest;
 
@@ -5657,7 +5678,7 @@ static void gro_flush_oldest(struct list_head *head)
 	 * SKB to the chain.
 	 */
 	skb_list_del_init(oldest);
-	napi_gro_complete(oldest);
+	napi_gro_complete(napi, oldest);
 }
 
 INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
@@ -5733,7 +5754,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 
 	if (pp) {
 		skb_list_del_init(pp);
-		napi_gro_complete(pp);
+		napi_gro_complete(napi, pp);
 		napi->gro_hash[hash].count--;
 	}
 
@@ -5744,7 +5765,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 		goto normal;
 
 	if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
-		gro_flush_oldest(gro_head);
+		gro_flush_oldest(napi, gro_head);
 	} else {
 		napi->gro_hash[hash].count++;
 	}
@@ -5802,26 +5823,6 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
 }
 EXPORT_SYMBOL(gro_find_complete_by_type);
 
-/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
-static void gro_normal_list(struct napi_struct *napi)
-{
-	if (!napi->rx_count)
-		return;
-	netif_receive_skb_list_internal(&napi->rx_list);
-	INIT_LIST_HEAD(&napi->rx_list);
-	napi->rx_count = 0;
-}
-
-/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
- * pass the whole batch up to the stack.
- */
-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
-{
-	list_add_tail(&skb->list, &napi->rx_list);
-	if (++napi->rx_count >= gro_normal_batch)
-		gro_normal_list(napi);
-}
-
 static void napi_skb_free_stolen_head(struct sk_buff *skb)
 {
 	skb_dst_drop(skb);
@@ -6200,8 +6201,6 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 					NAPIF_STATE_IN_BUSY_POLL)))
 		return false;
 
-	gro_normal_list(n);
-
 	if (n->gro_bitmask) {
 		unsigned long timeout = 0;
 
@@ -6217,6 +6216,9 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 			hrtimer_start(&n->timer, ns_to_ktime(timeout),
 				      HRTIMER_MODE_REL_PINNED);
 	}
+
+	gro_normal_list(n);
+
 	if (unlikely(!list_empty(&n->poll_list))) {
 		/* If n->poll_list is not empty, we need to mask irqs */
 		local_irq_save(flags);
@@ -6548,8 +6550,6 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 		goto out_unlock;
 	}
 
-	gro_normal_list(n);
-
 	if (n->gro_bitmask) {
 		/* flush too old packets
 		 * If HZ < 1000, flush all packets.
@@ -6557,6 +6557,8 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 		napi_gro_flush(n, HZ >= 1000);
 	}
 
+	gro_normal_list(n);
+
 	/* Some drivers may have called napi_schedule
 	 * prior to exhausting their budget.
 	 */
@@ -8194,6 +8196,22 @@ int __dev_set_mtu(struct net_device *dev, int new_mtu)
 }
 EXPORT_SYMBOL(__dev_set_mtu);
 
+int dev_validate_mtu(struct net_device *dev, int new_mtu,
+		     struct netlink_ext_ack *extack)
+{
+	/* MTU must be positive, and in range */
+	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
+		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
+		return -EINVAL;
+	}
+
+	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
+		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /**
  *	dev_set_mtu_ext - Change maximum transfer unit
  *	@dev: device
@@ -8210,16 +8228,9 @@ int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
 	if (new_mtu == dev->mtu)
 		return 0;
 
-	/* MTU must be positive, and in range */
-	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
-		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
-		return -EINVAL;
-	}
-
-	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
-		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
-		return -EINVAL;
-	}
+	err = dev_validate_mtu(dev, new_mtu, extack);
+	if (err)
+		return err;
 
 	if (!netif_device_present(dev))
 		return -ENODEV;
@@ -9302,8 +9313,10 @@ int register_netdevice(struct net_device *dev)
 		goto err_uninit;
 
 	ret = netdev_register_kobject(dev);
-	if (ret)
+	if (ret) {
+		dev->reg_state = NETREG_UNREGISTERED;
 		goto err_uninit;
+	}
 	dev->reg_state = NETREG_REGISTERED;
 
 	__netdev_update_features(dev);
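These dev.c hunks are item 6 of the pull summary. Completed GRO super-packets used to be handed straight to netif_receive_skb_internal() while non-GRO packets of the same flow sat batched in napi->rx_list, so the two paths could reorder traffic. Routing napi_gro_complete() through gro_normal_one() funnels everything into the one per-NAPI list, and gro_normal_list() moves after napi_gro_flush() so packets flushed at end of poll still go out in order. The invariant in miniature, with analogue names rather than the kernel's:

#include <linux/list.h>
#include <linux/skbuff.h>

static LIST_HEAD(rx_fifo);		/* stands in for napi->rx_list */

static void fifo_drain(void);		/* gro_normal_list() analogue */
static void gro_flush_completed(void);	/* napi_gro_flush() analogue; the
					 * completed skbs land in rx_fifo
					 * via fifo_queue() */

static void fifo_queue(struct sk_buff *skb)	/* gro_normal_one() analogue */
{
	list_add_tail(&skb->list, &rx_fifo);
}

static void napi_poll_tail(void)
{
	gro_flush_completed();	/* may append more skbs to rx_fifo ... */
	fifo_drain();		/* ... so drain only afterwards, in order */
}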
@@ -3290,6 +3290,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 		*pos = cpu+1;
 		return per_cpu_ptr(tbl->stats, cpu);
 	}
+	(*pos)++;
 	return NULL;
 }
@@ -3048,8 +3048,17 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
 	dev->rtnl_link_ops = ops;
 	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
 
-	if (tb[IFLA_MTU])
-		dev->mtu = nla_get_u32(tb[IFLA_MTU]);
+	if (tb[IFLA_MTU]) {
+		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
+		int err;
+
+		err = dev_validate_mtu(dev, mtu, extack);
+		if (err) {
+			free_netdev(dev);
+			return ERR_PTR(err);
+		}
+		dev->mtu = mtu;
+	}
 	if (tb[IFLA_ADDRESS]) {
 		memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
 		       nla_len(tb[IFLA_ADDRESS]));
@@ -594,8 +594,6 @@ EXPORT_SYMBOL_GPL(sk_psock_destroy);
 
 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 {
-	sock_owned_by_me(sk);
-
 	sk_psock_cork_free(psock);
 	sk_psock_zap_ingress(psock);
@@ -438,6 +438,23 @@ void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(inet_proto_csum_replace4);
 
+/**
+ * inet_proto_csum_replace16 - update layer 4 header checksum field
+ * @sum: Layer 4 header checksum field
+ * @skb: sk_buff for the packet
+ * @from: old IPv6 address
+ * @to: new IPv6 address
+ * @pseudohdr: True if layer 4 header checksum includes pseudoheader
+ *
+ * Update layer 4 header as per the update in IPv6 src/dst address.
+ *
+ * There is no need to update skb->csum in this function, because update in two
+ * fields a.) IPv6 src/dst address and b.) L4 header checksum cancels each other
+ * for skb->csum calculation. Whereas inet_proto_csum_replace4 function needs to
+ * update skb->csum, because update in 3 fields a.) IPv4 src/dst address,
+ * b.) IPv4 Header checksum and c.) L4 header checksum results in same diff as
+ * L4 Header checksum for skb->csum calculation.
+ */
 void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
 			       const __be32 *from, const __be32 *to,
 			       bool pseudohdr)
@@ -449,9 +466,6 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
 		*sum = csum_fold(csum_partial(diff, sizeof(diff),
 				 ~csum_unfold(*sum)));
-		if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
-			skb->csum = ~csum_partial(diff, sizeof(diff),
-						  ~skb->csum);
 	} else if (pseudohdr)
 		*sum = ~csum_fold(csum_partial(diff, sizeof(diff),
 				  csum_unfold(*sum)));
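The cancellation the new kernel-doc describes can be checked with plain 16-bit one's-complement arithmetic. In this standalone userspace demo (not kernel code), rewriting an "address" word and compensating in the "checksum" word leaves the folded sum of the whole packet unchanged, which is why skb->csum needs no fix-up in replace16:

#include <stdint.h>
#include <stdio.h>

/* fold a 32-bit accumulator into a 16-bit one's-complement sum */
static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint16_t addr = 0x1234, csum = 0x4444;	/* toy "packet" words */
	uint16_t new_addr = 0xabcd;
	/* adjust the checksum field for the address rewrite:
	 * new = old + addr - new_addr, in one's-complement arithmetic
	 */
	uint16_t new_csum = fold((uint32_t)csum + addr +
				 (uint16_t)~new_addr);

	/* both lines print 0x5678: the overall sum is unchanged */
	printf("before: %#06x  after: %#06x\n",
	       fold((uint32_t)addr + csum),
	       fold((uint32_t)new_addr + new_csum));
	return 0;
}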
@@ -191,7 +191,7 @@ void hsr_debugfs_term(struct hsr_priv *priv);
 void hsr_debugfs_create_root(void);
 void hsr_debugfs_remove_root(void);
 #else
-static inline void void hsr_debugfs_rename(struct net_device *dev)
+static inline void hsr_debugfs_rename(struct net_device *dev)
 {
 }
 static inline void hsr_debugfs_init(struct hsr_priv *priv,
@@ -57,6 +57,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
 		if (!x)
 			goto out_reset;
 
+		skb->mark = xfrm_smark_get(skb->mark, x);
+
 		sp->xvec[sp->len++] = x;
 		sp->olen++;
@@ -662,8 +662,8 @@ static const struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
 	[FOU_ATTR_REMCSUM_NOPARTIAL]	= { .type = NLA_FLAG, },
 	[FOU_ATTR_LOCAL_V4]		= { .type = NLA_U32, },
 	[FOU_ATTR_PEER_V4]		= { .type = NLA_U32, },
-	[FOU_ATTR_LOCAL_V6]		= { .type = sizeof(struct in6_addr), },
-	[FOU_ATTR_PEER_V6]		= { .type = sizeof(struct in6_addr), },
+	[FOU_ATTR_LOCAL_V6]		= { .len = sizeof(struct in6_addr), },
+	[FOU_ATTR_PEER_V6]		= { .len = sizeof(struct in6_addr), },
 	[FOU_ATTR_PEER_PORT]		= { .type = NLA_U16, },
 	[FOU_ATTR_IFINDEX]		= { .type = NLA_S32, },
 };
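The fou bug is easy to reproduce in any netlink policy table: sizeof(struct in6_addr) is 16, so writing it into .type declared the attribute as whatever NLA_* enum value 16 happens to be, rather than as a 16-byte binary payload with a length check. The corrected idiom for fixed-size binary attributes, in a small sketch with made-up attribute names:

#include <net/netlink.h>
#include <linux/in6.h>

enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_ADDR6, __DEMO_ATTR_MAX };
#define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

/* .len (not .type!) carries the expected payload size for a binary
 * attribute; leaving .type as NLA_UNSPEC makes it a length-checked blob.
 */
static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
	[DEMO_ATTR_ADDR6] = { .len = sizeof(struct in6_addr) },
};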
@@ -1236,10 +1236,8 @@ int ip_tunnel_init(struct net_device *dev)
 	iph->version = 4;
 	iph->ihl = 5;
 
-	if (tunnel->collect_md) {
-		dev->features |= NETIF_F_NETNS_LOCAL;
+	if (tunnel->collect_md)
 		netif_keep_dst(dev);
-	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_init);
@@ -187,8 +187,17 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 	int mtu;
 
 	if (!dst) {
-		dev->stats.tx_carrier_errors++;
-		goto tx_error_icmp;
+		struct rtable *rt;
+
+		fl->u.ip4.flowi4_oif = dev->ifindex;
+		fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+		rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
+		if (IS_ERR(rt)) {
+			dev->stats.tx_carrier_errors++;
+			goto tx_error_icmp;
+		}
+		dst = &rt->dst;
+		skb_dst_set(skb, dst);
 	}
 
 	dst_hold(dst);
@@ -271,6 +271,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 		*pos = cpu+1;
 		return &per_cpu(rt_cache_stat, cpu);
 	}
+	(*pos)++;
 	return NULL;
 
 }
@@ -2524,6 +2524,7 @@ static void tcp_rtx_queue_purge(struct sock *sk)
 {
 	struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
 
+	tcp_sk(sk)->highest_sack = NULL;
 	while (p) {
 		struct sk_buff *skb = rb_to_skb(p);
 
@@ -2614,7 +2615,6 @@ int tcp_disconnect(struct sock *sk, int flags)
 	WRITE_ONCE(tp->write_seq, seq);
 
 	icsk->icsk_backoff = 0;
-	tp->snd_cwnd = 2;
 	icsk->icsk_probes_out = 0;
 	icsk->icsk_rto = TCP_TIMEOUT_INIT;
 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
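Item 11 of the summary. tp->highest_sack caches a pointer to an skb inside the retransmit queue, so every path that frees queue members has to invalidate or retarget the cache first; tcp_rtx_queue_purge() now clears it up front, and the tcp_input.c/tcp_output.c hunks below call tcp_highest_sack_replace() before unlinking. The rule in its generic form, as a sketch:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
};

struct item_queue {
	struct list_head items;
	struct item *hint;	/* cached shortcut into the list */
};

/* Drop the cached pointer before freeing what it points into, exactly
 * as tcp_rtx_queue_purge() does with tp->highest_sack.
 */
static void item_queue_purge(struct item_queue *q)
{
	struct item *it, *tmp;

	q->hint = NULL;
	list_for_each_entry_safe(it, tmp, &q->items, node) {
		list_del(&it->node);
		kfree(it);
	}
}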
@@ -779,8 +779,7 @@ static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
 	 * bandwidth sample. Delivered is in packets and interval_us in uS and
 	 * ratio will be <<1 for most connections. So delivered is first scaled.
 	 */
-	bw = (u64)rs->delivered * BW_UNIT;
-	do_div(bw, rs->interval_us);
+	bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us);
 
 	/* If this sample is application-limited, it is likely to have a very
 	 * low delivered count that represents application behavior rather than
@@ -3164,6 +3164,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 			tp->retransmit_skb_hint = NULL;
 		if (unlikely(skb == tp->lost_skb_hint))
 			tp->lost_skb_hint = NULL;
+		tcp_highest_sack_replace(sk, skb, next);
 		tcp_rtx_queue_unlink_and_free(skb, sk);
 	}
@@ -3232,6 +3232,7 @@ int tcp_send_synack(struct sock *sk)
 			if (!nskb)
 				return -ENOMEM;
 			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
+			tcp_highest_sack_replace(sk, skb, nskb);
 			tcp_rtx_queue_unlink_and_free(skb, sk);
 			__skb_header_release(nskb);
 			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
@@ -1368,7 +1368,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
 	if (likely(partial)) {
 		up->forward_deficit += size;
 		size = up->forward_deficit;
-		if (size < (sk->sk_rcvbuf >> 2))
+		if (size < (sk->sk_rcvbuf >> 2) &&
+		    !skb_queue_empty(&up->reader_queue))
 			return;
 	} else {
 		size += up->forward_deficit;
@@ -79,6 +79,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
 		if (!x)
 			goto out_reset;
 
+		skb->mark = xfrm_smark_get(skb->mark, x);
+
 		sp->xvec[sp->len++] = x;
 		sp->olen++;
@@ -2495,14 +2495,13 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	struct net *net = seq_file_net(seq);
 	struct ipv6_route_iter *iter = seq->private;
 
+	++(*pos);
 	if (!v)
 		goto iter_table;
 
 	n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next);
-	if (n) {
-		++*pos;
+	if (n)
 		return n;
-	}
 
 iter_table:
 	ipv6_route_check_sernum(iter);
@@ -2510,8 +2509,6 @@ iter_table:
 	r = fib6_walk_continue(&iter->w);
 	spin_unlock_bh(&iter->tbl->tb6_lock);
 	if (r > 0) {
-		if (v)
-			++*pos;
 		return iter->w.leaf;
 	} else if (r < 0) {
 		fib6_walker_unlink(net, &iter->w);