Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 "Fixes here and there, a couple new device IDs, as usual:

   1) Fix BQL race in dpaa2-eth driver, from Ioana Ciornei.

   2) Fix 64-bit division in iwlwifi, from Arnd Bergmann.

   3) Fix documentation for some eBPF helpers, from Quentin Monnet.

   4) Some UAPI bpf header sync with tools, also from Quentin Monnet.

   5) Set descriptor ownership bit at the right time for jumbo frames in
      stmmac driver, from Aaro Koskinen.

   6) Set IFF_UP properly in tun driver, from Eric Dumazet.

   7) Fix load/store doubleword instruction generation in powerpc eBPF
      JIT, from Naveen N. Rao.

   8) nla_nest_start() return value checks all over, from Kangjie Lu.

   9) Fix asoc_id handling in SCTP after the SCTP_*_ASSOC changes this
      merge window. From Marcelo Ricardo Leitner and Xin Long.

  10) Fix memory corruption with large MTUs in stmmac, from Aaro
      Koskinen.

  11) Do not use ipv4 header for ipv6 flows in TCP and DCCP, from Eric
      Dumazet.

  12) Fix topology subscription cancellation in tipc, from Erik Hugne.

  13) Memory leak in genetlink error path, from Yue Haibing.

  14) Validate control actions properly in packet scheduler, from Davide
      Caratti.

  15) Even if we get EEXIST, we still need to rehash if a shrink was
      delayed. From Herbert Xu.

  16) Fix interrupt mask handling in interrupt handler of r8169, from
      Heiner Kallweit.

  17) Fix leak in ehea driver, from Wen Yang"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (168 commits)
  dpaa2-eth: fix race condition with bql frame accounting
  chelsio: use BUG() instead of BUG_ON(1)
  net: devlink: skip info_get op call if it is not defined in dumpit
  net: phy: bcm54xx: Encode link speed and activity into LEDs
  tipc: change to check tipc_own_id to return in tipc_net_stop
  net: usb: aqc111: Extend HWID table by QNAP device
  net: sched: Kconfig: update reference link for PIE
  net: dsa: qca8k: extend slave-bus implementations
  net: dsa: qca8k: remove leftover phy accessors
  dt-bindings: net: dsa: qca8k: support internal mdio-bus
  dt-bindings: net: dsa: qca8k: fix example
  net: phy: don't clear BMCR in genphy_soft_reset
  bpf, libbpf: clarify bump in libbpf version info
  bpf, libbpf: fix version info and add it to shared object
  rxrpc: avoid clang -Wuninitialized warning
  tipc: tipc clang warning
  net: sched: fix cleanup NULL pointer exception in act_mirr
  r8169: fix cable re-plugging issue
  net: ethernet: ti: fix possible object reference leak
  net: ibm: fix possible object reference leak
  ...
Linus Torvalds 2019-03-27 12:22:57 -07:00
commit 1a9df9e29c
182 changed files with 3123 additions and 1139 deletions


@ -12,10 +12,15 @@ Required properties:
Subnodes:
The integrated switch subnode should be specified according to the binding
described in dsa/dsa.txt. As the QCA8K switches do not have a N:N mapping of
port and PHY id, each subnode describing a port needs to have a valid phandle
referencing the internal PHY connected to it. The CPU port of this switch is
always port 0.
described in dsa/dsa.txt. If the QCA8K switch is connected to a SoC's external
mdio-bus each subnode describing a port needs to have a valid phandle
referencing the internal PHY it is connected to. This is because there's no
N:N mapping of port and PHY id.
Don't use mixed external and internal mdio-bus configurations, as this is
not supported by the hardware.
The CPU port of this switch is always port 0.
A CPU port node has the following optional node:
@ -31,8 +36,9 @@ For QCA8K the 'fixed-link' sub-node supports only the following properties:
- 'full-duplex' (boolean, optional), to indicate that full duplex is
used. When absent, half duplex is assumed.
Example:
Examples:
for the external mdio-bus configuration:
&mdio0 {
phy_port1: phy@0 {
@ -55,12 +61,12 @@ Example:
reg = <4>;
};
switch0@0 {
switch@10 {
compatible = "qca,qca8337";
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
reg = <0x10>;
ports {
#address-cells = <1>;
@ -108,3 +114,56 @@ Example:
};
};
};
for the internal master mdio-bus configuration:
&mdio0 {
switch@10 {
compatible = "qca,qca8337";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x10>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
label = "cpu";
ethernet = <&gmac1>;
phy-mode = "rgmii";
fixed-link {
speed = 1000;
full-duplex;
};
};
port@1 {
reg = <1>;
label = "lan1";
};
port@2 {
reg = <2>;
label = "lan2";
};
port@3 {
reg = <3>;
label = "lan3";
};
port@4 {
reg = <4>;
label = "lan4";
};
port@5 {
reg = <5>;
label = "wan";
};
};
};
};


@ -50,7 +50,7 @@ the excellent reporting over at LWN.net or read the original code.
patchset
[PATCH net-next v4 0/9] socket sendmsg MSG_ZEROCOPY
http://lkml.kernel.org/r/20170803202945.70750-1-willemdebruijn.kernel@gmail.com
https://lkml.kernel.org/netdev/20170803202945.70750-1-willemdebruijn.kernel@gmail.com
Interface


@ -131,6 +131,19 @@ it to the maintainer to figure out what is the most recent and current
version that should be applied. If there is any doubt, the maintainer
will reply and ask what should be done.
Q: I made changes to only a few patches in a patch series, should I resend only those changed?
-----------------------------------------------------------------------------------------------
A: No, please resend the entire patch series and make sure you do number your
patches such that it is clear this is the latest and greatest set of patches
that can be applied.
Q: I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted, what should I do?
-------------------------------------------------------------------------------------------------------------------------------------------
A: There is no revert possible; once it is pushed out, it stays like that.
Please send incremental versions on top of what has been merged in order to fix
the patches so they look the way they would have if your latest patch series had
been merged.
Q: How can I tell what patches are queued up for backporting to the various stable releases?
--------------------------------------------------------------------------------------------
A: Normally Greg Kroah-Hartman collects stable commits himself, but for


@ -44,10 +44,10 @@ including the Netfilter hooks and the flowtable fastpath bypass.
/ \ / \ |Routing | / \
--> ingress ---> prerouting ---> |decision| | postrouting |--> neigh_xmit
\_________/ \__________/ ---------- \____________/ ^
| ^ | | ^ |
flowtable | | ____\/___ | |
| | | / \ | |
__\/___ | --------->| forward |------------ |
| ^ | ^ |
flowtable | ____\/___ | |
| | / \ | |
__\/___ | | forward |------------ |
|-----| | \_________/ |
|-----| | 'flow offload' rule |
|-----| | adds entry to |


@ -413,7 +413,7 @@ algorithm.
.. _F-RTO: https://tools.ietf.org/html/rfc5682
TCP Fast Path
============
=============
When the kernel receives a TCP packet, it has two paths to handle the
packet: one is the fast path, the other is the slow path. The comment in the
kernel code provides a good explanation of them; I pasted it below::
@ -681,6 +681,7 @@ The TCP stack receives an out of order duplicate packet, so it sends a
DSACK to the sender.
* TcpExtTCPDSACKRecv
The TCP stack receives a DSACK, which indicates an acknowledged
duplicate packet is received.
@ -690,7 +691,7 @@ The TCP stack receives a DSACK, which indicate an out of order
duplicate packet is received.
invalid SACK and DSACK
====================
======================
When a SACK (or DSACK) block is invalid, a corresponding counter would
be updated. The validation method is based on the start/end sequence
number of the SACK block. For more details, please refer to the comment
@ -704,11 +705,13 @@ explaination:
.. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32
* TcpExtTCPSACKDiscard
This counter indicates how many SACK blocks are invalid. If the invalid
SACK block is caused by ACK recording, the TCP stack will only ignore
it and won't update this counter.
* TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo
When a DSACK block is invalid, one of these two counters would be
updated. Which counter will be updated depends on the undo_marker flag
of the TCP socket. If the undo_marker is not set, the TCP stack isn't
@ -719,7 +722,7 @@ will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld
will be updated. As implied in its name, it might be an old packet.
SACK shift
=========
==========
The Linux networking stack stores data in the sk_buff struct (skb for
short). If a SACK block crosses multiple skbs, the TCP stack will try
to re-arrange the data in these skbs. E.g. if a SACK block acknowledges seq
@ -730,12 +733,15 @@ seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be
discarded; this operation is 'merge'.
* TcpExtTCPSackShifted
A skb is shifted
* TcpExtTCPSackMerged
A skb is merged
* TcpExtTCPSackShiftFallback
A skb should be shifted or merged, but the TCP stack doesn't do it for
some reason.
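
As a hedged aside (not part of the patch): every TcpExt counter described in
this document can be read from /proc/net/netstat, where a header line and a
matching value line alternate per protocol; the nstat tool from iproute2
reports the same counters under the TcpExt names used here. A minimal
userspace sketch that prints just the SACK/DSACK counters discussed above::

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          char names[4096], values[4096];
          FILE *f = fopen("/proc/net/netstat", "r");

          if (!f)
                  return 1;
          /* a header line ("TcpExt: SyncookiesSent ...") is always
           * followed by the matching value line ("TcpExt: 0 ...") */
          while (fgets(names, sizeof(names), f) &&
                 fgets(values, sizeof(values), f)) {
                  char *np, *vp;
                  char *n = strtok_r(names, " \n", &np);
                  char *v = strtok_r(values, " \n", &vp);

                  if (!n || !v || strcmp(n, "TcpExt:"))
                          continue;
                  /* walk the name and value lines in lockstep */
                  while ((n = strtok_r(NULL, " \n", &np)) &&
                         (v = strtok_r(NULL, " \n", &vp)))
                          if (strstr(n, "TCPSack") || strstr(n, "TCPDSACK"))
                                  printf("TcpExt%s = %s\n", n, v);
          }
          fclose(f);
          return 0;
  }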


@ -302,6 +302,7 @@
/* Misc instructions for BPF compiler */
#define PPC_INST_LBZ 0x88000000
#define PPC_INST_LD 0xe8000000
#define PPC_INST_LDX 0x7c00002a
#define PPC_INST_LHZ 0xa0000000
#define PPC_INST_LWZ 0x80000000
#define PPC_INST_LHBRX 0x7c00062c
@ -309,6 +310,7 @@
#define PPC_INST_STB 0x98000000
#define PPC_INST_STH 0xb0000000
#define PPC_INST_STD 0xf8000000
#define PPC_INST_STDX 0x7c00012a
#define PPC_INST_STDU 0xf8000001
#define PPC_INST_STW 0x90000000
#define PPC_INST_STWU 0x94000000


@ -51,6 +51,8 @@
#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
#define PPC_STDX(r, base, b) EMIT(PPC_INST_STDX | ___PPC_RS(r) | \
___PPC_RA(base) | ___PPC_RB(b))
#define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
#define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \
@ -65,7 +67,9 @@
#define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
___PPC_RA(base) | ((i) & 0xfffc))
#define PPC_LDX(r, base, b) EMIT(PPC_INST_LDX | ___PPC_RT(r) | \
___PPC_RA(base) | ___PPC_RB(b))
#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \
@ -85,17 +89,6 @@
___PPC_RA(a) | ___PPC_RB(b))
#define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \
___PPC_RA(a) | ___PPC_RB(b))
#ifdef CONFIG_PPC64
#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
#else
#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
#endif
#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
#define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \


@ -122,6 +122,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
#endif
#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
#define SEEN_DATAREF 0x10000 /* might call external helpers */
#define SEEN_XREG 0x20000 /* X reg is used */
#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary


@ -68,6 +68,26 @@ static const int b2p[] = {
/* PPC NVR range -- update this if we ever use NVRs below r27 */
#define BPF_PPC_NVR_MIN 27
/*
* WARNING: These can use TMP_REG_2 if the offset is not at word boundary,
* so ensure that it isn't in use already.
*/
#define PPC_BPF_LL(r, base, i) do { \
if ((i) % 4) { \
PPC_LI(b2p[TMP_REG_2], (i)); \
PPC_LDX(r, base, b2p[TMP_REG_2]); \
} else \
PPC_LD(r, base, i); \
} while(0)
#define PPC_BPF_STL(r, base, i) do { \
if ((i) % 4) { \
PPC_LI(b2p[TMP_REG_2], (i)); \
PPC_STDX(r, base, b2p[TMP_REG_2]); \
} else \
PPC_STD(r, base, i); \
} while(0)
#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
#define SEEN_FUNC 0x1000 /* might call external helpers */
#define SEEN_STACK 0x2000 /* uses BPF stack */
#define SEEN_TAILCALL 0x4000 /* uses tail calls */
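
Background for the (i) % 4 test above (my gloss, not spelled out in the
patch): ld and std are DS-form powerpc instructions, so the low two bits of
the displacement field are part of the opcode and only word-aligned offsets
within the signed 16-bit range can be encoded directly; any other offset has
to be put in a register and used with the indexed ldx/stdx forms, which is
what the new PPC_LDX/PPC_STDX paths do. A one-line sketch of that
encodability check:

  /* Sketch: an offset fits the immediate (DS) form only if its low two
   * bits are clear and it fits in a signed 16-bit displacement. */
  static int fits_ds_form(long off)
  {
          return (off & 3) == 0 && off >= -32768 && off <= 32767;
  }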


@ -252,7 +252,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
*/
PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
PPC_BCC(COND_GT, out);
@ -265,7 +265,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
/* prog = array->ptrs[index]; */
PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
/*
* if (prog == NULL)
@ -275,7 +275,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
PPC_BCC(COND_EQ, out);
/* goto *(prog->bpf_func + prologue_size); */
PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
/* skip past the function descriptor */
PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
@ -606,7 +606,7 @@ bpf_alu32_trunc:
* the instructions generated will remain the
* same across all passes
*/
PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
break;
@ -662,7 +662,7 @@ emit_clear:
PPC_LI32(b2p[TMP_REG_1], imm);
src_reg = b2p[TMP_REG_1];
}
PPC_STD(src_reg, dst_reg, off);
PPC_BPF_STL(src_reg, dst_reg, off);
break;
/*
@ -709,7 +709,7 @@ emit_clear:
break;
/* dst = *(u64 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_DW:
PPC_LD(dst_reg, src_reg, off);
PPC_BPF_LL(dst_reg, src_reg, off);
break;
/*


@ -4365,7 +4365,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
if (m->clock2)
test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
if (ent->device == 0xB410) {
if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);


@ -216,8 +216,8 @@ config GENEVE
config GTP
tristate "GPRS Tunneling Protocol datapath (GTP-U)"
depends on INET && NET_UDP_TUNNEL
select NET_IP_TUNNEL
depends on INET
select NET_UDP_TUNNEL
---help---
This allows one to create gtp virtual interfaces that provide
the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol


@ -481,6 +481,155 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
}
static u32
qca8k_port_to_phy(int port)
{
/* From Andrew Lunn:
* Port 0 has no internal phy.
* Port 1 has an internal PHY at MDIO address 0.
* Port 2 has an internal PHY at MDIO address 1.
* ...
* Port 5 has an internal PHY at MDIO address 4.
* Port 6 has no internal PHY.
*/
return port - 1;
}
static int
qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data)
{
u32 phy, val;
if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
return -EINVAL;
/* the caller is responsible for not passing bad ports,
* but we still would like to make spills impossible.
*/
phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
QCA8K_MDIO_MASTER_DATA(data);
qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
return qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
}
static int
qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum)
{
u32 phy, val;
if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
return -EINVAL;
/* the caller is responsible for not passing bad ports,
* but we still would like to make spills impossible.
*/
phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
QCA8K_MDIO_MASTER_REG_ADDR(regnum);
qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
if (qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY))
return -ETIMEDOUT;
val = (qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL) &
QCA8K_MDIO_MASTER_DATA_MASK);
return val;
}
static int
qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
{
struct qca8k_priv *priv = ds->priv;
return qca8k_mdio_write(priv, port, regnum, data);
}
static int
qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
{
struct qca8k_priv *priv = ds->priv;
int ret;
ret = qca8k_mdio_read(priv, port, regnum);
if (ret < 0)
return 0xffff;
return ret;
}
static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
struct device_node *ports, *port;
int err;
ports = of_get_child_by_name(priv->dev->of_node, "ports");
if (!ports)
return -EINVAL;
for_each_available_child_of_node(ports, port) {
err = of_property_read_u32(port, "reg", &reg);
if (err)
return err;
if (!dsa_is_user_port(priv->ds, reg))
continue;
if (of_property_read_bool(port, "phy-handle"))
external_mdio_mask |= BIT(reg);
else
internal_mdio_mask |= BIT(reg);
}
if (!external_mdio_mask && !internal_mdio_mask) {
dev_err(priv->dev, "no PHYs are defined.\n");
return -EINVAL;
}
/* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
* the MDIO_MASTER register also _disconnects_ the external MDC
* passthrough to the internal PHYs. It's not possible to use both
* configurations at the same time!
*
* Because this came up during the review process:
* If the external mdio-bus driver is capable of magically disabling
* the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
* accessors for the time being, it would be possible to pull this
* off.
*/
if (!!external_mdio_mask && !!internal_mdio_mask) {
dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
return -EINVAL;
}
if (external_mdio_mask) {
/* Make sure to disable the internal mdio bus in cases
* a dt-overlay and driver reload changed the configuration
*/
qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_EN);
return 0;
}
priv->ops.phy_read = qca8k_phy_read;
priv->ops.phy_write = qca8k_phy_write;
return 0;
}
static int
qca8k_setup(struct dsa_switch *ds)
{
@ -502,6 +651,10 @@ qca8k_setup(struct dsa_switch *ds)
if (IS_ERR(priv->regmap))
pr_warn("regmap initialization failed");
ret = qca8k_setup_mdio_bus(priv);
if (ret)
return ret;
/* Initialize CPU port pad mode (xMII type, delays...) */
phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn);
if (phy_mode < 0) {
@ -624,22 +777,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)
qca8k_port_set_status(priv, port, 1);
}
static int
qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
return mdiobus_read(priv->bus, phy, regnum);
}
static int
qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
return mdiobus_write(priv->bus, phy, regnum, val);
}
static void
qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
{
@ -879,8 +1016,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.setup = qca8k_setup,
.adjust_link = qca8k_adjust_link,
.get_strings = qca8k_get_strings,
.phy_read = qca8k_phy_read,
.phy_write = qca8k_phy_write,
.get_ethtool_stats = qca8k_get_ethtool_stats,
.get_sset_count = qca8k_get_sset_count,
.get_mac_eee = qca8k_get_mac_eee,
@ -923,7 +1058,8 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
return -ENOMEM;
priv->ds->priv = priv;
priv->ds->ops = &qca8k_switch_ops;
priv->ops = qca8k_switch_ops;
priv->ds->ops = &priv->ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(&mdiodev->dev, priv);


@ -49,6 +49,18 @@
#define QCA8K_MIB_FLUSH BIT(24)
#define QCA8K_MIB_CPU_KEEP BIT(20)
#define QCA8K_MIB_BUSY BIT(17)
#define QCA8K_MDIO_MASTER_CTRL 0x3c
#define QCA8K_MDIO_MASTER_BUSY BIT(31)
#define QCA8K_MDIO_MASTER_EN BIT(30)
#define QCA8K_MDIO_MASTER_READ BIT(27)
#define QCA8K_MDIO_MASTER_WRITE 0
#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26)
#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21)
#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16)
#define QCA8K_MDIO_MASTER_DATA(x) (x)
#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0)
#define QCA8K_MDIO_MASTER_MAX_PORTS 5
#define QCA8K_MDIO_MASTER_MAX_REG 32
#define QCA8K_GOL_MAC_ADDR0 0x60
#define QCA8K_GOL_MAC_ADDR1 0x64
#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
@ -169,6 +181,7 @@ struct qca8k_priv {
struct dsa_switch *ds;
struct mutex reg_mutex;
struct device *dev;
struct dsa_switch_ops ops;
};
struct qca8k_mib_desc {


@ -1521,7 +1521,7 @@ static void update_stats(int ioaddr, struct net_device *dev)
static void set_rx_mode(struct net_device *dev)
{
int ioaddr = dev->base_addr;
short new_mode;
unsigned short new_mode;
if (dev->flags & IFF_PROMISC) {
if (corkscrew_debug > 3)


@ -153,8 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
static void dayna_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
static void slow_sane_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr, int ring_page);
@ -233,19 +231,26 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
static enum mac8390_access mac8390_testio(unsigned long membase)
{
unsigned long outdata = 0xA5A0B5B0;
unsigned long indata = 0x00000000;
u32 outdata = 0xA5A0B5B0;
u32 indata = 0;
/* Try writing 32 bits */
memcpy_toio((void __iomem *)membase, &outdata, 4);
/* Now compare them */
if (memcmp_withio(&outdata, membase, 4) == 0)
nubus_writel(outdata, membase);
/* Now read it back */
indata = nubus_readl(membase);
if (outdata == indata)
return ACCESS_32;
outdata = 0xC5C0D5D0;
indata = 0;
/* Write 16 bit output */
word_memcpy_tocard(membase, &outdata, 4);
/* Now read it back */
word_memcpy_fromcard(&indata, membase, 4);
if (outdata == indata)
return ACCESS_16;
return ACCESS_UNKNOWN;
}


@ -186,11 +186,12 @@ static void aq_rx_checksum(struct aq_ring_s *self,
}
if (buff->is_ip_cso) {
__skb_incr_checksum_unnecessary(skb);
if (buff->is_udp_cso || buff->is_tcp_cso)
__skb_incr_checksum_unnecessary(skb);
} else {
skb->ip_summed = CHECKSUM_NONE;
}
if (buff->is_udp_cso || buff->is_tcp_cso)
__skb_incr_checksum_unnecessary(skb);
}
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))


@ -3370,14 +3370,20 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
*hclk = devm_clk_get(&pdev->dev, "hclk");
}
if (IS_ERR(*pclk)) {
if (IS_ERR_OR_NULL(*pclk)) {
err = PTR_ERR(*pclk);
if (!err)
err = -ENODEV;
dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
return err;
}
if (IS_ERR(*hclk)) {
if (IS_ERR_OR_NULL(*hclk)) {
err = PTR_ERR(*hclk);
if (!err)
err = -ENODEV;
dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
return err;
}


@ -2620,7 +2620,7 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
}
/* should never happen! */
BUG_ON(1);
BUG();
return NULL;
}


@ -476,7 +476,7 @@ static inline int get_buf_size(struct adapter *adapter,
break;
default:
BUG_ON(1);
BUG();
}
return buf_size;


@ -815,6 +815,14 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
*/
queue_mapping = skb_get_queue_mapping(skb);
fq = &priv->fq[queue_mapping];
fd_len = dpaa2_fd_get_len(&fd);
nq = netdev_get_tx_queue(net_dev, queue_mapping);
netdev_tx_sent_queue(nq, fd_len);
/* Everything that happens after this enqueue might race with
* the Tx confirmation callback for this frame
*/
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
err = priv->enqueue(priv, fq, &fd, 0);
if (err != -EBUSY)
@ -825,13 +833,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
free_tx_fd(priv, fq, &fd, false);
netdev_tx_completed_queue(nq, 1, fd_len);
} else {
fd_len = dpaa2_fd_get_len(&fd);
percpu_stats->tx_packets++;
percpu_stats->tx_bytes += fd_len;
nq = netdev_get_tx_queue(net_dev, queue_mapping);
netdev_tx_sent_queue(nq, fd_len);
}
return NETDEV_TX_OK;
@ -1817,7 +1822,7 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
dpaa2_fd_set_format(&fd, dpaa2_fd_single);
dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
fq = &priv->fq[smp_processor_id()];
fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
err = priv->enqueue(priv, fq, &fd, 0);
if (err != -EBUSY)
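
Hedged context for the BQL reordering above (not part of the patch text):
byte queue limits require the sent-bytes accounting to be in place before the
Tx confirmation path can possibly run, so netdev_tx_sent_queue() must be
called before the frame is handed to hardware, and the accounting must be
rolled back with netdev_tx_completed_queue() when the enqueue fails. A
minimal kernel-style sketch of the pattern, where hw_enqueue() is a
hypothetical stand-in for the driver's real enqueue:

  #include <linux/netdevice.h>
  #include <linux/skbuff.h>

  int hw_enqueue(struct sk_buff *skb);    /* hypothetical driver hook */

  static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
  {
          struct netdev_queue *nq = netdev_get_tx_queue(dev, 0);
          unsigned int len = skb->len;

          netdev_tx_sent_queue(nq, len);  /* account before hw owns the frame */
          if (hw_enqueue(skb) < 0) {
                  /* frame never left: undo the BQL accounting */
                  netdev_tx_completed_queue(nq, 1, len);
                  dev_kfree_skb_any(skb);
          }
          return NETDEV_TX_OK;
  }

  /* The Tx confirmation handler later calls
   * netdev_tx_completed_queue(nq, frames, bytes) for frames actually sent. */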


@ -22,6 +22,7 @@
#include "hns3_enet.h"
#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
@ -1079,7 +1080,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc_cb->length = size;
frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET;
frag_buf_num = hns3_tx_bd_count(size);
sizeoflast = size & HNS3_TX_LAST_SIZE_M;
sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
@ -1124,14 +1125,13 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
int i;
size = skb_headlen(skb);
buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET;
buf_num = hns3_tx_bd_count(size);
frag_num = skb_shinfo(skb)->nr_frags;
for (i = 0; i < frag_num; i++) {
frag = &skb_shinfo(skb)->frags[i];
size = skb_frag_size(frag);
bdnum_for_frag = (size + HNS3_MAX_BD_SIZE - 1) >>
HNS3_MAX_BD_SIZE_OFFSET;
bdnum_for_frag = hns3_tx_bd_count(size);
if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
return -ENOMEM;
@ -1139,8 +1139,7 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
}
if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) >>
HNS3_MAX_BD_SIZE_OFFSET;
buf_num = hns3_tx_bd_count(skb->len);
if (ring_space(ring) < buf_num)
return -EBUSY;
/* manual split the send packet */
@ -1169,7 +1168,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
buf_num = skb_shinfo(skb)->nr_frags + 1;
if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
buf_num = hns3_tx_bd_count(skb->len);
if (ring_space(ring) < buf_num)
return -EBUSY;
/* manual split the send packet */


@ -193,7 +193,6 @@ enum hns3_nic_state {
#define HNS3_VECTOR_INITED 1
#define HNS3_MAX_BD_SIZE 65535
#define HNS3_MAX_BD_SIZE_OFFSET 16
#define HNS3_MAX_BD_PER_FRAG 8
#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS


@ -3160,6 +3160,7 @@ static ssize_t ehea_probe_port(struct device *dev,
if (ehea_add_adapter_mr(adapter)) {
pr_err("creating MR failed\n");
of_node_put(eth_dn);
return -EIO;
}


@ -113,7 +113,7 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
return 0;
default:
/* Do not consider thresholds for zero temperature. */
if (!MLXSW_REG_MTMP_TEMP_TO_MC(module_temp)) {
if (MLXSW_REG_MTMP_TEMP_TO_MC(module_temp) == 0) {
*temp = 0;
return 0;
}


@ -142,6 +142,12 @@ struct ks8851_net {
static int msg_enable;
/* SPI frame opcodes */
#define KS_SPIOP_RD (0x00)
#define KS_SPIOP_WR (0x40)
#define KS_SPIOP_RXFIFO (0x80)
#define KS_SPIOP_TXFIFO (0xC0)
/* shift for byte-enable data */
#define BYTE_EN(_x) ((_x) << 2)
@ -535,9 +541,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
/* set dma read address */
ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);
/* start the packet dma process, and set auto-dequeue rx */
ks8851_wrreg16(ks, KS_RXQCR,
ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
/* start DMA access */
ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
if (rxlen > 4) {
unsigned int rxalign;
@ -568,7 +573,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
}
}
ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
/* end DMA access and dequeue packet */
ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF);
}
}
@ -785,6 +791,15 @@ static void ks8851_tx_work(struct work_struct *work)
static int ks8851_net_open(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
int ret;
ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
dev->name, ks);
if (ret < 0) {
netdev_err(dev, "failed to get irq\n");
return ret;
}
/* lock the card, even if we may not actually be doing anything
* else at the moment */
@ -849,6 +864,7 @@ static int ks8851_net_open(struct net_device *dev)
netif_dbg(ks, ifup, ks->netdev, "network device up\n");
mutex_unlock(&ks->lock);
mii_check_link(&ks->mii);
return 0;
}
@ -899,6 +915,8 @@ static int ks8851_net_stop(struct net_device *dev)
dev_kfree_skb(txb);
}
free_irq(dev->irq, ks);
return 0;
}
@ -1508,6 +1526,7 @@ static int ks8851_probe(struct spi_device *spi)
spi_set_drvdata(spi, ks);
netif_carrier_off(ks->netdev);
ndev->if_port = IF_PORT_100BASET;
ndev->netdev_ops = &ks8851_netdev_ops;
ndev->irq = spi->irq;
@ -1529,14 +1548,6 @@ static int ks8851_probe(struct spi_device *spi)
ks8851_read_selftest(ks);
ks8851_init_mac(ks);
ret = request_threaded_irq(spi->irq, NULL, ks8851_irq,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
ndev->name, ks);
if (ret < 0) {
dev_err(&spi->dev, "failed to get irq\n");
goto err_irq;
}
ret = register_netdev(ndev);
if (ret) {
dev_err(&spi->dev, "failed to register network device\n");
@ -1549,14 +1560,10 @@ static int ks8851_probe(struct spi_device *spi)
return 0;
err_netdev:
free_irq(ndev->irq, ks);
err_irq:
err_id:
if (gpio_is_valid(gpio))
gpio_set_value(gpio, 0);
err_id:
regulator_disable(ks->vdd_reg);
err_reg:
regulator_disable(ks->vdd_io);
@ -1574,7 +1581,6 @@ static int ks8851_remove(struct spi_device *spi)
dev_info(&spi->dev, "remove\n");
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
if (gpio_is_valid(priv->gpio))
gpio_set_value(priv->gpio, 0);
regulator_disable(priv->vdd_reg);


@ -11,9 +11,15 @@
*/
#define KS_CCR 0x08
#define CCR_LE (1 << 10) /* KSZ8851-16MLL */
#define CCR_EEPROM (1 << 9)
#define CCR_SPI (1 << 8)
#define CCR_32PIN (1 << 0)
#define CCR_SPI (1 << 8) /* KSZ8851SNL */
#define CCR_8BIT (1 << 7) /* KSZ8851-16MLL */
#define CCR_16BIT (1 << 6) /* KSZ8851-16MLL */
#define CCR_32BIT (1 << 5) /* KSZ8851-16MLL */
#define CCR_SHARED (1 << 4) /* KSZ8851-16MLL */
#define CCR_48PIN (1 << 1) /* KSZ8851-16MLL */
#define CCR_32PIN (1 << 0) /* KSZ8851SNL */
/* MAC address registers */
#define KS_MAR(_m) (0x15 - (_m))
@ -112,13 +118,13 @@
#define RXCR1_RXE (1 << 0)
#define KS_RXCR2 0x76
#define RXCR2_SRDBL_MASK (0x7 << 5)
#define RXCR2_SRDBL_SHIFT (5)
#define RXCR2_SRDBL_4B (0x0 << 5)
#define RXCR2_SRDBL_8B (0x1 << 5)
#define RXCR2_SRDBL_16B (0x2 << 5)
#define RXCR2_SRDBL_32B (0x3 << 5)
#define RXCR2_SRDBL_FRAME (0x4 << 5)
#define RXCR2_SRDBL_MASK (0x7 << 5) /* KSZ8851SNL */
#define RXCR2_SRDBL_SHIFT (5) /* KSZ8851SNL */
#define RXCR2_SRDBL_4B (0x0 << 5) /* KSZ8851SNL */
#define RXCR2_SRDBL_8B (0x1 << 5) /* KSZ8851SNL */
#define RXCR2_SRDBL_16B (0x2 << 5) /* KSZ8851SNL */
#define RXCR2_SRDBL_32B (0x3 << 5) /* KSZ8851SNL */
#define RXCR2_SRDBL_FRAME (0x4 << 5) /* KSZ8851SNL */
#define RXCR2_IUFFP (1 << 4)
#define RXCR2_RXIUFCEZ (1 << 3)
#define RXCR2_UDPLFE (1 << 2)
@ -143,8 +149,10 @@
#define RXFSHR_RXCE (1 << 0)
#define KS_RXFHBCR 0x7E
#define RXFHBCR_CNT_MASK (0xfff << 0)
#define KS_TXQCR 0x80
#define TXQCR_AETFE (1 << 2)
#define TXQCR_AETFE (1 << 2) /* KSZ8851SNL */
#define TXQCR_TXQMAM (1 << 1)
#define TXQCR_METFE (1 << 0)
@ -167,6 +175,10 @@
#define KS_RXFDPR 0x86
#define RXFDPR_RXFPAI (1 << 14)
#define RXFDPR_WST (1 << 12) /* KSZ8851-16MLL */
#define RXFDPR_EMS (1 << 11) /* KSZ8851-16MLL */
#define RXFDPR_RXFP_MASK (0x7ff << 0)
#define RXFDPR_RXFP_SHIFT (0)
#define KS_RXDTTR 0x8C
#define KS_RXDBCTR 0x8E
@ -184,7 +196,7 @@
#define IRQ_RXMPDI (1 << 4)
#define IRQ_LDI (1 << 3)
#define IRQ_EDI (1 << 2)
#define IRQ_SPIBEI (1 << 1)
#define IRQ_SPIBEI (1 << 1) /* KSZ8851SNL */
#define IRQ_DEDI (1 << 0)
#define KS_RXFCTR 0x9C
@ -257,42 +269,37 @@
#define KS_P1ANLPR 0xEE
#define KS_P1SCLMD 0xF4
#define P1SCLMD_LEDOFF (1 << 15)
#define P1SCLMD_TXIDS (1 << 14)
#define P1SCLMD_RESTARTAN (1 << 13)
#define P1SCLMD_DISAUTOMDIX (1 << 10)
#define P1SCLMD_FORCEMDIX (1 << 9)
#define P1SCLMD_AUTONEGEN (1 << 7)
#define P1SCLMD_FORCE100 (1 << 6)
#define P1SCLMD_FORCEFDX (1 << 5)
#define P1SCLMD_ADV_FLOW (1 << 4)
#define P1SCLMD_ADV_100BT_FDX (1 << 3)
#define P1SCLMD_ADV_100BT_HDX (1 << 2)
#define P1SCLMD_ADV_10BT_FDX (1 << 1)
#define P1SCLMD_ADV_10BT_HDX (1 << 0)
#define KS_P1CR 0xF6
#define P1CR_HP_MDIX (1 << 15)
#define P1CR_REV_POL (1 << 13)
#define P1CR_OP_100M (1 << 10)
#define P1CR_OP_FDX (1 << 9)
#define P1CR_OP_MDI (1 << 7)
#define P1CR_AN_DONE (1 << 6)
#define P1CR_LINK_GOOD (1 << 5)
#define P1CR_PNTR_FLOW (1 << 4)
#define P1CR_PNTR_100BT_FDX (1 << 3)
#define P1CR_PNTR_100BT_HDX (1 << 2)
#define P1CR_PNTR_10BT_FDX (1 << 1)
#define P1CR_PNTR_10BT_HDX (1 << 0)
#define P1CR_LEDOFF (1 << 15)
#define P1CR_TXIDS (1 << 14)
#define P1CR_RESTARTAN (1 << 13)
#define P1CR_DISAUTOMDIX (1 << 10)
#define P1CR_FORCEMDIX (1 << 9)
#define P1CR_AUTONEGEN (1 << 7)
#define P1CR_FORCE100 (1 << 6)
#define P1CR_FORCEFDX (1 << 5)
#define P1CR_ADV_FLOW (1 << 4)
#define P1CR_ADV_100BT_FDX (1 << 3)
#define P1CR_ADV_100BT_HDX (1 << 2)
#define P1CR_ADV_10BT_FDX (1 << 1)
#define P1CR_ADV_10BT_HDX (1 << 0)
#define KS_P1SR 0xF8
#define P1SR_HP_MDIX (1 << 15)
#define P1SR_REV_POL (1 << 13)
#define P1SR_OP_100M (1 << 10)
#define P1SR_OP_FDX (1 << 9)
#define P1SR_OP_MDI (1 << 7)
#define P1SR_AN_DONE (1 << 6)
#define P1SR_LINK_GOOD (1 << 5)
#define P1SR_PNTR_FLOW (1 << 4)
#define P1SR_PNTR_100BT_FDX (1 << 3)
#define P1SR_PNTR_100BT_HDX (1 << 2)
#define P1SR_PNTR_10BT_FDX (1 << 1)
#define P1SR_PNTR_10BT_HDX (1 << 0)
/* TX Frame control */
#define TXFR_TXIC (1 << 15)
#define TXFR_TXFID_MASK (0x3f << 0)
#define TXFR_TXFID_SHIFT (0)
/* SPI frame opcodes */
#define KS_SPIOP_RD (0x00)
#define KS_SPIOP_WR (0x40)
#define KS_SPIOP_RXFIFO (0x80)
#define KS_SPIOP_TXFIFO (0xC0)


@ -40,6 +40,8 @@
#include <linux/of_device.h>
#include <linux/of_net.h>
#include "ks8851.h"
#define DRV_NAME "ks8851_mll"
static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
@ -48,319 +50,10 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
#define TX_BUF_SIZE 2000
#define RX_BUF_SIZE 2000
#define KS_CCR 0x08
#define CCR_EEPROM (1 << 9)
#define CCR_SPI (1 << 8)
#define CCR_8BIT (1 << 7)
#define CCR_16BIT (1 << 6)
#define CCR_32BIT (1 << 5)
#define CCR_SHARED (1 << 4)
#define CCR_32PIN (1 << 0)
/* MAC address registers */
#define KS_MARL 0x10
#define KS_MARM 0x12
#define KS_MARH 0x14
#define KS_OBCR 0x20
#define OBCR_ODS_16MA (1 << 6)
#define KS_EEPCR 0x22
#define EEPCR_EESA (1 << 4)
#define EEPCR_EESB (1 << 3)
#define EEPCR_EEDO (1 << 2)
#define EEPCR_EESCK (1 << 1)
#define EEPCR_EECS (1 << 0)
#define KS_MBIR 0x24
#define MBIR_TXMBF (1 << 12)
#define MBIR_TXMBFA (1 << 11)
#define MBIR_RXMBF (1 << 4)
#define MBIR_RXMBFA (1 << 3)
#define KS_GRR 0x26
#define GRR_QMU (1 << 1)
#define GRR_GSR (1 << 0)
#define KS_WFCR 0x2A
#define WFCR_MPRXE (1 << 7)
#define WFCR_WF3E (1 << 3)
#define WFCR_WF2E (1 << 2)
#define WFCR_WF1E (1 << 1)
#define WFCR_WF0E (1 << 0)
#define KS_WF0CRC0 0x30
#define KS_WF0CRC1 0x32
#define KS_WF0BM0 0x34
#define KS_WF0BM1 0x36
#define KS_WF0BM2 0x38
#define KS_WF0BM3 0x3A
#define KS_WF1CRC0 0x40
#define KS_WF1CRC1 0x42
#define KS_WF1BM0 0x44
#define KS_WF1BM1 0x46
#define KS_WF1BM2 0x48
#define KS_WF1BM3 0x4A
#define KS_WF2CRC0 0x50
#define KS_WF2CRC1 0x52
#define KS_WF2BM0 0x54
#define KS_WF2BM1 0x56
#define KS_WF2BM2 0x58
#define KS_WF2BM3 0x5A
#define KS_WF3CRC0 0x60
#define KS_WF3CRC1 0x62
#define KS_WF3BM0 0x64
#define KS_WF3BM1 0x66
#define KS_WF3BM2 0x68
#define KS_WF3BM3 0x6A
#define KS_TXCR 0x70
#define TXCR_TCGICMP (1 << 8)
#define TXCR_TCGUDP (1 << 7)
#define TXCR_TCGTCP (1 << 6)
#define TXCR_TCGIP (1 << 5)
#define TXCR_FTXQ (1 << 4)
#define TXCR_TXFCE (1 << 3)
#define TXCR_TXPE (1 << 2)
#define TXCR_TXCRC (1 << 1)
#define TXCR_TXE (1 << 0)
#define KS_TXSR 0x72
#define TXSR_TXLC (1 << 13)
#define TXSR_TXMC (1 << 12)
#define TXSR_TXFID_MASK (0x3f << 0)
#define TXSR_TXFID_SHIFT (0)
#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
#define KS_RXCR1 0x74
#define RXCR1_FRXQ (1 << 15)
#define RXCR1_RXUDPFCC (1 << 14)
#define RXCR1_RXTCPFCC (1 << 13)
#define RXCR1_RXIPFCC (1 << 12)
#define RXCR1_RXPAFMA (1 << 11)
#define RXCR1_RXFCE (1 << 10)
#define RXCR1_RXEFE (1 << 9)
#define RXCR1_RXMAFMA (1 << 8)
#define RXCR1_RXBE (1 << 7)
#define RXCR1_RXME (1 << 6)
#define RXCR1_RXUE (1 << 5)
#define RXCR1_RXAE (1 << 4)
#define RXCR1_RXINVF (1 << 1)
#define RXCR1_RXE (1 << 0)
#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
RXCR1_RXMAFMA | RXCR1_RXPAFMA)
#define KS_RXCR2 0x76
#define RXCR2_SRDBL_MASK (0x7 << 5)
#define RXCR2_SRDBL_SHIFT (5)
#define RXCR2_SRDBL_4B (0x0 << 5)
#define RXCR2_SRDBL_8B (0x1 << 5)
#define RXCR2_SRDBL_16B (0x2 << 5)
#define RXCR2_SRDBL_32B (0x3 << 5)
/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
#define RXCR2_IUFFP (1 << 4)
#define RXCR2_RXIUFCEZ (1 << 3)
#define RXCR2_UDPLFE (1 << 2)
#define RXCR2_RXICMPFCC (1 << 1)
#define RXCR2_RXSAF (1 << 0)
#define KS_TXMIR 0x78
#define KS_RXFHSR 0x7C
#define RXFSHR_RXFV (1 << 15)
#define RXFSHR_RXICMPFCS (1 << 13)
#define RXFSHR_RXIPFCS (1 << 12)
#define RXFSHR_RXTCPFCS (1 << 11)
#define RXFSHR_RXUDPFCS (1 << 10)
#define RXFSHR_RXBF (1 << 7)
#define RXFSHR_RXMF (1 << 6)
#define RXFSHR_RXUF (1 << 5)
#define RXFSHR_RXMR (1 << 4)
#define RXFSHR_RXFT (1 << 3)
#define RXFSHR_RXFTL (1 << 2)
#define RXFSHR_RXRF (1 << 1)
#define RXFSHR_RXCE (1 << 0)
#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
RXFSHR_RXFTL | RXFSHR_RXMR |\
RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
RXFSHR_RXTCPFCS)
#define KS_RXFHBCR 0x7E
#define RXFHBCR_CNT_MASK 0x0FFF
#define KS_TXQCR 0x80
#define TXQCR_AETFE (1 << 2)
#define TXQCR_TXQMAM (1 << 1)
#define TXQCR_METFE (1 << 0)
#define KS_RXQCR 0x82
#define RXQCR_RXDTTS (1 << 12)
#define RXQCR_RXDBCTS (1 << 11)
#define RXQCR_RXFCTS (1 << 10)
#define RXQCR_RXIPHTOE (1 << 9)
#define RXQCR_RXDTTE (1 << 7)
#define RXQCR_RXDBCTE (1 << 6)
#define RXQCR_RXFCTE (1 << 5)
#define RXQCR_ADRFE (1 << 4)
#define RXQCR_SDA (1 << 3)
#define RXQCR_RRXEF (1 << 0)
#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
#define KS_TXFDPR 0x84
#define TXFDPR_TXFPAI (1 << 14)
#define TXFDPR_TXFP_MASK (0x7ff << 0)
#define TXFDPR_TXFP_SHIFT (0)
#define KS_RXFDPR 0x86
#define RXFDPR_RXFPAI (1 << 14)
#define KS_RXDTTR 0x8C
#define KS_RXDBCTR 0x8E
#define KS_IER 0x90
#define KS_ISR 0x92
#define IRQ_LCI (1 << 15)
#define IRQ_TXI (1 << 14)
#define IRQ_RXI (1 << 13)
#define IRQ_RXOI (1 << 11)
#define IRQ_TXPSI (1 << 9)
#define IRQ_RXPSI (1 << 8)
#define IRQ_TXSAI (1 << 6)
#define IRQ_RXWFDI (1 << 5)
#define IRQ_RXMPDI (1 << 4)
#define IRQ_LDI (1 << 3)
#define IRQ_EDI (1 << 2)
#define IRQ_SPIBEI (1 << 1)
#define IRQ_DEDI (1 << 0)
#define KS_RXFCTR 0x9C
#define RXFCTR_THRESHOLD_MASK 0x00FF
#define KS_RXFC 0x9D
#define RXFCTR_RXFC_MASK (0xff << 8)
#define RXFCTR_RXFC_SHIFT (8)
#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
#define RXFCTR_RXFCT_MASK (0xff << 0)
#define RXFCTR_RXFCT_SHIFT (0)
#define KS_TXNTFSR 0x9E
#define KS_MAHTR0 0xA0
#define KS_MAHTR1 0xA2
#define KS_MAHTR2 0xA4
#define KS_MAHTR3 0xA6
#define KS_FCLWR 0xB0
#define KS_FCHWR 0xB2
#define KS_FCOWR 0xB4
#define KS_CIDER 0xC0
#define CIDER_ID 0x8870
#define CIDER_REV_MASK (0x7 << 1)
#define CIDER_REV_SHIFT (1)
#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)
#define KS_CGCR 0xC6
#define KS_IACR 0xC8
#define IACR_RDEN (1 << 12)
#define IACR_TSEL_MASK (0x3 << 10)
#define IACR_TSEL_SHIFT (10)
#define IACR_TSEL_MIB (0x3 << 10)
#define IACR_ADDR_MASK (0x1f << 0)
#define IACR_ADDR_SHIFT (0)
#define KS_IADLR 0xD0
#define KS_IAHDR 0xD2
#define KS_PMECR 0xD4
#define PMECR_PME_DELAY (1 << 14)
#define PMECR_PME_POL (1 << 12)
#define PMECR_WOL_WAKEUP (1 << 11)
#define PMECR_WOL_MAGICPKT (1 << 10)
#define PMECR_WOL_LINKUP (1 << 9)
#define PMECR_WOL_ENERGY (1 << 8)
#define PMECR_AUTO_WAKE_EN (1 << 7)
#define PMECR_WAKEUP_NORMAL (1 << 6)
#define PMECR_WKEVT_MASK (0xf << 2)
#define PMECR_WKEVT_SHIFT (2)
#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
#define PMECR_WKEVT_ENERGY (0x1 << 2)
#define PMECR_WKEVT_LINK (0x2 << 2)
#define PMECR_WKEVT_MAGICPKT (0x4 << 2)
#define PMECR_WKEVT_FRAME (0x8 << 2)
#define PMECR_PM_MASK (0x3 << 0)
#define PMECR_PM_SHIFT (0)
#define PMECR_PM_NORMAL (0x0 << 0)
#define PMECR_PM_ENERGY (0x1 << 0)
#define PMECR_PM_SOFTDOWN (0x2 << 0)
#define PMECR_PM_POWERSAVE (0x3 << 0)
/* Standard MII PHY data */
#define KS_P1MBCR 0xE4
#define P1MBCR_FORCE_FDX (1 << 8)
#define KS_P1MBSR 0xE6
#define P1MBSR_AN_COMPLETE (1 << 5)
#define P1MBSR_AN_CAPABLE (1 << 3)
#define P1MBSR_LINK_UP (1 << 2)
#define KS_PHY1ILR 0xE8
#define KS_PHY1IHR 0xEA
#define KS_P1ANAR 0xEC
#define KS_P1ANLPR 0xEE
#define KS_P1SCLMD 0xF4
#define P1SCLMD_LEDOFF (1 << 15)
#define P1SCLMD_TXIDS (1 << 14)
#define P1SCLMD_RESTARTAN (1 << 13)
#define P1SCLMD_DISAUTOMDIX (1 << 10)
#define P1SCLMD_FORCEMDIX (1 << 9)
#define P1SCLMD_AUTONEGEN (1 << 7)
#define P1SCLMD_FORCE100 (1 << 6)
#define P1SCLMD_FORCEFDX (1 << 5)
#define P1SCLMD_ADV_FLOW (1 << 4)
#define P1SCLMD_ADV_100BT_FDX (1 << 3)
#define P1SCLMD_ADV_100BT_HDX (1 << 2)
#define P1SCLMD_ADV_10BT_FDX (1 << 1)
#define P1SCLMD_ADV_10BT_HDX (1 << 0)
#define KS_P1CR 0xF6
#define P1CR_HP_MDIX (1 << 15)
#define P1CR_REV_POL (1 << 13)
#define P1CR_OP_100M (1 << 10)
#define P1CR_OP_FDX (1 << 9)
#define P1CR_OP_MDI (1 << 7)
#define P1CR_AN_DONE (1 << 6)
#define P1CR_LINK_GOOD (1 << 5)
#define P1CR_PNTR_FLOW (1 << 4)
#define P1CR_PNTR_100BT_FDX (1 << 3)
#define P1CR_PNTR_100BT_HDX (1 << 2)
#define P1CR_PNTR_10BT_FDX (1 << 1)
#define P1CR_PNTR_10BT_HDX (1 << 0)
/* TX Frame control */
#define TXFR_TXIC (1 << 15)
#define TXFR_TXFID_MASK (0x3f << 0)
#define TXFR_TXFID_SHIFT (0)
#define KS_P1SR 0xF8
#define P1SR_HP_MDIX (1 << 15)
#define P1SR_REV_POL (1 << 13)
#define P1SR_OP_100M (1 << 10)
#define P1SR_OP_FDX (1 << 9)
#define P1SR_OP_MDI (1 << 7)
#define P1SR_AN_DONE (1 << 6)
#define P1SR_LINK_GOOD (1 << 5)
#define P1SR_PNTR_FLOW (1 << 4)
#define P1SR_PNTR_100BT_FDX (1 << 3)
#define P1SR_PNTR_100BT_HDX (1 << 2)
#define P1SR_PNTR_10BT_FDX (1 << 1)
#define P1SR_PNTR_10BT_HDX (1 << 0)
#define ENUM_BUS_NONE 0
#define ENUM_BUS_8BIT 1
#define ENUM_BUS_16BIT 2
@ -1475,7 +1168,7 @@ static void ks_setup(struct ks_net *ks)
ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
/* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);
ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK);
/* Setup RxQ Command Control (RXQCR) */
ks->rc_rxqcr = RXQCR_CMD_CNTL;
@ -1488,7 +1181,7 @@ static void ks_setup(struct ks_net *ks)
*/
w = ks_rdreg16(ks, KS_P1MBCR);
w &= ~P1MBCR_FORCE_FDX;
w &= ~BMCR_FULLDPLX;
ks_wrreg16(ks, KS_P1MBCR, w);
w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
@ -1629,7 +1322,7 @@ static int ks8851_probe(struct platform_device *pdev)
ks_setup_int(ks);
data = ks_rdreg16(ks, KS_OBCR);
ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA);
/* overwriting the default MAC address */
if (pdev->dev.of_node) {


@ -1048,6 +1048,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
if (!skb)
break;
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
adapter->ahw->diag_cnt = 0;


@ -482,7 +482,7 @@ static void hardware_init(struct net_device *dev)
write_reg_high(ioaddr, IMR, ISRh_RxErr);
lp->tx_unit_busy = 0;
lp->pac_cnt_in_tx_buf = 0;
lp->pac_cnt_in_tx_buf = 0;
lp->saved_tx_size = 0;
}


@ -678,6 +678,7 @@ struct rtl8169_private {
struct work_struct work;
} wk;
unsigned irq_enabled:1;
unsigned supports_gmii:1;
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
@ -1293,6 +1294,7 @@ static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
static void rtl_irq_disable(struct rtl8169_private *tp)
{
RTL_W16(tp, IntrMask, 0);
tp->irq_enabled = 0;
}
#define RTL_EVENT_NAPI_RX (RxOK | RxErr)
@ -1301,6 +1303,7 @@ static void rtl_irq_disable(struct rtl8169_private *tp)
static void rtl_irq_enable(struct rtl8169_private *tp)
{
tp->irq_enabled = 1;
RTL_W16(tp, IntrMask, tp->irq_mask);
}
@ -6520,9 +6523,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
struct rtl8169_private *tp = dev_instance;
u16 status = RTL_R16(tp, IntrStatus);
u16 irq_mask = RTL_R16(tp, IntrMask);
if (status == 0xffff || !(status & irq_mask))
if (!tp->irq_enabled || status == 0xffff || !(status & tp->irq_mask))
return IRQ_NONE;
if (unlikely(status & SYSErr)) {
@ -6540,7 +6542,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
}
if (status & RTL_EVENT_NAPI) {
if (status & (RTL_EVENT_NAPI | LinkChg)) {
rtl_irq_disable(tp);
napi_schedule_irqoff(&tp->napi);
}


@ -730,10 +730,10 @@ static u16 sis900_default_phy(struct net_device * net_dev)
status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
/* Link ON & Not select default PHY & not ghost PHY */
if ((status & MII_STAT_LINK) && !default_phy &&
(phy->phy_types != UNKNOWN))
default_phy = phy;
else {
if ((status & MII_STAT_LINK) && !default_phy &&
(phy->phy_types != UNKNOWN)) {
default_phy = phy;
} else {
status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL);
mdio_write(net_dev, phy->phy_addr, MII_CONTROL,
status | MII_CNTL_AUTO | MII_CNTL_ISOLATE);
@ -741,7 +741,7 @@ static u16 sis900_default_phy(struct net_device * net_dev)
phy_home = phy;
else if(phy->phy_types == LAN)
phy_lan = phy;
}
}
}
if (!default_phy && phy_home)


@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
STMMAC_RING_MODE, 1, false, skb->len);
STMMAC_RING_MODE, 0, false, skb->len);
tx_q->tx_skbuff[entry] = NULL;
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
@ -79,7 +79,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
STMMAC_RING_MODE, 1, true, skb->len);
STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
skb->len);
} else {
des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
@ -91,7 +92,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
tx_q->tx_skbuff_dma[entry].is_jumbo = true;
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
STMMAC_RING_MODE, 1, true, skb->len);
STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
skb->len);
}
tx_q->cur_tx = entry;
@ -111,10 +113,11 @@ static unsigned int is_jumbo_frm(int len, int enh_desc)
static void refill_desc3(void *priv_ptr, struct dma_desc *p)
{
struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
struct stmmac_rx_queue *rx_q = priv_ptr;
struct stmmac_priv *priv = rx_q->priv_data;
/* Fill DES3 in case of RING mode */
if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
if (priv->dma_buf_sz == BUF_SIZE_16KiB)
p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
}


@ -3216,14 +3216,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
csum_insertion, priv->mode, 1, last_segment,
skb->len);
/* The own bit must be the latest setting done when prepare the
* descriptor and then barrier is needed to make sure that
* all is coherent before granting the DMA engine.
*/
wmb();
} else {
stmmac_set_tx_owner(priv, first);
}
/* The own bit must be the latest setting done when prepare the
* descriptor and then barrier is needed to make sure that
* all is coherent before granting the DMA engine.
*/
wmb();
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
stmmac_enable_dma_transmission(priv, priv->ioaddr);


@ -3657,12 +3657,16 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
if (ret)
if (ret) {
of_node_put(interfaces);
return ret;
}
ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
if (ret)
if (ret) {
of_node_put(interfaces);
return ret;
}
/* Create network interfaces */
INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);


@ -1575,12 +1575,14 @@ static int axienet_probe(struct platform_device *pdev)
ret = of_address_to_resource(np, 0, &dmares);
if (ret) {
dev_err(&pdev->dev, "unable to get DMA resource\n");
of_node_put(np);
goto free_netdev;
}
lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
ret = PTR_ERR(lp->dma_regs);
of_node_put(np);
goto free_netdev;
}
lp->rx_irq = irq_of_parse_and_map(np, 1);


@ -1268,6 +1268,10 @@ static int adf7242_probe(struct spi_device *spi)
INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
WQ_MEM_RECLAIM);
if (unlikely(!lp->wqueue)) {
ret = -ENOMEM;
goto err_hw_init;
}
ret = adf7242_hw_init(lp);
if (ret)


@ -324,7 +324,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
goto out_err;
}
genlmsg_reply(skb, info);
res = genlmsg_reply(skb, info);
break;
}


@ -7,6 +7,8 @@ menuconfig MDIO_DEVICE
help
MDIO devices and driver infrastructure code.
if MDIO_DEVICE
config MDIO_BUS
tristate
default m if PHYLIB=m
@ -179,6 +181,7 @@ config MDIO_XGENE
APM X-Gene SoC's.
endif
endif
config PHYLINK
tristate


@ -323,6 +323,19 @@ static int bcm54xx_config_init(struct phy_device *phydev)
bcm54xx_phydsp_config(phydev);
/* Encode link speed into LED1 and LED3 pair (green/amber).
* Also flash these two LEDs on activity. This means configuring
* them for MULTICOLOR and encoding link/activity into them.
*/
val = BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_MULTICOLOR1) |
BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_MULTICOLOR1);
bcm_phy_write_shadow(phydev, BCM5482_SHD_LEDS1, val);
val = BCM_LED_MULTICOLOR_IN_PHASE |
BCM5482_SHD_LEDS1_LED1(BCM_LED_MULTICOLOR_LINK_ACT) |
BCM5482_SHD_LEDS1_LED3(BCM_LED_MULTICOLOR_LINK_ACT);
bcm_phy_write_exp(phydev, BCM_EXP_MULTICOLOR, val);
return 0;
}


@ -15,6 +15,8 @@
#include <linux/netdevice.h>
#define DP83822_PHY_ID 0x2000a240
#define DP83825I_PHY_ID 0x2000a150
#define DP83822_DEVADDR 0x1f
#define MII_DP83822_PHYSCR 0x11
@ -304,26 +306,30 @@ static int dp83822_resume(struct phy_device *phydev)
return 0;
}
#define DP83822_PHY_DRIVER(_id, _name) \
{ \
PHY_ID_MATCH_MODEL(_id), \
.name = (_name), \
.features = PHY_BASIC_FEATURES, \
.soft_reset = dp83822_phy_reset, \
.config_init = dp83822_config_init, \
.get_wol = dp83822_get_wol, \
.set_wol = dp83822_set_wol, \
.ack_interrupt = dp83822_ack_interrupt, \
.config_intr = dp83822_config_intr, \
.suspend = dp83822_suspend, \
.resume = dp83822_resume, \
}
static struct phy_driver dp83822_driver[] = {
{
.phy_id = DP83822_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "TI DP83822",
.features = PHY_BASIC_FEATURES,
.config_init = dp83822_config_init,
.soft_reset = dp83822_phy_reset,
.get_wol = dp83822_get_wol,
.set_wol = dp83822_set_wol,
.ack_interrupt = dp83822_ack_interrupt,
.config_intr = dp83822_config_intr,
.suspend = dp83822_suspend,
.resume = dp83822_resume,
},
DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
};
module_phy_driver(dp83822_driver);
static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
{ DP83822_PHY_ID, 0xfffffff0 },
{ DP83825I_PHY_ID, 0xfffffff0 },
{ },
};
MODULE_DEVICE_TABLE(mdio, dp83822_tbl);


@ -201,6 +201,7 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev)
static int meson_gxl_config_intr(struct phy_device *phydev)
{
u16 val;
int ret;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
val = INTSRC_ANEG_PR
@ -213,6 +214,11 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
val = 0;
}
/* Ack any pending IRQ */
ret = meson_gxl_ack_interrupt(phydev);
if (ret)
return ret;
return phy_write(phydev, INTSRC_MASK, val);
}


@ -1831,7 +1831,7 @@ int genphy_soft_reset(struct phy_device *phydev)
{
int ret;
ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
if (ret < 0)
return ret;
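
Context for the one-line change above (my reading; the diff itself doesn't
say): phy_write(MII_BMCR, BMCR_RESET) overwrites the whole BMCR register,
clearing bits such as BMCR_ANENABLE in the same write, while phy_set_bits()
does a read-modify-write that only ORs the reset bit in. Roughly what
phy_set_bits() amounts to (a sketch; the real helper in include/linux/phy.h
is built on phy_modify()):

  static int phy_set_bits_sketch(struct phy_device *phydev, u32 regnum,
                                 u16 bits)
  {
          int val = phy_read(phydev, regnum);

          if (val < 0)
                  return val;
          /* preserve the existing control bits, OR in the new ones */
          return phy_write(phydev, regnum, val | bits);
  }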


@ -1763,9 +1763,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
int skb_xdp = 1;
bool frags = tun_napi_frags_enabled(tfile);
if (!(tun->dev->flags & IFF_UP))
return -EIO;
if (!(tun->flags & IFF_NO_PI)) {
if (len < sizeof(pi))
return -EINVAL;
@ -1867,6 +1864,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
err = skb_copy_datagram_from_iter(skb, 0, from, len);
if (err) {
err = -EFAULT;
drop:
this_cpu_inc(tun->pcpu_stats->rx_dropped);
kfree_skb(skb);
if (frags) {
@ -1874,7 +1873,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
mutex_unlock(&tfile->napi_mutex);
}
return -EFAULT;
return err;
}
}
@ -1958,6 +1957,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
!tfile->detached)
rxhash = __skb_get_hash_symmetric(skb);
rcu_read_lock();
if (unlikely(!(tun->dev->flags & IFF_UP))) {
err = -EIO;
rcu_read_unlock();
goto drop;
}
if (frags) {
/* Exercise flow dissector code path. */
u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
@ -1965,6 +1971,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (unlikely(headlen > skb_headlen(skb))) {
this_cpu_inc(tun->pcpu_stats->rx_dropped);
napi_free_frags(&tfile->napi);
rcu_read_unlock();
mutex_unlock(&tfile->napi_mutex);
WARN_ON(1);
return -ENOMEM;
@ -1992,6 +1999,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
} else {
netif_rx_ni(skb);
}
rcu_read_unlock();
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);


@ -1301,6 +1301,20 @@ static const struct driver_info trendnet_info = {
.tx_fixup = aqc111_tx_fixup,
};
static const struct driver_info qnap_info = {
.description = "QNAP QNA-UC5G1T USB to 5GbE Adapter",
.bind = aqc111_bind,
.unbind = aqc111_unbind,
.status = aqc111_status,
.link_reset = aqc111_link_reset,
.reset = aqc111_reset,
.stop = aqc111_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX |
FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
.rx_fixup = aqc111_rx_fixup,
.tx_fixup = aqc111_tx_fixup,
};
static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
@ -1455,6 +1469,7 @@ static const struct usb_device_id products[] = {
{AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
{AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
{AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
{AQC111_USB_ETH_DEV(0x1c04, 0x0015, qnap_info)},
{ },/* END */
};
MODULE_DEVICE_TABLE(usb, products);


@ -851,6 +851,14 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
/* QNAP QNA-UC5G1T USB to 5GbE Adapter (based on AQC111U) */
{
USB_DEVICE_AND_INTERFACE_INFO(0x1c04, 0x0015, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = 0,
},
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.


@ -4335,10 +4335,8 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
/* If vxlan->dev is in the same netns, it has already been added
* to the list by the previous loop.
*/
if (!net_eq(dev_net(vxlan->dev), net)) {
gro_cells_destroy(&vxlan->gro_cells);
if (!net_eq(dev_net(vxlan->dev), net))
unregister_netdevice_queue(vxlan->dev, head);
}
}
for (h = 0; h < PORT_HASH_SIZE; ++h)

View File

@ -460,9 +460,7 @@ static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
struct cfg80211_pmsr_result *res)
{
s64 rtt_avg = res->ftm.rtt_avg * 100;
do_div(rtt_avg, 6666);
s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
IWL_DEBUG_INFO(mvm, "entry %d\n", index);
IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);

View File

@ -130,6 +130,8 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
iowrite32(q->desc_dma, &q->regs->desc_base);
iowrite32(q->ndesc, &q->regs->ring_size);
q->head = ioread32(&q->regs->dma_idx);
q->tail = q->head;
iowrite32(q->head, &q->regs->cpu_idx);
@ -180,7 +182,10 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
else
mt76_dma_sync_idx(dev, q);
wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
wake = wake && q->stopped &&
qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
if (wake)
q->stopped = false;
if (!q->queued)
wake_up(&dev->tx_wait);

View File

@ -679,19 +679,15 @@ out:
return ret;
}
static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
int idx = wcid->idx;
int i;
int i, idx = wcid->idx;
rcu_assign_pointer(dev->wcid[idx], NULL);
synchronize_rcu();
mutex_lock(&dev->mutex);
if (dev->drv->sta_remove)
dev->drv->sta_remove(dev, vif, sta);
@ -699,7 +695,15 @@ mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
mt76_txq_remove(dev, sta->txq[i]);
mt76_wcid_free(dev->wcid_mask, idx);
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
mutex_lock(&dev->mutex);
__mt76_sta_remove(dev, vif, sta);
mutex_unlock(&dev->mutex);
}

View File

@ -126,6 +126,7 @@ struct mt76_queue {
int ndesc;
int queued;
int buf_size;
bool stopped;
u8 buf_offset;
u8 hw_idx;
@ -143,6 +144,7 @@ struct mt76_mcu_ops {
const struct mt76_reg_pair *rp, int len);
int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
struct mt76_reg_pair *rp, int len);
int (*mcu_restart)(struct mt76_dev *dev);
};
struct mt76_queue_ops {
@ -693,6 +695,8 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);

View File

@ -135,8 +135,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
out:
mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
if (dev->mt76.q_tx[MT_TXQ_BEACON].queued >
__sw_hweight8(dev->beacon_mask))
if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > hweight8(dev->beacon_mask))
dev->beacon_check++;
}

View File

@ -27,12 +27,16 @@ static void
mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
{
__le32 *txd = (__le32 *)skb->data;
struct ieee80211_hdr *hdr;
struct ieee80211_sta *sta;
struct mt7603_sta *msta;
struct mt76_wcid *wcid;
void *priv;
int idx;
u32 val;
u8 tid;
if (skb->len < sizeof(MT_TXD_SIZE) + sizeof(struct ieee80211_hdr))
if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
goto free;
val = le32_to_cpu(txd[1]);
@ -46,10 +50,19 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
if (!wcid)
goto free;
msta = container_of(wcid, struct mt7603_sta, wcid);
priv = msta = container_of(wcid, struct mt7603_sta, wcid);
val = le32_to_cpu(txd[0]);
skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));
val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
txd[0] = cpu_to_le32(val);
sta = container_of(priv, struct ieee80211_sta, drv_priv);
hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
ieee80211_sta_set_buffered(sta, tid, true);
spin_lock_bh(&dev->ps_lock);
__skb_queue_tail(&msta->psq, skb);
if (skb_queue_len(&msta->psq) >= 64) {

View File

@ -112,7 +112,7 @@ static void
mt7603_phy_init(struct mt7603_dev *dev)
{
int rx_chains = dev->mt76.antenna_mask;
int tx_chains = __sw_hweight8(rx_chains) - 1;
int tx_chains = hweight8(rx_chains) - 1;
mt76_rmw(dev, MT_WF_RMAC_RMCR,
(MT_WF_RMAC_RMCR_SMPS_MODE |

View File

@ -1072,7 +1072,7 @@ out:
case MT_PHY_TYPE_HT:
final_rate_flags |= IEEE80211_TX_RC_MCS;
final_rate &= GENMASK(5, 0);
if (i > 15)
if (final_rate > 15)
return false;
break;
default:

View File

@ -5,6 +5,7 @@
#include <linux/pci.h>
#include <linux/module.h>
#include "mt7603.h"
#include "mac.h"
#include "eeprom.h"
static int
@ -385,6 +386,15 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
mt7603_ps_tx_list(dev, &list);
}
static void
mt7603_ps_set_more_data(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
}
static void
mt7603_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
@ -399,6 +409,8 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
__skb_queue_head_init(&list);
mt7603_wtbl_set_ps(dev, msta, false);
spin_lock_bh(&dev->ps_lock);
skb_queue_walk_safe(&msta->psq, skb, tmp) {
if (!nframes)
@ -409,11 +421,15 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
skb_set_queue_mapping(skb, MT_TXQ_PSD);
__skb_unlink(skb, &msta->psq);
mt7603_ps_set_more_data(skb);
__skb_queue_tail(&list, skb);
nframes--;
}
spin_unlock_bh(&dev->ps_lock);
if (!skb_queue_empty(&list))
ieee80211_sta_eosp(sta);
mt7603_ps_tx_list(dev, &list);
if (nframes)

View File

@ -433,7 +433,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
{
struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
struct ieee80211_hw *hw = mt76_hw(dev);
int n_chains = __sw_hweight8(dev->mt76.antenna_mask);
int n_chains = hweight8(dev->mt76.antenna_mask);
struct {
u8 control_chan;
u8 center_chan;

View File

@ -23,9 +23,9 @@ mt76_wmac_probe(struct platform_device *pdev)
}
mem_base = devm_ioremap_resource(&pdev->dev, res);
if (!mem_base) {
if (IS_ERR(mem_base)) {
dev_err(&pdev->dev, "Failed to get memory resource\n");
return -EINVAL;
return PTR_ERR(mem_base);
}
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,

View File

@ -46,7 +46,7 @@ static const struct mt76_reg_pair common_mac_reg_table[] = {
{ MT_MM20_PROT_CFG, 0x01742004 },
{ MT_MM40_PROT_CFG, 0x03f42084 },
{ MT_TXOP_CTRL_CFG, 0x0000583f },
{ MT_TX_RTS_CFG, 0x00092b20 },
{ MT_TX_RTS_CFG, 0x00ffff20 },
{ MT_EXP_ACK_TIME, 0x002400ca },
{ MT_TXOP_HLDR_ET, 0x00000002 },
{ MT_XIFS_TIME_CFG, 0x33a41010 },

View File

@ -229,7 +229,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
struct mt76x02_dev *dev;
struct mt76_dev *mdev;
u32 asic_rev, mac_rev;
u32 mac_rev;
int ret;
mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
@ -262,10 +262,14 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
goto err;
}
asic_rev = mt76_rr(dev, MT_ASIC_VERSION);
mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
mac_rev = mt76_rr(dev, MT_MAC_CSR0);
dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n",
asic_rev, mac_rev);
mdev->rev, mac_rev);
if (!is_mt76x0(dev)) {
ret = -ENODEV;
goto err;
}
/* Note: vendor driver skips this check for MT76X0U */
if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))

View File

@ -51,6 +51,7 @@ struct mt76x02_calibration {
u16 false_cca;
s8 avg_rssi_all;
s8 agc_gain_adjust;
s8 agc_lowest_gain;
s8 low_gain;
s8 temp_vco;
@ -114,8 +115,11 @@ struct mt76x02_dev {
struct mt76x02_dfs_pattern_detector dfs_pd;
/* edcca monitor */
unsigned long ed_trigger_timeout;
bool ed_tx_blocked;
bool ed_monitor;
u8 ed_monitor_enabled;
u8 ed_monitor_learning;
u8 ed_trigger;
u8 ed_silent;
ktime_t ed_time;
@ -188,6 +192,13 @@ void mt76x02_mac_start(struct mt76x02_dev *dev);
void mt76x02_init_debugfs(struct mt76x02_dev *dev);
static inline bool is_mt76x0(struct mt76x02_dev *dev)
{
return mt76_chip(&dev->mt76) == 0x7610 ||
mt76_chip(&dev->mt76) == 0x7630 ||
mt76_chip(&dev->mt76) == 0x7650;
}
static inline bool is_mt76x2(struct mt76x02_dev *dev)
{
return mt76_chip(&dev->mt76) == 0x7612 ||

View File

@ -116,6 +116,32 @@ static int read_agc(struct seq_file *file, void *data)
return 0;
}
static int
mt76_edcca_set(void *data, u64 val)
{
struct mt76x02_dev *dev = data;
enum nl80211_dfs_regions region = dev->dfs_pd.region;
dev->ed_monitor_enabled = !!val;
dev->ed_monitor = dev->ed_monitor_enabled &&
region == NL80211_DFS_ETSI;
mt76x02_edcca_init(dev, true);
return 0;
}
static int
mt76_edcca_get(void *data, u64 *val)
{
struct mt76x02_dev *dev = data;
*val = dev->ed_monitor_enabled;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt76_edcca_get, mt76_edcca_set,
"%lld\n");
void mt76x02_init_debugfs(struct mt76x02_dev *dev)
{
struct dentry *dir;
@ -127,6 +153,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
debugfs_create_file("edcca", 0400, dir, dev, &fops_edcca);
debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,

View File

@ -885,7 +885,8 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
if (dfs_pd->region != region) {
tasklet_disable(&dfs_pd->dfs_tasklet);
dev->ed_monitor = region == NL80211_DFS_ETSI;
dev->ed_monitor = dev->ed_monitor_enabled &&
region == NL80211_DFS_ETSI;
mt76x02_edcca_init(dev, true);
dfs_pd->region = region;

View File

@ -67,12 +67,39 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
}
EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
struct ieee80211_key_conf *key)
{
enum mt76x02_cipher_type cipher;
u8 key_data[32];
u32 iv, eiv;
u64 pn;
cipher = mt76x02_mac_get_key_info(key, key_data);
iv = mt76_rr(dev, MT_WCID_IV(idx));
eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
pn = (u64)eiv << 16;
if (cipher == MT_CIPHER_TKIP) {
pn |= (iv >> 16) & 0xff;
pn |= (iv & 0xff) << 8;
} else if (cipher >= MT_CIPHER_AES_CCMP) {
pn |= iv & 0xffff;
} else {
return;
}
atomic64_set(&key->tx_pn, pn);
}
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
struct ieee80211_key_conf *key)
{
enum mt76x02_cipher_type cipher;
u8 key_data[32];
u8 iv_data[8];
u64 pn;
cipher = mt76x02_mac_get_key_info(key, key_data);
if (cipher == MT_CIPHER_NONE && key)
@ -85,9 +112,22 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
if (key) {
mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
!!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
pn = atomic64_read(&key->tx_pn);
iv_data[3] = key->keyidx << 6;
if (cipher >= MT_CIPHER_TKIP)
if (cipher >= MT_CIPHER_TKIP) {
iv_data[3] |= 0x20;
put_unaligned_le32(pn >> 16, &iv_data[4]);
}
if (cipher == MT_CIPHER_TKIP) {
iv_data[0] = (pn >> 8) & 0xff;
iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
iv_data[2] = pn & 0xff;
} else if (cipher >= MT_CIPHER_AES_CCMP) {
put_unaligned_le16((pn & 0xffff), &iv_data[0]);
}
}
mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
@ -920,6 +960,7 @@ void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable)
}
}
mt76x02_edcca_tx_enable(dev, true);
dev->ed_monitor_learning = true;
/* clear previous CCA timer value */
mt76_rr(dev, MT_ED_CCA_TIMER);
@ -929,6 +970,10 @@ EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
#define MT_EDCCA_TH 92
#define MT_EDCCA_BLOCK_TH 2
#define MT_EDCCA_LEARN_TH 50
#define MT_EDCCA_LEARN_CCA 180
#define MT_EDCCA_LEARN_TIMEOUT (20 * HZ)
static void mt76x02_edcca_check(struct mt76x02_dev *dev)
{
ktime_t cur_time;
@ -951,11 +996,23 @@ static void mt76x02_edcca_check(struct mt76x02_dev *dev)
dev->ed_trigger = 0;
}
if (dev->ed_trigger > MT_EDCCA_BLOCK_TH &&
!dev->ed_tx_blocked)
if (dev->cal.agc_lowest_gain &&
dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
dev->ed_trigger > MT_EDCCA_LEARN_TH) {
dev->ed_monitor_learning = false;
dev->ed_trigger_timeout = jiffies + 20 * HZ;
} else if (!dev->ed_monitor_learning &&
time_is_after_jiffies(dev->ed_trigger_timeout)) {
dev->ed_monitor_learning = true;
mt76x02_edcca_tx_enable(dev, true);
}
if (dev->ed_monitor_learning)
return;
if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
mt76x02_edcca_tx_enable(dev, false);
else if (dev->ed_silent > MT_EDCCA_BLOCK_TH &&
dev->ed_tx_blocked)
else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
mt76x02_edcca_tx_enable(dev, true);
}

View File

@ -177,6 +177,8 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
u8 key_idx, struct ieee80211_key_conf *key);
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
struct ieee80211_key_conf *key);
void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
struct ieee80211_key_conf *key);
void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx,
u8 *mac);
void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop);

View File

@ -19,6 +19,7 @@
#include <linux/irq.h>
#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "mt76x02_trace.h"
struct beacon_bc_data {
@ -418,9 +419,66 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
return i < 4;
}
static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key, void *data)
{
struct mt76x02_dev *dev = hw->priv;
struct mt76_wcid *wcid;
if (!sta)
return;
wcid = (struct mt76_wcid *) sta->drv_priv;
if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
return;
mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
}
static void mt76x02_reset_state(struct mt76x02_dev *dev)
{
int i;
lockdep_assert_held(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
rcu_read_lock();
ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
rcu_read_unlock();
for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct mt76x02_sta *msta;
struct mt76_wcid *wcid;
void *priv;
wcid = rcu_dereference_protected(dev->mt76.wcid[i],
lockdep_is_held(&dev->mt76.mutex));
if (!wcid)
continue;
priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
sta = container_of(priv, struct ieee80211_sta, drv_priv);
priv = msta->vif;
vif = container_of(priv, struct ieee80211_vif, drv_priv);
__mt76_sta_remove(&dev->mt76, vif, sta);
memset(msta, 0, sizeof(*msta));
}
dev->vif_mask = 0;
dev->beacon_mask = 0;
}
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
{
u32 mask = dev->mt76.mmio.irqmask;
bool restart = dev->mt76.mcu_ops->mcu_restart;
int i;
ieee80211_stop_queues(dev->mt76.hw);
@ -434,6 +492,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
mutex_lock(&dev->mt76.mutex);
if (restart)
mt76x02_reset_state(dev);
if (dev->beacon_mask)
mt76_clear(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_BEACON_TX |
@ -452,20 +513,21 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
/* let fw reset DMA */
mt76_set(dev, 0x734, 0x3);
if (restart)
dev->mt76.mcu_ops->mcu_restart(&dev->mt76);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
mt76_queue_tx_cleanup(dev, i, true);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
mt76_queue_rx_reset(dev, i);
mt76_wr(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
mt76_set(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
mt76x02_mac_start(dev);
if (dev->ed_monitor)
mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
if (dev->beacon_mask)
if (dev->beacon_mask && !restart)
mt76_set(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_BEACON_TX |
MT_BEACON_TIME_CFG_TBTT_EN);
@ -486,9 +548,13 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
napi_schedule(&dev->mt76.napi[i]);
}
ieee80211_wake_queues(dev->mt76.hw);
mt76_txq_schedule_all(&dev->mt76);
if (restart) {
mt76x02_mcu_function_select(dev, Q_SELECT, 1);
ieee80211_restart_hw(dev->mt76.hw);
} else {
ieee80211_wake_queues(dev->mt76.hw);
mt76_txq_schedule_all(&dev->mt76);
}
}
static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)

View File

@ -194,6 +194,8 @@ bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
ret = true;
}
dev->cal.agc_lowest_gain = dev->cal.agc_gain_adjust >= limit;
return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);

View File

@ -85,8 +85,9 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
mt76x02_insert_hdr_pad(skb);
txwi = skb_push(skb, sizeof(struct mt76x02_txwi));
txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi));
mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
skb_push(skb, sizeof(struct mt76x02_txwi));
pid = mt76_tx_status_skb_add(mdev, wcid, skb);
txwi->pktid = pid;

View File

@ -237,6 +237,8 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
int idx = 0;
memset(msta, 0, sizeof(*msta));
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
if (idx < 0)
return -ENOSPC;
@ -274,6 +276,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
struct mt76_txq *mtxq;
memset(mvif, 0, sizeof(*mvif));
mvif->idx = idx;
mvif->group_wcid.idx = MT_VIF_WCID(idx);
mvif->group_wcid.hw_key_idx = -1;
@ -289,6 +293,12 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct mt76x02_dev *dev = hw->priv;
unsigned int idx = 0;
/* Allow to change address in HW if we create first interface. */
if (!dev->vif_mask &&
(((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) ||
memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1)))
mt76x02_mac_setaddr(dev, vif->addr);
if (vif->addr[0] & BIT(1))
idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
@ -311,10 +321,6 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
if (dev->vif_mask & BIT(idx))
return -EBUSY;
/* Allow to change address in HW if we create first interface. */
if (!dev->vif_mask && !ether_addr_equal(dev->mt76.macaddr, vif->addr))
mt76x02_mac_setaddr(dev, vif->addr);
dev->vif_mask |= BIT(idx);
mt76x02_vif_init(dev, vif, idx);

View File

@ -106,7 +106,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev)
{ MT_TX_SW_CFG1, 0x00010000 },
{ MT_TX_SW_CFG2, 0x00000000 },
{ MT_TXOP_CTRL_CFG, 0x0400583f },
{ MT_TX_RTS_CFG, 0x00100020 },
{ MT_TX_RTS_CFG, 0x00ffff20 },
{ MT_TX_TIMEOUT_CFG, 0x000a2290 },
{ MT_TX_RETRY_CFG, 0x47f01f0f },
{ MT_EXP_ACK_TIME, 0x002c00dc },

View File

@ -71,6 +71,7 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
void mt76x2_cleanup(struct mt76x02_dev *dev);
int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard);
void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable);
void mt76x2_init_txpower(struct mt76x02_dev *dev,
struct ieee80211_supported_band *sband);

View File

@ -77,7 +77,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
}
}
static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
{
const u8 *macaddr = dev->mt76.macaddr;
u32 val;

View File

@ -165,9 +165,30 @@ error:
return -ENOENT;
}
static int
mt76pci_mcu_restart(struct mt76_dev *mdev)
{
struct mt76x02_dev *dev;
int ret;
dev = container_of(mdev, struct mt76x02_dev, mt76);
mt76x02_mcu_cleanup(dev);
mt76x2_mac_reset(dev, true);
ret = mt76pci_load_firmware(dev);
if (ret)
return ret;
mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
return 0;
}
int mt76x2_mcu_init(struct mt76x02_dev *dev)
{
static const struct mt76_mcu_ops mt76x2_mcu_ops = {
.mcu_restart = mt76pci_mcu_restart,
.mcu_send_msg = mt76x02_mcu_msg_send,
};
int ret;

View File

@ -260,10 +260,15 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
val = 0x1836 << 16;
if (!mt76x2_has_ext_lna(dev) &&
dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
val = 0x1e42 << 16;
else
val = 0x1836 << 16;
if (mt76x2_has_ext_lna(dev) &&
dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ &&
dev->mt76.chandef.width < NL80211_CHAN_WIDTH_40)
val = 0x0f36 << 16;
val |= 0xf8;
@ -280,6 +285,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
{
u8 *gain = dev->cal.agc_gain_init;
u8 low_gain_delta, gain_delta;
u32 agc_35, agc_37;
bool gain_change;
int low_gain;
u32 val;
@ -318,6 +324,16 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
else
low_gain_delta = 14;
agc_37 = 0x2121262c;
if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
agc_35 = 0x11111516;
else if (low_gain == 2)
agc_35 = agc_37 = 0x08080808;
else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
agc_35 = 0x10101014;
else
agc_35 = 0x11111116;
if (low_gain == 2) {
mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
@ -326,15 +342,13 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
dev->cal.agc_gain_adjust = 0;
} else {
mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
else
mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
gain_delta = 0;
dev->cal.agc_gain_adjust = low_gain_delta;
}
mt76_wr(dev, MT_BBP(AGC, 35), agc_35);
mt76_wr(dev, MT_BBP(AGC, 37), agc_37);
dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
mt76x2_phy_set_gain_val(dev);

View File

@ -21,11 +21,10 @@
#include "mt76x2u.h"
static const struct usb_device_id mt76x2u_device_table[] = {
{ USB_DEVICE(0x0e8d, 0x7612) }, /* Alfa AWUS036ACM */
{ USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */
{ USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */
{ USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */
{ USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */
{ USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */
{ USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
{ USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
{ USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
@ -66,6 +65,10 @@ static int mt76x2u_probe(struct usb_interface *intf,
mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
if (!is_mt76x2(dev)) {
err = -ENODEV;
goto err;
}
err = mt76x2u_register_device(dev);
if (err < 0)

View File

@ -93,7 +93,6 @@ int mt76x2u_mac_reset(struct mt76x02_dev *dev)
mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20);
mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
mt76_wr(dev, MT_WMM_CWMIN, 0x2344);

View File

@ -289,8 +289,11 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
dev->queue_ops->kick(dev, q);
if (q->queued > q->ndesc - 8)
if (q->queued > q->ndesc - 8 && !q->stopped) {
ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
q->stopped = true;
}
spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);
@ -374,7 +377,10 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
if (last_skb) {
mt76_queue_ps_skb(dev, sta, last_skb, true);
dev->queue_ops->kick(dev, hwq);
} else {
ieee80211_sta_eosp(sta);
}
spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
@ -577,6 +583,9 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
struct mt76_queue *hwq = mtxq->hwq;
if (!test_bit(MT76_STATE_RUNNING, &dev->state))
return;
spin_lock_bh(&hwq->lock);
if (list_empty(&mtxq->list))
list_add_tail(&mtxq->list, &hwq->swq);

View File

@ -655,7 +655,11 @@ static void mt76u_tx_tasklet(unsigned long data)
spin_lock_bh(&q->lock);
}
mt76_txq_schedule(dev, q);
wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
wake = q->stopped && q->queued < q->ndesc - 8;
if (wake)
q->stopped = false;
if (!q->queued)
wake_up(&dev->tx_wait);

View File

@ -303,6 +303,10 @@ static int mt7601u_probe(struct usb_interface *usb_intf,
mac_rev = mt7601u_rr(dev, MT_MAC_CSR0);
dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n",
asic_rev, mac_rev);
if ((asic_rev >> 16) != 0x7601) {
ret = -ENODEV;
goto err;
}
/* Note: vendor driver skips this check for MT7601U */
if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))

View File

@ -1150,13 +1150,16 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
{
struct sk_buff *skb;
/* release may never happen from within CQ tasklet scope */
WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
__skb_queue_purge(&buf->skb_list);
while ((skb = __skb_dequeue(&buf->skb_list)) != NULL)
consume_skb(skb);
}
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,

View File

@ -629,8 +629,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
} /* else fall through */
QETH_TXQ_STAT_INC(queue, tx_dropped);
QETH_TXQ_STAT_INC(queue, tx_errors);
dev_kfree_skb_any(skb);
kfree_skb(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
}
@ -645,6 +644,8 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
qeth_l2_vnicc_set_defaults(card);
if (gdev->dev.type == &qeth_generic_devtype) {
rc = qeth_l2_create_device_attributes(&gdev->dev);
if (rc)
@ -652,8 +653,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
}
hash_init(card->mac_htable);
card->info.hwtrap = 0;
qeth_l2_vnicc_set_defaults(card);
return 0;
}

View File

@ -2096,8 +2096,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
tx_drop:
QETH_TXQ_STAT_INC(queue, tx_dropped);
QETH_TXQ_STAT_INC(queue, tx_errors);
dev_kfree_skb_any(skb);
kfree_skb(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
}
@ -2253,14 +2252,15 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
hash_init(card->ip_htable);
if (gdev->dev.type == &qeth_generic_devtype) {
rc = qeth_l3_create_device_attributes(&gdev->dev);
if (rc)
return rc;
}
hash_init(card->ip_htable);
hash_init(card->ip_mc_htable);
card->info.hwtrap = 0;
return 0;
}

View File

@ -108,7 +108,7 @@ static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb)
#define AARP_RESOLVE_TIME (10 * HZ)
extern struct datalink_proto *ddp_dl, *aarp_dl;
extern void aarp_proto_init(void);
extern int aarp_proto_init(void);
/* Inter module exports */

View File

@ -193,7 +193,6 @@ enum bpf_arg_type {
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
ARG_PTR_TO_SOCKET, /* pointer to bpf_sock */
ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
};

View File

@ -66,6 +66,46 @@ struct bpf_reg_state {
* same reference to the socket, to determine proper reference freeing.
*/
u32 id;
/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
* from a pointer-cast helper, bpf_sk_fullsock() and
* bpf_tcp_sock().
*
* Consider the following where "sk" is a reference counted
* pointer returned from "sk = bpf_sk_lookup_tcp();":
*
* 1: sk = bpf_sk_lookup_tcp();
* 2: if (!sk) { return 0; }
* 3: fullsock = bpf_sk_fullsock(sk);
* 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
* 5: tp = bpf_tcp_sock(fullsock);
* 6: if (!tp) { bpf_sk_release(sk); return 0; }
* 7: bpf_sk_release(sk);
* 8: snd_cwnd = tp->snd_cwnd; // verifier will complain
*
* After bpf_sk_release(sk) at line 7, both "fullsock" ptr and
* "tp" ptr should be invalidated also. In order to do that,
* the reg holding "fullsock" and "sk" need to remember
* the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
* such that the verifier can reset all regs which have
* ref_obj_id matching the sk_reg->id.
*
* sk_reg->ref_obj_id is set to sk_reg->id at line 1.
* sk_reg->id will stay as NULL-marking purpose only.
* After NULL-marking is done, sk_reg->id can be reset to 0.
*
* After "fullsock = bpf_sk_fullsock(sk);" at line 3,
* fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
*
* After "tp = bpf_tcp_sock(fullsock);" at line 5,
* tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
* which is the same as sk_reg->ref_obj_id.
*
* From the verifier perspective, if sk, fullsock and tp
* are not NULL, they are the same ptr with different
* reg->type. In particular, bpf_sk_release(tp) is also
* allowed and has the same effect as bpf_sk_release(sk).
*/
u32 ref_obj_id;
/* For scalar types (SCALAR_VALUE), this represents our knowledge of
* the actual value.
* For pointer types, this represents the variable part of the offset

View File

@ -148,6 +148,22 @@
#define BCM_LED_SRC_OFF 0xe /* Tied high */
#define BCM_LED_SRC_ON 0xf /* Tied low */
/*
* Broadcom Multicolor LED configurations (expansion register 4)
*/
#define BCM_EXP_MULTICOLOR (MII_BCM54XX_EXP_SEL_ER + 0x04)
#define BCM_LED_MULTICOLOR_IN_PHASE BIT(8)
#define BCM_LED_MULTICOLOR_LINK_ACT 0x0
#define BCM_LED_MULTICOLOR_SPEED 0x1
#define BCM_LED_MULTICOLOR_ACT_FLASH 0x2
#define BCM_LED_MULTICOLOR_FDX 0x3
#define BCM_LED_MULTICOLOR_OFF 0x4
#define BCM_LED_MULTICOLOR_ON 0x5
#define BCM_LED_MULTICOLOR_ALT 0x6
#define BCM_LED_MULTICOLOR_FLASH 0x7
#define BCM_LED_MULTICOLOR_LINK 0x8
#define BCM_LED_MULTICOLOR_ACT 0x9
#define BCM_LED_MULTICOLOR_PROGRAM 0xa
/*
* BCM5482: Shadow registers

View File

@ -83,6 +83,12 @@ enum sock_type {
#endif /* ARCH_HAS_SOCKET_TYPES */
/**
* enum sock_shutdown_cmd - Shutdown types
* @SHUT_RD: shutdown receptions
* @SHUT_WR: shutdown transmissions
* @SHUT_RDWR: shutdown receptions/transmissions
*/
enum sock_shutdown_cmd {
SHUT_RD,
SHUT_WR,

View File

@ -26,7 +26,7 @@ typedef __kernel_sa_family_t sa_family_t;
/*
* 1003.1g requires sa_family_t and that sa_data is char.
*/
struct sockaddr {
sa_family_t sa_family; /* address family, AF_xxx */
char sa_data[14]; /* 14 bytes of protocol address */
@ -44,7 +44,7 @@ struct linger {
* system, not 4.3. Thus msg_accrights(len) are now missing. They
* belong in an obscure libc emulation or the bin.
*/
struct msghdr {
void *msg_name; /* ptr to socket address structure */
int msg_namelen; /* size of socket address structure */
@ -54,7 +54,7 @@ struct msghdr {
unsigned int msg_flags; /* flags on received message */
struct kiocb *msg_iocb; /* ptr to iocb for async requests */
};
struct user_msghdr {
void __user *msg_name; /* ptr to socket address structure */
int msg_namelen; /* size of socket address structure */
@ -122,7 +122,7 @@ struct cmsghdr {
* inside range, given by msg->msg_controllen before using
* ancillary object DATA. --ANK (980731)
*/
static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
struct cmsghdr *__cmsg)
{
@ -264,10 +264,10 @@ struct ucred {
/* Maximum queue length specifiable by listen. */
#define SOMAXCONN 128
/* Flags we can use with send/ and recv.
/* Flags we can use with send/ and recv.
Added those for 1003.1g not all are supported yet
*/
#define MSG_OOB 1
#define MSG_PEEK 2
#define MSG_DONTROUTE 4

View File

@ -39,7 +39,7 @@ struct tc_action {
struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
struct gnet_stats_queue __percpu *cpu_qstats;
struct tc_cookie __rcu *act_cookie;
struct tcf_chain *goto_chain;
struct tcf_chain __rcu *goto_chain;
};
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt
@ -90,7 +90,7 @@ struct tc_action_ops {
int (*lookup)(struct net *net, struct tc_action **a, u32 index);
int (*init)(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act, int ovr,
int bind, bool rtnl_held,
int bind, bool rtnl_held, struct tcf_proto *tp,
struct netlink_ext_ack *extack);
int (*walk)(struct net *, struct sk_buff *,
struct netlink_callback *, int,
@ -181,6 +181,11 @@ int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
struct tcf_chain **handle,
struct netlink_ext_ack *newchain);
struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
struct tcf_chain *newchain);
#endif /* CONFIG_NET_CLS_ACT */
static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,

View File

@ -378,6 +378,7 @@ struct tcf_chain {
bool flushing;
const struct tcf_proto_ops *tmplt_ops;
void *tmplt_priv;
struct rcu_head rcu;
};
struct tcf_block {

View File

@ -61,7 +61,7 @@ static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
unsigned int offset)
{
struct sctphdr *sh = sctp_hdr(skb);
struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
const struct skb_checksum_ops ops = {
.update = sctp_csum_update,
.combine = sctp_csum_combine,

View File

@ -710,6 +710,12 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
hlist_add_head_rcu(&sk->sk_node, list);
}
static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
{
sock_hold(sk);
hlist_add_tail_rcu(&sk->sk_node, list);
}
static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);

View File

@ -56,7 +56,7 @@ static inline bool is_tcf_gact_goto_chain(const struct tc_action *a)
static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a)
{
return a->goto_chain->index;
return READ_ONCE(a->tcfa_action) & TC_ACT_EXT_VAL_MASK;
}
#endif /* __NET_TC_GACT_H */

View File

@ -36,7 +36,6 @@ struct xdp_umem {
u32 headroom;
u32 chunk_size_nohr;
struct user_struct *user;
struct pid *pid;
unsigned long address;
refcount_t users;
struct work_struct work;

View File

@ -502,16 +502,6 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
* Description
* Push an element *value* in *map*. *flags* is one of:
*
* **BPF_EXIST**
* If the queue/stack is full, the oldest element is removed to
* make room for this.
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_probe_read(void *dst, u32 size, const void *src)
* Description
* For tracing programs, safely attempt to read *size* bytes from
@ -1435,14 +1425,14 @@ union bpf_attr {
* u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
* Description
* Equivalent to bpf_get_socket_cookie() helper that accepts
* *skb*, but gets socket from **struct bpf_sock_addr** contex.
* *skb*, but gets socket from **struct bpf_sock_addr** context.
* Return
* A 8-byte long non-decreasing number.
*
* u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
* Description
* Equivalent to bpf_get_socket_cookie() helper that accepts
* *skb*, but gets socket from **struct bpf_sock_ops** contex.
* *skb*, but gets socket from **struct bpf_sock_ops** context.
* Return
* A 8-byte long non-decreasing number.
*
@ -2098,6 +2088,25 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_rc_repeat(void *ctx)
* Description
* This helper is used in programs implementing IR decoding, to
* report a successfully decoded repeat key message. This delays
* the generation of a key up event for previously generated
* key down event.
*
* Some IR protocols like NEC have a special IR message for
* repeating last button, for when a button is held down.
*
* The *ctx* should point to the lirc sample as passed into
* the program.
*
* This helper is only available is the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
* Return
* 0
*
* int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
* Description
* This helper is used in programs implementing IR decoding, to
@ -2124,26 +2133,7 @@ union bpf_attr {
* Return
* 0
*
* int bpf_rc_repeat(void *ctx)
* Description
* This helper is used in programs implementing IR decoding, to
* report a successfully decoded repeat key message. This delays
* the generation of a key up event for previously generated
* key down event.
*
* Some IR protocols like NEC have a special IR message for
* repeating last button, for when a button is held down.
*
* The *ctx* should point to the lirc sample as passed into
* the program.
*
* This helper is only available is the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
* Return
* 0
*
* uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
* u64 bpf_skb_cgroup_id(struct sk_buff *skb)
* Description
* Return the cgroup v2 id of the socket associated with the *skb*.
* This is roughly similar to the **bpf_get_cgroup_classid**\ ()
@ -2159,30 +2149,12 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
* u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
* Description
* Return id of cgroup v2 that is ancestor of cgroup associated
* with the *skb* at the *ancestor_level*. The root cgroup is at
* *ancestor_level* zero and each step down the hierarchy
* increments the level. If *ancestor_level* == level of cgroup
* associated with *skb*, then return value will be same as that
* of **bpf_skb_cgroup_id**\ ().
*
* The helper is useful to implement policies based on cgroups
* that are upper in hierarchy than immediate cgroup associated
* with *skb*.
*
* The format of returned id and helper limitations are same as in
* **bpf_skb_cgroup_id**\ ().
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
* u64 bpf_get_current_cgroup_id(void)
* Return
* A 64-bit integer containing the current cgroup id based
* on the cgroup within which the current task is running.
*
* void* get_local_storage(void *map, u64 flags)
* void *bpf_get_local_storage(void *map, u64 flags)
* Description
* Get the pointer to the local storage area.
* The type and the size of the local storage is defined
@ -2209,6 +2181,24 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
* Description
* Return id of cgroup v2 that is ancestor of cgroup associated
* with the *skb* at the *ancestor_level*. The root cgroup is at
* *ancestor_level* zero and each step down the hierarchy
* increments the level. If *ancestor_level* == level of cgroup
* associated with *skb*, then return value will be same as that
* of **bpf_skb_cgroup_id**\ ().
*
* The helper is useful to implement policies based on cgroups
* that are upper in hierarchy than immediate cgroup associated
* with *skb*.
*
* The format of returned id and helper limitations are same as in
* **bpf_skb_cgroup_id**\ ().
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
* struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for TCP socket matching *tuple*, optionally in a child
@ -2289,6 +2279,16 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
* Description
* Push an element *value* in *map*. *flags* is one of:
*
* **BPF_EXIST**
* If the queue/stack is full, the oldest element is
* removed to make room for this.
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_map_pop_elem(struct bpf_map *map, void *value)
* Description
* Pop an element from *map*.
@ -2343,29 +2343,94 @@ union bpf_attr {
* Return
* 0
*
* int bpf_spin_lock(struct bpf_spin_lock *lock)
* Description
* Acquire a spinlock represented by the pointer *lock*, which is
* stored as part of a value of a map. Taking the lock allows to
* safely update the rest of the fields in that value. The
* spinlock can (and must) later be released with a call to
* **bpf_spin_unlock**\ (\ *lock*\ ).
*
* Spinlocks in BPF programs come with a number of restrictions
* and constraints:
*
* * **bpf_spin_lock** objects are only allowed inside maps of
* types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
* list could be extended in the future).
* * BTF description of the map is mandatory.
* * The BPF program can take ONE lock at a time, since taking two
* or more could cause dead locks.
* * Only one **struct bpf_spin_lock** is allowed per map element.
* * When the lock is taken, calls (either BPF to BPF or helpers)
* are not allowed.
* * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
* allowed inside a spinlock-ed region.
* * The BPF program MUST call **bpf_spin_unlock**\ () to release
* the lock, on all execution paths, before it returns.
* * The BPF program can access **struct bpf_spin_lock** only via
* the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
* helpers. Loading or storing data into the **struct
* bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
* * To use the **bpf_spin_lock**\ () helper, the BTF description
* of the map value must be a struct and have **struct
* bpf_spin_lock** *anyname*\ **;** field at the top level.
* Nested lock inside another struct is not allowed.
* * The **struct bpf_spin_lock** *lock* field in a map value must
* be aligned on a multiple of 4 bytes in that value.
* * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
* the **bpf_spin_lock** field to user space.
* * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
* a BPF program, do not update the **bpf_spin_lock** field.
* * **bpf_spin_lock** cannot be on the stack or inside a
* networking packet (it can only be inside of a map values).
* * **bpf_spin_lock** is available to root only.
* * Tracing programs and socket filter programs cannot use
* **bpf_spin_lock**\ () due to insufficient preemption checks
* (but this may change in the future).
* * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
* Return
* 0
*
* int bpf_spin_unlock(struct bpf_spin_lock *lock)
* Description
* Release the *lock* previously locked by a call to
* **bpf_spin_lock**\ (\ *lock*\ ).
* Return
* 0
*
* struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
* Description
* This helper gets a **struct bpf_sock** pointer such
* that all the fields in bpf_sock can be accessed.
* that all the fields in this **bpf_sock** can be accessed.
* Return
* A **struct bpf_sock** pointer on success, or NULL in
* A **struct bpf_sock** pointer on success, or **NULL** in
* case of failure.
*
* struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
* Description
* This helper gets a **struct bpf_tcp_sock** pointer from a
* **struct bpf_sock** pointer.
*
* Return
* A **struct bpf_tcp_sock** pointer on success, or NULL in
* A **struct bpf_tcp_sock** pointer on success, or **NULL** in
* case of failure.
*
* int bpf_skb_ecn_set_ce(struct sk_buf *skb)
* Description
* Sets ECN of IP header to ce (congestion encountered) if
* current value is ect (ECN capable). Works with IPv6 and IPv4.
* Return
* 1 if set, 0 if not set.
* Description
* Set ECN (Explicit Congestion Notification) field of IP header
* to **CE** (Congestion Encountered) if current value is **ECT**
* (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
* and IPv4.
* Return
* 1 if the **CE** flag is set (either by the current helper call
* or because it was already present), 0 if it is not set.
*
* struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
* Description
* Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
* **bpf_sk_release**\ () is unnecessary and not allowed.
* Return
* A **struct bpf_sock** pointer on success, or **NULL** in
* case of failure.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@ -2465,7 +2530,8 @@ union bpf_attr {
FN(spin_unlock), \
FN(sk_fullsock), \
FN(tcp_sock), \
FN(skb_ecn_set_ce),
FN(skb_ecn_set_ce), \
FN(get_listener_sock),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call

View File

@ -136,21 +136,29 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
void *bpf_map_area_alloc(size_t size, int numa_node)
{
/* We definitely need __GFP_NORETRY, so OOM killer doesn't
* trigger under memory pressure as we really just want to
* fail instead.
/* We really just want to fail instead of triggering OOM killer
* under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
* which is used for lower order allocation requests.
*
* It has been observed that higher order allocation requests done by
* vmalloc with __GFP_NORETRY being set might fail due to not trying
* to reclaim memory from the page cache, thus we set
* __GFP_RETRY_MAYFAIL to avoid such situations.
*/
const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
void *area;
if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
area = kmalloc_node(size, GFP_USER | flags, numa_node);
area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
numa_node);
if (area != NULL)
return area;
}
return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
__builtin_return_address(0));
return __vmalloc_node_flags_caller(size, numa_node,
GFP_KERNEL | __GFP_RETRY_MAYFAIL |
flags, __builtin_return_address(0));
}
void bpf_map_area_free(void *area)

View File

@ -212,7 +212,7 @@ struct bpf_call_arg_meta {
int access_size;
s64 msize_smax_value;
u64 msize_umax_value;
int ptr_id;
int ref_obj_id;
int func_id;
};
@ -346,35 +346,23 @@ static bool reg_type_may_be_null(enum bpf_reg_type type)
type == PTR_TO_TCP_SOCK_OR_NULL;
}
static bool type_is_refcounted(enum bpf_reg_type type)
{
return type == PTR_TO_SOCKET;
}
static bool type_is_refcounted_or_null(enum bpf_reg_type type)
{
return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL;
}
static bool reg_is_refcounted(const struct bpf_reg_state *reg)
{
return type_is_refcounted(reg->type);
}
static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
return reg->type == PTR_TO_MAP_VALUE &&
map_value_has_spin_lock(reg->map_ptr);
}
static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg)
static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
{
return type_is_refcounted_or_null(reg->type);
return type == PTR_TO_SOCKET ||
type == PTR_TO_SOCKET_OR_NULL ||
type == PTR_TO_TCP_SOCK ||
type == PTR_TO_TCP_SOCK_OR_NULL;
}
static bool arg_type_is_refcounted(enum bpf_arg_type type)
static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
{
return type == ARG_PTR_TO_SOCKET;
return type == ARG_PTR_TO_SOCK_COMMON;
}
/* Determine whether the function releases some resources allocated by another
@ -392,6 +380,12 @@ static bool is_acquire_function(enum bpf_func_id func_id)
func_id == BPF_FUNC_sk_lookup_udp;
}
static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_tcp_sock ||
func_id == BPF_FUNC_sk_fullsock;
}
/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
[NOT_INIT] = "?",
@ -466,6 +460,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
verbose(env, ",call_%d", func(env, reg)->callsite);
} else {
verbose(env, "(id=%d", reg->id);
if (reg_type_may_be_refcounted_or_null(t))
verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
if (t != SCALAR_VALUE)
verbose(env, ",off=%d", reg->off);
if (type_is_pkt_pointer(t))
@ -2414,16 +2410,15 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
/* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
if (!type_is_sk_pointer(type))
goto err_type;
} else if (arg_type == ARG_PTR_TO_SOCKET) {
expected_type = PTR_TO_SOCKET;
if (type != expected_type)
goto err_type;
if (meta->ptr_id || !reg->id) {
verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n",
meta->ptr_id, reg->id);
return -EFAULT;
if (reg->ref_obj_id) {
if (meta->ref_obj_id) {
verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
regno, reg->ref_obj_id,
meta->ref_obj_id);
return -EFAULT;
}
meta->ref_obj_id = reg->ref_obj_id;
}
meta->ptr_id = reg->id;
} else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
if (meta->func_id == BPF_FUNC_spin_lock) {
if (process_spin_lock(env, regno, true))
@ -2740,32 +2735,38 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
return true;
}
static bool check_refcount_ok(const struct bpf_func_proto *fn)
static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
{
int count = 0;
if (arg_type_is_refcounted(fn->arg1_type))
if (arg_type_may_be_refcounted(fn->arg1_type))
count++;
if (arg_type_is_refcounted(fn->arg2_type))
if (arg_type_may_be_refcounted(fn->arg2_type))
count++;
if (arg_type_is_refcounted(fn->arg3_type))
if (arg_type_may_be_refcounted(fn->arg3_type))
count++;
if (arg_type_is_refcounted(fn->arg4_type))
if (arg_type_may_be_refcounted(fn->arg4_type))
count++;
if (arg_type_is_refcounted(fn->arg5_type))
if (arg_type_may_be_refcounted(fn->arg5_type))
count++;
/* A reference acquiring function cannot acquire
* another refcounted ptr.
*/
if (is_acquire_function(func_id) && count)
return false;
/* We only support one arg being unreferenced at the moment,
* which is sufficient for the helper functions we have right now.
*/
return count <= 1;
}
static int check_func_proto(const struct bpf_func_proto *fn)
static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
{
return check_raw_mode_ok(fn) &&
check_arg_pair_ok(fn) &&
check_refcount_ok(fn) ? 0 : -EINVAL;
check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
}
/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
@ -2799,19 +2800,20 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
}
static void release_reg_references(struct bpf_verifier_env *env,
struct bpf_func_state *state, int id)
struct bpf_func_state *state,
int ref_obj_id)
{
struct bpf_reg_state *regs = state->regs, *reg;
int i;
for (i = 0; i < MAX_BPF_REG; i++)
if (regs[i].id == id)
if (regs[i].ref_obj_id == ref_obj_id)
mark_reg_unknown(env, regs, i);
bpf_for_each_spilled_reg(i, state, reg) {
if (!reg)
continue;
if (reg_is_refcounted(reg) && reg->id == id)
if (reg->ref_obj_id == ref_obj_id)
__mark_reg_unknown(reg);
}
}
@ -2820,15 +2822,20 @@ static void release_reg_references(struct bpf_verifier_env *env,
* resources. Identify all copies of the same pointer and clear the reference.
*/
static int release_reference(struct bpf_verifier_env *env,
struct bpf_call_arg_meta *meta)
int ref_obj_id)
{
struct bpf_verifier_state *vstate = env->cur_state;
int err;
int i;
for (i = 0; i <= vstate->curframe; i++)
release_reg_references(env, vstate->frame[i], meta->ptr_id);
err = release_reference_state(cur_func(env), ref_obj_id);
if (err)
return err;
return release_reference_state(cur_func(env), meta->ptr_id);
for (i = 0; i <= vstate->curframe; i++)
release_reg_references(env, vstate->frame[i], ref_obj_id);
return 0;
}
static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
@ -3047,7 +3054,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
memset(&meta, 0, sizeof(meta));
meta.pkt_access = fn->pkt_access;
err = check_func_proto(fn);
err = check_func_proto(fn, func_id);
if (err) {
verbose(env, "kernel subsystem misconfigured func %s#%d\n",
func_id_name(func_id), func_id);
@ -3093,7 +3100,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
return err;
}
} else if (is_release_function(func_id)) {
err = release_reference(env, &meta);
err = release_reference(env, meta.ref_obj_id);
if (err) {
verbose(env, "func %s#%d reference has not been acquired before\n",
func_id_name(func_id), func_id);
@ -3154,8 +3161,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
if (id < 0)
return id;
/* For release_reference() */
/* For mark_ptr_or_null_reg() */
regs[BPF_REG_0].id = id;
/* For release_reference() */
regs[BPF_REG_0].ref_obj_id = id;
} else {
/* For mark_ptr_or_null_reg() */
regs[BPF_REG_0].id = ++env->id_gen;
@ -3170,6 +3179,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
return -EINVAL;
}
if (is_ptr_cast_function(func_id))
/* For release_reference() */
regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
err = check_map_func_compatibility(env, meta.map_ptr, func_id);
@ -3368,7 +3381,7 @@ do_sim:
*dst_reg = *ptr_reg;
}
ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
if (!ptr_is_dst_reg)
if (!ptr_is_dst_reg && ret)
*dst_reg = tmp;
return !ret ? -EFAULT : 0;
}
@ -4665,11 +4678,19 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
} else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
reg->type = PTR_TO_TCP_SOCK;
}
if (is_null || !(reg_is_refcounted(reg) ||
reg_may_point_to_spin_lock(reg))) {
/* We don't need id from this point onwards anymore,
* thus we should better reset it, so that state
* pruning has chances to take effect.
if (is_null) {
/* We don't need id and ref_obj_id from this point
* onwards anymore, thus we should better reset it,
* so that state pruning has chances to take effect.
*/
reg->id = 0;
reg->ref_obj_id = 0;
} else if (!reg_may_point_to_spin_lock(reg)) {
/* For not-NULL ptr, reg->ref_obj_id will be reset
* in release_reg_references().
*
* reg->id is still used by spin_lock ptr. Other
* than spin_lock ptr type, reg->id can be reset.
*/
reg->id = 0;
}
@ -4684,11 +4705,16 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
{
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *reg, *regs = state->regs;
u32 ref_obj_id = regs[regno].ref_obj_id;
u32 id = regs[regno].id;
int i, j;
if (reg_is_refcounted_or_null(&regs[regno]) && is_null)
release_reference_state(state, id);
if (ref_obj_id && ref_obj_id == id && is_null)
/* regs[regno] is in the " == NULL" branch.
* No one could have freed the reference state before
* doing the NULL check.
*/
WARN_ON_ONCE(release_reference_state(state, id));
for (i = 0; i < MAX_BPF_REG; i++)
mark_ptr_or_null_reg(state, &regs[i], id, is_null);
@ -6052,15 +6078,17 @@ static int propagate_liveness(struct bpf_verifier_env *env,
}
/* Propagate read liveness of registers... */
BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
/* We don't need to worry about FP liveness because it's read-only */
for (i = 0; i < BPF_REG_FP; i++) {
if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
continue;
if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i],
&vparent->frame[vstate->curframe]->regs[i]);
if (err)
return err;
for (frame = 0; frame <= vstate->curframe; frame++) {
/* We don't need to worry about FP liveness, it's read-only */
for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
if (vparent->frame[frame]->regs[i].live & REG_LIVE_READ)
continue;
if (vstate->frame[frame]->regs[i].live & REG_LIVE_READ) {
err = mark_reg_read(env, &vstate->frame[frame]->regs[i],
&vparent->frame[frame]->regs[i]);
if (err)
return err;
}
}
}

View File

@ -416,8 +416,12 @@ static void rht_deferred_worker(struct work_struct *work)
else if (tbl->nest)
err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
if (!err)
err = rhashtable_rehash_table(ht);
if (!err || err == -EEXIST) {
int nerr;
nerr = rhashtable_rehash_table(ht);
err = err ?: nerr;
}
mutex_unlock(&ht->mutex);
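The err = err ?: nerr; line above uses the GNU C conditional with an omitted middle operand, so an error recorded by the allocation step (such as -EEXIST when a rehash was already pending) is preserved, while a clean allocation adopts the rehash result. A standalone illustration with hypothetical error values:

/* Demonstrates the GNU "a ?: b" extension: equivalent to (a ? a : b)
 * with "a" evaluated once. Values below are invented for the example.
 */
#include <errno.h>
#include <stdio.h>

int main(void)
{
	int err, nerr;

	err = 0;		/* hypothetical: allocation step succeeded */
	nerr = -ENOMEM;		/* hypothetical: the rehash itself failed */
	err = err ?: nerr;	/* err was 0, so take the rehash result */
	printf("%d\n", err);	/* prints -12 */

	err = -EEXIST;		/* hypothetical: a rehash was already pending */
	nerr = 0;		/* the forced rehash then succeeded */
	err = err ?: nerr;	/* the first error wins: stays -EEXIST */
	printf("%d\n", err);	/* prints -17 */
	return 0;
}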

View File

@ -879,15 +879,24 @@ static struct notifier_block aarp_notifier = {
static unsigned char aarp_snap_id[] = { 0x00, 0x00, 0x00, 0x80, 0xF3 };
void __init aarp_proto_init(void)
int __init aarp_proto_init(void)
{
int rc;
aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv);
if (!aarp_dl)
if (!aarp_dl) {
printk(KERN_CRIT "Unable to register AARP with SNAP.\n");
return -ENOMEM;
}
timer_setup(&aarp_timer, aarp_expire_timeout, 0);
aarp_timer.expires = jiffies + sysctl_aarp_expiry_time;
add_timer(&aarp_timer);
register_netdevice_notifier(&aarp_notifier);
rc = register_netdevice_notifier(&aarp_notifier);
if (rc) {
del_timer_sync(&aarp_timer);
unregister_snap_client(aarp_dl);
}
return rc;
}
/* Remove the AARP entries associated with a device. */

View File

@ -1904,9 +1904,6 @@ static unsigned char ddp_snap_id[] = { 0x08, 0x00, 0x07, 0x80, 0x9B };
EXPORT_SYMBOL(atrtr_get_dev);
EXPORT_SYMBOL(atalk_find_dev_addr);
static const char atalk_err_snap[] __initconst =
KERN_CRIT "Unable to register DDP with SNAP.\n";
/* Called by proto.c on kernel start up */
static int __init atalk_init(void)
{
@ -1921,17 +1918,22 @@ static int __init atalk_init(void)
goto out_proto;
ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
if (!ddp_dl)
printk(atalk_err_snap);
if (!ddp_dl) {
pr_crit("Unable to register DDP with SNAP.\n");
goto out_sock;
}
dev_add_pack(&ltalk_packet_type);
dev_add_pack(&ppptalk_packet_type);
rc = register_netdevice_notifier(&ddp_notifier);
if (rc)
goto out_sock;
goto out_snap;
rc = aarp_proto_init();
if (rc)
goto out_dev;
aarp_proto_init();
rc = atalk_proc_init();
if (rc)
goto out_aarp;
@ -1945,11 +1947,13 @@ out_proc:
atalk_proc_exit();
out_aarp:
aarp_cleanup_module();
out_dev:
unregister_netdevice_notifier(&ddp_notifier);
out_sock:
out_snap:
dev_remove_pack(&ppptalk_packet_type);
dev_remove_pack(&ltalk_packet_type);
unregister_snap_client(ddp_dl);
out_sock:
sock_unregister(PF_APPLETALK);
out_proto:
proto_unregister(&ddp_proto);

Some files were not shown because too many files have changed in this diff.