mirror of https://github.com/torvalds/linux.git
Networking fixes for 5.18-rc3, including fixes from wireless and
netfilter. Current release - regressions: - smc: fix af_ops of child socket pointing to released memory - wifi: ath9k: fix usage of driver-private space in tx_info Previous releases - regressions: - ipv6: fix panic when forwarding a pkt with no in6 dev - sctp: use the correct skb for security_sctp_assoc_request - smc: fix NULL pointer dereference in smc_pnet_find_ib() - sched: fix initialization order when updating chain 0 head - phy: don't defer probe forever if PHY IRQ provider is missing - dsa: revert "net: dsa: setup master before ports" - dsa: felix: fix tagging protocol changes with multiple CPU ports - eth: ice: - fix use-after-free when freeing @rx_cpu_rmap - revert "iavf: fix deadlock occurrence during resetting VF interface" - eth: lan966x: stop processing the MAC entry is port is wrong Previous releases - always broken: - sched - flower: fix parsing of ethertype following VLAN header - taprio: check if socket flags are valid - nfc: add flush_workqueue to prevent uaf - veth: ensure eth header is in skb's linear part - eth: stmmac: fix altr_tse_pcs function when using a fixed-link - eth: macb: restart tx only if queue pointer is lagging - eth: macvlan: fix leaking skb in source mode with nodst option Signed-off-by: Paolo Abeni <pabeni@redhat.com> -----BEGIN PGP SIGNATURE----- iQJGBAABCAAwFiEEg1AjqC77wbdLX2LbKSR5jcyPE6QFAmJX6CoSHHBhYmVuaUBy ZWRoYXQuY29tAAoJECkkeY3MjxOkMRIP/0CKmGetL0i1WQ0LD8regv+E6NizyXDB +YCchHMMgYJ/aIRVps9GSWqD6ncU2gCSW27aCxHsN+Esw/HytrmsaHfS1SWRgIfb 6hn1CB3/Ojd1eXZOdXgVrlayhJKj//c0KdoHQoY+sQaLKNvULx5VfTq4y1yjyXy2 upfwW4JaQaUlkaTbdhjRhq5TuRY2JCwscc7EmJbwrqmqWG7nSEngrLU0XLHs8tWr X/gQbI/wDepf/KidE59rLV7yMYlovCkoZBVUrN4oLZRqxUIBtU0a8nZNX5NFWiVD KwTahrUh8eOizzSMEzjO0HpuGBWQFrmyC0eOb8KwixMJrRfDtqAWJKwHfxZtzHr/ JpUHlgvCN9iQoPYY0LZ8uU2CLFJM+p4hn1sOt/swoo23iAvCiJWWbRoAvsvwLbZ3 4pAsof8w6SRb34J/6Lcu78LECD/y62TJ27Ay82vdjrYbz1g8wb1wGlAMAbJ7K7sE NQ4iB6wzd6UVF3Qm4qo7kRJT5TTY4TxZMQUKl1gj/OEV9hrJ8zF1MjRhxNWRC2R9 +A8Rw3weL8zYKoezCEEbogaDp+h7IdeNlbZE8r19p2CMiJ/Y+NQd//p4rSJ0Oqcw 1tOtg5x7LVVMiSz38uHk0HbiKJ9UbHLJJZ35N5oi/IDPg+LWFfar23I4BfuII2kB Wo5Ii/0DMozb =Ha9M -----END PGP SIGNATURE----- Merge tag 'net-5.18-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net Pull networking fixes from Paolo Abeni: "Including fixes from wireless and netfilter. 
Current release - regressions: - smc: fix af_ops of child socket pointing to released memory - wifi: ath9k: fix usage of driver-private space in tx_info Previous releases - regressions: - ipv6: fix panic when forwarding a pkt with no in6 dev - sctp: use the correct skb for security_sctp_assoc_request - smc: fix NULL pointer dereference in smc_pnet_find_ib() - sched: fix initialization order when updating chain 0 head - phy: don't defer probe forever if PHY IRQ provider is missing - dsa: revert "net: dsa: setup master before ports" - dsa: felix: fix tagging protocol changes with multiple CPU ports - eth: ice: - fix use-after-free when freeing @rx_cpu_rmap - revert "iavf: fix deadlock occurrence during resetting VF interface" - eth: lan966x: stop processing the MAC entry is port is wrong Previous releases - always broken: - sched: - flower: fix parsing of ethertype following VLAN header - taprio: check if socket flags are valid - nfc: add flush_workqueue to prevent uaf - veth: ensure eth header is in skb's linear part - eth: stmmac: fix altr_tse_pcs function when using a fixed-link - eth: macb: restart tx only if queue pointer is lagging - eth: macvlan: fix leaking skb in source mode with nodst option" * tag 'net-5.18-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (52 commits) net: bcmgenet: Revert "Use stronger register read/writes to assure ordering" rtnetlink: Fix handling of disabled L3 stats in RTM_GETSTATS replies net: dsa: felix: fix tagging protocol changes with multiple CPU ports tun: annotate access to queue->trans_start nfc: nci: add flush_workqueue to prevent uaf net: dsa: realtek: don't parse compatible string for RTL8366S net: dsa: realtek: fix Kconfig to assure consistent driver linkage net: ftgmac100: access hardware register after clock ready Revert "net: dsa: setup master before ports" macvlan: Fix leaking skb in source mode with nodst option netfilter: nf_tables: nft_parse_register can return a negative value net: lan966x: Stop processing the MAC entry is port is wrong. net: lan966x: Fix when a port's upper is changed. net: lan966x: Fix IGMP snooping when frames have vlan tag net: lan966x: Update lan966x_ptp_get_nominal_value sctp: Initialize daddr on peeled off socket net/smc: Fix af_ops of child socket pointing to released memory net/smc: Fix NULL pointer dereference in smc_pnet_find_ib() net/smc: use memcpy instead of snprintf to avoid out of bounds read net: macb: Restart tx only if queue pointer is lagging ...
commit d20339fa93
@ -13,9 +13,6 @@ description: |
|
||||
This describes the devicetree bindings for AVE ethernet controller
|
||||
implemented on Socionext UniPhier SoCs.
|
||||
|
||||
allOf:
|
||||
- $ref: ethernet-controller.yaml#
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
enum:
|
||||
@ -44,25 +41,13 @@ properties:
|
||||
minItems: 1
|
||||
maxItems: 4
|
||||
|
||||
clock-names:
|
||||
oneOf:
|
||||
- items: # for Pro4
|
||||
- const: gio
|
||||
- const: ether
|
||||
- const: ether-gb
|
||||
- const: ether-phy
|
||||
- const: ether # for others
|
||||
clock-names: true
|
||||
|
||||
resets:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
|
||||
reset-names:
|
||||
oneOf:
|
||||
- items: # for Pro4
|
||||
- const: gio
|
||||
- const: ether
|
||||
- const: ether # for others
|
||||
reset-names: true
|
||||
|
||||
socionext,syscon-phy-mode:
|
||||
$ref: /schemas/types.yaml#/definitions/phandle-array
|
||||
@ -78,6 +63,42 @@ properties:
|
||||
$ref: mdio.yaml#
|
||||
unevaluatedProperties: false
|
||||
|
||||
allOf:
|
||||
- $ref: ethernet-controller.yaml#
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
const: socionext,uniphier-pro4-ave4
|
||||
then:
|
||||
properties:
|
||||
clocks:
|
||||
minItems: 4
|
||||
maxItems: 4
|
||||
clock-names:
|
||||
items:
|
||||
- const: gio
|
||||
- const: ether
|
||||
- const: ether-gb
|
||||
- const: ether-phy
|
||||
resets:
|
||||
minItems: 2
|
||||
maxItems: 2
|
||||
reset-names:
|
||||
items:
|
||||
- const: gio
|
||||
- const: ether
|
||||
else:
|
||||
properties:
|
||||
clocks:
|
||||
maxItems: 1
|
||||
clock-names:
|
||||
const: ether
|
||||
resets:
|
||||
maxItems: 1
|
||||
reset-names:
|
||||
const: ether
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
@ -90,7 +111,7 @@ required:
|
||||
- reset-names
|
||||
- mdio
|
||||
|
||||
additionalProperties: false
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
|
@@ -894,7 +894,7 @@ xmit_hash_policy
		Uses XOR of hardware MAC addresses and packet type ID
		field to generate the hash. The formula is

		hash = source MAC XOR destination MAC XOR packet type ID
		hash = source MAC[5] XOR destination MAC[5] XOR packet type ID
		slave number = hash modulo slave count

		This algorithm will place all traffic to a particular
@@ -910,7 +910,7 @@ xmit_hash_policy
		Uses XOR of hardware MAC addresses and IP addresses to
		generate the hash. The formula is

		hash = source MAC XOR destination MAC XOR packet type ID
		hash = source MAC[5] XOR destination MAC[5] XOR packet type ID
		hash = hash XOR source IP XOR destination IP
		hash = hash XOR (hash RSHIFT 16)
		hash = hash XOR (hash RSHIFT 8)
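As a quick illustration of the corrected layer2 formula above, here is a minimal standalone sketch in plain C; it is not code from the bonding driver, and the function name and sample addresses are invented for the example:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the documented layer2 policy: only the last octet (MAC[5])
 * of each address feeds the hash. Not bonding driver code.
 */
static unsigned int l2_hash_slave(const uint8_t *src_mac, const uint8_t *dst_mac,
                                  uint16_t packet_type_id, unsigned int slave_count)
{
        unsigned int hash = src_mac[5] ^ dst_mac[5] ^ packet_type_id;

        return hash % slave_count;      /* slave number = hash modulo slave count */
}

int main(void)
{
        uint8_t src[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint8_t dst[6] = { 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb };

        printf("slave %u\n", l2_hash_slave(src, dst, 0x0800, 4));
        return 0;
}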
@ -201,6 +201,7 @@ F: include/net/ieee80211_radiotap.h
|
||||
F: include/net/iw_handler.h
|
||||
F: include/net/wext.h
|
||||
F: include/uapi/linux/nl80211.h
|
||||
F: include/uapi/linux/wireless.h
|
||||
F: net/wireless/
|
||||
|
||||
8169 10/100/1000 GIGABIT ETHERNET DRIVER
|
||||
@ -12402,7 +12403,7 @@ F: drivers/mmc/host/mtk-sd.c
|
||||
|
||||
MEDIATEK MT76 WIRELESS LAN DRIVER
|
||||
M: Felix Fietkau <nbd@nbd.name>
|
||||
M: Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
|
||||
M: Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
M: Ryder Lee <ryder.lee@mediatek.com>
|
||||
R: Shayne Chen <shayne.chen@mediatek.com>
|
||||
R: Sean Wang <sean.wang@mediatek.com>
|
||||
@ -21227,10 +21228,8 @@ S: Maintained
|
||||
F: drivers/hid/hid-wiimote*
|
||||
|
||||
WILOCITY WIL6210 WIRELESS DRIVER
|
||||
M: Maya Erez <merez@codeaurora.org>
|
||||
L: linux-wireless@vger.kernel.org
|
||||
L: wil6210@qti.qualcomm.com
|
||||
S: Supported
|
||||
S: Orphan
|
||||
W: https://wireless.wiki.kernel.org/en/users/Drivers/wil6210
|
||||
F: drivers/net/wireless/ath/wil6210/
|
||||
|
||||
|
@@ -296,6 +296,7 @@ int driver_deferred_probe_check_state(struct device *dev)

	return -EPROBE_DEFER;
}
EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);

static void deferred_probe_timeout_work_func(struct work_struct *work)
{
@@ -670,6 +670,8 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	enum dsa_tag_protocol old_proto = felix->tag_proto;
	bool cpu_port_active = false;
	struct dsa_port *dp;
	int err;

	if (proto != DSA_TAG_PROTO_SEVILLE &&
@@ -677,6 +679,27 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
	    proto != DSA_TAG_PROTO_OCELOT_8021Q)
		return -EPROTONOSUPPORT;

	/* We don't support multiple CPU ports, yet the DT blob may have
	 * multiple CPU ports defined. The first CPU port is the active one,
	 * the others are inactive. In this case, DSA will call
	 * ->change_tag_protocol() multiple times, once per CPU port.
	 * Since we implement the tagging protocol change towards "ocelot" or
	 * "seville" as effectively initializing the NPI port, what we are
	 * doing is effectively changing who the NPI port is to the last @cpu
	 * argument passed, which is an unused DSA CPU port and not the one
	 * that should actively pass traffic.
	 * Suppress DSA's calls on CPU ports that are inactive.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		if (dp->cpu_dp->index == cpu) {
			cpu_port_active = true;
			break;
		}
	}

	if (!cpu_port_active)
		return 0;

	felix_del_tag_protocol(ds, cpu, old_proto);

	err = felix_set_tag_protocol(ds, cpu, proto);

@@ -2316,7 +2316,7 @@ static int felix_pci_probe(struct pci_dev *pdev,

	err = dsa_register_switch(ds);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
		dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
		goto err_register_ds;
	}
@ -9,34 +9,46 @@ menuconfig NET_DSA_REALTEK
|
||||
help
|
||||
Select to enable support for Realtek Ethernet switch chips.
|
||||
|
||||
Note that at least one interface driver must be enabled for the
|
||||
subdrivers to be loaded. Moreover, an interface driver cannot achieve
|
||||
anything without at least one subdriver enabled.
|
||||
|
||||
if NET_DSA_REALTEK
|
||||
|
||||
config NET_DSA_REALTEK_MDIO
|
||||
tristate "Realtek MDIO connected switch driver"
|
||||
depends on NET_DSA_REALTEK
|
||||
tristate "Realtek MDIO interface driver"
|
||||
depends on OF
|
||||
depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
|
||||
depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
|
||||
depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
|
||||
help
|
||||
Select to enable support for registering switches configured
|
||||
through MDIO.
|
||||
|
||||
config NET_DSA_REALTEK_SMI
|
||||
tristate "Realtek SMI connected switch driver"
|
||||
depends on NET_DSA_REALTEK
|
||||
tristate "Realtek SMI interface driver"
|
||||
depends on OF
|
||||
depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
|
||||
depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
|
||||
depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
|
||||
help
|
||||
Select to enable support for registering switches connected
|
||||
through SMI.
|
||||
|
||||
config NET_DSA_REALTEK_RTL8365MB
|
||||
tristate "Realtek RTL8365MB switch subdriver"
|
||||
depends on NET_DSA_REALTEK
|
||||
depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
|
||||
imply NET_DSA_REALTEK_SMI
|
||||
imply NET_DSA_REALTEK_MDIO
|
||||
select NET_DSA_TAG_RTL8_4
|
||||
help
|
||||
Select to enable support for Realtek RTL8365MB-VC and RTL8367S.
|
||||
|
||||
config NET_DSA_REALTEK_RTL8366RB
|
||||
tristate "Realtek RTL8366RB switch subdriver"
|
||||
depends on NET_DSA_REALTEK
|
||||
depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
|
||||
imply NET_DSA_REALTEK_SMI
|
||||
imply NET_DSA_REALTEK_MDIO
|
||||
select NET_DSA_TAG_RTL4_A
|
||||
help
|
||||
Select to enable support for Realtek RTL8366RB
|
||||
Select to enable support for Realtek RTL8366RB.
|
||||
|
||||
endif
|
||||
|
@@ -546,11 +546,6 @@ static const struct of_device_id realtek_smi_of_match[] = {
		.data = &rtl8366rb_variant,
	},
#endif
	{
		/* FIXME: add support for RTL8366S and more */
		.compatible = "realtek,rtl8366s",
		.data = NULL,
	},
#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
	{
		.compatible = "realtek,rtl8365mb",
@ -486,8 +486,8 @@ int aq_nic_start(struct aq_nic_s *self)
|
||||
if (err < 0)
|
||||
goto err_exit;
|
||||
|
||||
for (i = 0U, aq_vec = self->aq_vec[0];
|
||||
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
|
||||
for (i = 0U; self->aq_vecs > i; ++i) {
|
||||
aq_vec = self->aq_vec[i];
|
||||
err = aq_vec_start(aq_vec);
|
||||
if (err < 0)
|
||||
goto err_exit;
|
||||
@ -517,8 +517,8 @@ int aq_nic_start(struct aq_nic_s *self)
|
||||
mod_timer(&self->polling_timer, jiffies +
|
||||
AQ_CFG_POLLING_TIMER_INTERVAL);
|
||||
} else {
|
||||
for (i = 0U, aq_vec = self->aq_vec[0];
|
||||
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
|
||||
for (i = 0U; self->aq_vecs > i; ++i) {
|
||||
aq_vec = self->aq_vec[i];
|
||||
err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
|
||||
aq_vec_isr, aq_vec,
|
||||
aq_vec_get_affinity_mask(aq_vec));
|
||||
|
@ -43,8 +43,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
|
||||
if (!self) {
|
||||
err = -EINVAL;
|
||||
} else {
|
||||
for (i = 0U, ring = self->ring[0];
|
||||
self->tx_rings > i; ++i, ring = self->ring[i]) {
|
||||
for (i = 0U; self->tx_rings > i; ++i) {
|
||||
ring = self->ring[i];
|
||||
u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
|
||||
ring[AQ_VEC_RX_ID].stats.rx.polls++;
|
||||
u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
|
||||
@ -182,8 +182,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
|
||||
self->aq_hw_ops = aq_hw_ops;
|
||||
self->aq_hw = aq_hw;
|
||||
|
||||
for (i = 0U, ring = self->ring[0];
|
||||
self->tx_rings > i; ++i, ring = self->ring[i]) {
|
||||
for (i = 0U; self->tx_rings > i; ++i) {
|
||||
ring = self->ring[i];
|
||||
err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
|
||||
if (err < 0)
|
||||
goto err_exit;
|
||||
@ -224,8 +224,8 @@ int aq_vec_start(struct aq_vec_s *self)
|
||||
unsigned int i = 0U;
|
||||
int err = 0;
|
||||
|
||||
for (i = 0U, ring = self->ring[0];
|
||||
self->tx_rings > i; ++i, ring = self->ring[i]) {
|
||||
for (i = 0U; self->tx_rings > i; ++i) {
|
||||
ring = self->ring[i];
|
||||
err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
|
||||
&ring[AQ_VEC_TX_ID]);
|
||||
if (err < 0)
|
||||
@ -248,8 +248,8 @@ void aq_vec_stop(struct aq_vec_s *self)
|
||||
struct aq_ring_s *ring = NULL;
|
||||
unsigned int i = 0U;
|
||||
|
||||
for (i = 0U, ring = self->ring[0];
|
||||
self->tx_rings > i; ++i, ring = self->ring[i]) {
|
||||
for (i = 0U; self->tx_rings > i; ++i) {
|
||||
ring = self->ring[i];
|
||||
self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
|
||||
&ring[AQ_VEC_TX_ID]);
|
||||
|
||||
@ -268,8 +268,8 @@ void aq_vec_deinit(struct aq_vec_s *self)
|
||||
if (!self)
|
||||
goto err_exit;
|
||||
|
||||
for (i = 0U, ring = self->ring[0];
|
||||
self->tx_rings > i; ++i, ring = self->ring[i]) {
|
||||
for (i = 0U; self->tx_rings > i; ++i) {
|
||||
ring = self->ring[i];
|
||||
aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
|
||||
aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
|
||||
}
|
||||
@ -297,8 +297,8 @@ void aq_vec_ring_free(struct aq_vec_s *self)
|
||||
if (!self)
|
||||
goto err_exit;
|
||||
|
||||
for (i = 0U, ring = self->ring[0];
|
||||
self->tx_rings > i; ++i, ring = self->ring[i]) {
|
||||
for (i = 0U; self->tx_rings > i; ++i) {
|
||||
ring = self->ring[i];
|
||||
aq_ring_free(&ring[AQ_VEC_TX_ID]);
|
||||
if (i < self->rx_rings)
|
||||
aq_ring_free(&ring[AQ_VEC_RX_ID]);
|
||||
|
@@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset)
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		__raw_writel(value, offset);
	else
		writel(value, offset);
		writel_relaxed(value, offset);
}

static inline u32 bcmgenet_readl(void __iomem *offset)
@@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset)
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return __raw_readl(offset);
	else
		return readl(offset);
		return readl_relaxed(offset);
}

static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
@@ -1658,6 +1658,7 @@ static void macb_tx_restart(struct macb_queue *queue)
	unsigned int head = queue->tx_head;
	unsigned int tail = queue->tx_tail;
	struct macb *bp = queue->bp;
	unsigned int head_idx, tbqp;

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TXUBR));
@@ -1665,6 +1666,13 @@ static void macb_tx_restart(struct macb_queue *queue)
	if (head == tail)
		return;

	tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
	tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
	head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));

	if (tbqp == head_idx)
		return;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
}

@ -1835,11 +1835,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
|
||||
priv->rxdes0_edorr_mask = BIT(30);
|
||||
priv->txdes0_edotr_mask = BIT(30);
|
||||
priv->is_aspeed = true;
|
||||
/* Disable ast2600 problematic HW arbitration */
|
||||
if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
|
||||
iowrite32(FTGMAC100_TM_DEFAULT,
|
||||
priv->base + FTGMAC100_OFFSET_TM);
|
||||
}
|
||||
} else {
|
||||
priv->rxdes0_edorr_mask = BIT(15);
|
||||
priv->txdes0_edotr_mask = BIT(15);
|
||||
@ -1911,6 +1906,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
|
||||
err = ftgmac100_setup_clk(priv);
|
||||
if (err)
|
||||
goto err_phy_connect;
|
||||
|
||||
/* Disable ast2600 problematic HW arbitration */
|
||||
if (of_device_is_compatible(np, "aspeed,ast2600-mac"))
|
||||
iowrite32(FTGMAC100_TM_DEFAULT,
|
||||
priv->base + FTGMAC100_OFFSET_TM);
|
||||
}
|
||||
|
||||
/* Default ring sizes */
|
||||
|
@@ -489,11 +489,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
	info->phc_index = -1;

	fman_node = of_get_parent(mac_node);
	if (fman_node)
	if (fman_node) {
		ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
		of_node_put(fman_node);
	}

	if (ptp_node)
	if (ptp_node) {
		ptp_dev = of_find_device_by_node(ptp_node);
		of_node_put(ptp_node);
	}

	if (ptp_dev)
		ptp = platform_get_drvdata(ptp_dev);
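The hunk above adopts the usual device-tree refcount pattern; a hedged sketch of that pattern is shown below (lookup_ptp_node() is an invented helper name, not a dpaa function):

#include <linux/of.h>

/* Every node returned by of_get_parent()/of_parse_phandle() carries a
 * reference that must be dropped with of_node_put() once it is no longer
 * needed; the caller owns the reference on the returned node.
 */
static struct device_node *lookup_ptp_node(struct device_node *mac_node)
{
	struct device_node *fman_node, *ptp_node = NULL;

	fman_node = of_get_parent(mac_node);
	if (fman_node) {
		ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
		of_node_put(fman_node);		/* done with the parent */
	}

	return ptp_node;			/* caller must of_node_put() this */
}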
@ -2871,7 +2871,6 @@ continue_reset:
|
||||
running = adapter->state == __IAVF_RUNNING;
|
||||
|
||||
if (running) {
|
||||
netdev->flags &= ~IFF_UP;
|
||||
netif_carrier_off(netdev);
|
||||
netif_tx_stop_all_queues(netdev);
|
||||
adapter->link_up = false;
|
||||
@ -2988,7 +2987,7 @@ continue_reset:
|
||||
* to __IAVF_RUNNING
|
||||
*/
|
||||
iavf_up_complete(adapter);
|
||||
netdev->flags |= IFF_UP;
|
||||
|
||||
iavf_irq_enable(adapter, true);
|
||||
} else {
|
||||
iavf_change_state(adapter, __IAVF_DOWN);
|
||||
@ -3004,10 +3003,8 @@ continue_reset:
|
||||
reset_err:
|
||||
mutex_unlock(&adapter->client_lock);
|
||||
mutex_unlock(&adapter->crit_lock);
|
||||
if (running) {
|
||||
if (running)
|
||||
iavf_change_state(adapter, __IAVF_RUNNING);
|
||||
netdev->flags |= IFF_UP;
|
||||
}
|
||||
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
|
||||
iavf_close(netdev);
|
||||
}
|
||||
|
@ -577,7 +577,7 @@ void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
|
||||
{
|
||||
struct net_device *netdev;
|
||||
|
||||
if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list)
|
||||
if (!vsi || vsi->type != ICE_VSI_PF)
|
||||
return;
|
||||
|
||||
netdev = vsi->netdev;
|
||||
@ -599,7 +599,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
|
||||
int base_idx, i;
|
||||
|
||||
if (!vsi || vsi->type != ICE_VSI_PF)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
|
||||
pf = vsi->back;
|
||||
netdev = vsi->netdev;
|
||||
@ -636,7 +636,6 @@ void ice_remove_arfs(struct ice_pf *pf)
|
||||
if (!pf_vsi)
|
||||
return;
|
||||
|
||||
ice_free_cpu_rx_rmap(pf_vsi);
|
||||
ice_clear_arfs(pf_vsi);
|
||||
}
|
||||
|
||||
@ -653,9 +652,5 @@ void ice_rebuild_arfs(struct ice_pf *pf)
|
||||
return;
|
||||
|
||||
ice_remove_arfs(pf);
|
||||
if (ice_set_cpu_rx_rmap(pf_vsi)) {
|
||||
dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n");
|
||||
return;
|
||||
}
|
||||
ice_init_arfs(pf_vsi);
|
||||
}
|
||||
|
@ -2689,6 +2689,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
|
||||
return;
|
||||
|
||||
vsi->irqs_ready = false;
|
||||
ice_free_cpu_rx_rmap(vsi);
|
||||
|
||||
ice_for_each_q_vector(vsi, i) {
|
||||
u16 vector = i + base;
|
||||
int irq_num;
|
||||
@ -2702,7 +2704,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
|
||||
continue;
|
||||
|
||||
/* clear the affinity notifier in the IRQ descriptor */
|
||||
irq_set_affinity_notifier(irq_num, NULL);
|
||||
if (!IS_ENABLED(CONFIG_RFS_ACCEL))
|
||||
irq_set_affinity_notifier(irq_num, NULL);
|
||||
|
||||
/* clear the affinity_mask in the IRQ descriptor */
|
||||
irq_set_affinity_hint(irq_num, NULL);
|
||||
|
@ -2510,6 +2510,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
|
||||
irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
|
||||
}
|
||||
|
||||
err = ice_set_cpu_rx_rmap(vsi);
|
||||
if (err) {
|
||||
netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
|
||||
vsi->vsi_num, ERR_PTR(err));
|
||||
goto free_q_irqs;
|
||||
}
|
||||
|
||||
vsi->irqs_ready = true;
|
||||
return 0;
|
||||
|
||||
@ -3692,20 +3699,12 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
|
||||
*/
|
||||
ice_napi_add(vsi);
|
||||
|
||||
status = ice_set_cpu_rx_rmap(vsi);
|
||||
if (status) {
|
||||
dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
|
||||
vsi->vsi_num, status);
|
||||
goto unroll_napi_add;
|
||||
}
|
||||
status = ice_init_mac_fltr(pf);
|
||||
if (status)
|
||||
goto free_cpu_rx_map;
|
||||
goto unroll_napi_add;
|
||||
|
||||
return 0;
|
||||
|
||||
free_cpu_rx_map:
|
||||
ice_free_cpu_rx_rmap(vsi);
|
||||
unroll_napi_add:
|
||||
ice_tc_indir_block_unregister(vsi);
|
||||
unroll_cfg_netdev:
|
||||
@ -5167,7 +5166,6 @@ static int __maybe_unused ice_suspend(struct device *dev)
|
||||
continue;
|
||||
ice_vsi_free_q_vectors(pf->vsi[v]);
|
||||
}
|
||||
ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
|
||||
ice_clear_interrupt_scheme(pf);
|
||||
|
||||
pci_save_state(pdev);
|
||||
|
@ -650,6 +650,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
|
||||
return 0;
|
||||
|
||||
errout:
|
||||
mutex_destroy(&mlxsw_i2c->cmd.lock);
|
||||
i2c_set_clientdata(client, NULL);
|
||||
|
||||
return err;
|
||||
|
@@ -346,7 +346,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,

		lan966x_mac_process_raw_entry(&raw_entries[column],
					      mac, &vid, &dest_idx);
		WARN_ON(dest_idx > lan966x->num_phys_ports);
		if (WARN_ON(dest_idx > lan966x->num_phys_ports))
			continue;

		/* If the entry in SW is found, then there is nothing
		 * to do
@@ -392,7 +393,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,

		lan966x_mac_process_raw_entry(&raw_entries[column],
					      mac, &vid, &dest_idx);
		WARN_ON(dest_idx > lan966x->num_phys_ports);
		if (WARN_ON(dest_idx > lan966x->num_phys_ports))
			continue;

		mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
		if (!mac_entry)
@ -446,6 +446,12 @@ static bool lan966x_hw_offload(struct lan966x *lan966x, u32 port,
|
||||
ANA_CPU_FWD_CFG_MLD_REDIR_ENA)))
|
||||
return true;
|
||||
|
||||
if (eth_type_vlan(skb->protocol)) {
|
||||
skb = skb_vlan_untag(skb);
|
||||
if (unlikely(!skb))
|
||||
return false;
|
||||
}
|
||||
|
||||
if (skb->protocol == htons(ETH_P_IP) &&
|
||||
ip_hdr(skb)->protocol == IPPROTO_IGMP)
|
||||
return false;
|
||||
|
@ -29,10 +29,10 @@ enum {
|
||||
|
||||
static u64 lan966x_ptp_get_nominal_value(void)
|
||||
{
|
||||
u64 res = 0x304d2df1;
|
||||
|
||||
res <<= 32;
|
||||
return res;
|
||||
/* This is the default value that for each system clock, the time of day
|
||||
* is increased. It has the format 5.59 nanosecond.
|
||||
*/
|
||||
return 0x304d4873ecade305;
|
||||
}
|
||||
|
||||
int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
|
||||
|
@ -322,8 +322,7 @@ static int lan966x_port_prechangeupper(struct net_device *dev,
|
||||
|
||||
if (netif_is_bridge_master(info->upper_dev) && !info->linking)
|
||||
switchdev_bridge_port_unoffload(port->dev, port,
|
||||
&lan966x_switchdev_nb,
|
||||
&lan966x_switchdev_blocking_nb);
|
||||
NULL, NULL);
|
||||
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
@ -57,10 +57,6 @@
|
||||
#define TSE_PCS_USE_SGMII_ENA BIT(0)
|
||||
#define TSE_PCS_IF_USE_SGMII 0x03
|
||||
|
||||
#define SGMII_ADAPTER_CTRL_REG 0x00
|
||||
#define SGMII_ADAPTER_DISABLE 0x0001
|
||||
#define SGMII_ADAPTER_ENABLE 0x0000
|
||||
|
||||
#define AUTONEGO_LINK_TIMER 20
|
||||
|
||||
static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs)
|
||||
@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
|
||||
unsigned int speed)
|
||||
{
|
||||
void __iomem *tse_pcs_base = pcs->tse_pcs_base;
|
||||
void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
|
||||
u32 val;
|
||||
|
||||
writew(SGMII_ADAPTER_ENABLE,
|
||||
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
|
||||
|
||||
pcs->autoneg = phy_dev->autoneg;
|
||||
|
||||
if (phy_dev->autoneg == AUTONEG_ENABLE) {
|
||||
|
@ -10,6 +10,10 @@
|
||||
#include <linux/phy.h>
|
||||
#include <linux/timer.h>
|
||||
|
||||
#define SGMII_ADAPTER_CTRL_REG 0x00
|
||||
#define SGMII_ADAPTER_ENABLE 0x0000
|
||||
#define SGMII_ADAPTER_DISABLE 0x0001
|
||||
|
||||
struct tse_pcs {
|
||||
struct device *dev;
|
||||
void __iomem *tse_pcs_base;
|
||||
|
@ -18,9 +18,6 @@
|
||||
|
||||
#include "altr_tse_pcs.h"
|
||||
|
||||
#define SGMII_ADAPTER_CTRL_REG 0x00
|
||||
#define SGMII_ADAPTER_DISABLE 0x0001
|
||||
|
||||
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
|
||||
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
|
||||
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
|
||||
@ -62,16 +59,14 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
|
||||
{
|
||||
struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
|
||||
void __iomem *splitter_base = dwmac->splitter_base;
|
||||
void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base;
|
||||
void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base;
|
||||
struct device *dev = dwmac->dev;
|
||||
struct net_device *ndev = dev_get_drvdata(dev);
|
||||
struct phy_device *phy_dev = ndev->phydev;
|
||||
u32 val;
|
||||
|
||||
if ((tse_pcs_base) && (sgmii_adapter_base))
|
||||
writew(SGMII_ADAPTER_DISABLE,
|
||||
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
|
||||
writew(SGMII_ADAPTER_DISABLE,
|
||||
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
|
||||
|
||||
if (splitter_base) {
|
||||
val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
|
||||
@ -93,7 +88,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
|
||||
writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
|
||||
}
|
||||
|
||||
if (tse_pcs_base && sgmii_adapter_base)
|
||||
writew(SGMII_ADAPTER_ENABLE,
|
||||
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
|
||||
if (phy_dev)
|
||||
tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
|
||||
}
|
||||
|
||||
|
@@ -460,8 +460,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
			return RX_HANDLER_CONSUMED;
		*pskb = skb;
		eth = eth_hdr(skb);
		if (macvlan_forward_source(skb, port, eth->h_source))
		if (macvlan_forward_source(skb, port, eth->h_source)) {
			kfree_skb(skb);
			return RX_HANDLER_CONSUMED;
		}
		src = macvlan_hash_lookup(port, eth->h_source);
		if (src && src->mode != MACVLAN_MODE_VEPA &&
		    src->mode != MACVLAN_MODE_BRIDGE) {
@@ -480,8 +482,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
		return RX_HANDLER_PASS;
	}

	if (macvlan_forward_source(skb, port, eth->h_source))
	if (macvlan_forward_source(skb, port, eth->h_source)) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}
	if (macvlan_passthru(port))
		vlan = list_first_or_null_rcu(&port->vlans,
					      struct macvlan_dev, list);
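The fix above follows the rx_handler ownership contract; the sketch below (not macvlan code, with an invented function name and a simplified consume flag) shows the rule it relies on:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* A handler that returns RX_HANDLER_CONSUMED takes ownership of the skb,
 * so it must free it (or have queued it elsewhere); otherwise it leaks.
 */
static rx_handler_result_t example_handle_frame(struct sk_buff **pskb, bool consume)
{
	struct sk_buff *skb = *pskb;

	if (consume) {
		kfree_skb(skb);		/* we own it now: release it */
		return RX_HANDLER_CONSUMED;
	}

	return RX_HANDLER_PASS;		/* the stack keeps processing skb */
}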
@@ -43,6 +43,11 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
	int rc;

	rc = fwnode_irq_get(child, 0);
	/* Don't wait forever if the IRQ provider doesn't become available,
	 * just fall back to poll mode
	 */
	if (rc == -EPROBE_DEFER)
		rc = driver_deferred_probe_check_state(&phy->mdio.dev);
	if (rc == -EPROBE_DEFER)
		return rc;

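For context, the deferral fallback above can be read as the pattern sketched below: keep deferring only while driver_deferred_probe_check_state() still says -EPROBE_DEFER, otherwise let the PHY fall back to polling. This is an illustration with an invented helper name, not the actual fwnode_mdio code:

#include <linux/device.h>
#include <linux/phy.h>

/* Illustrative only: example_resolve_phy_irq() is not a kernel function. */
static int example_resolve_phy_irq(struct phy_device *phy, int irq_or_err)
{
	if (irq_or_err == -EPROBE_DEFER)
		irq_or_err = driver_deferred_probe_check_state(&phy->mdio.dev);
	if (irq_or_err == -EPROBE_DEFER)
		return irq_or_err;		/* provider may still show up */

	phy->irq = irq_or_err > 0 ? irq_or_err : PHY_POLL;
	return 0;
}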
@ -706,7 +706,6 @@ static int lan87xx_read_status(struct phy_device *phydev)
|
||||
static int lan87xx_config_aneg(struct phy_device *phydev)
|
||||
{
|
||||
u16 ctl = 0;
|
||||
int rc;
|
||||
|
||||
switch (phydev->master_slave_set) {
|
||||
case MASTER_SLAVE_CFG_MASTER_FORCE:
|
||||
@ -722,11 +721,7 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
rc = phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
|
||||
if (rc == 1)
|
||||
rc = genphy_soft_reset(phydev);
|
||||
|
||||
return rc;
|
||||
return phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
|
||||
}
|
||||
|
||||
static struct phy_driver microchip_t1_phy_driver[] = {
|
||||
|
@@ -1124,7 +1124,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)

	/* NETIF_F_LLTX requires to do our own update of trans_start */
	queue = netdev_get_tx_queue(dev, txq);
	queue->trans_start = jiffies;
	txq_trans_cond_update(queue);

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
@@ -320,7 +320,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
	if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
		kfree_skb(skb);
		goto drop;
	}
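The added pskb_may_pull() above guards a later eth_hdr() dereference; a hedged illustration of that guard (not veth code) is:

#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* eth_hdr() is only safe once at least ETH_HLEN bytes sit in the linear
 * (directly addressable) part of the skb.
 */
static bool example_linear_eth_header(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, ETH_HLEN))
		return false;		/* header still in fragments */

	/* eth_hdr(skb) can now be dereferenced safely */
	return true;
}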
@ -651,11 +651,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
|
||||
|
||||
rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
|
||||
if (rd == NULL)
|
||||
return -ENOBUFS;
|
||||
return -ENOMEM;
|
||||
|
||||
if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
|
||||
kfree(rd);
|
||||
return -ENOBUFS;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
rd->remote_ip = *ip;
|
||||
|
@ -3136,6 +3136,20 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
|
||||
arvif->do_not_send_tmpl = true;
|
||||
else
|
||||
arvif->do_not_send_tmpl = false;
|
||||
|
||||
if (vif->bss_conf.he_support) {
|
||||
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
|
||||
WMI_VDEV_PARAM_BA_MODE,
|
||||
WMI_BA_MODE_BUFFER_SIZE_256);
|
||||
if (ret)
|
||||
ath11k_warn(ar->ab,
|
||||
"failed to set BA BUFFER SIZE 256 for vdev: %d\n",
|
||||
arvif->vdev_id);
|
||||
else
|
||||
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
|
||||
"Set BA BUFFER SIZE 256 for VDEV: %d\n",
|
||||
arvif->vdev_id);
|
||||
}
|
||||
}
|
||||
|
||||
if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
|
||||
@ -3171,14 +3185,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
|
||||
|
||||
if (arvif->is_up && vif->bss_conf.he_support &&
|
||||
vif->bss_conf.he_oper.params) {
|
||||
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
|
||||
WMI_VDEV_PARAM_BA_MODE,
|
||||
WMI_BA_MODE_BUFFER_SIZE_256);
|
||||
if (ret)
|
||||
ath11k_warn(ar->ab,
|
||||
"failed to set BA BUFFER SIZE 256 for vdev: %d\n",
|
||||
arvif->vdev_id);
|
||||
|
||||
param_id = WMI_VDEV_PARAM_HEOPS_0_31;
|
||||
param_value = vif->bss_conf.he_oper.params;
|
||||
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
|
||||
|
@ -839,7 +839,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
|
||||
continue;
|
||||
|
||||
txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
|
||||
fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
|
||||
fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0];
|
||||
if (fi->keyix == keyix)
|
||||
return true;
|
||||
}
|
||||
|
@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
|
||||
{
|
||||
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
|
||||
BUILD_BUG_ON(sizeof(struct ath_frame_info) >
|
||||
sizeof(tx_info->rate_driver_data));
|
||||
return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
|
||||
sizeof(tx_info->status.status_driver_data));
|
||||
return (struct ath_frame_info *) &tx_info->status.status_driver_data[0];
|
||||
}
|
||||
|
||||
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
|
||||
@ -2542,6 +2542,16 @@ skip_tx_complete:
|
||||
spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
|
||||
}
|
||||
|
||||
static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info)
|
||||
{
|
||||
void *ptr = &tx_info->status;
|
||||
|
||||
memset(ptr + sizeof(tx_info->status.rates), 0,
|
||||
sizeof(tx_info->status) -
|
||||
sizeof(tx_info->status.rates) -
|
||||
sizeof(tx_info->status.status_driver_data));
|
||||
}
|
||||
|
||||
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
|
||||
struct ath_tx_status *ts, int nframes, int nbad,
|
||||
int txok)
|
||||
@ -2553,6 +2563,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
|
||||
struct ath_hw *ah = sc->sc_ah;
|
||||
u8 i, tx_rateindex;
|
||||
|
||||
ath_clear_tx_status(tx_info);
|
||||
|
||||
if (txok)
|
||||
tx_info->status.ack_signal = ts->ts_rssi;
|
||||
|
||||
@ -2567,6 +2579,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
|
||||
tx_info->status.ampdu_len = nframes;
|
||||
tx_info->status.ampdu_ack_len = nframes - nbad;
|
||||
|
||||
tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
|
||||
|
||||
for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
|
||||
tx_info->status.rates[i].count = 0;
|
||||
tx_info->status.rates[i].idx = -1;
|
||||
}
|
||||
|
||||
if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
|
||||
(tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
|
||||
/*
|
||||
@ -2588,16 +2607,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
|
||||
tx_info->status.rates[tx_rateindex].count =
|
||||
hw->max_rate_tries;
|
||||
}
|
||||
|
||||
for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
|
||||
tx_info->status.rates[i].count = 0;
|
||||
tx_info->status.rates[i].idx = -1;
|
||||
}
|
||||
|
||||
tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
|
||||
|
||||
/* we report airtime in ath_tx_count_airtime(), don't report twice */
|
||||
tx_info->status.tx_time = 0;
|
||||
}
|
||||
|
||||
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
|
||||
|
@ -557,7 +557,7 @@ enum brcmf_sdio_frmtype {
|
||||
BRCMF_SDIO_FT_SUB,
|
||||
};
|
||||
|
||||
#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
|
||||
#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu))
|
||||
|
||||
/* SDIO Pad drive strength to select value mappings */
|
||||
struct sdiod_drive_str {
|
||||
|
@ -80,7 +80,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
|
||||
|
||||
/* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
|
||||
mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
|
||||
mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
|
||||
|
||||
/* RG_SSUSB_CDR_BR_PE1D = 0x3 */
|
||||
mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
|
||||
|
@ -59,6 +59,8 @@ struct flow_dissector_key_vlan {
|
||||
__be16 vlan_tci;
|
||||
};
|
||||
__be16 vlan_tpid;
|
||||
__be16 vlan_eth_type;
|
||||
u16 padding;
|
||||
};
|
||||
|
||||
struct flow_dissector_mpls_lse {
|
||||
|
@ -1032,7 +1032,7 @@ bool __skb_flow_dissect(const struct net *net,
|
||||
key_eth_addrs = skb_flow_dissector_target(flow_dissector,
|
||||
FLOW_DISSECTOR_KEY_ETH_ADDRS,
|
||||
target_container);
|
||||
memcpy(key_eth_addrs, ð->h_dest, sizeof(*key_eth_addrs));
|
||||
memcpy(key_eth_addrs, eth, sizeof(*key_eth_addrs));
|
||||
}
|
||||
|
||||
proto_again:
|
||||
@ -1183,6 +1183,7 @@ proto_again:
|
||||
VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
|
||||
}
|
||||
key_vlan->vlan_tpid = saved_vlan_tpid;
|
||||
key_vlan->vlan_eth_type = proto;
|
||||
}
|
||||
|
||||
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
|
||||
|
@ -5242,6 +5242,8 @@ static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
|
||||
*prividx = attr_id_l3_stats;
|
||||
|
||||
size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
|
||||
if (!size_l3)
|
||||
goto skip_l3_stats;
|
||||
attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
|
||||
IFLA_OFFLOAD_XSTATS_UNSPEC);
|
||||
if (!attr)
|
||||
@ -5253,6 +5255,7 @@ static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
|
||||
return err;
|
||||
|
||||
have_data = true;
|
||||
skip_l3_stats:
|
||||
*prividx = 0;
|
||||
}
|
||||
|
||||
|
@ -562,7 +562,6 @@ static void dsa_port_teardown(struct dsa_port *dp)
|
||||
{
|
||||
struct devlink_port *dlp = &dp->devlink_port;
|
||||
struct dsa_switch *ds = dp->ds;
|
||||
struct net_device *slave;
|
||||
|
||||
if (!dp->setup)
|
||||
return;
|
||||
@ -584,11 +583,9 @@ static void dsa_port_teardown(struct dsa_port *dp)
|
||||
dsa_port_link_unregister_of(dp);
|
||||
break;
|
||||
case DSA_PORT_TYPE_USER:
|
||||
slave = dp->slave;
|
||||
|
||||
if (slave) {
|
||||
if (dp->slave) {
|
||||
dsa_slave_destroy(dp->slave);
|
||||
dp->slave = NULL;
|
||||
dsa_slave_destroy(slave);
|
||||
}
|
||||
break;
|
||||
}
|
||||
@ -1147,17 +1144,17 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
|
||||
if (err)
|
||||
goto teardown_cpu_ports;
|
||||
|
||||
err = dsa_tree_setup_master(dst);
|
||||
err = dsa_tree_setup_ports(dst);
|
||||
if (err)
|
||||
goto teardown_switches;
|
||||
|
||||
err = dsa_tree_setup_ports(dst);
|
||||
err = dsa_tree_setup_master(dst);
|
||||
if (err)
|
||||
goto teardown_master;
|
||||
goto teardown_ports;
|
||||
|
||||
err = dsa_tree_setup_lags(dst);
|
||||
if (err)
|
||||
goto teardown_ports;
|
||||
goto teardown_master;
|
||||
|
||||
dst->setup = true;
|
||||
|
||||
@ -1165,10 +1162,10 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
|
||||
|
||||
return 0;
|
||||
|
||||
teardown_ports:
|
||||
dsa_tree_teardown_ports(dst);
|
||||
teardown_master:
|
||||
dsa_tree_teardown_master(dst);
|
||||
teardown_ports:
|
||||
dsa_tree_teardown_ports(dst);
|
||||
teardown_switches:
|
||||
dsa_tree_teardown_switches(dst);
|
||||
teardown_cpu_ports:
|
||||
@ -1186,10 +1183,10 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
|
||||
|
||||
dsa_tree_teardown_lags(dst);
|
||||
|
||||
dsa_tree_teardown_ports(dst);
|
||||
|
||||
dsa_tree_teardown_master(dst);
|
||||
|
||||
dsa_tree_teardown_ports(dst);
|
||||
|
||||
dsa_tree_teardown_switches(dst);
|
||||
|
||||
dsa_tree_teardown_cpu_ports(dst);
|
||||
|
@@ -485,7 +485,7 @@ int ip6_forward(struct sk_buff *skb)
		goto drop;

	if (!net->ipv6.devconf_all->disable_policy &&
	    !idev->cnf.disable_policy &&
	    (!idev || !idev->cnf.disable_policy) &&
	    !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
		goto drop;
@ -441,7 +441,7 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
|
||||
#define PRINT_HT_CAP(_cond, _str) \
|
||||
do { \
|
||||
if (_cond) \
|
||||
p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \
|
||||
p += scnprintf(p, bufsz + buf - p, "\t" _str "\n"); \
|
||||
} while (0)
|
||||
char *buf, *p;
|
||||
int i;
|
||||
|
@@ -9363,7 +9363,7 @@ int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
}
EXPORT_SYMBOL_GPL(nft_parse_u32_check);

static unsigned int nft_parse_register(const struct nlattr *attr, u32 *preg)
static int nft_parse_register(const struct nlattr *attr, u32 *preg)
{
	unsigned int reg;

@ -37,12 +37,11 @@ static void nft_socket_wildcard(const struct nft_pktinfo *pkt,
|
||||
|
||||
#ifdef CONFIG_SOCK_CGROUP_DATA
|
||||
static noinline bool
|
||||
nft_sock_get_eval_cgroupv2(u32 *dest, const struct nft_pktinfo *pkt, u32 level)
|
||||
nft_sock_get_eval_cgroupv2(u32 *dest, struct sock *sk, const struct nft_pktinfo *pkt, u32 level)
|
||||
{
|
||||
struct sock *sk = skb_to_full_sk(pkt->skb);
|
||||
struct cgroup *cgrp;
|
||||
|
||||
if (!sk || !sk_fullsock(sk) || !net_eq(nft_net(pkt), sock_net(sk)))
|
||||
if (!sk_fullsock(sk))
|
||||
return false;
|
||||
|
||||
cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
|
||||
@ -109,7 +108,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
|
||||
break;
|
||||
#ifdef CONFIG_SOCK_CGROUP_DATA
|
||||
case NFT_SOCKET_CGROUPV2:
|
||||
if (!nft_sock_get_eval_cgroupv2(dest, pkt, priv->level)) {
|
||||
if (!nft_sock_get_eval_cgroupv2(dest, sk, pkt, priv->level)) {
|
||||
regs->verdict.code = NFT_BREAK;
|
||||
return;
|
||||
}
|
||||
|
@@ -560,6 +560,10 @@ static int nci_close_device(struct nci_dev *ndev)
	mutex_lock(&ndev->req_lock);

	if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
		/* Need to flush the cmd wq in case
		 * there is a queued/running cmd_work
		 */
		flush_workqueue(ndev->cmd_wq);
		del_timer_sync(&ndev->cmd_timer);
		del_timer_sync(&ndev->data_timer);
		mutex_unlock(&ndev->req_lock);
|
||||
if (chain->flushing)
|
||||
return -EAGAIN;
|
||||
|
||||
RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
|
||||
if (*chain_info->pprev == chain->filter_chain)
|
||||
tcf_chain0_head_change(chain, tp);
|
||||
tcf_proto_get(tp);
|
||||
RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
|
||||
rcu_assign_pointer(*chain_info->pprev, tp);
|
||||
|
||||
return 0;
|
||||
|
@ -1013,6 +1013,7 @@ static int fl_set_key_mpls(struct nlattr **tb,
|
||||
static void fl_set_key_vlan(struct nlattr **tb,
|
||||
__be16 ethertype,
|
||||
int vlan_id_key, int vlan_prio_key,
|
||||
int vlan_next_eth_type_key,
|
||||
struct flow_dissector_key_vlan *key_val,
|
||||
struct flow_dissector_key_vlan *key_mask)
|
||||
{
|
||||
@ -1031,6 +1032,11 @@ static void fl_set_key_vlan(struct nlattr **tb,
|
||||
}
|
||||
key_val->vlan_tpid = ethertype;
|
||||
key_mask->vlan_tpid = cpu_to_be16(~0);
|
||||
if (tb[vlan_next_eth_type_key]) {
|
||||
key_val->vlan_eth_type =
|
||||
nla_get_be16(tb[vlan_next_eth_type_key]);
|
||||
key_mask->vlan_eth_type = cpu_to_be16(~0);
|
||||
}
|
||||
}
|
||||
|
||||
static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
|
||||
@ -1602,8 +1608,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
|
||||
|
||||
if (eth_type_vlan(ethertype)) {
|
||||
fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
|
||||
TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
|
||||
&mask->vlan);
|
||||
TCA_FLOWER_KEY_VLAN_PRIO,
|
||||
TCA_FLOWER_KEY_VLAN_ETH_TYPE,
|
||||
&key->vlan, &mask->vlan);
|
||||
|
||||
if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
|
||||
ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
|
||||
@ -1611,6 +1618,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
|
||||
fl_set_key_vlan(tb, ethertype,
|
||||
TCA_FLOWER_KEY_CVLAN_ID,
|
||||
TCA_FLOWER_KEY_CVLAN_PRIO,
|
||||
TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
|
||||
&key->cvlan, &mask->cvlan);
|
||||
fl_set_key_val(tb, &key->basic.n_proto,
|
||||
TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
|
||||
@ -3002,13 +3010,13 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
|
||||
goto nla_put_failure;
|
||||
|
||||
if (mask->basic.n_proto) {
|
||||
if (mask->cvlan.vlan_tpid) {
|
||||
if (mask->cvlan.vlan_eth_type) {
|
||||
if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
|
||||
key->basic.n_proto))
|
||||
goto nla_put_failure;
|
||||
} else if (mask->vlan.vlan_tpid) {
|
||||
} else if (mask->vlan.vlan_eth_type) {
|
||||
if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
|
||||
key->basic.n_proto))
|
||||
key->vlan.vlan_eth_type))
|
||||
goto nla_put_failure;
|
||||
}
|
||||
}
|
||||
|
@@ -417,7 +417,8 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
{
	struct taprio_sched *q = qdisc_priv(sch);

	if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
	/* sk_flags are only safe to use on full sockets. */
	if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
		if (!is_valid_interval(skb, sch))
			return qdisc_drop(skb, sch, to_free);
	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
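The guard added above matters because skb->sk may point at a request or timewait mini-socket, and sk_flags only exists on full sockets; a hedged sketch of the check (not the taprio code itself, helper name invented) is:

#include <net/sock.h>

/* Only consult SOCK_TXTIME after sk_fullsock() confirms a full socket. */
static bool example_sk_wants_txtime(const struct sk_buff *skb)
{
	return skb->sk && sk_fullsock(skb->sk) &&
	       sock_flag(skb->sk, SOCK_TXTIME);
}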
@ -781,7 +781,7 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
|
||||
}
|
||||
}
|
||||
|
||||
if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
|
||||
if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
|
||||
sctp_association_free(new_asoc);
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
}
|
||||
@ -932,7 +932,7 @@ enum sctp_disposition sctp_sf_do_5_1E_ca(struct net *net,
|
||||
|
||||
/* Set peer label for connection. */
|
||||
if (security_sctp_assoc_established((struct sctp_association *)asoc,
|
||||
chunk->skb))
|
||||
chunk->head_skb ?: chunk->skb))
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
|
||||
/* Verify that the chunk length for the COOKIE-ACK is OK.
|
||||
@ -2262,7 +2262,7 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
|
||||
}
|
||||
|
||||
/* Update socket peer label if first association. */
|
||||
if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
|
||||
if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
|
||||
sctp_association_free(new_asoc);
|
||||
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
|
||||
}
|
||||
|
@ -5636,7 +5636,7 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
|
||||
* Set the daddr and initialize id to something more random and also
|
||||
* copy over any ip options.
|
||||
*/
|
||||
sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
|
||||
sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sock->sk);
|
||||
sp->pf->copy_ip_options(sk, sock->sk);
|
||||
|
||||
/* Populate the fields of the newsk from the oldsk and migrate the
|
||||
|
@@ -121,6 +121,7 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
					  bool *own_req)
{
	struct smc_sock *smc;
	struct sock *child;

	smc = smc_clcsock_user_data(sk);

@@ -134,8 +135,17 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
	}

	/* passthrough to original syn recv sock fct */
	return smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash,
					      own_req);
	child = smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash,
					       own_req);
	/* child must not inherit smc or its ops */
	if (child) {
		rcu_assign_sk_user_data(child, NULL);

		/* v4-mapped sockets don't inherit parent ops. Don't restore. */
		if (inet_csk(child)->icsk_af_ops == inet_csk(sk)->icsk_af_ops)
			inet_csk(child)->icsk_af_ops = smc->ori_af_ops;
	}
	return child;

drop:
	dst_release(dst);
@ -191,7 +191,8 @@ static int smc_nl_ueid_dumpinfo(struct sk_buff *skb, u32 portid, u32 seq,
|
||||
flags, SMC_NETLINK_DUMP_UEID);
|
||||
if (!hdr)
|
||||
return -ENOMEM;
|
||||
snprintf(ueid_str, sizeof(ueid_str), "%s", ueid);
|
||||
memcpy(ueid_str, ueid, SMC_MAX_EID_LEN);
|
||||
ueid_str[SMC_MAX_EID_LEN] = 0;
|
||||
if (nla_put_string(skb, SMC_NLA_EID_TABLE_ENTRY, ueid_str)) {
|
||||
genlmsg_cancel(skb, hdr);
|
||||
return -EMSGSIZE;
|
||||
@ -252,7 +253,8 @@ int smc_nl_dump_seid(struct sk_buff *skb, struct netlink_callback *cb)
|
||||
goto end;
|
||||
|
||||
smc_ism_get_system_eid(&seid);
|
||||
snprintf(seid_str, sizeof(seid_str), "%s", seid);
|
||||
memcpy(seid_str, seid, SMC_MAX_EID_LEN);
|
||||
seid_str[SMC_MAX_EID_LEN] = 0;
|
||||
if (nla_put_string(skb, SMC_NLA_SEID_ENTRY, seid_str))
|
||||
goto err;
|
||||
read_lock(&smc_clc_eid_table.lock);
|
||||
|
@@ -311,8 +311,9 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
	list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
		if (!strncmp(ibdev->ibdev->name, ib_name,
			     sizeof(ibdev->ibdev->name)) ||
		    !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name,
			     IB_DEVICE_NAME_MAX - 1)) {
		    (ibdev->ibdev->dev.parent &&
		     !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name,
			      IB_DEVICE_NAME_MAX - 1))) {
			goto out;
		}
	}
@ -528,7 +528,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
|
||||
.len = IEEE80211_MAX_MESH_ID_LEN },
|
||||
[NL80211_ATTR_MPATH_NEXT_HOP] = NLA_POLICY_ETH_ADDR_COMPAT,
|
||||
|
||||
[NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
|
||||
/* allow 3 for NUL-termination, we used to declare this NLA_STRING */
|
||||
[NL80211_ATTR_REG_ALPHA2] = NLA_POLICY_RANGE(NLA_BINARY, 2, 3),
|
||||
[NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
|
||||
|
||||
[NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 },
|
||||
|
@ -2018,11 +2018,13 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
|
||||
/* this is a nontransmitting bss, we need to add it to
|
||||
* transmitting bss' list if it is not there
|
||||
*/
|
||||
spin_lock_bh(&rdev->bss_lock);
|
||||
if (cfg80211_add_nontrans_list(non_tx_data->tx_bss,
|
||||
&res->pub)) {
|
||||
if (__cfg80211_unlink_bss(rdev, res))
|
||||
rdev->bss_generation++;
|
||||
}
|
||||
spin_unlock_bh(&rdev->bss_lock);
|
||||
}
|
||||
|
||||
trace_cfg80211_return_bss(&res->pub);
|
||||
|