Merge tag 'net-6.9-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from netfilter, bluetooth and bpf.

  Fairly usual collection of driver and core fixes. The large selftest
  accompanying one of the fixes is also becoming a common occurrence.

  Current release - regressions:

   - ipv6: fix infinite recursion in fib6_dump_done()

   - net/rds: fix possible null-deref in newly added error path

  Current release - new code bugs:

   - net: do not consume a full cacheline for system_page_pool

   - bpf: fix bpf_arena-related file descriptor leaks in the verifier

   - drv: ice: fix freeing uninitialized pointers, fixing misuse of the
     newfangled __free() auto-cleanup

  Previous releases - regressions:

   - x86/bpf: fixes the BPF JIT with retbleed=stuff

   - xen-netfront: add missing skb_mark_for_recycle, fix page pool
     accounting leaks, revealed by recently added explicit warning

   - tcp: fix bind() regression for v6-only wildcard and v4-mapped-v6
     non-wildcard addresses

   - Bluetooth:
      - replace "hci_qca: Set BDA quirk bit if fwnode exists in DT"
        with better workarounds to un-break some buggy Qualcomm devices
      - set conn encrypted before conn establishes, fix re-connecting
        to some headsets which use slightly unusual sequence of msgs

   - mptcp:
      - prevent BPF accessing lowat from a subflow socket
      - don't account accept() of non-MPC client as fallback to TCP

   - drv: mana: fix Rx DMA datasize and skb_over_panic

   - drv: i40e: fix VF MAC filter removal

  Previous releases - always broken:

   - gro: various fixes related to UDP tunnels - netns crossing
     problems, incorrect checksum conversions, and incorrect packet
     transformations which may lead to panics

   - bpf: support deferring bpf_link dealloc to after RCU grace period

   - nf_tables:
      - release batch on table validation from abort path
      - release mutex after nft_gc_seq_end from abort path
      - flush pending destroy work before exit_net release

   - drv: r8169: skip DASH fw status checks when DASH is disabled"

* tag 'net-6.9-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (81 commits)
  netfilter: validate user input for expected length
  net/sched: act_skbmod: prevent kernel-infoleak
  net: usb: ax88179_178a: avoid the interface always configured as random address
  net: dsa: sja1105: Fix parameters order in sja1110_pcs_mdio_write_c45()
  net: ravb: Always update error counters
  net: ravb: Always process TX descriptor ring
  netfilter: nf_tables: discard table flag update with pending basechain deletion
  netfilter: nf_tables: Fix potential data-race in __nft_flowtable_type_get()
  netfilter: nf_tables: reject new basechain after table flag update
  netfilter: nf_tables: flush pending destroy work before exit_net release
  netfilter: nf_tables: release mutex after nft_gc_seq_end from abort path
  netfilter: nf_tables: release batch on table validation from abort path
  Revert "tg3: Remove residual error handling in tg3_suspend"
  tg3: Remove residual error handling in tg3_suspend
  net: mana: Fix Rx DMA datasize and skb_over_panic
  net/sched: fix lockdep splat in qdisc_tree_reduce_backlog()
  net: phy: micrel: lan8814: Fix when enabling/disabling 1-step timestamping
  net: stmmac: fix rx queue priority assignment
  net: txgbe: fix i2c dev name cannot match clkdev
  net: fec: Set mac_managed_pm during probe
  ...
This commit is contained in: commit c88b9b4cde
@@ -94,6 +94,10 @@ properties:
 
   local-bd-address: true
 
+  qcom,local-bd-address-broken:
+    type: boolean
+    description:
+      boot firmware is incorrectly passing the address in big-endian order
+
 required:
   - compatible
Documentation/networking/devlink/devlink-eswitch-attr.rst (new file, 76 lines)
@@ -0,0 +1,76 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================
+Devlink E-Switch Attribute
+==========================
+
+Devlink E-Switch supports two modes of operation: legacy and switchdev.
+Legacy mode operates based on traditional MAC/VLAN steering rules. Switching
+decisions are made based on MAC addresses, VLANs, etc. There is limited ability
+to offload switching rules to hardware.
+
+On the other hand, switchdev mode allows for more advanced offloading
+capabilities of the E-Switch to hardware. In switchdev mode, more switching
+rules and logic can be offloaded to the hardware switch ASIC. It enables
+representor netdevices that represent the slow path of virtual functions (VFs)
+or scalable-functions (SFs) of the device. See more information about
+:ref:`Documentation/networking/switchdev.rst <switchdev>` and
+:ref:`Documentation/networking/representors.rst <representors>`.
+
+In addition, the devlink E-Switch also comes with other attributes listed
+in the following section.
+
+Attributes Description
+======================
+
+The following is a list of E-Switch attributes.
+
+.. list-table:: E-Switch attributes
+   :widths: 8 5 45
+
+   * - Name
+     - Type
+     - Description
+   * - ``mode``
+     - enum
+     - The mode of the device. The mode can be one of the following:
+
+       * ``legacy`` operates based on traditional MAC/VLAN steering
+         rules.
+       * ``switchdev`` allows for more advanced offloading capabilities of
+         the E-Switch to hardware.
+   * - ``inline-mode``
+     - enum
+     - Some HWs need the VF driver to put part of the packet
+       headers on the TX descriptor so the e-switch can do proper
+       matching and steering. Support for both switchdev mode and legacy mode.
+
+       * ``none`` none.
+       * ``link`` L2 mode.
+       * ``network`` L3 mode.
+       * ``transport`` L4 mode.
+   * - ``encap-mode``
+     - enum
+     - The encapsulation mode of the device. Support for both switchdev mode
+       and legacy mode. The mode can be one of the following:
+
+       * ``none`` Disable encapsulation support.
+       * ``basic`` Enable encapsulation support.
+
+Example Usage
+=============
+
+.. code:: shell
+
+    # enable switchdev mode
+    $ devlink dev eswitch set pci/0000:08:00.0 mode switchdev
+
+    # set inline-mode and encap-mode
+    $ devlink dev eswitch set pci/0000:08:00.0 inline-mode none encap-mode basic
+
+    # display devlink device eswitch attributes
+    $ devlink dev eswitch show pci/0000:08:00.0
+    pci/0000:08:00.0: mode switchdev inline-mode none encap-mode basic
+
+    # enable encap-mode with legacy mode
+    $ devlink dev eswitch set pci/0000:08:00.0 mode legacy inline-mode none encap-mode basic
@@ -67,6 +67,7 @@ general.
    devlink-selftests
    devlink-trap
    devlink-linecard
+   devlink-eswitch-attr
 
 Driver-specific documentation
 -----------------------------
@@ -1,4 +1,5 @@
 .. SPDX-License-Identifier: GPL-2.0
+.. _representors:
 
 =============================
 Network Function Representors
@@ -14019,6 +14019,7 @@ F: drivers/net/ethernet/mellanox/mlx4/en_*
 
 MELLANOX ETHERNET DRIVER (mlx5e)
 M: Saeed Mahameed <saeedm@nvidia.com>
+M: Tariq Toukan <tariqt@nvidia.com>
 L: netdev@vger.kernel.org
 S: Supported
 W: http://www.mellanox.com
@@ -14086,6 +14087,7 @@ F: include/uapi/rdma/mlx4-abi.h
 MELLANOX MLX5 core VPI driver
 M: Saeed Mahameed <saeedm@nvidia.com>
 M: Leon Romanovsky <leonro@nvidia.com>
+M: Tariq Toukan <tariqt@nvidia.com>
 L: netdev@vger.kernel.org
 L: linux-rdma@vger.kernel.org
 S: Supported
@@ -23679,7 +23681,6 @@ F: drivers/scsi/vmw_pvscsi.c
 F: drivers/scsi/vmw_pvscsi.h
 
 VMWARE VIRTUAL PTP CLOCK DRIVER
-M: Jeff Sipek <jsipek@vmware.com>
 R: Ajay Kaher <akaher@vmware.com>
 R: Alexey Makhalov <amakhalov@vmware.com>
 R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
@@ -944,6 +944,8 @@ ap_spi_fp: &spi10 {
 		vddrf-supply = <&pp1300_l2c>;
 		vddch0-supply = <&pp3300_l10c>;
 		max-speed = <3200000>;
+
+		qcom,local-bd-address-broken;
 	};
 };
@@ -117,7 +117,7 @@ extern void callthunks_patch_builtin_calls(void);
 extern void callthunks_patch_module_calls(struct callthunk_sites *sites,
 					  struct module *mod);
 extern void *callthunks_translate_call_dest(void *dest);
-extern int x86_call_depth_emit_accounting(u8 **pprog, void *func);
+extern int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip);
 #else
 static __always_inline void callthunks_patch_builtin_calls(void) {}
 static __always_inline void
@@ -128,7 +128,7 @@ static __always_inline void *callthunks_translate_call_dest(void *dest)
 	return dest;
 }
 static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
-							   void *func)
+							   void *func, void *ip)
 {
 	return 0;
 }
@@ -314,7 +314,7 @@ static bool is_callthunk(void *addr)
 	return !bcmp(pad, insn_buff, tmpl_size);
 }
 
-int x86_call_depth_emit_accounting(u8 **pprog, void *func)
+int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
 {
 	unsigned int tmpl_size = SKL_TMPL_SIZE;
 	u8 insn_buff[MAX_PATCH_LEN];
@@ -327,7 +327,7 @@ int x86_call_depth_emit_accounting(u8 **pprog, void *func)
 		return 0;
 
 	memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
-	apply_relocation(insn_buff, tmpl_size, *pprog,
+	apply_relocation(insn_buff, tmpl_size, ip,
 			 skl_call_thunk_template, tmpl_size);
 
 	memcpy(*pprog, insn_buff, tmpl_size);
@@ -480,7 +480,7 @@ static int emit_call(u8 **pprog, void *func, void *ip)
 static int emit_rsb_call(u8 **pprog, void *func, void *ip)
 {
 	OPTIMIZER_HIDE_VAR(func);
-	x86_call_depth_emit_accounting(pprog, func);
+	ip += x86_call_depth_emit_accounting(pprog, func, ip);
 	return emit_patch(pprog, func, ip, 0xE8);
 }
 
@@ -1972,20 +1972,17 @@ populate_extable:
 
 			/* call */
 		case BPF_JMP | BPF_CALL: {
-			int offs;
+			u8 *ip = image + addrs[i - 1];
 
 			func = (u8 *) __bpf_call_base + imm32;
 			if (tail_call_reachable) {
 				RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
-				if (!imm32)
-					return -EINVAL;
-				offs = 7 + x86_call_depth_emit_accounting(&prog, func);
-			} else {
-				if (!imm32)
-					return -EINVAL;
-				offs = x86_call_depth_emit_accounting(&prog, func);
+				ip += 7;
 			}
-			if (emit_call(&prog, func, image + addrs[i - 1] + offs))
+			if (!imm32)
+				return -EINVAL;
+			ip += x86_call_depth_emit_accounting(&prog, func, ip);
+			if (emit_call(&prog, func, ip))
 				return -EINVAL;
 			break;
 		}
@@ -2835,7 +2832,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 		 * Direct-call fentry stub, as such it needs accounting for the
 		 * __fentry__ call.
 		 */
-		x86_call_depth_emit_accounting(&prog, NULL);
+		x86_call_depth_emit_accounting(&prog, NULL, image);
 	}
 	EMIT1(0x55);		 /* push rbp */
 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
@@ -826,11 +826,15 @@ EXPORT_SYMBOL_GPL(qca_uart_setup);
 
 int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
 {
+	bdaddr_t bdaddr_swapped;
 	struct sk_buff *skb;
 	int err;
 
-	skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr,
-				HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+	baswap(&bdaddr_swapped, bdaddr);
+
+	skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6,
+				&bdaddr_swapped, HCI_EV_VENDOR,
+				HCI_INIT_TIMEOUT);
 	if (IS_ERR(skb)) {
 		err = PTR_ERR(skb);
 		bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err);
@@ -7,7 +7,6 @@
  *
  *  Copyright (C) 2007 Texas Instruments, Inc.
  *  Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
- *  Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  *  Acknowledgements:
  *  This file is based on hci_ll.c, which was...
@@ -226,6 +225,7 @@ struct qca_serdev {
 	struct qca_power *bt_power;
 	u32 init_speed;
 	u32 oper_speed;
+	bool bdaddr_property_broken;
 	const char *firmware_name;
 };
 
@@ -1843,6 +1843,7 @@ static int qca_setup(struct hci_uart *hu)
 	const char *firmware_name = qca_get_firmware_name(hu);
 	int ret;
 	struct qca_btsoc_version ver;
+	struct qca_serdev *qcadev;
 	const char *soc_name;
 
 	ret = qca_check_speeds(hu);
@@ -1904,16 +1905,11 @@ retry:
 	case QCA_WCN6750:
 	case QCA_WCN6855:
 	case QCA_WCN7850:
+		set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
 
-		/* Set BDA quirk bit for reading BDA value from fwnode property
-		 * only if that property exist in DT.
-		 */
-		if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) {
-			set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
-			bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later");
-		} else {
-			bt_dev_dbg(hdev, "local-bd-address` is not present in the devicetree so not setting quirk bit for BDA");
-		}
+		qcadev = serdev_device_get_drvdata(hu->serdev);
+		if (qcadev->bdaddr_property_broken)
+			set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
 
 		hci_set_aosp_capable(hdev);
 
@@ -2295,6 +2291,9 @@ static int qca_serdev_probe(struct serdev_device *serdev)
 	if (!qcadev->oper_speed)
 		BT_DBG("UART will pick default operating speed");
 
+	qcadev->bdaddr_property_broken = device_property_read_bool(&serdev->dev,
+			"qcom,local-bd-address-broken");
+
 	if (data)
 		qcadev->btsoc_type = data->soc_type;
 	else
@@ -5503,8 +5503,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
 		.family = MV88E6XXX_FAMILY_6250,
 		.name = "Marvell 88E6020",
 		.num_databases = 64,
-		.num_ports = 4,
+		/* Ports 2-4 are not routed to pins
+		 * => usable ports 0, 1, 5, 6
+		 */
+		.num_ports = 7,
 		.num_internal_phys = 2,
+		.invalid_port_mask = BIT(2) | BIT(3) | BIT(4),
 		.max_vid = 4095,
 		.port_base_addr = 0x8,
 		.phy_base_addr = 0x0,
@@ -94,7 +94,7 @@ int sja1110_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg)
 	return tmp & 0xffff;
 }
 
-int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int reg, int mmd,
+int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
 			       u16 val)
 {
 	struct sja1105_mdio_private *mdio_priv = bus->priv;
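The sja1110 fix above is purely an argument-order correction: the definition declared (reg, mmd) while the prototype and its callers pass (mmd, reg). As a minimal standalone C sketch (all names here are invented, not driver code) of why this class of bug compiles silently — prototype parameter names are ignored, so only the body's use of the swapped names goes wrong:

#include <stdio.h>

/* Prototype promised to callers: device number first, then register. */
int pcs_write(int mmd, int reg, unsigned short val);

int main(void)
{
	/* Caller passes (mmd=31, reg=0x8030) per the prototype. */
	return pcs_write(31, 0x8030, 0x1);
}

/* Buggy definition with the two ints swapped: the body now treats the
 * device number as the register and vice versa -- the shape of the
 * sja1110_pcs_mdio_write_c45() bug. */
int pcs_write(int reg, int mmd, unsigned short val)
{
	printf("mmd=%#x reg=%#x val=%#x\n", mmd, reg, val);
	return 0;
}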
@@ -3280,7 +3280,7 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
 }
 
 /* Returns a reusable dma control register value */
-static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
+static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx)
 {
 	unsigned int i;
 	u32 reg;
@@ -3305,6 +3305,14 @@ static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
 	udelay(10);
 	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
 
+	if (flush_rx) {
+		reg = bcmgenet_rbuf_ctrl_get(priv);
+		bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
+		udelay(10);
+		bcmgenet_rbuf_ctrl_set(priv, reg);
+		udelay(10);
+	}
+
 	return dma_ctrl;
 }
 
@@ -3368,8 +3376,8 @@ static int bcmgenet_open(struct net_device *dev)
 
 	bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
-	/* Disable RX/TX DMA and flush TX queues */
-	dma_ctrl = bcmgenet_dma_disable(priv);
+	/* Disable RX/TX DMA and flush TX and RX queues */
+	dma_ctrl = bcmgenet_dma_disable(priv, true);
 
 	/* Reinitialize TDMA and RDMA and SW housekeeping */
 	ret = bcmgenet_init_dma(priv);
@@ -4235,7 +4243,7 @@ static int bcmgenet_resume(struct device *d)
 		bcmgenet_hfb_create_rxnfc_filter(priv, rule);
 
 	/* Disable RX/TX DMA and flush TX queues */
-	dma_ctrl = bcmgenet_dma_disable(priv);
+	dma_ctrl = bcmgenet_dma_disable(priv, false);
 
 	/* Reinitialize TDMA and RDMA and SW housekeeping */
 	ret = bcmgenet_init_dma(priv);
@@ -2454,8 +2454,6 @@ static int fec_enet_mii_probe(struct net_device *ndev)
 	fep->link = 0;
 	fep->full_duplex = 0;
 
-	phy_dev->mac_managed_pm = true;
-
 	phy_attached_info(phy_dev);
 
 	return 0;
@@ -2467,10 +2465,12 @@ static int fec_enet_mii_init(struct platform_device *pdev)
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	bool suppress_preamble = false;
+	struct phy_device *phydev;
 	struct device_node *node;
 	int err = -ENXIO;
 	u32 mii_speed, holdtime;
 	u32 bus_freq;
+	int addr;
 
 	/*
 	 * The i.MX28 dual fec interfaces are not equal.
@@ -2584,6 +2584,13 @@ static int fec_enet_mii_init(struct platform_device *pdev)
 		goto err_out_free_mdiobus;
 	of_node_put(node);
 
+	/* find all the PHY devices on the bus and set mac_managed_pm to true */
+	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+		phydev = mdiobus_get_phy(fep->mii_bus, addr);
+		if (phydev)
+			phydev->mac_managed_pm = true;
+	}
+
 	mii_cnt++;
 
 	/* save fec0 mii_bus */
@@ -628,6 +628,7 @@ struct e1000_phy_info {
 	u32 id;
 	u32 reset_delay_us; /* in usec */
 	u32 revision;
+	u32 retry_count;
 
 	enum e1000_media_type media_type;
 
@@ -644,6 +645,7 @@ struct e1000_phy_info {
 	bool polarity_correction;
 	bool speed_downgraded;
 	bool autoneg_wait_to_complete;
+	bool retry_enabled;
 };
 
 struct e1000_nvm_info {
@@ -222,11 +222,18 @@ out:
 	if (hw->mac.type >= e1000_pch_lpt) {
 		/* Only unforce SMBus if ME is not active */
 		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+			/* Switching PHY interface always returns MDI error
+			 * so disable retry mechanism to avoid wasting time
+			 */
+			e1000e_disable_phy_retry(hw);
+
 			/* Unforce SMBus mode in PHY */
 			e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
 			phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
 			e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
 
+			e1000e_enable_phy_retry(hw);
+
 			/* Unforce SMBus mode in MAC */
 			mac_reg = er32(CTRL_EXT);
 			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
@@ -310,6 +317,11 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 		goto out;
 	}
 
+	/* There is no guarantee that the PHY is accessible at this time
+	 * so disable retry mechanism to avoid wasting time
+	 */
+	e1000e_disable_phy_retry(hw);
+
 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
 	 * inaccessible and resetting the PHY is not blocked, toggle the
 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
@@ -380,6 +392,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 		break;
 	}
 
+	e1000e_enable_phy_retry(hw);
+
 	hw->phy.ops.release(hw);
 	if (!ret_val) {
@@ -449,6 +463,11 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 
 	phy->id = e1000_phy_unknown;
 
+	if (hw->mac.type == e1000_pch_mtp) {
+		phy->retry_count = 2;
+		e1000e_enable_phy_retry(hw);
+	}
+
 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
 	if (ret_val)
 		return ret_val;
@@ -1146,18 +1165,6 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
 	if (ret_val)
 		goto out;
 
-	/* Force SMBus mode in PHY */
-	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
-	if (ret_val)
-		goto release;
-	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
-	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
-
-	/* Force SMBus mode in MAC */
-	mac_reg = er32(CTRL_EXT);
-	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
-	ew32(CTRL_EXT, mac_reg);
-
 	/* Si workaround for ULP entry flow on i127/rev6 h/w.  Enable
 	 * LPLU and disable Gig speed when entering ULP
 	 */
@@ -1313,6 +1320,11 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
 		/* Toggle LANPHYPC Value bit */
 		e1000_toggle_lanphypc_pch_lpt(hw);
 
+	/* Switching PHY interface always returns MDI error
+	 * so disable retry mechanism to avoid wasting time
+	 */
+	e1000e_disable_phy_retry(hw);
+
 	/* Unforce SMBus mode in PHY */
 	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
 	if (ret_val) {
@@ -1333,6 +1345,8 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
 	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
 	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
 
+	e1000e_enable_phy_retry(hw);
+
 	/* Unforce SMBus mode in MAC */
 	mac_reg = er32(CTRL_EXT);
 	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
@@ -6623,6 +6623,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl, ctrl_ext, rctl, status, wufc;
 	int retval = 0;
+	u16 smb_ctrl;
 
 	/* Runtime suspend should only enable wakeup for link changes */
 	if (runtime)
@@ -6696,6 +6697,23 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
 			if (retval)
 				return retval;
 		}
+
+		/* Force SMBUS to allow WOL */
+		/* Switching PHY interface always returns MDI error
+		 * so disable retry mechanism to avoid wasting time
+		 */
+		e1000e_disable_phy_retry(hw);
+
+		e1e_rphy(hw, CV_SMB_CTRL, &smb_ctrl);
+		smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
+		e1e_wphy(hw, CV_SMB_CTRL, smb_ctrl);
+
+		e1000e_enable_phy_retry(hw);
+
+		/* Force SMBus mode in MAC */
+		ctrl_ext = er32(CTRL_EXT);
+		ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
+		ew32(CTRL_EXT, ctrl_ext);
 	}
 
 	/* Ensure that the appropriate bits are set in LPI_CTRL
@@ -107,6 +107,16 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
 	return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0);
 }
 
+void e1000e_disable_phy_retry(struct e1000_hw *hw)
+{
+	hw->phy.retry_enabled = false;
+}
+
+void e1000e_enable_phy_retry(struct e1000_hw *hw)
+{
+	hw->phy.retry_enabled = true;
+}
+
 /**
  *  e1000e_read_phy_reg_mdic - Read MDI control register
  *  @hw: pointer to the HW structure
@@ -118,55 +128,73 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
  **/
 s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
 {
+	u32 i, mdic = 0, retry_counter, retry_max;
 	struct e1000_phy_info *phy = &hw->phy;
-	u32 i, mdic = 0;
+	bool success;
 
 	if (offset > MAX_PHY_REG_ADDRESS) {
 		e_dbg("PHY Address %d is out of range\n", offset);
 		return -E1000_ERR_PARAM;
 	}
 
+	retry_max = phy->retry_enabled ? phy->retry_count : 0;
+
 	/* Set up Op-code, Phy Address, and register offset in the MDI
 	 * Control register.  The MAC will take care of interfacing with the
 	 * PHY to retrieve the desired data.
 	 */
-	mdic = ((offset << E1000_MDIC_REG_SHIFT) |
-		(phy->addr << E1000_MDIC_PHY_SHIFT) |
-		(E1000_MDIC_OP_READ));
+	for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) {
+		success = true;
 
-	ew32(MDIC, mdic);
+		mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+			(phy->addr << E1000_MDIC_PHY_SHIFT) |
+			(E1000_MDIC_OP_READ));
 
-	/* Poll the ready bit to see if the MDI read completed
-	 * Increasing the time out as testing showed failures with
-	 * the lower time out
-	 */
-	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
-		udelay(50);
-		mdic = er32(MDIC);
-		if (mdic & E1000_MDIC_READY)
-			break;
-	}
-	if (!(mdic & E1000_MDIC_READY)) {
-		e_dbg("MDI Read PHY Reg Address %d did not complete\n", offset);
-		return -E1000_ERR_PHY;
-	}
-	if (mdic & E1000_MDIC_ERROR) {
-		e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
-		return -E1000_ERR_PHY;
-	}
-	if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
-		e_dbg("MDI Read offset error - requested %d, returned %d\n",
-		      offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
-		return -E1000_ERR_PHY;
-	}
-	*data = (u16)mdic;
+		ew32(MDIC, mdic);
 
-	/* Allow some time after each MDIC transaction to avoid
-	 * reading duplicate data in the next MDIC transaction.
-	 */
-	if (hw->mac.type == e1000_pch2lan)
-		udelay(100);
-	return 0;
+		/* Poll the ready bit to see if the MDI read completed
+		 * Increasing the time out as testing showed failures with
+		 * the lower time out
+		 */
+		for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+			usleep_range(50, 60);
+			mdic = er32(MDIC);
+			if (mdic & E1000_MDIC_READY)
+				break;
+		}
+		if (!(mdic & E1000_MDIC_READY)) {
+			e_dbg("MDI Read PHY Reg Address %d did not complete\n",
+			      offset);
+			success = false;
+		}
+		if (mdic & E1000_MDIC_ERROR) {
+			e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
+			success = false;
+		}
+		if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
+			e_dbg("MDI Read offset error - requested %d, returned %d\n",
+			      offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
+			success = false;
+		}
+
+		/* Allow some time after each MDIC transaction to avoid
+		 * reading duplicate data in the next MDIC transaction.
+		 */
+		if (hw->mac.type == e1000_pch2lan)
+			usleep_range(100, 150);
+
+		if (success) {
+			*data = (u16)mdic;
+			return 0;
+		}
+
+		if (retry_counter != retry_max) {
+			e_dbg("Perform retry on PHY transaction...\n");
+			mdelay(10);
+		}
+	}
+
+	return -E1000_ERR_PHY;
 }
 
 /**
@@ -179,56 +207,72 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
  **/
 s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
 {
+	u32 i, mdic = 0, retry_counter, retry_max;
 	struct e1000_phy_info *phy = &hw->phy;
-	u32 i, mdic = 0;
+	bool success;
 
 	if (offset > MAX_PHY_REG_ADDRESS) {
 		e_dbg("PHY Address %d is out of range\n", offset);
 		return -E1000_ERR_PARAM;
 	}
 
+	retry_max = phy->retry_enabled ? phy->retry_count : 0;
+
 	/* Set up Op-code, Phy Address, and register offset in the MDI
 	 * Control register.  The MAC will take care of interfacing with the
 	 * PHY to retrieve the desired data.
 	 */
-	mdic = (((u32)data) |
-		(offset << E1000_MDIC_REG_SHIFT) |
-		(phy->addr << E1000_MDIC_PHY_SHIFT) |
-		(E1000_MDIC_OP_WRITE));
+	for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) {
+		success = true;
 
-	ew32(MDIC, mdic);
+		mdic = (((u32)data) |
+			(offset << E1000_MDIC_REG_SHIFT) |
+			(phy->addr << E1000_MDIC_PHY_SHIFT) |
+			(E1000_MDIC_OP_WRITE));
 
-	/* Poll the ready bit to see if the MDI read completed
-	 * Increasing the time out as testing showed failures with
-	 * the lower time out
-	 */
-	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
-		udelay(50);
-		mdic = er32(MDIC);
-		if (mdic & E1000_MDIC_READY)
-			break;
-	}
-	if (!(mdic & E1000_MDIC_READY)) {
-		e_dbg("MDI Write PHY Reg Address %d did not complete\n", offset);
-		return -E1000_ERR_PHY;
-	}
-	if (mdic & E1000_MDIC_ERROR) {
-		e_dbg("MDI Write PHY Red Address %d Error\n", offset);
-		return -E1000_ERR_PHY;
-	}
-	if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
-		e_dbg("MDI Write offset error - requested %d, returned %d\n",
-		      offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
-		return -E1000_ERR_PHY;
-	}
+		ew32(MDIC, mdic);
 
-	/* Allow some time after each MDIC transaction to avoid
-	 * reading duplicate data in the next MDIC transaction.
-	 */
-	if (hw->mac.type == e1000_pch2lan)
-		udelay(100);
-
-	return 0;
+		/* Poll the ready bit to see if the MDI read completed
+		 * Increasing the time out as testing showed failures with
+		 * the lower time out
+		 */
+		for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+			usleep_range(50, 60);
+			mdic = er32(MDIC);
+			if (mdic & E1000_MDIC_READY)
+				break;
+		}
+		if (!(mdic & E1000_MDIC_READY)) {
+			e_dbg("MDI Write PHY Reg Address %d did not complete\n",
+			      offset);
+			success = false;
+		}
+		if (mdic & E1000_MDIC_ERROR) {
+			e_dbg("MDI Write PHY Reg Address %d Error\n", offset);
+			success = false;
+		}
+		if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
+			e_dbg("MDI Write offset error - requested %d, returned %d\n",
+			      offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
+			success = false;
+		}
+
+		/* Allow some time after each MDIC transaction to avoid
+		 * reading duplicate data in the next MDIC transaction.
+		 */
+		if (hw->mac.type == e1000_pch2lan)
+			usleep_range(100, 150);
+
+		if (success)
+			return 0;
+
+		if (retry_counter != retry_max) {
+			e_dbg("Perform retry on PHY transaction...\n");
+			mdelay(10);
+		}
+	}
+
+	return -E1000_ERR_PHY;
 }
 
 /**
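For orientation, a minimal standalone sketch of the bounded-retry shape the e1000e rework introduces above (a pseudo-driver, not kernel code; do_mdic_transaction() and struct phy_state are invented for illustration). The design point is that the retry budget is zero unless a caller has opted in, so transactions that are known to fail (e.g. while the PHY interface is being switched) cost nothing extra:

#include <stdbool.h>
#include <stdio.h>

struct phy_state {
	bool retry_enabled;
	unsigned int retry_count;
};

/* Stub transaction that fails twice, then succeeds (for demonstration). */
static bool do_mdic_transaction(void)
{
	static int calls;
	return ++calls >= 3;
}

static int mdic_with_retry(const struct phy_state *phy)
{
	unsigned int retry_max = phy->retry_enabled ? phy->retry_count : 0;
	unsigned int attempt;

	/* One initial attempt plus up to retry_max retries. */
	for (attempt = 0; attempt <= retry_max; attempt++) {
		if (do_mdic_transaction())
			return 0;
		printf("attempt %u failed, retrying\n", attempt);
	}
	return -1;
}

int main(void)
{
	struct phy_state phy = { .retry_enabled = true, .retry_count = 2 };
	return mdic_with_retry(&phy);
}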
@@ -51,6 +51,8 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
 s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
 void e1000_power_up_phy_copper(struct e1000_hw *hw);
 void e1000_power_down_phy_copper(struct e1000_hw *hw);
+void e1000e_disable_phy_retry(struct e1000_hw *hw);
+void e1000e_enable_phy_retry(struct e1000_hw *hw);
 s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
 s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
@@ -955,6 +955,7 @@ struct i40e_q_vector {
 	struct rcu_head rcu;	/* to avoid race with update stats on free */
 	char name[I40E_INT_NAME_STR_LEN];
 	bool arm_wb_state;
+	bool in_busy_poll;
 	int irq_num;		/* IRQ assigned to this q_vector */
 } ____cacheline_internodealigned_in_smp;
@@ -1253,8 +1253,11 @@ int i40e_count_filters(struct i40e_vsi *vsi)
 	int bkt;
 	int cnt = 0;
 
-	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
-		++cnt;
+	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+		if (f->state == I40E_FILTER_NEW ||
+		    f->state == I40E_FILTER_ACTIVE)
+			++cnt;
+	}
 
 	return cnt;
 }
@@ -3911,6 +3914,12 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
 		     q_vector->tx.target_itr >> 1);
 		q_vector->tx.current_itr = q_vector->tx.target_itr;
 
+		/* Set ITR for software interrupts triggered after exiting
+		 * busy-loop polling.
+		 */
+		wr32(hw, I40E_PFINT_ITRN(I40E_SW_ITR, vector - 1),
+		     I40E_ITR_20K);
+
 		wr32(hw, I40E_PFINT_RATEN(vector - 1),
 		     i40e_intrl_usec_to_reg(vsi->int_rate_limit));
@@ -333,8 +333,11 @@
 #define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
 #define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
 #define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
 #define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
 #define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
@@ -2630,7 +2630,22 @@ process_next:
 	return failure ? budget : (int)total_rx_packets;
 }
 
-static inline u32 i40e_buildreg_itr(const int type, u16 itr)
+/**
+ * i40e_buildreg_itr - build a value for writing to I40E_PFINT_DYN_CTLN register
+ * @itr_idx: interrupt throttling index
+ * @interval: interrupt throttling interval value in usecs
+ * @force_swint: force software interrupt
+ *
+ * The function builds a value for I40E_PFINT_DYN_CTLN register that
+ * is used to update interrupt throttling interval for specified ITR index
+ * and optionally enforces a software interrupt. If the @itr_idx is equal
+ * to I40E_ITR_NONE then no interval change is applied and only @force_swint
+ * parameter is taken into account. If the interval change and enforced
+ * software interrupt are not requested then the built value just enables
+ * appropriate vector interrupt.
+ **/
+static u32 i40e_buildreg_itr(enum i40e_dyn_idx itr_idx, u16 interval,
+			     bool force_swint)
 {
 	u32 val;
 
@@ -2644,23 +2659,33 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
 	 * an event in the PBA anyway so we need to rely on the automask
 	 * to hold pending events for us until the interrupt is re-enabled
 	 *
-	 * The itr value is reported in microseconds, and the register
-	 * value is recorded in 2 microsecond units. For this reason we
-	 * only need to shift by the interval shift - 1 instead of the
-	 * full value.
+	 * We have to shift the given value as it is reported in microseconds
+	 * and the register value is recorded in 2 microsecond units.
 	 */
-	itr &= I40E_ITR_MASK;
+	interval >>= 1;
 
+	/* 1. Enable vector interrupt
+	 * 2. Update the interval for the specified ITR index
+	 *    (I40E_ITR_NONE in the register is used to indicate that
+	 *    no interval update is requested)
+	 */
 	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
-	      (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
+	      FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX_MASK, itr_idx) |
+	      FIELD_PREP(I40E_PFINT_DYN_CTLN_INTERVAL_MASK, interval);
+
+	/* 3. Enforce software interrupt trigger if requested
+	 *    (These software interrupts rate is limited by ITR2 that is
+	 *     set to 20K interrupts per second)
+	 */
+	if (force_swint)
+		val |= I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+		       I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
+		       FIELD_PREP(I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK,
+				  I40E_SW_ITR);
 
 	return val;
 }
 
-/* a small macro to shorten up some long lines */
-#define INTREG I40E_PFINT_DYN_CTLN
-
 /* The act of updating the ITR will cause it to immediately trigger. In order
  * to prevent this from throwing off adaptive update statistics we defer the
  * update so that it can only happen so often. So after either Tx or Rx are
@@ -2679,8 +2704,10 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
 					  struct i40e_q_vector *q_vector)
 {
+	enum i40e_dyn_idx itr_idx = I40E_ITR_NONE;
 	struct i40e_hw *hw = &vsi->back->hw;
-	u32 intval;
+	u16 interval = 0;
+	u32 itr_val;
 
 	/* If we don't have MSIX, then we only need to re-enable icr0 */
 	if (!test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
@@ -2702,8 +2729,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
 	 */
 	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
 		/* Rx ITR needs to be reduced, this is highest priority */
-		intval = i40e_buildreg_itr(I40E_RX_ITR,
-					   q_vector->rx.target_itr);
+		itr_idx = I40E_RX_ITR;
+		interval = q_vector->rx.target_itr;
 		q_vector->rx.current_itr = q_vector->rx.target_itr;
 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
 	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
@@ -2712,25 +2739,36 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
 		/* Tx ITR needs to be reduced, this is second priority
 		 * Tx ITR needs to be increased more than Rx, fourth priority
 		 */
-		intval = i40e_buildreg_itr(I40E_TX_ITR,
-					   q_vector->tx.target_itr);
+		itr_idx = I40E_TX_ITR;
+		interval = q_vector->tx.target_itr;
 		q_vector->tx.current_itr = q_vector->tx.target_itr;
 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
 	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
 		/* Rx ITR needs to be increased, third priority */
-		intval = i40e_buildreg_itr(I40E_RX_ITR,
-					   q_vector->rx.target_itr);
+		itr_idx = I40E_RX_ITR;
+		interval = q_vector->rx.target_itr;
 		q_vector->rx.current_itr = q_vector->rx.target_itr;
 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
 	} else {
 		/* No ITR update, lowest priority */
-		intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
 		if (q_vector->itr_countdown)
 			q_vector->itr_countdown--;
 	}
 
-	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
-		wr32(hw, INTREG(q_vector->reg_idx), intval);
+	/* Do not update interrupt control register if VSI is down */
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		return;
+
+	/* Update ITR interval if necessary and enforce software interrupt
+	 * if we are exiting busy poll.
+	 */
+	if (q_vector->in_busy_poll) {
+		itr_val = i40e_buildreg_itr(itr_idx, interval, true);
+		q_vector->in_busy_poll = false;
+	} else {
+		itr_val = i40e_buildreg_itr(itr_idx, interval, false);
+	}
+	wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->reg_idx), itr_val);
 }
 
 /**
@@ -2845,6 +2883,8 @@ tx_only:
 	 */
 	if (likely(napi_complete_done(napi, work_done)))
 		i40e_update_enable_itr(vsi, q_vector);
+	else
+		q_vector->in_busy_poll = true;
 
 	return min(work_done, budget - 1);
 }
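A standalone sketch of the FIELD_PREP-style bitfield packing the new i40e_buildreg_itr() relies on. This re-implements the idea with compiler builtins; the kernel's real macros live in <linux/bitfield.h>, and the field positions below mirror the register layout shown above but are used here purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))
/* Shift the value to the mask's lowest set bit, then clamp to the mask. */
#define FIELD_PREP32(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define DYN_CTLN_INTENA    (1u << 0)
#define DYN_CTLN_ITR_INDX  GENMASK32(4, 3)   /* bits 4:3  */
#define DYN_CTLN_INTERVAL  GENMASK32(16, 5)  /* bits 16:5 */

int main(void)
{
	/* ITR index 0 (Rx), 40 us interval -> 20 register units of 2 us. */
	uint32_t val = DYN_CTLN_INTENA |
		       FIELD_PREP32(DYN_CTLN_ITR_INDX, 0) |
		       FIELD_PREP32(DYN_CTLN_INTERVAL, 40 >> 1);

	printf("reg = 0x%08x\n", (unsigned)val);
	return 0;
}

Compared with open-coded "(val << (SHIFT - 1))" arithmetic, driving everything from the mask makes the shift and the width impossible to get out of sync, which is exactly the cleanup the i40e hunk performs.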
@@ -68,6 +68,7 @@ enum i40e_dyn_idx {
 /* these are indexes into ITRN registers */
 #define I40E_RX_ITR I40E_IDX_ITR0
 #define I40E_TX_ITR I40E_IDX_ITR1
+#define I40E_SW_ITR I40E_IDX_ITR2
 
 /* Supported RSS offloads */
 #define I40E_DEFAULT_RSS_HENA ( \
@@ -1624,8 +1624,8 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
 {
 	struct i40e_hw *hw = &pf->hw;
 	struct i40e_vf *vf;
-	int i, v;
 	u32 reg;
+	int i;
 
 	/* If we don't have any VFs, then there is nothing to reset */
 	if (!pf->num_alloc_vfs)
@@ -1636,11 +1636,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
 		return false;
 
 	/* Begin reset on all VFs at once */
-	for (v = 0; v < pf->num_alloc_vfs; v++) {
-		vf = &pf->vf[v];
+	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
 		/* If VF is being reset no need to trigger reset again */
 		if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
-			i40e_trigger_vf_reset(&pf->vf[v], flr);
+			i40e_trigger_vf_reset(vf, flr);
 	}
 
 	/* HW requires some time to make sure it can flush the FIFO for a VF
@@ -1649,14 +1648,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
 	 * the VFs using a simple iterator that increments once that VF has
 	 * finished resetting.
 	 */
-	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
+	for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) {
 		usleep_range(10000, 20000);
 
 		/* Check each VF in sequence, beginning with the VF to fail
 		 * the previous check.
 		 */
-		while (v < pf->num_alloc_vfs) {
-			vf = &pf->vf[v];
+		while (vf < &pf->vf[pf->num_alloc_vfs]) {
 			if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
 				reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
 				if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
@@ -1666,7 +1664,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
 			/* If the current VF has finished resetting, move on
 			 * to the next VF in sequence.
 			 */
-			v++;
+			++vf;
 		}
 	}
 
@@ -1676,39 +1674,39 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
 	/* Display a warning if at least one VF didn't manage to reset in
 	 * time, but continue on with the operation.
 	 */
-	if (v < pf->num_alloc_vfs)
+	if (vf < &pf->vf[pf->num_alloc_vfs])
 		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
-			pf->vf[v].vf_id);
+			vf->vf_id);
 	usleep_range(10000, 20000);
 
 	/* Begin disabling all the rings associated with VFs, but do not wait
 	 * between each VF.
 	 */
-	for (v = 0; v < pf->num_alloc_vfs; v++) {
+	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
 		/* On initial reset, we don't have any queues to disable */
-		if (pf->vf[v].lan_vsi_idx == 0)
+		if (vf->lan_vsi_idx == 0)
 			continue;
 
 		/* If VF is reset in another thread just continue */
 		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
 			continue;
 
-		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
+		i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]);
 	}
 
 	/* Now that we've notified HW to disable all of the VF rings, wait
 	 * until they finish.
 	 */
-	for (v = 0; v < pf->num_alloc_vfs; v++) {
+	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
 		/* On initial reset, we don't have any queues to disable */
-		if (pf->vf[v].lan_vsi_idx == 0)
+		if (vf->lan_vsi_idx == 0)
 			continue;
 
 		/* If VF is reset in another thread just continue */
 		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
 			continue;
 
-		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
+		i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]);
 	}
 
 	/* Hw may need up to 50ms to finish disabling the RX queues. We
@@ -1717,12 +1715,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
 	mdelay(50);
 
 	/* Finish the reset on each VF */
-	for (v = 0; v < pf->num_alloc_vfs; v++) {
+	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
 		/* If VF is reset in another thread just continue */
 		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
 			continue;
 
-		i40e_cleanup_reset_vf(&pf->vf[v]);
+		i40e_cleanup_reset_vf(vf);
 	}
 
 	i40e_flush(hw);
@@ -3139,11 +3137,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 		/* Allow to delete VF primary MAC only if it was not set
 		 * administratively by PF or if VF is trusted.
 		 */
-		if (ether_addr_equal(addr, vf->default_lan_addr.addr) &&
-		    i40e_can_vf_change_mac(vf))
-			was_unimac_deleted = true;
-		else
-			continue;
+		if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+			if (i40e_can_vf_change_mac(vf))
+				was_unimac_deleted = true;
+			else
+				continue;
+		}
 
 		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
 			ret = -EINVAL;
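The i40e conversion above swaps index-based loops for pointer-range iteration, so the loop body never re-indexes the array and cannot confuse two counters. A tiny self-contained sketch of the idiom (struct vf here is a stand-in, not the driver's type):

#include <stdio.h>

struct vf {
	int id;
};

int main(void)
{
	struct vf vfs[4] = { {0}, {1}, {2}, {3} };
	int num_alloc = 4;
	struct vf *vf;

	/* The loop cursor is the element itself; the body uses vf->...
	 * directly instead of vfs[v]-style re-indexing. */
	for (vf = &vfs[0]; vf < &vfs[num_alloc]; ++vf)
		printf("resetting VF %d\n", vf->id);

	return 0;
}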
@@ -1002,8 +1002,8 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
  */
 int ice_init_hw(struct ice_hw *hw)
 {
-	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
-	void *mac_buf __free(kfree);
+	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
+	void *mac_buf __free(kfree) = NULL;
 	u16 mac_buf_len;
 	int status;
 
@@ -3272,7 +3272,7 @@ int ice_update_link_info(struct ice_port_info *pi)
 		return status;
 
 	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
-		struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
+		struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
 
 		pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
 		if (!pcaps)
@@ -3420,7 +3420,7 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
 int
 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
 {
-	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
+	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 	struct ice_hw *hw;
 	int status;
@@ -3561,7 +3561,7 @@ int
 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
 		enum ice_fec_mode fec)
 {
-	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
+	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
 	struct ice_hw *hw;
 	int status;
 
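These ice hunks all add "= NULL" to __free(kfree) variables — the "fix freeing uninitialized pointers" item from the commit message. A standalone model of why that matters, built on the underlying GCC/Clang cleanup attribute rather than the kernel's own macros (names here are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

/* Runs automatically when the annotated variable goes out of scope. */
static void cleanup_free(char **p)
{
	free(*p);
}

#define __auto_free __attribute__((cleanup(cleanup_free)))

static int demo(int fail_early)
{
	/* Without "= NULL", an early return before the allocation would
	 * make the cleanup handler free() an uninitialized (garbage)
	 * pointer -- the bug class the ice change removes. */
	char *buf __auto_free = NULL;

	if (fail_early)
		return -1;	/* cleanup sees NULL: free(NULL) is a no-op */

	buf = malloc(64);
	if (!buf)
		return -1;

	snprintf(buf, 64, "hello");
	puts(buf);
	return 0;		/* buf freed automatically here */
}

int main(void)
{
	demo(1);
	return demo(0);
}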
@@ -941,11 +941,11 @@ static u64 ice_loopback_test(struct net_device *netdev)
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 	struct ice_vsi *orig_vsi = np->vsi, *test_vsi;
 	struct ice_pf *pf = orig_vsi->back;
+	u8 *tx_frame __free(kfree) = NULL;
 	u8 broadcast[ETH_ALEN], ret = 0;
 	int num_frames, valid_frames;
 	struct ice_tx_ring *tx_ring;
 	struct ice_rx_ring *rx_ring;
-	u8 *tx_frame __free(kfree);
 	int i;
 
 	netdev_info(netdev, "loopback test\n");
@@ -26,24 +26,22 @@ static void ice_port_vlan_on(struct ice_vsi *vsi)
 	struct ice_vsi_vlan_ops *vlan_ops;
 	struct ice_pf *pf = vsi->back;
 
+	/* setup inner VLAN ops */
+	vlan_ops = &vsi->inner_vlan_ops;
+
 	if (ice_is_dvm_ena(&pf->hw)) {
-		vlan_ops = &vsi->outer_vlan_ops;
-
-		/* setup outer VLAN ops */
-		vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
-		vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
-
-		/* setup inner VLAN ops */
-		vlan_ops = &vsi->inner_vlan_ops;
 		vlan_ops->add_vlan = noop_vlan_arg;
 		vlan_ops->del_vlan = noop_vlan_arg;
 		vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
 		vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
 		vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
 		vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
-	} else {
-		vlan_ops = &vsi->inner_vlan_ops;
 
+		/* setup outer VLAN ops */
+		vlan_ops = &vsi->outer_vlan_ops;
+		vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+		vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
+	} else {
 		vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
 		vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
 	}
@@ -2941,6 +2941,8 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
 	rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
 				 VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
 
+	skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
+
 	decoded = rxq->vport->rx_ptype_lkup[rx_ptype];
 	/* If we don't know the ptype we can't do anything else with it. Just
 	 * pass it up the stack as-is.
@@ -2951,8 +2953,6 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
 	/* process RSS/hash */
 	idpf_rx_hash(rxq, skb, rx_desc, &decoded);
 
-	skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
-
 	if (le16_get_bits(rx_desc->hdrlen_flags,
 			  VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
 		return idpf_rx_rsc(rxq, skb, rx_desc, &decoded);
@@ -160,6 +160,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
 			continue;
 		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
 		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+			if (iter >= MAX_LMAC_COUNT)
+				continue;
 			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
 					      iter);
 			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
@@ -1657,7 +1657,7 @@ static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
 	struct npc_coalesced_kpu_prfl *img_data = NULL;
 	int i = 0, rc = -EINVAL;
 	void __iomem *kpu_prfl_addr;
-	u16 offset;
+	u32 offset;
 
 	img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
 	if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
@@ -1933,7 +1933,7 @@ int otx2_open(struct net_device *netdev)
 	 * mcam entries are enabled to receive the packets. Hence disable the
 	 * packet I/O.
 	 */
-	if (err == EIO)
+	if (err == -EIO)
 		goto err_disable_rxtx;
 	else if (err)
 		goto err_tx_stop_queues;
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/phy.h>
 #include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
 
 #include "mlxbf_gige.h"
@@ -492,8 +493,13 @@ static void mlxbf_gige_shutdown(struct platform_device *pdev)
 {
 	struct mlxbf_gige *priv = platform_get_drvdata(pdev);
 
-	writeq(0, priv->base + MLXBF_GIGE_INT_EN);
-	mlxbf_gige_clean_port(priv);
+	rtnl_lock();
+	netif_device_detach(priv->netdev);
+
+	if (netif_running(priv->netdev))
+		dev_close(priv->netdev);
+
+	rtnl_unlock();
 }
 
 static const struct acpi_device_id __maybe_unused mlxbf_gige_acpi_match[] = {
@@ -601,7 +601,7 @@ static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
 
 	*alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
 
-	*datasize = ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN);
+	*datasize = mtu + ETH_HLEN;
 }
 
 static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
@@ -1314,17 +1314,40 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
 	RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
 }
 
+static void rtl_dash_loop_wait(struct rtl8169_private *tp,
+			       const struct rtl_cond *c,
+			       unsigned long usecs, int n, bool high)
+{
+	if (!tp->dash_enabled)
+		return;
+	rtl_loop_wait(tp, c, usecs, n, high);
+}
+
+static void rtl_dash_loop_wait_high(struct rtl8169_private *tp,
+				    const struct rtl_cond *c,
+				    unsigned long d, int n)
+{
+	rtl_dash_loop_wait(tp, c, d, n, true);
+}
+
+static void rtl_dash_loop_wait_low(struct rtl8169_private *tp,
+				   const struct rtl_cond *c,
+				   unsigned long d, int n)
+{
+	rtl_dash_loop_wait(tp, c, d, n, false);
+}
+
 static void rtl8168dp_driver_start(struct rtl8169_private *tp)
 {
 	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
-	rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+	rtl_dash_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
 }
 
 static void rtl8168ep_driver_start(struct rtl8169_private *tp)
 {
 	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
 	r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
-	rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
+	rtl_dash_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
 }
 
 static void rtl8168_driver_start(struct rtl8169_private *tp)
@@ -1338,7 +1361,7 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
 static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
 {
 	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
-	rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+	rtl_dash_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
 }
 
 static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
@@ -1346,7 +1369,7 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
 	rtl8168ep_stop_cmac(tp);
 	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
 	r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
-	rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
+	rtl_dash_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
 }
 
 static void rtl8168_driver_stop(struct rtl8169_private *tp)
@@ -5141,6 +5164,15 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
 	struct mii_bus *new_bus;
 	int ret;
 
+	/* On some boards with this chip version the BIOS is buggy and misses
+	 * to reset the PHY page selector. This results in the PHY ID read
+	 * accessing registers on a different page, returning a more or
+	 * less random value. Fix this by resetting the page selector first.
+	 */
+	if (tp->mac_version == RTL_GIGA_MAC_VER_25 ||
+	    tp->mac_version == RTL_GIGA_MAC_VER_26)
+		r8169_mdio_write(tp, 0x1f, 0);
+
 	new_bus = devm_mdiobus_alloc(&pdev->dev);
 	if (!new_bus)
 		return -ENOMEM;
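
The rtl_dash_loop_wait() wrappers centralize one check so that every firmware poll becomes a no-op when DASH is disabled, instead of each call site growing its own test. A minimal standalone sketch of the same guard-wrapper pattern, with hypothetical names (dev_ctx, poll_cond) rather than the driver's real types:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical device context; dash_enabled mirrors tp->dash_enabled. */
    struct dev_ctx {
        bool dash_enabled;
    };

    /* The underlying busy-wait; reduced to a stub that reports it ran. */
    static void poll_cond(struct dev_ctx *d, const char *cond, int usecs, int n)
    {
        (void)d;
        printf("polling %s (%d x %dus)\n", cond, n, usecs);
    }

    /* Wrapper: skip the potentially long poll when the feature is off. */
    static void dash_poll_cond(struct dev_ctx *d, const char *cond, int usecs, int n)
    {
        if (!d->dash_enabled)
            return;
        poll_cond(d, cond, usecs, n);
    }

    int main(void)
    {
        struct dev_ctx d = { .dash_enabled = false };

        dash_poll_cond(&d, "fw_ready", 10000, 10);  /* no-op: DASH off */
        d.dash_enabled = true;
        dash_poll_cond(&d, "fw_ready", 10000, 10);  /* actually polls */
        return 0;
    }
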
@@ -1324,12 +1324,12 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 	int q = napi - priv->napi;
 	int mask = BIT(q);
 	int quota = budget;
+	bool unmask;
 
 	/* Processing RX Descriptor Ring */
 	/* Clear RX interrupt */
 	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
-	if (ravb_rx(ndev, &quota, q))
-		goto out;
+	unmask = !ravb_rx(ndev, &quota, q);
 
 	/* Processing TX Descriptor Ring */
 	spin_lock_irqsave(&priv->lock, flags);
@@ -1339,6 +1339,18 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 	netif_wake_subqueue(ndev, q);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
+	/* Receive error message handling */
+	priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
+	if (info->nc_queues)
+		priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
+	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
+		ndev->stats.rx_over_errors = priv->rx_over_errors;
+	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
+		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
+
+	if (!unmask)
+		goto out;
+
 	napi_complete(napi);
 
 	/* Re-enable RX/TX interrupts */
@@ -1352,14 +1364,6 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	/* Receive error message handling */
-	priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
-	if (info->nc_queues)
-		priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
-	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
-		ndev->stats.rx_over_errors = priv->rx_over_errors;
-	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
-		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
 out:
 	return budget - quota;
 }
@@ -92,19 +92,41 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
 					 u32 prio, u32 queue)
 {
 	void __iomem *ioaddr = hw->pcsr;
-	u32 base_register;
-	u32 value;
+	u32 clear_mask = 0;
+	u32 ctrl2, ctrl3;
+	int i;
 
-	base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
-	if (queue >= 4)
-		queue -= 4;
+	ctrl2 = readl(ioaddr + GMAC_RXQ_CTRL2);
+	ctrl3 = readl(ioaddr + GMAC_RXQ_CTRL3);
 
-	value = readl(ioaddr + base_register);
+	/* The software must ensure that the same priority
+	 * is not mapped to multiple Rx queues
+	 */
+	for (i = 0; i < 4; i++)
+		clear_mask |= ((prio << GMAC_RXQCTRL_PSRQX_SHIFT(i)) &
+						GMAC_RXQCTRL_PSRQX_MASK(i));
 
-	value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
-	value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
-						GMAC_RXQCTRL_PSRQX_MASK(queue);
-	writel(value, ioaddr + base_register);
+	ctrl2 &= ~clear_mask;
+	ctrl3 &= ~clear_mask;
+
+	/* First assign new priorities to a queue, then
+	 * clear them from others queues
+	 */
+	if (queue < 4) {
+		ctrl2 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
+						GMAC_RXQCTRL_PSRQX_MASK(queue);
+
+		writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
+		writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
+	} else {
+		queue -= 4;
+
+		ctrl3 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
+						GMAC_RXQCTRL_PSRQX_MASK(queue);
+
+		writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
+		writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
+	}
 }
 
 static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
@@ -105,17 +105,41 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
 				   u32 queue)
 {
 	void __iomem *ioaddr = hw->pcsr;
-	u32 value, reg;
+	u32 clear_mask = 0;
+	u32 ctrl2, ctrl3;
+	int i;
 
-	reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
-	if (queue >= 4)
-		queue -= 4;
+	ctrl2 = readl(ioaddr + XGMAC_RXQ_CTRL2);
+	ctrl3 = readl(ioaddr + XGMAC_RXQ_CTRL3);
+
+	/* The software must ensure that the same priority
+	 * is not mapped to multiple Rx queues
+	 */
+	for (i = 0; i < 4; i++)
+		clear_mask |= ((prio << XGMAC_PSRQ_SHIFT(i)) &
+						XGMAC_PSRQ(i));
+
+	ctrl2 &= ~clear_mask;
+	ctrl3 &= ~clear_mask;
+
+	/* First assign new priorities to a queue, then
+	 * clear them from others queues
+	 */
+	if (queue < 4) {
+		ctrl2 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
+						XGMAC_PSRQ(queue);
 
-	value = readl(ioaddr + reg);
-	value &= ~XGMAC_PSRQ(queue);
-	value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
+		writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
+		writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
+	} else {
+		queue -= 4;
+
+		ctrl3 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
+						XGMAC_PSRQ(queue);
 
-	writel(value, ioaddr + reg);
+		writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
+		writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
+	}
 }
 
 static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
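
Both stmmac fixes enforce the same invariant: one priority must be mapped to at most one RX queue, so the setter first clears the priority bits from both mirrored control registers and only then sets them for the target queue. A standalone sketch of that clear-then-assign sequence, with the MMIO registers replaced by plain variables and all names hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    #define PSRQ_SHIFT(q)  ((q) * 8)               /* 8-bit priority bitmap per queue */
    #define PSRQ_MASK(q)   (0xffu << PSRQ_SHIFT(q))

    static uint32_t ctrl2, ctrl3;                   /* stand-ins for the two registers */

    static void rx_queue_set_prio(uint32_t prio, uint32_t queue)
    {
        uint32_t clear_mask = 0;
        int i;

        /* Clear this priority from every queue field in both registers ... */
        for (i = 0; i < 4; i++)
            clear_mask |= (prio << PSRQ_SHIFT(i)) & PSRQ_MASK(i);
        ctrl2 &= ~clear_mask;
        ctrl3 &= ~clear_mask;

        /* ... then assign it only to the requested queue. */
        if (queue < 4)
            ctrl2 |= (prio << PSRQ_SHIFT(queue)) & PSRQ_MASK(queue);
        else
            ctrl3 |= (prio << PSRQ_SHIFT(queue - 4)) & PSRQ_MASK(queue - 4);
    }

    int main(void)
    {
        rx_queue_set_prio(0x3, 1);
        rx_queue_set_prio(0x3, 5);  /* moves the priority from queue 1 to queue 5 */
        printf("ctrl2=%08x ctrl3=%08x\n", ctrl2, ctrl3);
        return 0;
    }
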
@@ -20,6 +20,8 @@
 #include "txgbe_phy.h"
 #include "txgbe_hw.h"
 
+#define TXGBE_I2C_CLK_DEV_NAME "i2c_dw"
+
 static int txgbe_swnodes_register(struct txgbe *txgbe)
 {
 	struct txgbe_nodes *nodes = &txgbe->nodes;
@@ -571,8 +573,8 @@ static int txgbe_clock_register(struct txgbe *txgbe)
 	char clk_name[32];
 	struct clk *clk;
 
-	snprintf(clk_name, sizeof(clk_name), "i2c_dw.%d",
-		 pci_dev_id(pdev));
+	snprintf(clk_name, sizeof(clk_name), "%s.%d",
+		 TXGBE_I2C_CLK_DEV_NAME, pci_dev_id(pdev));
 
 	clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);
 	if (IS_ERR(clk))
@@ -634,7 +636,7 @@ static int txgbe_i2c_register(struct txgbe *txgbe)
 
 	info.parent = &pdev->dev;
 	info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]);
-	info.name = "i2c_designware";
+	info.name = TXGBE_I2C_CLK_DEV_NAME;
 	info.id = pci_dev_id(pdev);
 
 	info.res = &DEFINE_RES_IRQ(pdev->irq);
@@ -2431,6 +2431,7 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
 	struct lan8814_ptp_rx_ts *rx_ts, *tmp;
 	int txcfg = 0, rxcfg = 0;
 	int pkt_ts_enable;
+	int tx_mod;
 
 	ptp_priv->hwts_tx_type = config->tx_type;
 	ptp_priv->rx_filter = config->rx_filter;
@@ -2477,9 +2478,14 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
 	lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
 	lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
 
-	if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
+	tx_mod = lanphy_read_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD);
+	if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC) {
 		lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
-				      PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+				      tx_mod | PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+	} else if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ON) {
+		lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
+				      tx_mod & ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+	}
 
 	if (config->rx_filter != HWTSTAMP_FILTER_NONE)
 		lan8814_config_ts_intr(ptp_priv->phydev, true);
@@ -2537,7 +2543,7 @@ static void lan8814_txtstamp(struct mii_timestamper *mii_ts,
 	}
 }
 
-static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
+static bool lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
 {
 	struct ptp_header *ptp_header;
 	u32 type;
@@ -2547,7 +2553,11 @@ static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
 	ptp_header = ptp_parse_header(skb, type);
 	skb_pull_inline(skb, ETH_HLEN);
 
+	if (!ptp_header)
+		return false;
+
 	*sig = (__force u16)(ntohs(ptp_header->sequence_id));
+	return true;
 }
 
 static bool lan8814_match_rx_skb(struct kszphy_ptp_priv *ptp_priv,
@@ -2559,7 +2569,8 @@ static bool lan8814_match_rx_skb(struct kszphy_ptp_priv *ptp_priv,
 	bool ret = false;
 	u16 skb_sig;
 
-	lan8814_get_sig_rx(skb, &skb_sig);
+	if (!lan8814_get_sig_rx(skb, &skb_sig))
+		return ret;
 
 	/* Iterate over all RX timestamps and match it with the received skbs */
 	spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags);
@@ -2834,7 +2845,7 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
 	return 0;
 }
 
-static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
+static bool lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
 {
 	struct ptp_header *ptp_header;
 	u32 type;
@@ -2842,7 +2853,11 @@ static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
 	type = ptp_classify_raw(skb);
 	ptp_header = ptp_parse_header(skb, type);
 
+	if (!ptp_header)
+		return false;
+
 	*sig = (__force u16)(ntohs(ptp_header->sequence_id));
+	return true;
 }
 
 static void lan8814_match_tx_skb(struct kszphy_ptp_priv *ptp_priv,
@@ -2856,7 +2871,8 @@ static void lan8814_match_tx_skb(struct kszphy_ptp_priv *ptp_priv,
 
 	spin_lock_irqsave(&ptp_priv->tx_queue.lock, flags);
 	skb_queue_walk_safe(&ptp_priv->tx_queue, skb, skb_tmp) {
-		lan8814_get_sig_tx(skb, &skb_sig);
+		if (!lan8814_get_sig_tx(skb, &skb_sig))
+			continue;
 
 		if (memcmp(&skb_sig, &seq_id, sizeof(seq_id)))
 			continue;
@@ -2910,7 +2926,8 @@ static bool lan8814_match_skb(struct kszphy_ptp_priv *ptp_priv,
 
 	spin_lock_irqsave(&ptp_priv->rx_queue.lock, flags);
 	skb_queue_walk_safe(&ptp_priv->rx_queue, skb, skb_tmp) {
-		lan8814_get_sig_rx(skb, &skb_sig);
+		if (!lan8814_get_sig_rx(skb, &skb_sig))
+			continue;
 
 		if (memcmp(&skb_sig, &rx_ts->seq_id, sizeof(rx_ts->seq_id)))
			continue;
@@ -1273,6 +1273,8 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
 
 	if (is_valid_ether_addr(mac)) {
 		eth_hw_addr_set(dev->net, mac);
+		if (!is_local_ether_addr(mac))
+			dev->net->addr_assign_type = NET_ADDR_PERM;
 	} else {
 		netdev_info(dev->net, "invalid MAC address, using random\n");
 		eth_hw_addr_random(dev->net);
@@ -285,6 +285,7 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
 		return NULL;
 	}
 	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
+	skb_mark_for_recycle(skb);
 
 	/* Align ip header to a 16 bytes boundary */
 	skb_reserve(skb, NET_IP_ALIGN);
@@ -1574,12 +1574,26 @@ struct bpf_link {
 	enum bpf_link_type type;
 	const struct bpf_link_ops *ops;
 	struct bpf_prog *prog;
-	struct work_struct work;
+	/* rcu is used before freeing, work can be used to schedule that
+	 * RCU-based freeing before that, so they never overlap
+	 */
+	union {
+		struct rcu_head rcu;
+		struct work_struct work;
+	};
 };
 
 struct bpf_link_ops {
 	void (*release)(struct bpf_link *link);
+	/* deallocate link resources callback, called without RCU grace period
+	 * waiting
+	 */
 	void (*dealloc)(struct bpf_link *link);
+	/* deallocate link resources callback, called after RCU grace period;
+	 * if underlying BPF program is sleepable we go through tasks trace
+	 * RCU GP and then "classic" RCU GP
+	 */
+	void (*dealloc_deferred)(struct bpf_link *link);
 	int (*detach)(struct bpf_link *link);
 	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
 			   struct bpf_prog *old_prog);
@@ -150,6 +150,24 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
 	}
 }
 
+DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
+#if IS_ENABLED(CONFIG_IPV6)
+DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
+#endif
+
+static inline bool udp_encap_needed(void)
+{
+	if (static_branch_unlikely(&udp_encap_needed_key))
+		return true;
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (static_branch_unlikely(&udpv6_encap_needed_key))
+		return true;
+#endif
+
+	return false;
+}
+
 static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
 {
 	if (!skb_is_gso(skb))
@@ -163,6 +181,16 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
 	    !udp_test_bit(ACCEPT_FRAGLIST, sk))
 		return true;
 
+	/* GSO packets lacking the SKB_GSO_UDP_TUNNEL/_CSUM bits might still
+	 * land in a tunnel as the socket check in udp_gro_receive cannot be
+	 * foolproof.
+	 */
+	if (udp_encap_needed() &&
+	    READ_ONCE(udp_sk(sk)->encap_rcv) &&
+	    !(skb_shinfo(skb)->gso_type &
+	      (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)))
+		return true;
+
 	return false;
 }
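
udp_unexpected_gso() acts as a late safety net: the static key keeps the common case (no UDP encapsulation anywhere in the system) nearly free, and the per-packet gso_type test only runs once some socket has enabled encap_rcv. A rough userspace analogue of that check ordering, using a plain atomic flag in place of the jump-label static key (all names hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for udp_encap_needed_key: flipped once when any encap
     * socket is created, read on every packet. */
    static atomic_bool encap_needed;

    #define GSO_UDP_TUNNEL       0x1
    #define GSO_UDP_TUNNEL_CSUM  0x2

    struct pkt { bool gso; unsigned int gso_type; };

    /* Mirror of the udp_unexpected_gso() logic: cheap global check first,
     * per-packet flags second. */
    static bool unexpected_gso(const struct pkt *p)
    {
        if (!p->gso)
            return false;
        return atomic_load(&encap_needed) &&
               !(p->gso_type & (GSO_UDP_TUNNEL | GSO_UDP_TUNNEL_CSUM));
    }

    int main(void)
    {
        struct pkt p = { .gso = true, .gso_type = 0 };

        printf("%d\n", unexpected_gso(&p));  /* 0: no encap in the system */
        atomic_store(&encap_needed, true);
        printf("%d\n", unexpected_gso(&p));  /* 1: might land in a tunnel */
        return 0;
    }
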
@@ -176,6 +176,15 @@ enum {
 	 */
 	HCI_QUIRK_USE_BDADDR_PROPERTY,
 
+	/* When this quirk is set, the Bluetooth Device Address provided by
+	 * the 'local-bd-address' fwnode property is incorrectly specified in
+	 * big-endian order.
+	 *
+	 * This quirk can be set before hci_register_dev is called or
+	 * during the hdev->setup vendor callback.
+	 */
+	HCI_QUIRK_BDADDR_PROPERTY_BROKEN,
+
 	/* When this quirk is set, the duplicate filtering during
 	 * scanning is based on Bluetooth devices addresses. To allow
 	 * RSSI based updates, restart scanning if needed.
@@ -39,7 +39,6 @@ enum TRI_STATE {
 #define COMP_ENTRY_SIZE 64
 
 #define RX_BUFFERS_PER_QUEUE 512
-#define MANA_RX_DATA_ALIGN 64
 
 #define MAX_SEND_BUFFERS_PER_QUEUE 256
 
@@ -3024,17 +3024,46 @@ void bpf_link_inc(struct bpf_link *link)
 	atomic64_inc(&link->refcnt);
 }
 
+static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu)
+{
+	struct bpf_link *link = container_of(rcu, struct bpf_link, rcu);
+
+	/* free bpf_link and its containing memory */
+	link->ops->dealloc_deferred(link);
+}
+
+static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
+{
+	if (rcu_trace_implies_rcu_gp())
+		bpf_link_defer_dealloc_rcu_gp(rcu);
+	else
+		call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp);
+}
+
 /* bpf_link_free is guaranteed to be called from process context */
 static void bpf_link_free(struct bpf_link *link)
 {
+	bool sleepable = false;
+
 	bpf_link_free_id(link->id);
 	if (link->prog) {
+		sleepable = link->prog->sleepable;
 		/* detach BPF program, clean up used resources */
 		link->ops->release(link);
 		bpf_prog_put(link->prog);
 	}
-	/* free bpf_link and its containing memory */
-	link->ops->dealloc(link);
+	if (link->ops->dealloc_deferred) {
+		/* schedule BPF link deallocation; if underlying BPF program
+		 * is sleepable, we need to first wait for RCU tasks trace
+		 * sync, then go through "classic" RCU grace period
+		 */
+		if (sleepable)
+			call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
+		else
+			call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
+	}
+	if (link->ops->dealloc)
+		link->ops->dealloc(link);
 }
 
 static void bpf_link_put_deferred(struct work_struct *work)
@@ -3544,7 +3573,7 @@ static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
 
 static const struct bpf_link_ops bpf_raw_tp_link_lops = {
 	.release = bpf_raw_tp_link_release,
-	.dealloc = bpf_raw_tp_link_dealloc,
+	.dealloc_deferred = bpf_raw_tp_link_dealloc,
 	.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
 	.fill_link_info = bpf_raw_tp_link_fill_link_info,
 };
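
The two callbacks above chain grace periods: a sleepable program's link first waits out a tasks-trace RCU grace period and, unless that already implies a classic grace period, is re-queued for a classic one before the final free. A toy model of that chaining with ordinary callbacks, no real RCU involved (hypothetical names):

    #include <stdbool.h>
    #include <stdio.h>

    typedef void (*cb_t)(void *obj);

    /* Stand-ins for call_rcu()/call_rcu_tasks_trace(): run the callback
     * "after a grace period" (immediately, in this toy). */
    static void fake_call_rcu(void *obj, cb_t cb)             { puts("classic GP"); cb(obj); }
    static void fake_call_rcu_tasks_trace(void *obj, cb_t cb) { puts("tasks-trace GP"); cb(obj); }

    /* Whether a tasks-trace GP already implies a classic GP; kernel-config
     * dependent, rcu_trace_implies_rcu_gp() in the real code. */
    static bool trace_implies_classic;

    static void final_free(void *obj) { printf("freed %p\n", obj); }

    static void after_trace_gp(void *obj)
    {
        if (trace_implies_classic)
            final_free(obj);                 /* one GP was enough */
        else
            fake_call_rcu(obj, final_free);  /* chain a classic GP */
    }

    int main(void)
    {
        int link;
        fake_call_rcu_tasks_trace(&link, after_trace_gp);  /* sleepable path */
        return 0;
    }
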
@@ -18379,15 +18379,18 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
 			}
 			if (!env->prog->jit_requested) {
 				verbose(env, "JIT is required to use arena\n");
+				fdput(f);
 				return -EOPNOTSUPP;
 			}
 			if (!bpf_jit_supports_arena()) {
 				verbose(env, "JIT doesn't support arena\n");
+				fdput(f);
 				return -EOPNOTSUPP;
 			}
 			env->prog->aux->arena = (void *)map;
 			if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) {
 				verbose(env, "arena's user address must be set via map_extra or mmap()\n");
+				fdput(f);
 				return -EINVAL;
 			}
 		}
@@ -2728,7 +2728,7 @@ static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
 
 static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
 	.release = bpf_kprobe_multi_link_release,
-	.dealloc = bpf_kprobe_multi_link_dealloc,
+	.dealloc_deferred = bpf_kprobe_multi_link_dealloc,
 	.fill_link_info = bpf_kprobe_multi_link_fill_link_info,
 };
 
@@ -3157,6 +3157,9 @@ static void bpf_uprobe_multi_link_release(struct bpf_link *link)
 
 	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
 	bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
+	if (umulti_link->task)
+		put_task_struct(umulti_link->task);
+	path_put(&umulti_link->path);
 }
 
 static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
@@ -3164,9 +3167,6 @@ static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
 	struct bpf_uprobe_multi_link *umulti_link;
 
 	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
-	if (umulti_link->task)
-		put_task_struct(umulti_link->task);
-	path_put(&umulti_link->path);
 	kvfree(umulti_link->uprobes);
 	kfree(umulti_link);
 }
@@ -3242,7 +3242,7 @@ static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
 
 static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
 	.release = bpf_uprobe_multi_link_release,
-	.dealloc = bpf_uprobe_multi_link_dealloc,
+	.dealloc_deferred = bpf_uprobe_multi_link_dealloc,
 	.fill_link_info = bpf_uprobe_multi_link_fill_link_info,
 };
@@ -105,7 +105,7 @@ void ax25_dev_device_down(struct net_device *dev)
 	spin_lock_bh(&ax25_dev_lock);
 
 #ifdef CONFIG_AX25_DAMA_SLAVE
-	ax25_ds_del_timer(ax25_dev);
+	timer_shutdown_sync(&ax25_dev->dama.slave_timer);
 #endif
 
 	/*
@@ -2874,7 +2874,7 @@ static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
 	cancel_delayed_work_sync(&hdev->ncmd_timer);
 	atomic_set(&hdev->cmd_cnt, 1);
 
-	hci_cmd_sync_cancel_sync(hdev, -err);
+	hci_cmd_sync_cancel_sync(hdev, err);
 }
 
 /* Suspend HCI device */
@@ -2894,7 +2894,7 @@ int hci_suspend_dev(struct hci_dev *hdev)
 		return 0;
 
 	/* Cancel potentially blocking sync operation before suspend */
-	hci_cancel_cmd_sync(hdev, -EHOSTDOWN);
+	hci_cancel_cmd_sync(hdev, EHOSTDOWN);
 
 	hci_req_sync_lock(hdev);
 	ret = hci_suspend_sync(hdev);
@@ -4210,7 +4210,7 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
 
 	err = hci_send_frame(hdev, skb);
 	if (err < 0) {
-		hci_cmd_sync_cancel_sync(hdev, err);
+		hci_cmd_sync_cancel_sync(hdev, -err);
 		return;
 	}
@@ -218,10 +218,12 @@ static int conn_info_min_age_set(void *data, u64 val)
 {
 	struct hci_dev *hdev = data;
 
-	if (val == 0 || val > hdev->conn_info_max_age)
-		return -EINVAL;
-
 	hci_dev_lock(hdev);
+	if (val == 0 || val > hdev->conn_info_max_age) {
+		hci_dev_unlock(hdev);
+		return -EINVAL;
+	}
+
 	hdev->conn_info_min_age = val;
 	hci_dev_unlock(hdev);
 
@@ -246,10 +248,12 @@ static int conn_info_max_age_set(void *data, u64 val)
 {
 	struct hci_dev *hdev = data;
 
-	if (val == 0 || val < hdev->conn_info_min_age)
-		return -EINVAL;
-
 	hci_dev_lock(hdev);
+	if (val == 0 || val < hdev->conn_info_min_age) {
+		hci_dev_unlock(hdev);
+		return -EINVAL;
+	}
+
 	hdev->conn_info_max_age = val;
 	hci_dev_unlock(hdev);
 
@@ -567,10 +571,12 @@ static int sniff_min_interval_set(void *data, u64 val)
 {
 	struct hci_dev *hdev = data;
 
-	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
-		return -EINVAL;
-
 	hci_dev_lock(hdev);
+	if (val == 0 || val % 2 || val > hdev->sniff_max_interval) {
+		hci_dev_unlock(hdev);
+		return -EINVAL;
+	}
+
 	hdev->sniff_min_interval = val;
 	hci_dev_unlock(hdev);
 
@@ -595,10 +601,12 @@ static int sniff_max_interval_set(void *data, u64 val)
 {
 	struct hci_dev *hdev = data;
 
-	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
-		return -EINVAL;
-
 	hci_dev_lock(hdev);
+	if (val == 0 || val % 2 || val < hdev->sniff_min_interval) {
+		hci_dev_unlock(hdev);
+		return -EINVAL;
+	}
+
 	hdev->sniff_max_interval = val;
 	hci_dev_unlock(hdev);
 
@@ -850,10 +858,12 @@ static int conn_min_interval_set(void *data, u64 val)
 {
 	struct hci_dev *hdev = data;
 
-	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
-		return -EINVAL;
-
 	hci_dev_lock(hdev);
+	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval) {
+		hci_dev_unlock(hdev);
+		return -EINVAL;
+	}
+
 	hdev->le_conn_min_interval = val;
 	hci_dev_unlock(hdev);
 
@@ -878,10 +888,12 @@ static int conn_max_interval_set(void *data, u64 val)
 {
 	struct hci_dev *hdev = data;
 
-	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
-		return -EINVAL;
-
 	hci_dev_lock(hdev);
+	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval) {
+		hci_dev_unlock(hdev);
+		return -EINVAL;
+	}
+
 	hdev->le_conn_max_interval = val;
 	hci_dev_unlock(hdev);
 
@@ -990,10 +1002,12 @@ static int adv_min_interval_set(void *data, u64 val)
 {
 	struct hci_dev *hdev = data;
 
-	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
-		return -EINVAL;
-
 	hci_dev_lock(hdev);
+	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval) {
+		hci_dev_unlock(hdev);
+		return -EINVAL;
+	}
+
 	hdev->le_adv_min_interval = val;
 	hci_dev_unlock(hdev);
 
@@ -1018,10 +1032,12 @@ static int adv_max_interval_set(void *data, u64 val)
 {
 	struct hci_dev *hdev = data;
 
-	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
-		return -EINVAL;
-
 	hci_dev_lock(hdev);
+	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval) {
+		hci_dev_unlock(hdev);
+		return -EINVAL;
+	}
+
 	hdev->le_adv_max_interval = val;
 	hci_dev_unlock(hdev);
 
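
All eight debugfs setters receive the same treatment: the range check moves under the hdev lock so the bound being compared against cannot change between validation and assignment, a textbook TOCTOU race. A compact pthread sketch contrasting the racy check-then-lock shape with the fixed check-under-lock shape (hypothetical names):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned min_age = 10, max_age = 100;

    /* Racy: max_age may shrink after the check but before the store. */
    static int set_min_racy(unsigned val)
    {
        if (val == 0 || val > max_age)
            return -1;
        pthread_mutex_lock(&lock);
        min_age = val;              /* may now violate min <= max */
        pthread_mutex_unlock(&lock);
        return 0;
    }

    /* Fixed: validate against max_age while holding the same lock that
     * protects updates to it. */
    static int set_min(unsigned val)
    {
        pthread_mutex_lock(&lock);
        if (val == 0 || val > max_age) {
            pthread_mutex_unlock(&lock);
            return -1;
        }
        min_age = val;
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        set_min_racy(50);
        set_min(50);
        printf("min=%u max=%u\n", min_age, max_age);
        return 0;
    }
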
@@ -3208,6 +3208,31 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
 			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
 
+		/* "Link key request" completed ahead of "connect request" completes */
+		if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
+		    ev->link_type == ACL_LINK) {
+			struct link_key *key;
+			struct hci_cp_read_enc_key_size cp;
+
+			key = hci_find_link_key(hdev, &ev->bdaddr);
+			if (key) {
+				set_bit(HCI_CONN_ENCRYPT, &conn->flags);
+
+				if (!(hdev->commands[20] & 0x10)) {
+					conn->enc_key_size = HCI_LINK_KEY_SIZE;
+				} else {
+					cp.handle = cpu_to_le16(conn->handle);
+					if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
+							 sizeof(cp), &cp)) {
+						bt_dev_err(hdev, "sending read key size failed");
+						conn->enc_key_size = HCI_LINK_KEY_SIZE;
+					}
+				}
+
+				hci_encrypt_cfm(conn, ev->status);
+			}
+		}
+
 		/* Get remote features */
 		if (conn->type == ACL_LINK) {
 			struct hci_cp_read_remote_features cp;
@@ -617,7 +617,10 @@ void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
 
 	if (hdev->req_status == HCI_REQ_PEND) {
-		hdev->req_result = err;
+		/* req_result is __u32 so error must be positive to be properly
+		 * propagated.
+		 */
+		hdev->req_result = err < 0 ? -err : err;
 		hdev->req_status = HCI_REQ_CANCELED;
 
 		wake_up_interruptible(&hdev->req_wait_q);
@@ -3416,7 +3419,10 @@ static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
 	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
 		return;
 
-	bacpy(&hdev->public_addr, &ba);
+	if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks))
+		baswap(&hdev->public_addr, &ba);
+	else
+		bacpy(&hdev->public_addr, &ba);
 }
 
 struct hci_init_stage {
@@ -1111,6 +1111,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
 	struct ebt_table_info *newinfo;
 	struct ebt_replace tmp;
 
+	if (len < sizeof(tmp))
+		return -EINVAL;
 	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
 		return -EFAULT;
 
@@ -1423,6 +1425,8 @@ static int update_counters(struct net *net, sockptr_t arg, unsigned int len)
 {
 	struct ebt_replace hlp;
 
+	if (len < sizeof(hlp))
+		return -EINVAL;
 	if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
 		return -EFAULT;
 
@@ -2352,6 +2356,8 @@ static int compat_update_counters(struct net *net, sockptr_t arg,
 {
 	struct compat_ebt_replace hlp;
 
+	if (len < sizeof(hlp))
+		return -EINVAL;
 	if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
 		return -EFAULT;
 
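
This hunk, and the matching arp_tables, ip_tables and ip6_tables hunks further down, all add the same guard: reject a user-supplied length shorter than the fixed-size header before copying, rather than reading past the end of what the caller actually provided. A userspace sketch of the validate-then-copy rule (hypothetical names):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct replace_hdr { char name[16]; unsigned int num_entries; unsigned int size; };

    /* Mirrors the fixed do_replace() shape: refuse short buffers up front,
     * then copy exactly sizeof(hdr) bytes, never the caller's length. */
    static int do_replace(const void *arg, size_t len)
    {
        struct replace_hdr tmp;

        if (len < sizeof(tmp))
            return -EINVAL;
        memcpy(&tmp, arg, sizeof(tmp));  /* copy_from_sockptr() stand-in */
        printf("entries=%u size=%u\n", tmp.num_entries, tmp.size);
        return 0;
    }

    int main(void)
    {
        struct replace_hdr h = { "filter", 3, 128 };
        char small[4] = { 0 };

        printf("%d\n", do_replace(&h, sizeof(h)));        /* 0 */
        printf("%d\n", do_replace(small, sizeof(small))); /* -EINVAL: too short */
        return 0;
    }
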
@@ -429,7 +429,7 @@ EXPORT_PER_CPU_SYMBOL(softnet_data);
  * PP consumers must pay attention to run APIs in the appropriate context
  * (e.g. NAPI context).
  */
-static DEFINE_PER_CPU_ALIGNED(struct page_pool *, system_page_pool);
+static DEFINE_PER_CPU(struct page_pool *, system_page_pool);
 
 #ifdef CONFIG_LOCKDEP
 /*
@@ -192,8 +192,9 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 	}
 
 merge:
-	/* sk owenrship - if any - completely transferred to the aggregated packet */
+	/* sk ownership - if any - completely transferred to the aggregated packet */
 	skb->destructor = NULL;
+	skb->sk = NULL;
 	delta_truesize = skb->truesize;
 	if (offset > headlen) {
 		unsigned int eat = offset - headlen;
@@ -411,6 +411,9 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
 	struct sock *sk;
 	int err = 0;
 
+	if (irqs_disabled())
+		return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
+
 	spin_lock_bh(&stab->lock);
 	sk = *psk;
 	if (!sk_test || sk_test == sk)
@@ -933,6 +936,9 @@ static long sock_hash_delete_elem(struct bpf_map *map, void *key)
 	struct bpf_shtab_elem *elem;
 	int ret = -ENOENT;
 
+	if (irqs_disabled())
+		return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
+
 	hash = sock_hash_bucket_hash(key, key_size);
 	bucket = sock_hash_select_bucket(htab, hash);
 
@@ -132,30 +132,29 @@ static int hsr_dev_open(struct net_device *dev)
 {
 	struct hsr_priv *hsr;
 	struct hsr_port *port;
-	char designation;
+	const char *designation = NULL;
 
 	hsr = netdev_priv(dev);
-	designation = '\0';
 
 	hsr_for_each_port(hsr, port) {
 		if (port->type == HSR_PT_MASTER)
 			continue;
 		switch (port->type) {
 		case HSR_PT_SLAVE_A:
-			designation = 'A';
+			designation = "Slave A";
 			break;
 		case HSR_PT_SLAVE_B:
-			designation = 'B';
+			designation = "Slave B";
 			break;
 		default:
-			designation = '?';
+			designation = "Unknown";
 		}
 		if (!is_slave_up(port->dev))
-			netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a fully working HSR network\n",
+			netdev_warn(dev, "%s (%s) is not up; please bring it up to get a fully working HSR network\n",
 				    designation, port->dev->name);
 	}
 
-	if (designation == '\0')
+	if (!designation)
 		netdev_warn(dev, "No slave devices configured\n");
 
 	return 0;
@@ -203,8 +203,15 @@ static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
 				   kuid_t sk_uid, bool relax,
 				   bool reuseport_cb_ok, bool reuseport_ok)
 {
-	if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
-		return false;
+	if (ipv6_only_sock(sk2)) {
+		if (sk->sk_family == AF_INET)
+			return false;
+
+#if IS_ENABLED(CONFIG_IPV6)
+		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+			return false;
+#endif
+	}
 
 	return inet_bind_conflict(sk, sk2, sk_uid, relax,
 				  reuseport_cb_ok, reuseport_ok);
@@ -287,6 +294,7 @@ static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l
 	struct sock_reuseport *reuseport_cb;
 	struct inet_bind_hashbucket *head2;
 	struct inet_bind2_bucket *tb2;
+	bool conflict = false;
 	bool reuseport_cb_ok;
 
 	rcu_read_lock();
@@ -299,18 +307,20 @@ static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l
 
 	spin_lock(&head2->lock);
 
-	inet_bind_bucket_for_each(tb2, &head2->chain)
-		if (inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
-			break;
+	inet_bind_bucket_for_each(tb2, &head2->chain) {
+		if (!inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
+			continue;
 
-	if (tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
-					reuseport_ok)) {
-		spin_unlock(&head2->lock);
-		return true;
+		if (!inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok, reuseport_ok))
+			continue;
+
+		conflict = true;
+		break;
 	}
 
 	spin_unlock(&head2->lock);
-	return false;
+
+	return conflict;
 }
 
 /*
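
The reworked conflict logic is observable from plain sockets: binding ::ffff:127.0.0.1 and then 127.0.0.1 on the same port must fail with EADDRINUSE, exactly the kind of combination the rewritten bind_wildcard selftest at the end of this series enumerates. A standalone probe, assuming port 54321 is otherwise free:

    #include <arpa/inet.h>
    #include <errno.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    int main(void)
    {
        struct sockaddr_in6 a6 = { .sin6_family = AF_INET6, .sin6_port = htons(54321) };
        struct sockaddr_in a4 = { .sin_family = AF_INET, .sin_port = htons(54321) };
        int s6 = socket(AF_INET6, SOCK_STREAM, 0);
        int s4 = socket(AF_INET, SOCK_STREAM, 0);

        inet_pton(AF_INET6, "::ffff:127.0.0.1", &a6.sin6_addr);
        a4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

        printf("bind v4-mapped: %d\n", bind(s6, (struct sockaddr *)&a6, sizeof(a6)));
        /* Expected to fail with EADDRINUSE: same v4 address/port in effect. */
        printf("bind v4: %d (%s)\n", bind(s4, (struct sockaddr *)&a4, sizeof(a4)),
               strerror(errno));
        return 0;
    }
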
@@ -280,8 +280,13 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 					  tpi->flags | TUNNEL_NO_KEY,
 					  iph->saddr, iph->daddr, 0);
 	} else {
+		if (unlikely(!pskb_may_pull(skb,
+					    gre_hdr_len + sizeof(*ershdr))))
+			return PACKET_REJECT;
+
 		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
 		ver = ershdr->ver;
+		iph = ip_hdr(skb);
 		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
 					  tpi->flags | TUNNEL_KEY,
 					  iph->saddr, iph->daddr, tpi->key);
@@ -956,6 +956,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
 	void *loc_cpu_entry;
 	struct arpt_entry *iter;
 
+	if (len < sizeof(tmp))
+		return -EINVAL;
 	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
 		return -EFAULT;
 
@@ -1254,6 +1256,8 @@ static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
 	void *loc_cpu_entry;
 	struct arpt_entry *iter;
 
+	if (len < sizeof(tmp))
+		return -EINVAL;
 	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
 		return -EFAULT;
 
@@ -1108,6 +1108,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
 	void *loc_cpu_entry;
 	struct ipt_entry *iter;
 
+	if (len < sizeof(tmp))
+		return -EINVAL;
 	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
 		return -EFAULT;
 
@@ -1492,6 +1494,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
 	void *loc_cpu_entry;
 	struct ipt_entry *iter;
 
+	if (len < sizeof(tmp))
+		return -EINVAL;
 	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
 		return -EFAULT;
 
@@ -582,6 +582,13 @@ static inline bool __udp_is_mcast_sock(struct net *net, const struct sock *sk,
 }
 
 DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
+EXPORT_SYMBOL(udp_encap_needed_key);
+
+#if IS_ENABLED(CONFIG_IPV6)
+DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
+EXPORT_SYMBOL(udpv6_encap_needed_key);
+#endif
+
 void udp_encap_enable(void)
 {
 	static_branch_inc(&udp_encap_needed_key);
@@ -449,8 +449,9 @@ static int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
 	NAPI_GRO_CB(p)->count++;
 	p->data_len += skb->len;
 
-	/* sk owenrship - if any - completely transferred to the aggregated packet */
+	/* sk ownership - if any - completely transferred to the aggregated packet */
 	skb->destructor = NULL;
+	skb->sk = NULL;
 	p->truesize += skb->truesize;
 	p->len += skb->len;
 
@@ -551,11 +552,19 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
 	unsigned int off = skb_gro_offset(skb);
 	int flush = 1;
 
-	/* we can do L4 aggregation only if the packet can't land in a tunnel
-	 * otherwise we could corrupt the inner stream
+	/* We can do L4 aggregation only if the packet can't land in a tunnel
+	 * otherwise we could corrupt the inner stream. Detecting such packets
+	 * cannot be foolproof and the aggregation might still happen in some
+	 * cases. Such packets should be caught in udp_unexpected_gso later.
 	 */
 	NAPI_GRO_CB(skb)->is_flist = 0;
 	if (!sk || !udp_sk(sk)->gro_receive) {
+		/* If the packet was locally encapsulated in a UDP tunnel that
+		 * wasn't detected above, do not GRO.
+		 */
+		if (skb->encapsulation)
+			goto out;
+
 		if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
 			NAPI_GRO_CB(skb)->is_flist = sk ? !udp_test_bit(GRO_ENABLED, sk) : 1;
 
@@ -719,13 +728,7 @@ INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
 		skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
 		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
 
-		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
-			if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
-				skb->csum_level++;
-		} else {
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
-			skb->csum_level = 0;
-		}
+		__skb_incr_checksum_unnecessary(skb);
 
 		return 0;
 	}
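
The gro_complete hunks fold an open-coded sequence into the __skb_incr_checksum_unnecessary() helper; the deleted lines are that helper's logic. For reference, the same state machine in isolation, as a plain C model rather than kernel code:

    #include <stdio.h>

    #define CHECKSUM_NONE        0
    #define CHECKSUM_UNNECESSARY 1
    #define MAX_CSUM_LEVEL       3

    struct pkt { int ip_summed; int csum_level; };

    /* Model of __skb_incr_checksum_unnecessary(): one more header layer has
     * been validated, so either bump the level or start counting at zero. */
    static void incr_checksum_unnecessary(struct pkt *p)
    {
        if (p->ip_summed == CHECKSUM_UNNECESSARY) {
            if (p->csum_level < MAX_CSUM_LEVEL)
                p->csum_level++;
        } else {
            p->ip_summed = CHECKSUM_UNNECESSARY;
            p->csum_level = 0;
        }
    }

    int main(void)
    {
        struct pkt p = { CHECKSUM_NONE, 0 };

        incr_checksum_unnecessary(&p);  /* -> UNNECESSARY, level 0 */
        incr_checksum_unnecessary(&p);  /* -> level 1 */
        printf("summed=%d level=%d\n", p.ip_summed, p.csum_level);
        return 0;
    }
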
@@ -651,19 +651,19 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 	if (!w) {
 		/* New dump:
 		 *
-		 * 1. hook callback destructor.
-		 */
-		cb->args[3] = (long)cb->done;
-		cb->done = fib6_dump_done;
-
-		/*
-		 * 2. allocate and initialize walker.
+		 * 1. allocate and initialize walker.
 		 */
 		w = kzalloc(sizeof(*w), GFP_ATOMIC);
 		if (!w)
 			return -ENOMEM;
 		w->func = fib6_dump_node;
 		cb->args[2] = (long)w;
+
+		/* 2. hook callback destructor.
+		 */
+		cb->args[3] = (long)cb->done;
+		cb->done = fib6_dump_done;
 	}
 
 	arg.skb = skb;
@@ -528,6 +528,9 @@ static int ip6erspan_rcv(struct sk_buff *skb,
 	struct ip6_tnl *tunnel;
 	u8 ver;
 
+	if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
+		return PACKET_REJECT;
+
 	ipv6h = ipv6_hdr(skb);
 	ershdr = (struct erspan_base_hdr *)skb->data;
 	ver = ershdr->ver;
@@ -1125,6 +1125,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
 	void *loc_cpu_entry;
 	struct ip6t_entry *iter;
 
+	if (len < sizeof(tmp))
+		return -EINVAL;
 	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
 		return -EFAULT;
 
@@ -1501,6 +1503,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
 	void *loc_cpu_entry;
 	struct ip6t_entry *iter;
 
+	if (len < sizeof(tmp))
+		return -EINVAL;
 	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
 		return -EFAULT;
 
@@ -447,7 +447,7 @@ csum_copy_err:
 	goto try_again;
 }
 
-DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
+DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
 void udpv6_encap_enable(void)
 {
 	static_branch_inc(&udpv6_encap_needed_key);
@@ -174,13 +174,7 @@ INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
 		skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
 		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
 
-		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
-			if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
-				skb->csum_level++;
-		} else {
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
-			skb->csum_level = 0;
-		}
+		__skb_incr_checksum_unnecessary(skb);
 
 		return 0;
 	}
@@ -3937,8 +3937,6 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
 			mptcp_set_state(newsk, TCP_CLOSE);
 		}
 	} else {
-		MPTCP_INC_STATS(sock_net(ssk),
-				MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
 tcpfallback:
 		newsk->sk_kern_sock = kern;
 		lock_sock(newsk);
@@ -1493,6 +1493,10 @@ int mptcp_set_rcvlowat(struct sock *sk, int val)
 	struct mptcp_subflow_context *subflow;
 	int space, cap;
 
+	/* bpf can land here with a wrong sk type */
+	if (sk->sk_protocol == IPPROTO_TCP)
+		return -EINVAL;
+
 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
 		cap = sk->sk_rcvbuf >> 1;
 	else
@@ -905,6 +905,8 @@ dispose_child:
 	return child;
 
 fallback:
+	if (fallback)
+		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
 	mptcp_subflow_drop_ctx(child);
 	return child;
 }
@@ -1209,10 +1209,11 @@ static bool nft_table_pending_update(const struct nft_ctx *ctx)
 		return true;
 
 	list_for_each_entry(trans, &nft_net->commit_list, list) {
-		if ((trans->msg_type == NFT_MSG_NEWCHAIN ||
-		     trans->msg_type == NFT_MSG_DELCHAIN) &&
-		    trans->ctx.table == ctx->table &&
-		    nft_trans_chain_update(trans))
+		if (trans->ctx.table == ctx->table &&
+		    ((trans->msg_type == NFT_MSG_NEWCHAIN &&
+		      nft_trans_chain_update(trans)) ||
+		     (trans->msg_type == NFT_MSG_DELCHAIN &&
+		      nft_is_base_chain(trans->ctx.chain))))
 			return true;
 	}
 
@@ -2449,6 +2450,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 	struct nft_stats __percpu *stats = NULL;
 	struct nft_chain_hook hook = {};
 
+	if (table->flags & __NFT_TABLE_F_UPDATE)
+		return -EINVAL;
+
 	if (flags & NFT_CHAIN_BINDING)
 		return -EOPNOTSUPP;
 
@@ -8293,11 +8297,12 @@ static int nft_flowtable_parse_hook(const struct nft_ctx *ctx,
 	return err;
 }
 
+/* call under rcu_read_lock */
 static const struct nf_flowtable_type *__nft_flowtable_type_get(u8 family)
 {
 	const struct nf_flowtable_type *type;
 
-	list_for_each_entry(type, &nf_tables_flowtables, list) {
+	list_for_each_entry_rcu(type, &nf_tables_flowtables, list) {
 		if (family == type->family)
 			return type;
 	}
@@ -8309,9 +8314,13 @@ nft_flowtable_type_get(struct net *net, u8 family)
 {
 	const struct nf_flowtable_type *type;
 
+	rcu_read_lock();
 	type = __nft_flowtable_type_get(family);
-	if (type != NULL && try_module_get(type->owner))
+	if (type != NULL && try_module_get(type->owner)) {
+		rcu_read_unlock();
 		return type;
+	}
+	rcu_read_unlock();
 
 	lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES
@@ -10455,10 +10464,11 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
 	struct nft_trans *trans, *next;
 	LIST_HEAD(set_update_list);
 	struct nft_trans_elem *te;
+	int err = 0;
 
 	if (action == NFNL_ABORT_VALIDATE &&
 	    nf_tables_validate(net) < 0)
-		return -EAGAIN;
+		err = -EAGAIN;
 
 	list_for_each_entry_safe_reverse(trans, next, &nft_net->commit_list,
 					 list) {
@@ -10650,12 +10660,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
 		nf_tables_abort_release(trans);
 	}
 
-	if (action == NFNL_ABORT_AUTOLOAD)
-		nf_tables_module_autoload(net);
-	else
-		nf_tables_module_autoload_cleanup(net);
-
-	return 0;
+	return err;
 }
 
 static int nf_tables_abort(struct net *net, struct sk_buff *skb,
@@ -10668,6 +10673,17 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb,
 	gc_seq = nft_gc_seq_begin(nft_net);
 	ret = __nf_tables_abort(net, action);
 	nft_gc_seq_end(nft_net, gc_seq);
 
+	WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
+
+	/* module autoload needs to happen after GC sequence update because it
+	 * temporarily releases and grabs mutex again.
+	 */
+	if (action == NFNL_ABORT_AUTOLOAD)
+		nf_tables_module_autoload(net);
+	else
+		nf_tables_module_autoload_cleanup(net);
+
 	mutex_unlock(&nft_net->commit_mutex);
 
 	return ret;
@@ -11473,9 +11489,10 @@ static void __net_exit nf_tables_exit_net(struct net *net)
 
 	gc_seq = nft_gc_seq_begin(nft_net);
 
-	if (!list_empty(&nft_net->commit_list) ||
-	    !list_empty(&nft_net->module_list))
-		__nf_tables_abort(net, NFNL_ABORT_NONE);
+	WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
+
+	if (!list_empty(&nft_net->module_list))
+		nf_tables_module_autoload_cleanup(net);
 
 	__nft_release_tables(net);
 
@@ -11567,6 +11584,7 @@ static void __exit nf_tables_module_exit(void)
 	unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
 	nft_chain_filter_fini();
 	nft_chain_route_fini();
+	nf_tables_trans_destroy_flush_work();
 	unregister_pernet_subsys(&nf_tables_net_ops);
 	cancel_work_sync(&trans_gc_work);
 	cancel_work_sync(&trans_destroy_work);
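
The flowtable fix closes a narrow window: the list walk must stay inside the RCU read side until try_module_get() has pinned the entry, otherwise the type could be freed between lookup and refcount. A pthread rwlock analogue of look-up-and-pin-before-unlock (hypothetical names; real RCU readers are far cheaper than an rwlock):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    struct ft_type { const char *name; int refcnt; };

    static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct ft_type types[] = { { "ipv4", 0 }, { "ipv6", 0 } };

    /* Safe pattern: the reference is taken while the reader lock still
     * guarantees the entry cannot disappear. */
    static struct ft_type *type_get(const char *name)
    {
        struct ft_type *t = NULL;
        size_t i;

        pthread_rwlock_rdlock(&table_lock);
        for (i = 0; i < sizeof(types) / sizeof(types[0]); i++) {
            if (!strcmp(types[i].name, name)) {
                types[i].refcnt++;  /* pin before unlock */
                t = &types[i];
                break;
            }
        }
        pthread_rwlock_unlock(&table_lock);
        return t;
    }

    int main(void)
    {
        struct ft_type *t = type_get("ipv4");
        printf("%s refcnt=%d\n", t ? t->name : "none", t ? t->refcnt : 0);
        return 0;
    }
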
@@ -302,7 +302,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
 		}
 		ret = PTR_ERR(trans_private);
 		/* Trigger connection so that its ready for the next retry */
-		if (ret == -ENODEV)
+		if (ret == -ENODEV && cp)
 			rds_conn_connect_if_down(cp->cp_conn);
 		goto out;
 	}
@@ -241,13 +241,13 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
 	struct tcf_skbmod *d = to_skbmod(a);
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_skbmod_params *p;
-	struct tc_skbmod opt = {
-		.index = d->tcf_index,
-		.refcnt = refcount_read(&d->tcf_refcnt) - ref,
-		.bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
-	};
+	struct tc_skbmod opt;
 	struct tcf_t t;
 
+	memset(&opt, 0, sizeof(opt));
+	opt.index = d->tcf_index;
+	opt.refcnt = refcount_read(&d->tcf_refcnt) - ref,
+	opt.bindcnt = atomic_read(&d->tcf_bindcnt) - bind;
 	spin_lock_bh(&d->tcf_lock);
 	opt.action = d->tcf_action;
 	p = rcu_dereference_protected(d->skbmod_p,
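
The skbmod change targets an infoleak through struct padding: a designated initializer only defines the named members, and padding bytes may retain stale stack data that a later copy to userspace would expose; memset() clears the padding as well. A small demonstration of the safer pattern (hypothetical struct):

    #include <stdio.h>
    #include <string.h>

    struct opt {
        char action;    /* typically followed by 3 bytes of padding */
        int index;
    };

    int main(void)
    {
        struct opt a = { .action = 1, .index = 2 };  /* padding unspecified */
        struct opt b;

        memset(&b, 0, sizeof(b));  /* clears padding bytes too */
        b.action = 1;
        b.index = 2;

        /* Copying 'a' wholesale may leak whatever the padding held;
         * 'b' is guaranteed all-zero outside the named fields. */
        for (size_t i = 0; i < sizeof(b); i++)
            printf("%02x", ((unsigned char *)&b)[i]);
        printf("  (a.index=%d)\n", a.index);
        return 0;
    }
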
@@ -809,7 +809,7 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
 		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
 						       !qdisc_is_offloaded);
 		/* TODO: perform the search on a per txq basis */
-		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
+		sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid));
 		if (sch == NULL) {
 			WARN_ON_ONCE(parentid != TC_H_ROOT);
 			break;
@@ -120,7 +120,6 @@ virtio_transport_send_pkt_work(struct work_struct *work)
 		if (!skb)
 			break;
 
-		virtio_transport_deliver_tap_pkt(skb);
 		reply = virtio_vsock_skb_reply(skb);
 		sgs = vsock->out_sgs;
 		sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
@@ -170,6 +169,8 @@ virtio_transport_send_pkt_work(struct work_struct *work)
 			break;
 		}
 
+		virtio_transport_deliver_tap_pkt(skb);
+
 		if (reply) {
 			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
 			int val;
@@ -3,6 +3,8 @@
 #ifndef _LINUX_BTF_IDS_H
 #define _LINUX_BTF_IDS_H
 
+#include <linux/types.h> /* for u32 */
+
 struct btf_id_set {
 	u32 cnt;
 	u32 ids[];
@ -6,7 +6,9 @@
|
||||
|
||||
#include "../kselftest_harness.h"
|
||||
|
||||
struct in6_addr in6addr_v4mapped_any = {
|
||||
static const __u32 in4addr_any = INADDR_ANY;
|
||||
static const __u32 in4addr_loopback = INADDR_LOOPBACK;
|
||||
static const struct in6_addr in6addr_v4mapped_any = {
|
||||
.s6_addr = {
|
||||
0, 0, 0, 0,
|
||||
0, 0, 0, 0,
|
||||
@ -14,8 +16,7 @@ struct in6_addr in6addr_v4mapped_any = {
|
||||
0, 0, 0, 0
|
||||
}
|
||||
};
|
||||
|
||||
struct in6_addr in6addr_v4mapped_loopback = {
|
||||
static const struct in6_addr in6addr_v4mapped_loopback = {
|
||||
.s6_addr = {
|
||||
0, 0, 0, 0,
|
||||
0, 0, 0, 0,
|
||||
@ -24,137 +25,785 @@ struct in6_addr in6addr_v4mapped_loopback = {
|
||||
}
|
||||
};
|
||||
|
||||
#define NR_SOCKETS 8
|
||||
|
||||
FIXTURE(bind_wildcard)
|
||||
{
|
||||
struct sockaddr_in addr4;
|
||||
struct sockaddr_in6 addr6;
|
||||
int fd[NR_SOCKETS];
|
||||
socklen_t addrlen[NR_SOCKETS];
|
||||
union {
|
||||
struct sockaddr addr;
|
||||
struct sockaddr_in addr4;
|
||||
struct sockaddr_in6 addr6;
|
||||
} addr[NR_SOCKETS];
|
||||
};
|
||||
|
||||
FIXTURE_VARIANT(bind_wildcard)
|
||||
{
|
||||
const __u32 addr4_const;
|
||||
const struct in6_addr *addr6_const;
|
||||
int expected_errno;
|
||||
sa_family_t family[2];
|
||||
const void *addr[2];
|
||||
bool ipv6_only[2];
|
||||
|
||||
/* 6 bind() calls below follow two bind() for the defined 2 addresses:
|
||||
*
|
||||
* 0.0.0.0
|
||||
* 127.0.0.1
|
||||
* ::
|
||||
* ::1
|
||||
* ::ffff:0.0.0.0
|
||||
* ::ffff:127.0.0.1
|
||||
*/
|
||||
int expected_errno[NR_SOCKETS];
|
||||
int expected_reuse_errno[NR_SOCKETS];
|
||||
};
|
||||
|
||||
/* (IPv4, IPv4) */
|
||||
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v4_local)
|
||||
{
|
||||
.family = {AF_INET, AF_INET},
|
||||
.addr = {&in4addr_any, &in4addr_loopback},
|
||||
.expected_errno = {0, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, 0,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
.expected_reuse_errno = {0, 0,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, 0,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
};
|
||||
|
||||
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v4_any)
|
||||
{
|
||||
.family = {AF_INET, AF_INET},
|
||||
.addr = {&in4addr_loopback, &in4addr_any},
|
||||
.expected_errno = {0, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, 0,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
.expected_reuse_errno = {0, 0,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, 0,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
};
|
||||
|
||||
/* (IPv4, IPv6) */
|
||||
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_any)
|
||||
{
|
||||
.addr4_const = INADDR_ANY,
|
||||
.addr6_const = &in6addr_any,
|
||||
.expected_errno = EADDRINUSE,
|
||||
.family = {AF_INET, AF_INET6},
|
||||
.addr = {&in4addr_any, &in6addr_any},
|
||||
.expected_errno = {0, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, 0,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
.expected_reuse_errno = {0, 0,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
};
|
||||
|
||||
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_any_only)
|
||||
{
|
||||
.family = {AF_INET, AF_INET6},
|
||||
.addr = {&in4addr_any, &in6addr_any},
|
||||
.ipv6_only = {false, true},
|
||||
.expected_errno = {0, 0,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
.expected_reuse_errno = {0, 0,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
};
|
||||
|
||||
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_local)
|
||||
{
|
||||
.addr4_const = INADDR_ANY,
|
||||
.addr6_const = &in6addr_loopback,
|
||||
.expected_errno = 0,
|
||||
.family = {AF_INET, AF_INET6},
|
||||
.addr = {&in4addr_any, &in6addr_loopback},
|
||||
.expected_errno = {0, 0,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
.expected_reuse_errno = {0, 0,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
};
|
||||
|
||||
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_v4mapped_any)
|
||||
{
|
||||
.addr4_const = INADDR_ANY,
|
||||
.addr6_const = &in6addr_v4mapped_any,
|
||||
.expected_errno = EADDRINUSE,
|
||||
.family = {AF_INET, AF_INET6},
|
||||
.addr = {&in4addr_any, &in6addr_v4mapped_any},
|
||||
.expected_errno = {0, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, 0,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
.expected_reuse_errno = {0, 0,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, 0,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
};
|
||||
|
||||
FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_v4mapped_local)
|
||||
{
|
||||
.addr4_const = INADDR_ANY,
|
||||
.addr6_const = &in6addr_v4mapped_loopback,
|
||||
.expected_errno = EADDRINUSE,
|
||||
.family = {AF_INET, AF_INET6},
|
||||
.addr = {&in4addr_any, &in6addr_v4mapped_loopback},
|
||||
.expected_errno = {0, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, 0,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
.expected_reuse_errno = {0, 0,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, 0,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
};
|
||||
|
||||
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_any)
|
||||
{
|
||||
.addr4_const = INADDR_LOOPBACK,
|
||||
.addr6_const = &in6addr_any,
|
||||
.expected_errno = EADDRINUSE,
|
||||
.family = {AF_INET, AF_INET6},
|
||||
.addr = {&in4addr_loopback, &in6addr_any},
|
||||
.expected_errno = {0, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, 0,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
.expected_reuse_errno = {0, 0,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
};
|
||||
|
||||
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_any_only)
|
||||
{
|
||||
.family = {AF_INET, AF_INET6},
|
||||
.addr = {&in4addr_loopback, &in6addr_any},
|
||||
.ipv6_only = {false, true},
|
||||
.expected_errno = {0, 0,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
.expected_reuse_errno = {0, 0,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
};
|
||||
|
||||
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_local)
|
||||
{
|
||||
.addr4_const = INADDR_LOOPBACK,
|
||||
.addr6_const = &in6addr_loopback,
|
||||
.expected_errno = 0,
|
||||
.family = {AF_INET, AF_INET6},
|
||||
.addr = {&in4addr_loopback, &in6addr_loopback},
|
||||
.expected_errno = {0, 0,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
.expected_reuse_errno = {0, 0,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
};
|
||||
|
||||
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_v4mapped_any)
|
||||
{
|
||||
.addr4_const = INADDR_LOOPBACK,
|
||||
.addr6_const = &in6addr_v4mapped_any,
|
||||
.expected_errno = EADDRINUSE,
|
||||
.family = {AF_INET, AF_INET6},
|
||||
.addr = {&in4addr_loopback, &in6addr_v4mapped_any},
|
||||
.expected_errno = {0, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, 0,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
.expected_reuse_errno = {0, 0,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, 0,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
};
|
||||
|
||||
FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_v4mapped_local)
|
||||
{
|
||||
.addr4_const = INADDR_LOOPBACK,
|
||||
.addr6_const = &in6addr_v4mapped_loopback,
|
||||
.expected_errno = EADDRINUSE,
|
||||
.family = {AF_INET, AF_INET6},
|
||||
.addr = {&in4addr_loopback, &in6addr_v4mapped_loopback},
|
||||
.expected_errno = {0, EADDRINUSE,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, 0,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
.expected_reuse_errno = {0, 0,
|
||||
EADDRINUSE, EADDRINUSE,
|
||||
EADDRINUSE, 0,
|
||||
EADDRINUSE, EADDRINUSE},
|
||||
};
|
||||
|
||||
+/* (IPv6, IPv4) */
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_v4_any)
+{
+	.family = {AF_INET6, AF_INET},
+	.addr = {&in6addr_any, &in4addr_any},
+	.expected_errno = {0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_only_v4_any)
+{
+	.family = {AF_INET6, AF_INET},
+	.addr = {&in6addr_any, &in4addr_any},
+	.ipv6_only = {true, false},
+	.expected_errno = {0, 0,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_v4_local)
+{
+	.family = {AF_INET6, AF_INET},
+	.addr = {&in6addr_any, &in4addr_loopback},
+	.expected_errno = {0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_only_v4_local)
+{
+	.family = {AF_INET6, AF_INET},
+	.addr = {&in6addr_any, &in4addr_loopback},
+	.ipv6_only = {true, false},
+	.expected_errno = {0, 0,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_local_v4_any)
+{
+	.family = {AF_INET6, AF_INET},
+	.addr = {&in6addr_loopback, &in4addr_any},
+	.expected_errno = {0, 0,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_local_v4_local)
+{
+	.family = {AF_INET6, AF_INET},
+	.addr = {&in6addr_loopback, &in4addr_loopback},
+	.expected_errno = {0, 0,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_any_v4_any)
+{
+	.family = {AF_INET6, AF_INET},
+	.addr = {&in6addr_v4mapped_any, &in4addr_any},
+	.expected_errno = {0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, 0,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, 0,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_any_v4_local)
+{
+	.family = {AF_INET6, AF_INET},
+	.addr = {&in6addr_v4mapped_any, &in4addr_loopback},
+	.expected_errno = {0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, 0,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, 0,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_local_v4_any)
+{
+	.family = {AF_INET6, AF_INET},
+	.addr = {&in6addr_v4mapped_loopback, &in4addr_any},
+	.expected_errno = {0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, 0,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, 0,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_local_v4_local)
+{
+	.family = {AF_INET6, AF_INET},
+	.addr = {&in6addr_v4mapped_loopback, &in4addr_loopback},
+	.expected_errno = {0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, 0,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, 0,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+/* (IPv6, IPv6) */
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_v6_any)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_any, &in6addr_any},
+	.expected_errno = {0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_only_v6_any)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_any, &in6addr_any},
+	.ipv6_only = {true, false},
+	.expected_errno = {0, EADDRINUSE,
+			   0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_v6_any_only)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_any, &in6addr_any},
+	.ipv6_only = {false, true},
+	.expected_errno = {0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_only_v6_any_only)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_any, &in6addr_any},
+	.ipv6_only = {true, true},
+	.expected_errno = {0, EADDRINUSE,
+			   0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 0, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_v6_local)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_any, &in6addr_loopback},
+	.expected_errno = {0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_only_v6_local)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_any, &in6addr_loopback},
+	.ipv6_only = {true, false},
+	.expected_errno = {0, EADDRINUSE,
+			   0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 0, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_v6_v4mapped_any)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_any, &in6addr_v4mapped_any},
+	.expected_errno = {0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_only_v6_v4mapped_any)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_any, &in6addr_v4mapped_any},
+	.ipv6_only = {true, false},
+	.expected_errno = {0, 0,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_v6_v4mapped_local)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_any, &in6addr_v4mapped_loopback},
+	.expected_errno = {0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_any_only_v6_v4mapped_local)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_any, &in6addr_v4mapped_loopback},
+	.ipv6_only = {true, false},
+	.expected_errno = {0, 0,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_local_v6_any)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_loopback, &in6addr_any},
+	.expected_errno = {0, EADDRINUSE,
+			   0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_local_v6_any_only)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_loopback, &in6addr_any},
+	.ipv6_only = {false, true},
+	.expected_errno = {0, EADDRINUSE,
+			   0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 0, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_local_v6_v4mapped_any)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_loopback, &in6addr_v4mapped_any},
+	.expected_errno = {0, 0,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_local_v6_v4mapped_local)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_loopback, &in6addr_v4mapped_loopback},
+	.expected_errno = {0, 0,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_any_v6_any)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_v4mapped_any, &in6addr_any},
+	.expected_errno = {0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, 0,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_any_v6_any_only)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_v4mapped_any, &in6addr_any},
+	.ipv6_only = {false, true},
+	.expected_errno = {0, 0,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_any_v6_local)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_v4mapped_any, &in6addr_loopback},
+	.expected_errno = {0, 0,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_any_v6_v4mapped_local)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_v4mapped_any, &in6addr_v4mapped_loopback},
+	.expected_errno = {0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, 0,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, 0,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_loopback_v6_any)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_v4mapped_loopback, &in6addr_any},
+	.expected_errno = {0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, 0,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_loopback_v6_any_only)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_v4mapped_loopback, &in6addr_any},
+	.ipv6_only = {false, true},
+	.expected_errno = {0, 0,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_loopback_v6_local)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_v4mapped_loopback, &in6addr_loopback},
+	.expected_errno = {0, 0,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v6_v4mapped_loopback_v6_v4mapped_any)
+{
+	.family = {AF_INET6, AF_INET6},
+	.addr = {&in6addr_v4mapped_loopback, &in6addr_v4mapped_any},
+	.expected_errno = {0, EADDRINUSE,
+			   EADDRINUSE, EADDRINUSE,
+			   EADDRINUSE, 0,
+			   EADDRINUSE, EADDRINUSE},
+	.expected_reuse_errno = {0, 0,
+				 EADDRINUSE, EADDRINUSE,
+				 EADDRINUSE, 0,
+				 EADDRINUSE, EADDRINUSE},
+};
+
+static void setup_addr(FIXTURE_DATA(bind_wildcard) *self, int i,
+		       int family, const void *addr_const)
+{
+	if (family == AF_INET) {
+		struct sockaddr_in *addr4 = &self->addr[i].addr4;
+		const __u32 *addr4_const = addr_const;
+
+		addr4->sin_family = AF_INET;
+		addr4->sin_port = htons(0);
+		addr4->sin_addr.s_addr = htonl(*addr4_const);
+
+		self->addrlen[i] = sizeof(struct sockaddr_in);
+	} else {
+		struct sockaddr_in6 *addr6 = &self->addr[i].addr6;
+		const struct in6_addr *addr6_const = addr_const;
+
+		addr6->sin6_family = AF_INET6;
+		addr6->sin6_port = htons(0);
+		addr6->sin6_addr = *addr6_const;
+
+		self->addrlen[i] = sizeof(struct sockaddr_in6);
+	}
+}
+
 FIXTURE_SETUP(bind_wildcard)
 {
-	self->addr4.sin_family = AF_INET;
-	self->addr4.sin_port = htons(0);
-	self->addr4.sin_addr.s_addr = htonl(variant->addr4_const);
+	setup_addr(self, 0, variant->family[0], variant->addr[0]);
+	setup_addr(self, 1, variant->family[1], variant->addr[1]);
 
-	self->addr6.sin6_family = AF_INET6;
-	self->addr6.sin6_port = htons(0);
-	self->addr6.sin6_addr = *variant->addr6_const;
+	setup_addr(self, 2, AF_INET, &in4addr_any);
+	setup_addr(self, 3, AF_INET, &in4addr_loopback);
+
+	setup_addr(self, 4, AF_INET6, &in6addr_any);
+	setup_addr(self, 5, AF_INET6, &in6addr_loopback);
+	setup_addr(self, 6, AF_INET6, &in6addr_v4mapped_any);
+	setup_addr(self, 7, AF_INET6, &in6addr_v4mapped_loopback);
 }
 
+FIXTURE_TEARDOWN(bind_wildcard)
+{
+	int i;
+
+	for (i = 0; i < NR_SOCKETS; i++)
+		close(self->fd[i]);
+}
+
-void bind_sockets(struct __test_metadata *_metadata,
-		  FIXTURE_DATA(bind_wildcard) *self,
-		  int expected_errno,
-		  struct sockaddr *addr1, socklen_t addrlen1,
-		  struct sockaddr *addr2, socklen_t addrlen2)
+void bind_socket(struct __test_metadata *_metadata,
+		 FIXTURE_DATA(bind_wildcard) *self,
+		 const FIXTURE_VARIANT(bind_wildcard) *variant,
+		 int i, int reuse)
 {
-	int fd[2];
 	int ret;
 
-	fd[0] = socket(addr1->sa_family, SOCK_STREAM, 0);
-	ASSERT_GT(fd[0], 0);
+	self->fd[i] = socket(self->addr[i].addr.sa_family, SOCK_STREAM, 0);
+	ASSERT_GT(self->fd[i], 0);
 
-	ret = bind(fd[0], addr1, addrlen1);
-	ASSERT_EQ(ret, 0);
-
-	ret = getsockname(fd[0], addr1, &addrlen1);
-	ASSERT_EQ(ret, 0);
-
-	((struct sockaddr_in *)addr2)->sin_port = ((struct sockaddr_in *)addr1)->sin_port;
-
-	fd[1] = socket(addr2->sa_family, SOCK_STREAM, 0);
-	ASSERT_GT(fd[1], 0);
-
-	ret = bind(fd[1], addr2, addrlen2);
-	if (expected_errno) {
-		ASSERT_EQ(ret, -1);
-		ASSERT_EQ(errno, expected_errno);
-	} else {
+	if (i < 2 && variant->ipv6_only[i]) {
+		ret = setsockopt(self->fd[i], SOL_IPV6, IPV6_V6ONLY, &(int){1}, sizeof(int));
 		ASSERT_EQ(ret, 0);
 	}
 
-	close(fd[1]);
-	close(fd[0]);
+	if (i < 2 && reuse) {
+		ret = setsockopt(self->fd[i], SOL_SOCKET, reuse, &(int){1}, sizeof(int));
+		ASSERT_EQ(ret, 0);
+	}
+
+	self->addr[i].addr4.sin_port = self->addr[0].addr4.sin_port;
+
+	ret = bind(self->fd[i], &self->addr[i].addr, self->addrlen[i]);
+
+	if (reuse) {
+		if (variant->expected_reuse_errno[i]) {
+			ASSERT_EQ(ret, -1);
+			ASSERT_EQ(errno, variant->expected_reuse_errno[i]);
+		} else {
+			ASSERT_EQ(ret, 0);
+		}
+	} else {
+		if (variant->expected_errno[i]) {
+			ASSERT_EQ(ret, -1);
+			ASSERT_EQ(errno, variant->expected_errno[i]);
+		} else {
+			ASSERT_EQ(ret, 0);
+		}
+	}
+
+	if (i == 0) {
+		ret = getsockname(self->fd[0], &self->addr[0].addr, &self->addrlen[0]);
+		ASSERT_EQ(ret, 0);
+	}
 }
 
-TEST_F(bind_wildcard, v4_v6)
+TEST_F(bind_wildcard, plain)
 {
-	bind_sockets(_metadata, self, variant->expected_errno,
-		     (struct sockaddr *)&self->addr4, sizeof(self->addr4),
-		     (struct sockaddr *)&self->addr6, sizeof(self->addr6));
+	int i;
+
+	for (i = 0; i < NR_SOCKETS; i++)
+		bind_socket(_metadata, self, variant, i, 0);
 }
 
-TEST_F(bind_wildcard, v6_v4)
+TEST_F(bind_wildcard, reuseaddr)
 {
-	bind_sockets(_metadata, self, variant->expected_errno,
-		     (struct sockaddr *)&self->addr6, sizeof(self->addr6),
-		     (struct sockaddr *)&self->addr4, sizeof(self->addr4));
+	int i;
+
+	for (i = 0; i < NR_SOCKETS; i++)
+		bind_socket(_metadata, self, variant, i, SO_REUSEADDR);
 }
 
+TEST_F(bind_wildcard, reuseport)
+{
+	int i;
+
+	for (i = 0; i < NR_SOCKETS; i++)
+		bind_socket(_metadata, self, variant, i, SO_REUSEPORT);
+}
+
 TEST_HARNESS_MAIN
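The variant tables above encode the whole bind() conflict matrix as data; the harness only walks it, binding the two variant sockets and then the six fixed wildcard/loopback sockets in order. For readers who want to poke at the underlying kernel behaviour outside the harness, here is a minimal standalone sketch (plain libc, not part of the patch) of one cell of that matrix: an AF_INET6 socket bound to the v4-mapped loopback should make a plain IPv4 bind() to 127.0.0.1 on the same port fail with EADDRINUSE.

#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in6 a6 = { .sin6_family = AF_INET6 };
	struct sockaddr_in a4 = { .sin_family = AF_INET };
	socklen_t len = sizeof(a6);
	int fd6, fd4;

	/* Bind an IPv6 socket to the v4-mapped loopback, port 0 (autobind). */
	inet_pton(AF_INET6, "::ffff:127.0.0.1", &a6.sin6_addr);
	fd6 = socket(AF_INET6, SOCK_STREAM, 0);
	bind(fd6, (struct sockaddr *)&a6, sizeof(a6));
	getsockname(fd6, (struct sockaddr *)&a6, &len);	/* learn the port */

	/* Now try a plain IPv4 bind to 127.0.0.1 on the same port. */
	a4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	a4.sin_port = a6.sin6_port;
	fd4 = socket(AF_INET, SOCK_STREAM, 0);

	if (bind(fd4, (struct sockaddr *)&a4, sizeof(a4)) < 0)
		printf("second bind failed: %s\n", strerror(errno));	/* expect EADDRINUSE */

	close(fd4);
	close(fd6);
	return 0;
}
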
@@ -383,12 +383,14 @@ do_transfer()
 	local stat_cookierx_last
 	local stat_csum_err_s
 	local stat_csum_err_c
+	local stat_tcpfb_last_l
 	stat_synrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
 	stat_ackrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
 	stat_cookietx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
 	stat_cookierx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
 	stat_csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
 	stat_csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
+	stat_tcpfb_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableFallbackACK")
 
 	timeout ${timeout_test} \
 		ip netns exec ${listener_ns} \
@@ -457,11 +459,13 @@ do_transfer()
 	local stat_cookietx_now
 	local stat_cookierx_now
 	local stat_ooo_now
+	local stat_tcpfb_now_l
 	stat_synrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
 	stat_ackrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
 	stat_cookietx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
 	stat_cookierx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
 	stat_ooo_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtTCPOFOQueue")
+	stat_tcpfb_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableFallbackACK")
 
 	expect_synrx=$((stat_synrx_last_l))
 	expect_ackrx=$((stat_ackrx_last_l))
@@ -508,6 +512,11 @@ do_transfer()
 		fi
 	fi
 
+	if [ ${stat_ooo_now} -eq 0 ] && [ ${stat_tcpfb_last_l} -ne ${stat_tcpfb_now_l} ]; then
+		mptcp_lib_pr_fail "unexpected fallback to TCP"
+		rets=1
+	fi
+
 	if [ $cookies -eq 2 ];then
 		if [ $stat_cookietx_last -ge $stat_cookietx_now ] ;then
 			extra+=" WARN: CookieSent: did not advance"
 
@@ -729,7 +729,7 @@ pm_nl_check_endpoint()
 			[ -n "$_flags" ]; flags="flags $_flags"
 			shift
 		elif [ $1 = "dev" ]; then
-			[ -n "$2" ]; dev="dev $1"
+			[ -n "$2" ]; dev="dev $2"
 			shift
 		elif [ $1 = "id" ]; then
 			_id=$2
@@ -3610,6 +3610,8 @@ endpoint_tests()
 	local tests_pid=$!
 
 	wait_mpj $ns2
+	pm_nl_check_endpoint "creation" \
+		$ns2 10.0.2.2 id 2 flags subflow dev ns2eth2
 	chk_subflow_nr "before delete" 2
 	chk_mptcp_info subflows 1 subflows 1
 
@@ -109,6 +109,6 @@ int main(void)
 	fd1 = open_port(0, 1);
 	if (fd1 >= 0)
 		error(1, 0, "Was allowed to create an ipv4 reuseport on an already bound non-reuseport socket with no ipv6");
-	fprintf(stderr, "Success");
+	fprintf(stderr, "Success\n");
 	return 0;
 }
 
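The hunk above is from a reuseport conflict test; the rule it exercises is that a second bind() to the same address and port only succeeds when every socket in the group opted in to SO_REUSEPORT before binding. A minimal sketch of that rule, separate from the selftest (the helper name and the fixed port below are illustrative assumptions, not taken from the file):

#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Illustrative helper (not from the selftest): bind a TCP socket to
 * 0.0.0.0:port, optionally opting in to SO_REUSEPORT first. */
static int bound_socket(int reuseport, unsigned short port)
{
	struct sockaddr_in a = {
		.sin_family = AF_INET,
		.sin_port = htons(port),
	};
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (reuseport)
		setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &(int){1}, sizeof(int));

	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0) {
		int err = errno;

		close(fd);
		errno = err;	/* keep bind()'s errno past close() */
		return -1;
	}
	return fd;
}

int main(void)
{
	int fd1 = bound_socket(0, 7777);	/* plain bind, arbitrary port */
	int fd2 = bound_socket(1, 7777);	/* expected: -1 / EADDRINUSE */

	printf("second bind: %s\n", fd2 < 0 ? strerror(errno) : "succeeded");

	if (fd2 >= 0)
		close(fd2);
	close(fd1);
	return 0;
}
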
@@ -244,7 +244,7 @@ for family in 4 6; do
 	create_vxlan_pair
 	ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on
 	ip netns exec $NS_DST ethtool -K veth$DST rx-gro-list on
-	run_test "GRO frag list over UDP tunnel" $OL_NET$DST 1 1
+	run_test "GRO frag list over UDP tunnel" $OL_NET$DST 10 10
 	cleanup
 
 	# use NAT to circumvent GRO FWD check
@@ -258,13 +258,7 @@ for family in 4 6; do
 	# load arp cache before running the test to reduce the amount of
 	# stray traffic on top of the UDP tunnel
 	ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null
-	run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 1 1 $OL_NET$DST
-	cleanup
-
-	create_vxlan_pair
-	run_bench "UDP tunnel fwd perf" $OL_NET$DST
-	ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on
-	run_bench "UDP tunnel GRO fwd perf" $OL_NET$DST
+	run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 10 10 $OL_NET$DST
 	cleanup
 done