Merge tag 'net-6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from netfilter and wifi.

  Current release - regressions:

   - Revert "net/sched: taprio: make qdisc_leaf() see the
     per-netdev-queue pfifo child qdiscs", it may cause crashes when the
     qdisc is reconfigured

   - inet: ping: fix splat due to packet allocation refactoring in inet

   - tcp: clean up kernel listener's reqsk in inet_twsk_purge(), fix UAF
     due to races when the per-netns hash table is used

  Current release - new code bugs:

   - eth: adin1110: check in netdev_event that the netdev belongs to the
     driver

   - fixes for PTR_ERR() vs NULL bugs in driver code, from Dan and co.

  Previous releases - regressions:

   - ipv4: handle attempt to delete multipath route when fib_info
     contains an nh reference, avoid out-of-bounds access

   - wifi: fix a handful of bugs in the new Multi-BSSID code

   - wifi: mt76: fix rate reporting / throughput regression on mt7915
     and newer, fix checksum offload

   - wifi: iwlwifi: mvm: fix double list_add at
     iwl_mvm_mac_wake_tx_queue (other cases)

   - wifi: mac80211: do not drop packets smaller than the LLC-SNAP
     header on fast-rx

  Previous releases - always broken:

   - ieee802154: don't warn on zero-sized raw_sendmsg()

   - ipv6: ping: fix wrong checksum for large frames

   - mctp: prevent double key removal and unref

   - tcp/udp: fix memory leaks and races around IPV6_ADDRFORM

   - hv_netvsc: fix race between VF offering and VF association message

  Misc:

   - remove -Warray-bounds silencing in the drivers now that the
     compilers are fixed"

* tag 'net-6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (73 commits)
  sunhme: fix an IS_ERR() vs NULL check in probe
  net: marvell: prestera: fix a couple NULL vs IS_ERR() checks
  kcm: avoid potential race in kcm_tx_work
  tcp: Clean up kernel listener's reqsk in inet_twsk_purge()
  net: phy: micrel: Fixes FIELD_GET assertion
  openvswitch: add nf_ct_is_confirmed check before assigning the helper
  tcp: Fix data races around icsk->icsk_af_ops.
  ipv6: Fix data races around sk->sk_prot.
  tcp/udp: Call inet6_destroy_sock() in IPv6 sk->sk_destruct().
  udp: Call inet6_destroy_sock() in setsockopt(IPV6_ADDRFORM).
  tcp/udp: Fix memory leak in ipv6_renew_options().
  mctp: prevent double key removal and unref
  selftests: netfilter: Fix nft_fib.sh for all.rp_filter=1
  netfilter: rpfilter/fib: Populate flowic_l3mdev field
  selftests: netfilter: Test reverse path filtering
  net/mlx5: Make ASO poll CQ usable in atomic context
  tcp: cdg: allow tcp_cdg_release() to be called multiple times
  inet: ping: fix recent breakage
  ipv6: ping: fix wrong checksum for large frames
  net: ethernet: ti: am65-cpsw: set correct devlink flavour for unused ports
  ...
commit 66ae04368e
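Several of the fixes pulled here ("fixes for PTR_ERR() vs NULL bugs in driver code", the sunhme and prestera commits in the list above) are the same class of bug: a caller tests for the wrong failure convention. Some kernel APIs report failure with NULL, others return an errno encoded with ERR_PTR(), and checking one when the API uses the other silently ignores the error. The following is a small, self-contained userspace sketch of that convention, not code from this merge; the helpers imitate <linux/err.h> and alloc_region() is an invented stand-in for a NULL-returning allocator such as devm_request_region().

/* Simplified userspace re-implementation of the ERR_PTR()/IS_ERR() idiom. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* Error pointers live in the last page of the address space. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Invented stand-in: reports failure with NULL, like devm_request_region(). */
static void *alloc_region(int fail)
{
	return fail ? NULL : malloc(16);
}

int main(void)
{
	void *res = alloc_region(1);

	/* Wrong check: IS_ERR(NULL) is false, so the failure goes unnoticed. */
	if (IS_ERR(res))
		printf("never reached for a NULL-returning API: %ld\n", PTR_ERR(res));

	/* Right check for this API: test for NULL and pick an errno ourselves. */
	if (!res) {
		printf("allocation failed: %d\n", -EBUSY);
		return 1;
	}

	free(res);
	return 0;
}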
@@ -120,7 +120,7 @@ required delays, as defined per the RGMII standard, several options may be
 available:
 
 * Some SoCs may offer a pin pad/mux/controller capable of configuring a given
-  set of pins'strength, delays, and voltage; and it may be a suitable
+  set of pins' strength, delays, and voltage; and it may be a suitable
   option to insert the expected 2ns RGMII delay.
 
 * Modifying the PCB design to include a fixed delay (e.g: using a specifically
@@ -18670,6 +18670,7 @@ F:	drivers/misc/sgi-xp/
 SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
 M:	Karsten Graul <kgraul@linux.ibm.com>
 M:	Wenjia Zhang <wenjia@linux.ibm.com>
+M:	Jan Karcher <jaka@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
 W:	http://www.ibm.com/developerworks/linux/linux390/
@@ -2345,8 +2345,7 @@ HFC_init(void)
 static void __exit
 HFC_cleanup(void)
 {
-	if (timer_pending(&hfc_tl))
-		del_timer_sync(&hfc_tl);
+	del_timer_sync(&hfc_tl);
 
 	pci_unregister_driver(&hfc_driver);
 }
@@ -178,6 +178,8 @@ struct kvaser_usb_dev_cfg {
 extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops;
 extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops;
 
+void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv);
+
 int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
 			int *actual_len);
 
@@ -477,7 +477,7 @@ static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
 /* This method might sleep. Do not call it in the atomic context
  * of URB completions.
  */
-static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
 {
 	usb_kill_anchored_urbs(&priv->tx_submitted);
 	kvaser_usb_reset_tx_urb_contexts(priv);
@@ -729,6 +729,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
 	init_usb_anchor(&priv->tx_submitted);
 	init_completion(&priv->start_comp);
 	init_completion(&priv->stop_comp);
+	init_completion(&priv->flush_comp);
 	priv->can.ctrlmode_supported = 0;
 
 	priv->dev = dev;
@@ -1916,7 +1916,7 @@ static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv)
 {
 	int err;
 
-	init_completion(&priv->flush_comp);
+	reinit_completion(&priv->flush_comp);
 
 	err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE,
 					       priv->channel);
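The three kvaser_usb hunks above all serve one pattern: priv->flush_comp is now initialized exactly once when the channel is set up, and the flush path only re-arms it with reinit_completion(), so a waiter can never race with a completion object that is being re-initialized underneath it. As a hedged, illustrative sketch of that pattern (invented my_dev names, not driver code, and not buildable outside a kernel tree):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct my_dev {
	struct completion flush_comp;
};

/* Called once, when the object is created. */
static void my_dev_setup(struct my_dev *dev)
{
	init_completion(&dev->flush_comp);
}

/* Called on every flush: re-arm, kick the hardware, then wait. */
static int my_dev_flush(struct my_dev *dev)
{
	reinit_completion(&dev->flush_comp);

	/* ... submit the flush command to the device here ... */

	if (!wait_for_completion_timeout(&dev->flush_comp,
					 msecs_to_jiffies(1000)))
		return -ETIMEDOUT;
	return 0;
}

/* Completion handler (e.g. a URB callback) just signals the waiter. */
static void my_dev_flush_done(struct my_dev *dev)
{
	complete(&dev->flush_comp);
}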
@ -310,6 +310,38 @@ struct kvaser_cmd {
|
||||
} u;
|
||||
} __packed;
|
||||
|
||||
#define CMD_SIZE_ANY 0xff
|
||||
#define kvaser_fsize(field) sizeof_field(struct kvaser_cmd, field)
|
||||
|
||||
static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
|
||||
[CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
|
||||
[CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
|
||||
[CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
|
||||
[CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
|
||||
[CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo),
|
||||
[CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
|
||||
[CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
|
||||
[CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message),
|
||||
[CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event),
|
||||
[CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
|
||||
/* ignored events: */
|
||||
[CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY,
|
||||
};
|
||||
|
||||
static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
|
||||
[CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
|
||||
[CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
|
||||
[CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
|
||||
[CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
|
||||
[CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo),
|
||||
[CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
|
||||
[CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
|
||||
[CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event),
|
||||
[CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
|
||||
/* ignored events: */
|
||||
[CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY,
|
||||
};
|
||||
|
||||
/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
|
||||
* handling. Some discrepancies between the two families exist:
|
||||
*
|
||||
@ -397,6 +429,43 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = {
|
||||
.bittiming_const = &kvaser_usb_flexc_bittiming_const,
|
||||
};
|
||||
|
||||
static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev,
|
||||
const struct kvaser_cmd *cmd)
|
||||
{
|
||||
/* buffer size >= cmd->len ensured by caller */
|
||||
u8 min_size = 0;
|
||||
|
||||
switch (dev->driver_info->family) {
|
||||
case KVASER_LEAF:
|
||||
if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_leaf))
|
||||
min_size = kvaser_usb_leaf_cmd_sizes_leaf[cmd->id];
|
||||
break;
|
||||
case KVASER_USBCAN:
|
||||
if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_usbcan))
|
||||
min_size = kvaser_usb_leaf_cmd_sizes_usbcan[cmd->id];
|
||||
break;
|
||||
}
|
||||
|
||||
if (min_size == CMD_SIZE_ANY)
|
||||
return 0;
|
||||
|
||||
if (min_size) {
|
||||
min_size += CMD_HEADER_LEN;
|
||||
if (cmd->len >= min_size)
|
||||
return 0;
|
||||
|
||||
dev_err_ratelimited(&dev->intf->dev,
|
||||
"Received command %u too short (size %u, needed %u)",
|
||||
cmd->id, cmd->len, min_size);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
dev_warn_ratelimited(&dev->intf->dev,
|
||||
"Unhandled command (%d, size %d)\n",
|
||||
cmd->id, cmd->len);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void *
|
||||
kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
|
||||
const struct sk_buff *skb, int *cmd_len,
|
||||
@ -502,6 +571,9 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
|
||||
end:
|
||||
kfree(buf);
|
||||
|
||||
if (err == 0)
|
||||
err = kvaser_usb_leaf_verify_size(dev, cmd);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1133,6 +1205,9 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev,
|
||||
static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
|
||||
const struct kvaser_cmd *cmd)
|
||||
{
|
||||
if (kvaser_usb_leaf_verify_size(dev, cmd) < 0)
|
||||
return;
|
||||
|
||||
switch (cmd->id) {
|
||||
case CMD_START_CHIP_REPLY:
|
||||
kvaser_usb_leaf_start_chip_reply(dev, cmd);
|
||||
@ -1351,9 +1426,13 @@ static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
|
||||
|
||||
switch (mode) {
|
||||
case CAN_MODE_START:
|
||||
kvaser_usb_unlink_tx_urbs(priv);
|
||||
|
||||
err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
priv->can.state = CAN_STATE_ERROR_ACTIVE;
|
||||
break;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
|
@ -1169,6 +1169,11 @@ static int adin1110_port_bridge_leave(struct adin1110_port_priv *port_priv,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool adin1110_port_dev_check(const struct net_device *dev)
|
||||
{
|
||||
return dev->netdev_ops == &adin1110_netdev_ops;
|
||||
}
|
||||
|
||||
static int adin1110_netdevice_event(struct notifier_block *unused,
|
||||
unsigned long event, void *ptr)
|
||||
{
|
||||
@ -1177,6 +1182,9 @@ static int adin1110_netdevice_event(struct notifier_block *unused,
|
||||
struct netdev_notifier_changeupper_info *info = ptr;
|
||||
int ret = 0;
|
||||
|
||||
if (!adin1110_port_dev_check(dev))
|
||||
return NOTIFY_DONE;
|
||||
|
||||
switch (event) {
|
||||
case NETDEV_CHANGEUPPER:
|
||||
if (netif_is_bridge_master(info->upper_dev)) {
|
||||
@ -1202,11 +1210,6 @@ static void adin1110_disconnect_phy(void *data)
|
||||
phy_disconnect(data);
|
||||
}
|
||||
|
||||
static bool adin1110_port_dev_check(const struct net_device *dev)
|
||||
{
|
||||
return dev->netdev_ops == &adin1110_netdev_ops;
|
||||
}
|
||||
|
||||
static int adin1110_port_set_forwarding_state(struct adin1110_port_priv *port_priv)
|
||||
{
|
||||
struct adin1110_priv *priv = port_priv->priv;
|
||||
|
@ -17,8 +17,3 @@ obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o
|
||||
obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
|
||||
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
|
||||
obj-$(CONFIG_BNXT) += bnxt/
|
||||
|
||||
# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
|
||||
ifndef KBUILD_EXTRA_WARN
|
||||
CFLAGS_tg3.o += -Wno-array-bounds
|
||||
endif
|
||||
|
@ -484,7 +484,7 @@ struct bcm_rsb {
|
||||
|
||||
/* Number of Receive hardware descriptor words */
|
||||
#define SP_NUM_HW_RX_DESC_WORDS 1024
|
||||
#define SP_LT_NUM_HW_RX_DESC_WORDS 256
|
||||
#define SP_LT_NUM_HW_RX_DESC_WORDS 512
|
||||
|
||||
/* Internal linked-list RAM size */
|
||||
#define SP_NUM_TX_DESC 1536
|
||||
|
@ -7,7 +7,6 @@
|
||||
#include <linux/math64.h>
|
||||
#include <linux/refcount.h>
|
||||
#include <net/pkt_cls.h>
|
||||
#include <net/pkt_sched.h>
|
||||
#include <net/tc_act/tc_gate.h>
|
||||
|
||||
static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
|
||||
|
@ -1182,8 +1182,10 @@ static int mcs_register_interrupts(struct mcs *mcs)
|
||||
mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);
|
||||
|
||||
mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
|
||||
if (!mcs->tx_sa_active)
|
||||
if (!mcs->tx_sa_active) {
|
||||
ret = -ENOMEM;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
return ret;
|
||||
exit:
|
||||
|
@ -133,7 +133,7 @@ static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
};
|
||||
}
|
||||
|
||||
mutex_unlock(&mbox->lock);
|
||||
|
||||
@ -284,7 +284,7 @@ static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
|
||||
|
||||
sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
|
||||
if (!sc_req) {
|
||||
return -ENOMEM;
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
@ -594,7 +594,7 @@ static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
|
||||
|
||||
req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
|
||||
if (!req) {
|
||||
return -ENOMEM;
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
@ -1653,6 +1653,7 @@ int cn10k_mcs_init(struct otx2_nic *pfvf)
|
||||
return 0;
|
||||
fail:
|
||||
dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
|
||||
mutex_unlock(&mbox->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2810,7 +2810,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
err = register_netdev(netdev);
|
||||
if (err) {
|
||||
dev_err(dev, "Failed to register netdevice\n");
|
||||
goto err_del_mcam_entries;
|
||||
goto err_mcs_free;
|
||||
}
|
||||
|
||||
err = otx2_wq_init(pf);
|
||||
@ -2849,6 +2849,8 @@ err_mcam_flow_del:
|
||||
otx2_mcam_flow_del(pf);
|
||||
err_unreg_netdev:
|
||||
unregister_netdev(netdev);
|
||||
err_mcs_free:
|
||||
cn10k_mcs_free(pf);
|
||||
err_del_mcam_entries:
|
||||
otx2_mcam_flow_del(pf);
|
||||
err_ptp_destroy:
|
||||
|
@ -96,6 +96,8 @@ int prestera_mall_replace(struct prestera_flow_block *block,
|
||||
|
||||
list_for_each_entry(binding, &block->binding_list, list) {
|
||||
err = prestera_span_rule_add(binding, port, block->ingress);
|
||||
if (err == -EEXIST)
|
||||
return err;
|
||||
if (err)
|
||||
goto rollback;
|
||||
}
|
||||
|
@@ -498,8 +498,8 @@ prestera_nexthop_group_get(struct prestera_switch *sw,
 		refcount_inc(&nh_grp->refcount);
 	} else {
 		nh_grp = __prestera_nexthop_group_create(sw, key);
-		if (IS_ERR(nh_grp))
-			return ERR_CAST(nh_grp);
+		if (!nh_grp)
+			return ERR_PTR(-ENOMEM);
 
 		refcount_set(&nh_grp->refcount, 1);
 	}
@@ -651,7 +651,7 @@ prestera_fib_node_create(struct prestera_switch *sw,
 	case PRESTERA_FIB_TYPE_UC_NH:
 		fib_node->info.nh_grp = prestera_nexthop_group_get(sw,
 								   nh_grp_key);
-		if (!fib_node->info.nh_grp)
+		if (IS_ERR(fib_node->info.nh_grp))
 			goto err_nh_grp_get;
 
 		grp_id = fib_node->info.nh_grp->grp_id;
@ -107,7 +107,7 @@ static int prestera_span_put(struct prestera_switch *sw, u8 span_id)
|
||||
|
||||
entry = prestera_span_entry_find_by_id(sw->span, span_id);
|
||||
if (!entry)
|
||||
return false;
|
||||
return -ENOENT;
|
||||
|
||||
if (!refcount_dec_and_test(&entry->ref_count))
|
||||
return 0;
|
||||
@ -151,6 +151,9 @@ int prestera_span_rule_del(struct prestera_flow_block_binding *binding,
|
||||
{
|
||||
int err;
|
||||
|
||||
if (binding->span_id == PRESTERA_SPAN_INVALID_ID)
|
||||
return -ENOENT;
|
||||
|
||||
err = prestera_hw_span_unbind(binding->port, ingress);
|
||||
if (err)
|
||||
return err;
|
||||
|
@ -11,8 +11,3 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
|
||||
endif
|
||||
obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
|
||||
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
|
||||
|
||||
# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
|
||||
ifndef KBUILD_EXTRA_WARN
|
||||
CFLAGS_mtk_ppe.o += -Wno-array-bounds
|
||||
endif
|
||||
|
@@ -115,6 +115,7 @@ mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
 	struct mlx5e_flow_meters *flow_meters;
 	u8 cir_man, cir_exp, cbs_man, cbs_exp;
 	struct mlx5_aso_wqe *aso_wqe;
+	unsigned long expires;
 	struct mlx5_aso *aso;
 	u64 rate, burst;
 	u8 ds_cnt;
@@ -187,7 +188,12 @@ mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
 	mlx5_aso_post_wqe(aso, true, &aso_wqe->ctrl);
 
 	/* With newer FW, the wait for the first ASO WQE is more than 2us, put the wait 10ms. */
-	err = mlx5_aso_poll_cq(aso, true, 10);
+	expires = jiffies + msecs_to_jiffies(10);
+	do {
+		err = mlx5_aso_poll_cq(aso, true);
+		if (err)
+			usleep_range(2, 10);
+	} while (err && time_is_after_jiffies(expires));
 	mutex_unlock(&flow_meters->aso_lock);
 
 	return err;
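The hunk above, together with the mlx5_aso_poll_cq() changes further down in this diff, removes the sleeping wait from the poll helper itself (so it can be called in atomic context) and moves the bounded 10 ms wait into the one caller that is allowed to sleep. A self-contained userspace analogue of that shape, with invented names (poll_once, poll_with_deadline) rather than mlx5 code, might look like this:

#include <errno.h>
#include <stdio.h>
#include <time.h>

static int attempts_left = 3;

/* Stand-in for a non-blocking poll: returns -EAGAIN until the event shows up. */
static int poll_once(void)
{
	return --attempts_left > 0 ? -EAGAIN : 0;
}

/* Caller-side deadline loop: short naps between polls, bounded total wait. */
static int poll_with_deadline(int timeout_ms)
{
	struct timespec nap = { .tv_sec = 0, .tv_nsec = 10 * 1000 }; /* ~10us */
	struct timespec now, deadline;
	int err;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += timeout_ms / 1000;
	deadline.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	do {
		err = poll_once();
		if (!err)
			return 0;
		nanosleep(&nap, NULL);
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while (now.tv_sec < deadline.tv_sec ||
		 (now.tv_sec == deadline.tv_sec && now.tv_nsec < deadline.tv_nsec));

	return err;
}

int main(void)
{
	printf("poll result: %d\n", poll_with_deadline(10));
	return 0;
}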
@ -1405,7 +1405,7 @@ static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_mac
|
||||
MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
|
||||
macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
|
||||
mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
|
||||
err = mlx5_aso_poll_cq(maso, false, 10);
|
||||
err = mlx5_aso_poll_cq(maso, false);
|
||||
mutex_unlock(&aso->aso_lock);
|
||||
|
||||
return err;
|
||||
@ -1430,7 +1430,7 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac
|
||||
macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
|
||||
|
||||
mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
|
||||
err = mlx5_aso_poll_cq(maso, false, 10);
|
||||
err = mlx5_aso_poll_cq(maso, false);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
|
@ -381,20 +381,12 @@ void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
|
||||
WRITE_ONCE(doorbell_cseg, NULL);
|
||||
}
|
||||
|
||||
int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms)
|
||||
int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data)
|
||||
{
|
||||
struct mlx5_aso_cq *cq = &aso->cq;
|
||||
struct mlx5_cqe64 *cqe;
|
||||
unsigned long expires;
|
||||
|
||||
cqe = mlx5_cqwq_get_cqe(&cq->wq);
|
||||
|
||||
expires = jiffies + msecs_to_jiffies(interval_ms);
|
||||
while (!cqe && time_is_after_jiffies(expires)) {
|
||||
usleep_range(2, 10);
|
||||
cqe = mlx5_cqwq_get_cqe(&cq->wq);
|
||||
}
|
||||
|
||||
if (!cqe)
|
||||
return -ETIMEDOUT;
|
||||
|
||||
|
@ -83,7 +83,7 @@ void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
|
||||
u32 obj_id, u32 opc_mode);
|
||||
void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
|
||||
struct mlx5_wqe_ctrl_seg *doorbell_cseg);
|
||||
int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms);
|
||||
int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data);
|
||||
|
||||
struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn);
|
||||
void mlx5_aso_destroy(struct mlx5_aso *aso);
|
||||
|
@ -373,10 +373,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
|
||||
if (ipv6_tun) {
|
||||
key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
|
||||
key_size +=
|
||||
sizeof(struct nfp_flower_ipv6_udp_tun);
|
||||
sizeof(struct nfp_flower_ipv6_gre_tun);
|
||||
} else {
|
||||
key_size +=
|
||||
sizeof(struct nfp_flower_ipv4_udp_tun);
|
||||
sizeof(struct nfp_flower_ipv4_gre_tun);
|
||||
}
|
||||
|
||||
if (enc_op.key) {
|
||||
|
@@ -2896,8 +2896,8 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
 
 	hpreg_res = devm_request_region(&pdev->dev, pci_resource_start(pdev, 0),
 					pci_resource_len(pdev, 0), DRV_NAME);
-	if (IS_ERR(hpreg_res)) {
-		err = PTR_ERR(hpreg_res);
+	if (!hpreg_res) {
+		err = -EBUSY;
 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
 		goto err_out_clear_quattro;
 	}
@ -2476,7 +2476,10 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
|
||||
port = am65_common_get_port(common, i);
|
||||
dl_port = &port->devlink_port;
|
||||
|
||||
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
|
||||
if (port->ndev)
|
||||
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
|
||||
else
|
||||
attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
|
||||
attrs.phys.port_number = port->port_id;
|
||||
attrs.switch_id.id_len = sizeof(resource_size_t);
|
||||
memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len);
|
||||
|
@ -1051,7 +1051,8 @@ struct net_device_context {
|
||||
u32 vf_alloc;
|
||||
/* Serial number of the VF to team with */
|
||||
u32 vf_serial;
|
||||
|
||||
/* completion variable to confirm vf association */
|
||||
struct completion vf_add;
|
||||
/* Is the current data path through the VF NIC? */
|
||||
bool data_path_is_vf;
|
||||
|
||||
|
@ -1580,6 +1580,10 @@ static void netvsc_send_vf(struct net_device *ndev,
|
||||
|
||||
net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
|
||||
net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
|
||||
|
||||
if (net_device_ctx->vf_alloc)
|
||||
complete(&net_device_ctx->vf_add);
|
||||
|
||||
netdev_info(ndev, "VF slot %u %s\n",
|
||||
net_device_ctx->vf_serial,
|
||||
net_device_ctx->vf_alloc ? "added" : "removed");
|
||||
|
@ -2313,6 +2313,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
|
||||
|
||||
}
|
||||
|
||||
/* Fallback path to check synthetic vf with
|
||||
* help of mac addr
|
||||
*/
|
||||
list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
|
||||
ndev = hv_get_drvdata(ndev_ctx->device_ctx);
|
||||
if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
|
||||
netdev_notice(vf_netdev,
|
||||
"falling back to mac addr based matching\n");
|
||||
return ndev;
|
||||
}
|
||||
}
|
||||
|
||||
netdev_notice(vf_netdev,
|
||||
"no netdev found for vf serial:%u\n", serial);
|
||||
return NULL;
|
||||
@ -2409,6 +2421,11 @@ static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
|
||||
if (net_device_ctx->data_path_is_vf == vf_is_up)
|
||||
return NOTIFY_OK;
|
||||
|
||||
if (vf_is_up && !net_device_ctx->vf_alloc) {
|
||||
netdev_info(ndev, "Waiting for the VF association from host\n");
|
||||
wait_for_completion(&net_device_ctx->vf_add);
|
||||
}
|
||||
|
||||
ret = netvsc_switch_datapath(ndev, vf_is_up);
|
||||
|
||||
if (ret) {
|
||||
@ -2440,6 +2457,7 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
|
||||
|
||||
netvsc_vf_setxdp(vf_netdev, NULL);
|
||||
|
||||
reinit_completion(&net_device_ctx->vf_add);
|
||||
netdev_rx_handler_unregister(vf_netdev);
|
||||
netdev_upper_dev_unlink(vf_netdev, ndev);
|
||||
RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
|
||||
@ -2479,6 +2497,7 @@ static int netvsc_probe(struct hv_device *dev,
|
||||
|
||||
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
|
||||
|
||||
init_completion(&net_device_ctx->vf_add);
|
||||
spin_lock_init(&net_device_ctx->lock);
|
||||
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
|
||||
INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
|
||||
|
@ -1192,7 +1192,7 @@ void macvlan_common_setup(struct net_device *dev)
|
||||
{
|
||||
ether_setup(dev);
|
||||
|
||||
dev->min_mtu = 0;
|
||||
/* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. */
|
||||
dev->max_mtu = ETH_MAX_MTU;
|
||||
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
|
||||
netif_keep_dst(dev);
|
||||
|
@ -1838,7 +1838,7 @@ static int ksz886x_cable_test_start(struct phy_device *phydev)
|
||||
return phy_clear_bits(phydev, MII_BMCR, BMCR_ANENABLE | BMCR_SPEED100);
|
||||
}
|
||||
|
||||
static int ksz886x_cable_test_result_trans(u16 status, u16 mask)
|
||||
static __always_inline int ksz886x_cable_test_result_trans(u16 status, u16 mask)
|
||||
{
|
||||
switch (FIELD_GET(mask, status)) {
|
||||
case KSZ8081_LMD_STAT_NORMAL:
|
||||
@ -1854,13 +1854,13 @@ static int ksz886x_cable_test_result_trans(u16 status, u16 mask)
|
||||
}
|
||||
}
|
||||
|
||||
static bool ksz886x_cable_test_failed(u16 status, u16 mask)
|
||||
static __always_inline bool ksz886x_cable_test_failed(u16 status, u16 mask)
|
||||
{
|
||||
return FIELD_GET(mask, status) ==
|
||||
KSZ8081_LMD_STAT_FAIL;
|
||||
}
|
||||
|
||||
static bool ksz886x_cable_test_fault_length_valid(u16 status, u16 mask)
|
||||
static __always_inline bool ksz886x_cable_test_fault_length_valid(u16 status, u16 mask)
|
||||
{
|
||||
switch (FIELD_GET(mask, status)) {
|
||||
case KSZ8081_LMD_STAT_OPEN:
|
||||
@ -1871,7 +1871,8 @@ static bool ksz886x_cable_test_fault_length_valid(u16 status, u16 mask)
|
||||
return false;
|
||||
}
|
||||
|
||||
static int ksz886x_cable_test_fault_length(struct phy_device *phydev, u16 status, u16 data_mask)
|
||||
static __always_inline int ksz886x_cable_test_fault_length(struct phy_device *phydev,
|
||||
u16 status, u16 data_mask)
|
||||
{
|
||||
int dt;
|
||||
|
||||
|
@ -257,6 +257,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
|
||||
case SFF8024_ECC_100GBASE_SR4_25GBASE_SR:
|
||||
phylink_set(modes, 100000baseSR4_Full);
|
||||
phylink_set(modes, 25000baseSR_Full);
|
||||
__set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
|
||||
break;
|
||||
case SFF8024_ECC_100GBASE_LR4_25GBASE_LR:
|
||||
case SFF8024_ECC_100GBASE_ER4_25GBASE_ER:
|
||||
@ -268,6 +269,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
|
||||
case SFF8024_ECC_25GBASE_CR_S:
|
||||
case SFF8024_ECC_25GBASE_CR_N:
|
||||
phylink_set(modes, 25000baseCR_Full);
|
||||
__set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
|
||||
break;
|
||||
case SFF8024_ECC_10GBASE_T_SFI:
|
||||
case SFF8024_ECC_10GBASE_T_SR:
|
||||
@ -276,6 +278,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
|
||||
break;
|
||||
case SFF8024_ECC_5GBASE_T:
|
||||
phylink_set(modes, 5000baseT_Full);
|
||||
__set_bit(PHY_INTERFACE_MODE_5GBASER, interfaces);
|
||||
break;
|
||||
case SFF8024_ECC_2_5GBASE_T:
|
||||
phylink_set(modes, 2500baseT_Full);
|
||||
|
@ -14,6 +14,7 @@ if PSE_CONTROLLER
|
||||
|
||||
config PSE_REGULATOR
|
||||
tristate "Regulator based PSE controller"
|
||||
depends on REGULATOR || COMPILE_TEST
|
||||
help
|
||||
This module provides support for simple regulator based Ethernet Power
|
||||
Sourcing Equipment without automatic classification support. For
|
||||
|
@ -2081,7 +2081,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
|
||||
struct cfg80211_chan_def def;
|
||||
const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
|
||||
enum nl80211_band band;
|
||||
u16 *he_mcs_mask;
|
||||
u16 he_mcs_mask[NL80211_HE_NSS_MAX];
|
||||
u8 max_nss, he_mcs;
|
||||
u16 he_tx_mcs = 0, v = 0;
|
||||
int i, he_nss, nss_idx;
|
||||
@ -2098,7 +2098,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
|
||||
return;
|
||||
|
||||
band = def.chan->band;
|
||||
he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
|
||||
memcpy(he_mcs_mask, arvif->bitrate_mask.control[band].he_mcs,
|
||||
sizeof(he_mcs_mask));
|
||||
|
||||
if (ath11k_peer_assoc_h_he_masked(he_mcs_mask))
|
||||
return;
|
||||
|
@ -384,6 +384,7 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
iwl_mvm_txq_from_tid(sta, tid);
|
||||
|
||||
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
|
||||
list_del_init(&mvmtxq->list);
|
||||
}
|
||||
|
||||
/* Regardless if this is a reserved TXQ for a STA - mark it as false */
|
||||
@ -478,6 +479,7 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
|
||||
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
|
||||
|
||||
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
|
||||
list_del_init(&mvmtxq->list);
|
||||
}
|
||||
|
||||
mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
|
||||
|
@ -4973,6 +4973,8 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
|
||||
}
|
||||
|
||||
rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
|
||||
if (rx_status.rate_idx >= data2->hw->wiphy->bands[rx_status.band]->n_bitrates)
|
||||
goto out;
|
||||
rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
|
||||
|
||||
hdr = (void *)skb->data;
|
||||
|
@ -696,10 +696,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
|
||||
|
||||
skb_reserve(skb, q->buf_offset);
|
||||
|
||||
if (q == &dev->q_rx[MT_RXQ_MCU]) {
|
||||
u32 *rxfce = (u32 *)skb->cb;
|
||||
*rxfce = info;
|
||||
}
|
||||
*(u32 *)skb->cb = info;
|
||||
|
||||
__skb_put(skb, len);
|
||||
done++;
|
||||
|
@ -345,6 +345,7 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
|
||||
u32 rxd1 = le32_to_cpu(rxd[1]);
|
||||
u32 rxd2 = le32_to_cpu(rxd[2]);
|
||||
u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
|
||||
u32 csum_status = *(u32 *)skb->cb;
|
||||
bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false;
|
||||
u16 hdr_gap;
|
||||
int phy_idx;
|
||||
@ -394,7 +395,8 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
|
||||
spin_unlock_bh(&dev->sta_poll_lock);
|
||||
}
|
||||
|
||||
if ((rxd0 & csum_mask) == csum_mask)
|
||||
if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
|
||||
!(csum_status & (BIT(0) | BIT(2) | BIT(3))))
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
|
||||
if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
|
||||
@ -610,14 +612,14 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
|
||||
* When header translation failure is indicated,
|
||||
* the hardware will insert an extra 2-byte field
|
||||
* containing the data length after the protocol
|
||||
* type field.
|
||||
* type field. This happens either when the LLC-SNAP
|
||||
* pattern did not match, or if a VLAN header was
|
||||
* detected.
|
||||
*/
|
||||
pad_start = 12;
|
||||
if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
|
||||
pad_start += 4;
|
||||
|
||||
if (get_unaligned_be16(skb->data + pad_start) !=
|
||||
skb->len - pad_start - 2)
|
||||
else
|
||||
pad_start = 0;
|
||||
}
|
||||
|
||||
|
@ -233,6 +233,7 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
|
||||
u8 remove_pad, amsdu_info;
|
||||
u8 mode = 0, qos_ctl = 0;
|
||||
struct mt7915_sta *msta = NULL;
|
||||
u32 csum_status = *(u32 *)skb->cb;
|
||||
bool hdr_trans;
|
||||
u16 hdr_gap;
|
||||
u16 seq_ctrl = 0;
|
||||
@ -288,7 +289,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
|
||||
if (!sband->channels)
|
||||
return -EINVAL;
|
||||
|
||||
if ((rxd0 & csum_mask) == csum_mask)
|
||||
if ((rxd0 & csum_mask) == csum_mask &&
|
||||
!(csum_status & (BIT(0) | BIT(2) | BIT(3))))
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
|
||||
if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
|
||||
@ -446,14 +448,14 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
|
||||
* When header translation failure is indicated,
|
||||
* the hardware will insert an extra 2-byte field
|
||||
* containing the data length after the protocol
|
||||
* type field.
|
||||
* type field. This happens either when the LLC-SNAP
|
||||
* pattern did not match, or if a VLAN header was
|
||||
* detected.
|
||||
*/
|
||||
pad_start = 12;
|
||||
if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
|
||||
pad_start += 4;
|
||||
|
||||
if (get_unaligned_be16(skb->data + pad_start) !=
|
||||
skb->len - pad_start - 2)
|
||||
else
|
||||
pad_start = 0;
|
||||
}
|
||||
|
||||
|
@ -230,6 +230,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
|
||||
struct mt76_phy *mphy = &dev->mt76.phy;
|
||||
struct mt7921_phy *phy = &dev->phy;
|
||||
struct ieee80211_supported_band *sband;
|
||||
u32 csum_status = *(u32 *)skb->cb;
|
||||
u32 rxd0 = le32_to_cpu(rxd[0]);
|
||||
u32 rxd1 = le32_to_cpu(rxd[1]);
|
||||
u32 rxd2 = le32_to_cpu(rxd[2]);
|
||||
@ -290,7 +291,8 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
|
||||
if (!sband->channels)
|
||||
return -EINVAL;
|
||||
|
||||
if ((rxd0 & csum_mask) == csum_mask)
|
||||
if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
|
||||
!(csum_status & (BIT(0) | BIT(2) | BIT(3))))
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
|
||||
if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
|
||||
|
@ -60,14 +60,20 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
|
||||
.skb = skb,
|
||||
.info = IEEE80211_SKB_CB(skb),
|
||||
};
|
||||
struct ieee80211_rate_status rs = {};
|
||||
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
|
||||
struct mt76_wcid *wcid;
|
||||
|
||||
wcid = rcu_dereference(dev->wcid[cb->wcid]);
|
||||
if (wcid) {
|
||||
status.sta = wcid_to_sta(wcid);
|
||||
status.rates = NULL;
|
||||
status.n_rates = 0;
|
||||
if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
|
||||
rs.rate_idx = wcid->rate;
|
||||
status.rates = &rs;
|
||||
status.n_rates = 1;
|
||||
} else {
|
||||
status.n_rates = 0;
|
||||
}
|
||||
}
|
||||
|
||||
hw = mt76_tx_status_get_hw(dev, skb);
|
||||
|
@ -3657,6 +3657,7 @@ ptp_ocp_detach_sysfs(struct ptp_ocp *bp)
|
||||
struct device *dev = &bp->dev;
|
||||
|
||||
sysfs_remove_link(&dev->kobj, "ttyGNSS");
|
||||
sysfs_remove_link(&dev->kobj, "ttyGNSS2");
|
||||
sysfs_remove_link(&dev->kobj, "ttyMAC");
|
||||
sysfs_remove_link(&dev->kobj, "ptp");
|
||||
sysfs_remove_link(&dev->kobj, "pps");
|
||||
|
@ -26,7 +26,15 @@ struct compat_iw_point {
|
||||
struct __compat_iw_event {
|
||||
__u16 len; /* Real length of this stuff */
|
||||
__u16 cmd; /* Wireless IOCTL */
|
||||
compat_caddr_t pointer;
|
||||
|
||||
union {
|
||||
compat_caddr_t pointer;
|
||||
|
||||
/* we need ptr_bytes to make memcpy() run-time destination
|
||||
* buffer bounds checking happy, nothing special
|
||||
*/
|
||||
DECLARE_FLEX_ARRAY(__u8, ptr_bytes);
|
||||
};
|
||||
};
|
||||
#define IW_EV_COMPAT_LCP_LEN offsetof(struct __compat_iw_event, pointer)
|
||||
#define IW_EV_COMPAT_POINT_OFF offsetof(struct compat_iw_point, length)
|
||||
|
@ -185,21 +185,27 @@ static inline int
|
||||
ieee802154_sockaddr_check_size(struct sockaddr_ieee802154 *daddr, int len)
|
||||
{
|
||||
struct ieee802154_addr_sa *sa;
|
||||
int ret = 0;
|
||||
|
||||
sa = &daddr->addr;
|
||||
if (len < IEEE802154_MIN_NAMELEN)
|
||||
return -EINVAL;
|
||||
switch (sa->addr_type) {
|
||||
case IEEE802154_ADDR_NONE:
|
||||
break;
|
||||
case IEEE802154_ADDR_SHORT:
|
||||
if (len < IEEE802154_NAMELEN_SHORT)
|
||||
return -EINVAL;
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
case IEEE802154_ADDR_LONG:
|
||||
if (len < IEEE802154_NAMELEN_LONG)
|
||||
return -EINVAL;
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline void ieee802154_addr_from_sa(struct ieee802154_addr *a,
|
||||
|
@ -1182,6 +1182,8 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
|
||||
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
|
||||
void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
|
||||
|
||||
void inet6_cleanup_sock(struct sock *sk);
|
||||
void inet6_sock_destruct(struct sock *sk);
|
||||
int inet6_release(struct socket *sock);
|
||||
int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
|
||||
int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
|
||||
|
@ -247,7 +247,7 @@ static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
|
||||
}
|
||||
|
||||
/* net/ipv4/udp.c */
|
||||
void udp_destruct_sock(struct sock *sk);
|
||||
void udp_destruct_common(struct sock *sk);
|
||||
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
|
||||
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
|
||||
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
|
||||
|
@ -25,14 +25,6 @@ static __inline__ int udplite_getfrag(void *from, char *to, int offset,
|
||||
return copy_from_iter_full(to, len, &msg->msg_iter) ? 0 : -EFAULT;
|
||||
}
|
||||
|
||||
/* Designate sk as UDP-Lite socket */
|
||||
static inline int udplite_sk_init(struct sock *sk)
|
||||
{
|
||||
udp_init_sock(sk);
|
||||
udp_sk(sk)->pcflag = UDPLITE_BIT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Checksumming routines
|
||||
*/
|
||||
|
@@ -3610,7 +3610,8 @@ int sock_common_getsockopt(struct socket *sock, int level, int optname,
 {
 	struct sock *sk = sock->sk;
 
-	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
+	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
+	return READ_ONCE(sk->sk_prot)->getsockopt(sk, level, optname, optval, optlen);
 }
 EXPORT_SYMBOL(sock_common_getsockopt);
 
@@ -3636,7 +3637,8 @@ int sock_common_setsockopt(struct socket *sock, int level, int optname,
 {
 	struct sock *sk = sock->sk;
 
-	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
+	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
+	return READ_ONCE(sk->sk_prot)->setsockopt(sk, level, optname, optval, optlen);
 }
 EXPORT_SYMBOL(sock_common_setsockopt);
 
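The sock.c hunk above is one instance of the pattern used throughout this pull for the IPV6_ADDRFORM races: the writer publishes the new sk->sk_prot with WRITE_ONCE() and every lockless reader fetches it with READ_ONCE(), so the compiler cannot tear or re-load the pointer. Below is a small, runnable userspace analogue using C11 relaxed atomics in place of READ_ONCE()/WRITE_ONCE(); the struct and function names are invented for illustration, this is not net code.

#include <stdatomic.h>
#include <stdio.h>

struct proto {
	const char *name;
};

static const struct proto tcpv6_prot = { .name = "TCPv6" };
static const struct proto tcp_prot   = { .name = "TCP" };

struct sock {
	_Atomic(const struct proto *) sk_prot;
};

/* Writer side: what setsockopt(IPV6_ADDRFORM) does with WRITE_ONCE(). */
static void switch_to_v4(struct sock *sk)
{
	atomic_store_explicit(&sk->sk_prot, &tcp_prot, memory_order_relaxed);
}

/* Reader side: what sock_common_{get,set}sockopt() do with READ_ONCE(). */
static const char *current_proto(struct sock *sk)
{
	const struct proto *prot =
		atomic_load_explicit(&sk->sk_prot, memory_order_relaxed);

	return prot->name;
}

int main(void)
{
	struct sock sk = { .sk_prot = &tcpv6_prot };

	printf("before: %s\n", current_proto(&sk));
	switch_to_v4(&sk);
	printf("after:  %s\n", current_proto(&sk));
	return 0;
}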
@ -1681,7 +1681,7 @@ int dsa_port_phylink_create(struct dsa_port *dp)
|
||||
pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
|
||||
mode, &dsa_port_phylink_mac_ops);
|
||||
if (IS_ERR(pl)) {
|
||||
pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
|
||||
pr_err("error creating PHYLINK: %ld\n", PTR_ERR(pl));
|
||||
return PTR_ERR(pl);
|
||||
}
|
||||
|
||||
|
@ -251,9 +251,6 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
if (!size)
|
||||
return -EINVAL;
|
||||
|
||||
lock_sock(sk);
|
||||
if (!sk->sk_bound_dev_if)
|
||||
dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
|
||||
@ -275,6 +272,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
|
||||
err = -EMSGSIZE;
|
||||
goto out_dev;
|
||||
}
|
||||
if (!size) {
|
||||
err = 0;
|
||||
goto out_dev;
|
||||
}
|
||||
|
||||
hlen = LL_RESERVED_SPACE(dev);
|
||||
tlen = dev->needed_tailroom;
|
||||
|
@ -558,22 +558,27 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
|
||||
int addr_len, int flags)
|
||||
{
|
||||
struct sock *sk = sock->sk;
|
||||
const struct proto *prot;
|
||||
int err;
|
||||
|
||||
if (addr_len < sizeof(uaddr->sa_family))
|
||||
return -EINVAL;
|
||||
|
||||
/* IPV6_ADDRFORM can change sk->sk_prot under us. */
|
||||
prot = READ_ONCE(sk->sk_prot);
|
||||
|
||||
if (uaddr->sa_family == AF_UNSPEC)
|
||||
return sk->sk_prot->disconnect(sk, flags);
|
||||
return prot->disconnect(sk, flags);
|
||||
|
||||
if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
|
||||
err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
|
||||
err = prot->pre_connect(sk, uaddr, addr_len);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
|
||||
return -EAGAIN;
|
||||
return sk->sk_prot->connect(sk, uaddr, addr_len);
|
||||
return prot->connect(sk, uaddr, addr_len);
|
||||
}
|
||||
EXPORT_SYMBOL(inet_dgram_connect);
|
||||
|
||||
@ -734,10 +739,11 @@ EXPORT_SYMBOL(inet_stream_connect);
|
||||
int inet_accept(struct socket *sock, struct socket *newsock, int flags,
|
||||
bool kern)
|
||||
{
|
||||
struct sock *sk1 = sock->sk;
|
||||
struct sock *sk1 = sock->sk, *sk2;
|
||||
int err = -EINVAL;
|
||||
struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err, kern);
|
||||
|
||||
/* IPV6_ADDRFORM can change sk->sk_prot under us. */
|
||||
sk2 = READ_ONCE(sk1->sk_prot)->accept(sk1, flags, &err, kern);
|
||||
if (!sk2)
|
||||
goto do_err;
|
||||
|
||||
@ -825,12 +831,15 @@ ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
|
||||
size_t size, int flags)
|
||||
{
|
||||
struct sock *sk = sock->sk;
|
||||
const struct proto *prot;
|
||||
|
||||
if (unlikely(inet_send_prepare(sk)))
|
||||
return -EAGAIN;
|
||||
|
||||
if (sk->sk_prot->sendpage)
|
||||
return sk->sk_prot->sendpage(sk, page, offset, size, flags);
|
||||
/* IPV6_ADDRFORM can change sk->sk_prot under us. */
|
||||
prot = READ_ONCE(sk->sk_prot);
|
||||
if (prot->sendpage)
|
||||
return prot->sendpage(sk, page, offset, size, flags);
|
||||
return sock_no_sendpage(sock, page, offset, size, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(inet_sendpage);
|
||||
|
@ -888,13 +888,13 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* cannot match on nexthop object attributes */
|
||||
if (fi->nh)
|
||||
return 1;
|
||||
|
||||
if (cfg->fc_oif || cfg->fc_gw_family) {
|
||||
struct fib_nh *nh;
|
||||
|
||||
/* cannot match on nexthop object attributes */
|
||||
if (fi->nh)
|
||||
return 1;
|
||||
|
||||
nh = fib_info_nh(fi, 0);
|
||||
if (cfg->fc_encap) {
|
||||
if (fib_encap_match(net, cfg->fc_encap_type,
|
||||
|
@ -268,8 +268,21 @@ restart_rcu:
|
||||
rcu_read_lock();
|
||||
restart:
|
||||
sk_nulls_for_each_rcu(sk, node, &head->chain) {
|
||||
if (sk->sk_state != TCP_TIME_WAIT)
|
||||
if (sk->sk_state != TCP_TIME_WAIT) {
|
||||
/* A kernel listener socket might not hold refcnt for net,
|
||||
* so reqsk_timer_handler() could be fired after net is
|
||||
* freed. Userspace listener and reqsk never exist here.
|
||||
*/
|
||||
if (unlikely(sk->sk_state == TCP_NEW_SYN_RECV &&
|
||||
hashinfo->pernet)) {
|
||||
struct request_sock *req = inet_reqsk(sk);
|
||||
|
||||
inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
tw = inet_twsk(sk);
|
||||
if ((tw->tw_family != family) ||
|
||||
refcount_read(&twsk_net(tw)->ns.count))
|
||||
|
@ -77,7 +77,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
|
||||
flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
|
||||
flow.flowi4_tos = iph->tos & IPTOS_RT_MASK;
|
||||
flow.flowi4_scope = RT_SCOPE_UNIVERSE;
|
||||
flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
|
||||
flow.flowi4_l3mdev = l3mdev_master_ifindex_rcu(xt_in(par));
|
||||
|
||||
return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
|
||||
}
|
||||
|
@ -84,7 +84,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
|
||||
oif = NULL;
|
||||
|
||||
if (priv->flags & NFTA_FIB_F_IIF)
|
||||
fl4.flowi4_oif = l3mdev_master_ifindex_rcu(oif);
|
||||
fl4.flowi4_l3mdev = l3mdev_master_ifindex_rcu(oif);
|
||||
|
||||
if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
|
||||
nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
|
||||
|
@ -617,21 +617,9 @@ int ping_getfrag(void *from, char *to,
|
||||
{
|
||||
struct pingfakehdr *pfh = from;
|
||||
|
||||
if (offset == 0) {
|
||||
fraglen -= sizeof(struct icmphdr);
|
||||
if (fraglen < 0)
|
||||
BUG();
|
||||
if (!csum_and_copy_from_iter_full(to + sizeof(struct icmphdr),
|
||||
fraglen, &pfh->wcheck,
|
||||
&pfh->msg->msg_iter))
|
||||
return -EFAULT;
|
||||
} else if (offset < sizeof(struct icmphdr)) {
|
||||
BUG();
|
||||
} else {
|
||||
if (!csum_and_copy_from_iter_full(to, fraglen, &pfh->wcheck,
|
||||
&pfh->msg->msg_iter))
|
||||
return -EFAULT;
|
||||
}
|
||||
if (!csum_and_copy_from_iter_full(to, fraglen, &pfh->wcheck,
|
||||
&pfh->msg->msg_iter))
|
||||
return -EFAULT;
|
||||
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
/* For IPv6, checksum each skb as we go along, as expected by
|
||||
@ -639,7 +627,7 @@ int ping_getfrag(void *from, char *to,
|
||||
* wcheck, it will be finalized in ping_v4_push_pending_frames.
|
||||
*/
|
||||
if (pfh->family == AF_INET6) {
|
||||
skb->csum = pfh->wcheck;
|
||||
skb->csum = csum_block_add(skb->csum, pfh->wcheck, odd);
|
||||
skb->ip_summed = CHECKSUM_NONE;
|
||||
pfh->wcheck = 0;
|
||||
}
|
||||
@ -842,7 +830,8 @@ back_from_confirm:
|
||||
pfh.family = AF_INET;
|
||||
|
||||
err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len,
|
||||
0, &ipc, &rt, msg->msg_flags);
|
||||
sizeof(struct icmphdr), &ipc, &rt,
|
||||
msg->msg_flags);
|
||||
if (err)
|
||||
ip_flush_pending_frames(sk);
|
||||
else
|
||||
|
@ -3796,8 +3796,9 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
|
||||
const struct inet_connection_sock *icsk = inet_csk(sk);
|
||||
|
||||
if (level != SOL_TCP)
|
||||
return icsk->icsk_af_ops->setsockopt(sk, level, optname,
|
||||
optval, optlen);
|
||||
/* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */
|
||||
return READ_ONCE(icsk->icsk_af_ops)->setsockopt(sk, level, optname,
|
||||
optval, optlen);
|
||||
return do_tcp_setsockopt(sk, level, optname, optval, optlen);
|
||||
}
|
||||
EXPORT_SYMBOL(tcp_setsockopt);
|
||||
@ -4396,8 +4397,9 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
|
||||
struct inet_connection_sock *icsk = inet_csk(sk);
|
||||
|
||||
if (level != SOL_TCP)
|
||||
return icsk->icsk_af_ops->getsockopt(sk, level, optname,
|
||||
optval, optlen);
|
||||
/* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */
|
||||
return READ_ONCE(icsk->icsk_af_ops)->getsockopt(sk, level, optname,
|
||||
optval, optlen);
|
||||
return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval),
|
||||
USER_SOCKPTR(optlen));
|
||||
}
|
||||
|
@ -375,6 +375,7 @@ static void tcp_cdg_init(struct sock *sk)
|
||||
struct cdg *ca = inet_csk_ca(sk);
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
|
||||
ca->gradients = NULL;
|
||||
/* We silently fall back to window = 1 if allocation fails. */
|
||||
if (window > 1)
|
||||
ca->gradients = kcalloc(window, sizeof(ca->gradients[0]),
|
||||
@ -388,6 +389,7 @@ static void tcp_cdg_release(struct sock *sk)
|
||||
struct cdg *ca = inet_csk_ca(sk);
|
||||
|
||||
kfree(ca->gradients);
|
||||
ca->gradients = NULL;
|
||||
}
|
||||
|
||||
static struct tcp_congestion_ops tcp_cdg __read_mostly = {
|
||||
|
@ -353,13 +353,14 @@ void tcp_twsk_purge(struct list_head *net_exit_list, int family)
|
||||
struct net *net;
|
||||
|
||||
list_for_each_entry(net, net_exit_list, exit_list) {
|
||||
/* The last refcount is decremented in tcp_sk_exit_batch() */
|
||||
if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
|
||||
continue;
|
||||
|
||||
if (net->ipv4.tcp_death_row.hashinfo->pernet) {
|
||||
/* Even if tw_refcount == 1, we must clean up kernel reqsk */
|
||||
inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
|
||||
} else if (!purged_once) {
|
||||
/* The last refcount is decremented in tcp_sk_exit_batch() */
|
||||
if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
|
||||
continue;
|
||||
|
||||
inet_twsk_purge(&tcp_hashinfo, family);
|
||||
purged_once = true;
|
||||
}
|
||||
|
@ -1598,7 +1598,7 @@ drop:
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
|
||||
|
||||
void udp_destruct_sock(struct sock *sk)
|
||||
void udp_destruct_common(struct sock *sk)
|
||||
{
|
||||
/* reclaim completely the forward allocated memory */
|
||||
struct udp_sock *up = udp_sk(sk);
|
||||
@ -1611,10 +1611,14 @@ void udp_destruct_sock(struct sock *sk)
|
||||
kfree_skb(skb);
|
||||
}
|
||||
udp_rmem_release(sk, total, 0, true);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(udp_destruct_common);
|
||||
|
||||
static void udp_destruct_sock(struct sock *sk)
|
||||
{
|
||||
udp_destruct_common(sk);
|
||||
inet_sock_destruct(sk);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(udp_destruct_sock);
|
||||
|
||||
int udp_init_sock(struct sock *sk)
|
||||
{
|
||||
@ -1622,7 +1626,6 @@ int udp_init_sock(struct sock *sk)
|
||||
sk->sk_destruct = udp_destruct_sock;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(udp_init_sock);
|
||||
|
||||
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
|
||||
{
|
||||
|
@ -17,6 +17,14 @@
|
||||
struct udp_table udplite_table __read_mostly;
|
||||
EXPORT_SYMBOL(udplite_table);
|
||||
|
||||
/* Designate sk as UDP-Lite socket */
|
||||
static int udplite_sk_init(struct sock *sk)
|
||||
{
|
||||
udp_init_sock(sk);
|
||||
udp_sk(sk)->pcflag = UDPLITE_BIT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int udplite_rcv(struct sk_buff *skb)
|
||||
{
|
||||
return __udp4_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE);
|
||||
|
@ -109,6 +109,12 @@ static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
|
||||
return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
|
||||
}
|
||||
|
||||
void inet6_sock_destruct(struct sock *sk)
|
||||
{
|
||||
inet6_cleanup_sock(sk);
|
||||
inet_sock_destruct(sk);
|
||||
}
|
||||
|
||||
static int inet6_create(struct net *net, struct socket *sock, int protocol,
|
||||
int kern)
|
||||
{
|
||||
@ -201,7 +207,7 @@ lookup_protocol:
|
||||
inet->hdrincl = 1;
|
||||
}
|
||||
|
||||
sk->sk_destruct = inet_sock_destruct;
|
||||
sk->sk_destruct = inet6_sock_destruct;
|
||||
sk->sk_family = PF_INET6;
|
||||
sk->sk_protocol = protocol;
|
||||
|
||||
@ -510,6 +516,12 @@ void inet6_destroy_sock(struct sock *sk)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(inet6_destroy_sock);
|
||||
|
||||
void inet6_cleanup_sock(struct sock *sk)
|
||||
{
|
||||
inet6_destroy_sock(sk);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(inet6_cleanup_sock);
|
||||
|
||||
/*
|
||||
* This does both peername and sockname.
|
||||
*/
|
||||
|
@ -419,15 +419,18 @@ int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
|
||||
rtnl_lock();
|
||||
sockopt_lock_sock(sk);
|
||||
|
||||
/* Another thread has converted the socket into IPv4 with
|
||||
* IPV6_ADDRFORM concurrently.
|
||||
*/
|
||||
if (unlikely(sk->sk_family != AF_INET6))
|
||||
goto unlock;
|
||||
|
||||
switch (optname) {
|
||||
|
||||
case IPV6_ADDRFORM:
|
||||
if (optlen < sizeof(int))
|
||||
goto e_inval;
|
||||
if (val == PF_INET) {
|
||||
struct ipv6_txoptions *opt;
|
||||
struct sk_buff *pktopt;
|
||||
|
||||
if (sk->sk_type == SOCK_RAW)
|
||||
break;
|
||||
|
||||
@ -458,7 +461,6 @@ int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
|
||||
break;
|
||||
}
|
||||
|
||||
fl6_free_socklist(sk);
|
||||
__ipv6_sock_mc_close(sk);
|
||||
__ipv6_sock_ac_close(sk);
|
||||
|
||||
@ -475,9 +477,10 @@ int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
|
||||
sock_prot_inuse_add(net, sk->sk_prot, -1);
|
||||
sock_prot_inuse_add(net, &tcp_prot, 1);
|
||||
|
||||
/* Paired with READ_ONCE(sk->sk_prot) in net/ipv6/af_inet6.c */
|
||||
/* Paired with READ_ONCE(sk->sk_prot) in inet6_stream_ops */
|
||||
WRITE_ONCE(sk->sk_prot, &tcp_prot);
|
||||
icsk->icsk_af_ops = &ipv4_specific;
|
||||
/* Paired with READ_ONCE() in tcp_(get|set)sockopt() */
|
||||
WRITE_ONCE(icsk->icsk_af_ops, &ipv4_specific);
|
||||
sk->sk_socket->ops = &inet_stream_ops;
|
||||
sk->sk_family = PF_INET;
|
||||
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
|
||||
@ -490,19 +493,19 @@ int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
|
||||
sock_prot_inuse_add(net, sk->sk_prot, -1);
|
||||
sock_prot_inuse_add(net, prot, 1);
|
||||
|
||||
/* Paired with READ_ONCE(sk->sk_prot) in net/ipv6/af_inet6.c */
|
||||
/* Paired with READ_ONCE(sk->sk_prot) in inet6_dgram_ops */
|
||||
WRITE_ONCE(sk->sk_prot, prot);
|
||||
sk->sk_socket->ops = &inet_dgram_ops;
|
||||
sk->sk_family = PF_INET;
|
||||
}
|
||||
opt = xchg((__force struct ipv6_txoptions **)&np->opt,
|
||||
NULL);
|
||||
if (opt) {
|
||||
atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
|
||||
txopt_put(opt);
|
||||
}
|
||||
pktopt = xchg(&np->pktoptions, NULL);
|
||||
kfree_skb(pktopt);
|
||||
|
||||
/* Disable all options not to allocate memory anymore,
|
||||
* but there is still a race. See the lockless path
|
||||
* in udpv6_sendmsg() and ipv6_local_rxpmtu().
|
||||
*/
|
||||
np->rxopt.all = 0;
|
||||
|
||||
inet6_cleanup_sock(sk);
|
||||
|
||||
/*
|
||||
* ... and add it to the refcnt debug socks count
|
||||
@ -994,6 +997,7 @@ done:
|
||||
break;
|
||||
}
|
||||
|
||||
unlock:
|
||||
sockopt_release_sock(sk);
|
||||
if (needs_rtnl)
|
||||
rtnl_unlock();
|
||||
|
@ -37,6 +37,7 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
|
||||
bool ret = false;
|
||||
struct flowi6 fl6 = {
|
||||
.flowi6_iif = LOOPBACK_IFINDEX,
|
||||
.flowi6_l3mdev = l3mdev_master_ifindex_rcu(dev),
|
||||
.flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
|
||||
.flowi6_proto = iph->nexthdr,
|
||||
.daddr = iph->saddr,
|
||||
@ -55,9 +56,7 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
|
||||
if (rpfilter_addr_linklocal(&iph->saddr)) {
|
||||
lookup_flags |= RT6_LOOKUP_F_IFACE;
|
||||
fl6.flowi6_oif = dev->ifindex;
|
||||
/* Set flowi6_oif for vrf devices to lookup route in l3mdev domain. */
|
||||
} else if (netif_is_l3_master(dev) || netif_is_l3_slave(dev) ||
|
||||
(flags & XT_RPFILTER_LOOSE) == 0)
|
||||
} else if ((flags & XT_RPFILTER_LOOSE) == 0)
|
||||
fl6.flowi6_oif = dev->ifindex;
|
||||
|
||||
rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
|
||||
@ -72,9 +71,7 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (rt->rt6i_idev->dev == dev ||
|
||||
l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
|
||||
(flags & XT_RPFILTER_LOOSE))
|
||||
if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
|
||||
ret = true;
|
||||
out:
|
||||
ip6_rt_put(rt);
|
||||
|
@ -41,9 +41,8 @@ static int nft_fib6_flowi_init(struct flowi6 *fl6, const struct nft_fib *priv,
|
||||
if (ipv6_addr_type(&fl6->daddr) & IPV6_ADDR_LINKLOCAL) {
|
||||
lookup_flags |= RT6_LOOKUP_F_IFACE;
|
||||
fl6->flowi6_oif = get_ifindex(dev ? dev : pkt->skb->dev);
|
||||
} else if ((priv->flags & NFTA_FIB_F_IIF) &&
|
||||
(netif_is_l3_master(dev) || netif_is_l3_slave(dev))) {
|
||||
fl6->flowi6_oif = dev->ifindex;
|
||||
} else if (priv->flags & NFTA_FIB_F_IIF) {
|
||||
fl6->flowi6_l3mdev = l3mdev_master_ifindex_rcu(dev);
|
||||
}
|
||||
|
||||
if (ipv6_addr_type(&fl6->saddr) & IPV6_ADDR_UNICAST)
|
||||
|
@ -179,7 +179,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
|
||||
|
||||
lock_sock(sk);
|
||||
err = ip6_append_data(sk, ping_getfrag, &pfh, len,
|
||||
0, &ipc6, &fl6, rt,
|
||||
sizeof(struct icmp6hdr), &ipc6, &fl6, rt,
|
||||
MSG_DONTWAIT);
|
||||
|
||||
if (err) {
|
||||
|
@@ -238,7 +238,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
sin.sin_port = usin->sin6_port;
sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

icsk->icsk_af_ops = &ipv6_mapped;
/* Paired with READ_ONCE() in tcp_(get|set)sockopt() */
WRITE_ONCE(icsk->icsk_af_ops, &ipv6_mapped);
if (sk_is_mptcp(sk))
mptcpv6_handle_mapped(sk, true);
sk->sk_backlog_rcv = tcp_v4_do_rcv;
@@ -250,7 +251,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,

if (err) {
icsk->icsk_ext_hdr_len = exthdrlen;
icsk->icsk_af_ops = &ipv6_specific;
/* Paired with READ_ONCE() in tcp_(get|set)sockopt() */
WRITE_ONCE(icsk->icsk_af_ops, &ipv6_specific);
if (sk_is_mptcp(sk))
mptcpv6_handle_mapped(sk, false);
sk->sk_backlog_rcv = tcp_v6_do_rcv;
@@ -56,6 +56,19 @@
#include <trace/events/skb.h>
#include "udp_impl.h"

static void udpv6_destruct_sock(struct sock *sk)
{
udp_destruct_common(sk);
inet6_sock_destruct(sk);
}

int udpv6_init_sock(struct sock *sk)
{
skb_queue_head_init(&udp_sk(sk)->reader_queue);
sk->sk_destruct = udpv6_destruct_sock;
return 0;
}

static u32 udp6_ehashfn(const struct net *net,
const struct in6_addr *laddr,
const u16 lport,
@@ -1733,7 +1746,7 @@ struct proto udpv6_prot = {
.connect = ip6_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.init = udp_init_sock,
.init = udpv6_init_sock,
.destroy = udpv6_destroy_sock,
.setsockopt = udpv6_setsockopt,
.getsockopt = udpv6_getsockopt,
@@ -12,6 +12,7 @@ int __udp6_lib_rcv(struct sk_buff *, struct udp_table *, int);
int __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int,
__be32, struct udp_table *);

int udpv6_init_sock(struct sock *sk);
int udp_v6_get_port(struct sock *sk, unsigned short snum);
void udp_v6_rehash(struct sock *sk);
@@ -12,6 +12,13 @@
#include <linux/proc_fs.h>
#include "udp_impl.h"

static int udplitev6_sk_init(struct sock *sk)
{
udpv6_init_sock(sk);
udp_sk(sk)->pcflag = UDPLITE_BIT;
return 0;
}

static int udplitev6_rcv(struct sk_buff *skb)
{
return __udp6_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE);
@@ -38,7 +45,7 @@ struct proto udplitev6_prot = {
.connect = ip6_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.init = udplite_sk_init,
.init = udplitev6_sk_init,
.destroy = udpv6_destroy_sock,
.setsockopt = udpv6_setsockopt,
.getsockopt = udpv6_getsockopt,
@@ -1838,10 +1838,10 @@ static int kcm_release(struct socket *sock)
kcm = kcm_sk(sk);
mux = kcm->mux;

lock_sock(sk);
sock_orphan(sk);
kfree_skb(kcm->seq_skb);

lock_sock(sk);
/* Purge queue under lock to avoid race condition with tx_work trying
* to act when queue is nonempty. If tx_work runs after this point
* it will just return.
@@ -1709,6 +1709,14 @@ struct ieee802_11_elems {

/* whether a parse error occurred while retrieving these elements */
bool parse_error;

/*
* scratch buffer that can be used for various element parsing related
* tasks, e.g., element de-fragmentation etc.
*/
size_t scratch_len;
u8 *scratch_pos;
u8 scratch[];
};

static inline struct ieee80211_local *hw_to_local(
@@ -243,7 +243,7 @@ static int ieee80211_can_powered_addr_change(struct ieee80211_sub_if_data *sdata
*/
break;
default:
return -EOPNOTSUPP;
ret = -EOPNOTSUPP;
}

unlock:
@@ -461,7 +461,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
/*
* Stop TX on this interface first.
*/
if (sdata->dev)
if (!local->ops->wake_tx_queue && sdata->dev)
netif_tx_stop_all_queues(sdata->dev);

ieee80211_roc_purge(local, sdata);
@@ -1412,8 +1412,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
sdata->vif.type != NL80211_IFTYPE_STATION);
}

set_bit(SDATA_STATE_RUNNING, &sdata->state);

switch (sdata->vif.type) {
case NL80211_IFTYPE_P2P_DEVICE:
rcu_assign_pointer(local->p2p_sdata, sdata);
@@ -1472,6 +1470,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}

set_bit(SDATA_STATE_RUNNING, &sdata->state);

return 0;
err_del_interface:
drv_remove_interface(local, sdata);
@@ -4409,8 +4409,11 @@ ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata,
he_cap_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY,
ies->data, ies->len);

if (!he_cap_elem)
return false;

/* invalid HE IE */
if (!he_cap_elem || he_cap_elem->datalen < 1 + sizeof(*he_cap)) {
if (he_cap_elem->datalen < 1 + sizeof(*he_cap)) {
sdata_info(sdata,
"Invalid HE elem, Disable HE\n");
return false;
@@ -4676,8 +4679,6 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
}

if (!elems->vht_cap_elem) {
sdata_info(sdata,
"bad VHT capabilities, disabling VHT\n");
*conn_flags |= IEEE80211_CONN_DISABLE_VHT;
vht_oper = NULL;
}
@@ -1978,10 +1978,11 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)

if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS ||
mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
NUM_DEFAULT_BEACON_KEYS) {
cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
skb->data,
skb->len);
NUM_DEFAULT_BEACON_KEYS) {
if (rx->sdata->dev)
cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
skb->data,
skb->len);
return RX_DROP_MONITOR; /* unexpected BIP keyidx */
}

@@ -2131,7 +2132,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
/* either the frame has been decrypted or will be dropped */
status->flag |= RX_FLAG_DECRYPTED;

if (unlikely(ieee80211_is_beacon(fc) && result == RX_DROP_UNUSABLE))
if (unlikely(ieee80211_is_beacon(fc) && result == RX_DROP_UNUSABLE &&
rx->sdata->dev))
cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
skb->data, skb->len);

@@ -4352,6 +4354,7 @@ void ieee80211_check_fast_rx(struct sta_info *sta)
.vif_type = sdata->vif.type,
.control_port_protocol = sdata->control_port_protocol,
}, *old, *new = NULL;
u32 offload_flags;
bool set_offload = false;
bool assign = false;
bool offload;
@@ -4467,10 +4470,10 @@ void ieee80211_check_fast_rx(struct sta_info *sta)
if (assign)
new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);

offload = assign &&
(sdata->vif.offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED);
offload_flags = get_bss_sdata(sdata)->vif.offload_flags;
offload = offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED;

if (offload)
if (assign && offload)
set_offload = !test_and_set_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD);
else
set_offload = test_and_clear_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD);
@@ -4708,7 +4711,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,

if (!(status->rx_flags & IEEE80211_RX_AMSDU)) {
if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
goto drop;
return false;

payload = (void *)(skb->data + snap_offs);
@@ -2319,6 +2319,10 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
u16 len_rthdr;
int hdrlen;

sdata = IEEE80211_DEV_TO_SUB_IF(dev);
if (unlikely(!ieee80211_sdata_running(sdata)))
goto fail;

memset(info, 0, sizeof(*info));
info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_CTL_INJECTED;
@@ -2378,8 +2382,6 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
* This is necessary, for example, for old hostapd versions that
* don't use nl80211-based management TX/RX.
*/
sdata = IEEE80211_DEV_TO_SUB_IF(dev);

list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(tmp_sdata))
continue;
@@ -4169,7 +4171,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
struct sk_buff *next;
int len = skb->len;

if (unlikely(skb->len < ETH_HLEN)) {
if (unlikely(!ieee80211_sdata_running(sdata) || skb->len < ETH_HLEN)) {
kfree_skb(skb);
return;
}
@@ -4566,7 +4568,7 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
struct ieee80211_key *key;
struct sta_info *sta;

if (unlikely(skb->len < ETH_HLEN)) {
if (unlikely(!ieee80211_sdata_running(sdata) || skb->len < ETH_HLEN)) {
kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1445,6 +1445,8 @@ static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len,
for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) {
if (elem->datalen < 2)
continue;
if (elem->data[0] < 1 || elem->data[0] > 8)
continue;

for_each_element(sub, elem->data + 1, elem->datalen - 1) {
u8 new_bssid[ETH_ALEN];
@@ -1504,24 +1506,26 @@ ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
const struct element *non_inherit = NULL;
u8 *nontransmitted_profile;
int nontransmitted_profile_len = 0;
size_t scratch_len = params->len;

elems = kzalloc(sizeof(*elems), GFP_ATOMIC);
elems = kzalloc(sizeof(*elems) + scratch_len, GFP_ATOMIC);
if (!elems)
return NULL;
elems->ie_start = params->start;
elems->total_len = params->len;
elems->scratch_len = scratch_len;
elems->scratch_pos = elems->scratch;

nontransmitted_profile = kmalloc(params->len, GFP_ATOMIC);
if (nontransmitted_profile) {
nontransmitted_profile_len =
ieee802_11_find_bssid_profile(params->start, params->len,
elems, params->bss,
nontransmitted_profile);
non_inherit =
cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
nontransmitted_profile,
nontransmitted_profile_len);
}
nontransmitted_profile = elems->scratch_pos;
nontransmitted_profile_len =
ieee802_11_find_bssid_profile(params->start, params->len,
elems, params->bss,
nontransmitted_profile);
elems->scratch_pos += nontransmitted_profile_len;
elems->scratch_len -= nontransmitted_profile_len;
non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
nontransmitted_profile,
nontransmitted_profile_len);

elems->crc = _ieee802_11_parse_elems_full(params, elems, non_inherit);

@@ -1555,8 +1559,6 @@ ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
offsetofend(struct ieee80211_bssid_index, dtim_count))
elems->dtim_count = elems->bssid_index->dtim_count;

kfree(nontransmitted_profile);

return elems;
}

@@ -2046,7 +2048,7 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
if (he_cap) {
enum nl80211_iftype iftype =
ieee80211_vif_type_p2p(&sdata->vif);
__le16 cap = ieee80211_get_he_6ghz_capa(sband, iftype);
__le16 cap = ieee80211_get_he_6ghz_capa(sband6, iftype);

pos = ieee80211_write_he_6ghz_cap(pos, cap, end);
}
@@ -295,11 +295,12 @@ __must_hold(&net->mctp.keys_lock)
mctp_dev_release_key(key->dev, key);
spin_unlock_irqrestore(&key->lock, flags);

hlist_del(&key->hlist);
hlist_del(&key->sklist);

/* unref for the lists */
mctp_key_unref(key);
if (!hlist_unhashed(&key->hlist)) {
hlist_del_init(&key->hlist);
hlist_del_init(&key->sklist);
/* unref for the lists */
mctp_key_unref(key);
}

kfree_skb(skb);
}
@@ -373,9 +374,17 @@ static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg)

ctl.tag = tag | MCTP_TAG_OWNER | MCTP_TAG_PREALLOC;
if (copy_to_user((void __user *)arg, &ctl, sizeof(ctl))) {
spin_lock_irqsave(&key->lock, flags);
__mctp_key_remove(key, net, flags, MCTP_TRACE_KEY_DROPPED);
unsigned long fl2;
/* Unwind our key allocation: the keys list lock needs to be
* taken before the individual key locks, and we need a valid
* flags value (fl2) to pass to __mctp_key_remove, hence the
* second spin_lock_irqsave() rather than a plain spin_lock().
*/
spin_lock_irqsave(&net->mctp.keys_lock, flags);
spin_lock_irqsave(&key->lock, fl2);
__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_DROPPED);
mctp_key_unref(key);
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
return -EFAULT;
}
@@ -228,12 +228,12 @@ __releases(&key->lock)

if (!key->manual_alloc) {
spin_lock_irqsave(&net->mctp.keys_lock, flags);
hlist_del(&key->hlist);
hlist_del(&key->sklist);
if (!hlist_unhashed(&key->hlist)) {
hlist_del_init(&key->hlist);
hlist_del_init(&key->sklist);
mctp_key_unref(key);
}
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

/* unref for the lists */
mctp_key_unref(key);
}

/* and one for the local reference */
@@ -1015,7 +1015,8 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
* connections which we will commit, we may need to attach
* the helper here.
*/
if (info->commit && info->helper && !nfct_help(ct)) {
if (!nf_ct_is_confirmed(ct) && info->commit &&
info->helper && !nfct_help(ct)) {
int err = __nf_ct_try_assign_helper(ct, info->ct,
GFP_ATOMIC);
if (err)
@@ -2043,14 +2043,12 @@ start_error:

static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
unsigned int ntx = cl - 1;
struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

if (ntx >= dev->num_tx_queues)
if (!dev_queue)
return NULL;

return q->qdiscs[ntx];
return dev_queue->qdisc_sleeping;
}

static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
@@ -13265,7 +13265,9 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev,
wake_mask_size);
if (tok) {
cfg->tokens_size = tokens_size;
memcpy(&cfg->payload_tok, tok, sizeof(*tok) + tokens_size);
cfg->payload_tok = *tok;
memcpy(cfg->payload_tok.token_stream, tok->token_stream,
tokens_size);
}

trig->tcp = cfg;
@@ -143,18 +143,12 @@ static inline void bss_ref_get(struct cfg80211_registered_device *rdev,
lockdep_assert_held(&rdev->bss_lock);

bss->refcount++;
if (bss->pub.hidden_beacon_bss) {
bss = container_of(bss->pub.hidden_beacon_bss,
struct cfg80211_internal_bss,
pub);
bss->refcount++;
}
if (bss->pub.transmitted_bss) {
bss = container_of(bss->pub.transmitted_bss,
struct cfg80211_internal_bss,
pub);
bss->refcount++;
}

if (bss->pub.hidden_beacon_bss)
bss_from_pub(bss->pub.hidden_beacon_bss)->refcount++;

if (bss->pub.transmitted_bss)
bss_from_pub(bss->pub.transmitted_bss)->refcount++;
}

static inline void bss_ref_put(struct cfg80211_registered_device *rdev,
@@ -304,7 +298,8 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
tmp_old = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen);
tmp_old = (tmp_old) ? tmp_old + tmp_old[1] + 2 : ie;

while (tmp_old + tmp_old[1] + 2 - ie <= ielen) {
while (tmp_old + 2 - ie <= ielen &&
tmp_old + tmp_old[1] + 2 - ie <= ielen) {
if (tmp_old[0] == 0) {
tmp_old++;
continue;
@@ -364,7 +359,8 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
* copied to new ie, skip ssid, capability, bssid-index ie
*/
tmp_new = sub_copy;
while (tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) {
while (tmp_new + 2 - sub_copy <= subie_len &&
tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) {
if (!(tmp_new[0] == WLAN_EID_NON_TX_BSSID_CAP ||
tmp_new[0] == WLAN_EID_SSID)) {
memcpy(pos, tmp_new, tmp_new[1] + 2);
@@ -427,6 +423,15 @@ cfg80211_add_nontrans_list(struct cfg80211_bss *trans_bss,

rcu_read_unlock();

/*
* This is a bit weird - it's not on the list, but already on another
* one! The only way that could happen is if there's some BSSID/SSID
* shared by multiple APs in their multi-BSSID profiles, potentially
* with hidden SSID mixed in ... ignore it.
*/
if (!list_empty(&nontrans_bss->nontrans_list))
return -EINVAL;

/* add to the list */
list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list);
return 0;
@@ -1602,6 +1607,23 @@ struct cfg80211_non_tx_bss {
u8 bssid_index;
};

static void cfg80211_update_hidden_bsses(struct cfg80211_internal_bss *known,
const struct cfg80211_bss_ies *new_ies,
const struct cfg80211_bss_ies *old_ies)
{
struct cfg80211_internal_bss *bss;

/* Assign beacon IEs to all sub entries */
list_for_each_entry(bss, &known->hidden_list, hidden_list) {
const struct cfg80211_bss_ies *ies;

ies = rcu_access_pointer(bss->pub.beacon_ies);
WARN_ON(ies != old_ies);

rcu_assign_pointer(bss->pub.beacon_ies, new_ies);
}
}

static bool
cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
struct cfg80211_internal_bss *known,
@@ -1625,7 +1647,6 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
} else if (rcu_access_pointer(new->pub.beacon_ies)) {
const struct cfg80211_bss_ies *old;
struct cfg80211_internal_bss *bss;

if (known->pub.hidden_beacon_bss &&
!list_empty(&known->hidden_list)) {
@@ -1653,16 +1674,7 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
if (old == rcu_access_pointer(known->pub.ies))
rcu_assign_pointer(known->pub.ies, new->pub.beacon_ies);

/* Assign beacon IEs to all sub entries */
list_for_each_entry(bss, &known->hidden_list, hidden_list) {
const struct cfg80211_bss_ies *ies;

ies = rcu_access_pointer(bss->pub.beacon_ies);
WARN_ON(ies != old);

rcu_assign_pointer(bss->pub.beacon_ies,
new->pub.beacon_ies);
}
cfg80211_update_hidden_bsses(known, new->pub.beacon_ies, old);

if (old)
kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
@@ -1739,6 +1751,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
new->refcount = 1;
INIT_LIST_HEAD(&new->hidden_list);
INIT_LIST_HEAD(&new->pub.nontrans_list);
/* we'll set this later if it was non-NULL */
new->pub.transmitted_bss = NULL;

if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
hidden = rb_find_bss(rdev, tmp, BSS_CMP_HIDE_ZLEN);
@@ -2021,10 +2035,15 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
spin_lock_bh(&rdev->bss_lock);
if (cfg80211_add_nontrans_list(non_tx_data->tx_bss,
&res->pub)) {
if (__cfg80211_unlink_bss(rdev, res))
if (__cfg80211_unlink_bss(rdev, res)) {
rdev->bss_generation++;
res = NULL;
}
}
spin_unlock_bh(&rdev->bss_lock);

if (!res)
return NULL;
}

trace_cfg80211_return_bss(&res->pub);
@@ -2143,6 +2162,8 @@ static void cfg80211_parse_mbssid_data(struct wiphy *wiphy,
for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, ie, ielen) {
if (elem->datalen < 4)
continue;
if (elem->data[0] < 1 || (int)elem->data[0] > 8)
continue;
for_each_element(sub, elem->data + 1, elem->datalen - 1) {
u8 profile_len;

@@ -2279,7 +2300,7 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
size_t new_ie_len;
struct cfg80211_bss_ies *new_ies;
const struct cfg80211_bss_ies *old;
u8 cpy_len;
size_t cpy_len;

lockdep_assert_held(&wiphy_to_rdev(wiphy)->bss_lock);

@@ -2346,6 +2367,8 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
} else {
old = rcu_access_pointer(nontrans_bss->beacon_ies);
rcu_assign_pointer(nontrans_bss->beacon_ies, new_ies);
cfg80211_update_hidden_bsses(bss_from_pub(nontrans_bss),
new_ies, old);
rcu_assign_pointer(nontrans_bss->ies, new_ies);
if (old)
kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
@@ -559,7 +559,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
return -1;

hdrlen = ieee80211_hdrlen(hdr->frame_control) + data_offset;
if (skb->len < hdrlen + 8)
if (skb->len < hdrlen)
return -1;

/* convert IEEE 802.11 header + possible LLC headers into Ethernet
@@ -574,8 +574,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
memcpy(tmp.h_dest, ieee80211_get_DA(hdr), ETH_ALEN);
memcpy(tmp.h_source, ieee80211_get_SA(hdr), ETH_ALEN);

if (iftype == NL80211_IFTYPE_MESH_POINT)
skb_copy_bits(skb, hdrlen, &mesh_flags, 1);
if (iftype == NL80211_IFTYPE_MESH_POINT &&
skb_copy_bits(skb, hdrlen, &mesh_flags, 1) < 0)
return -1;

mesh_flags &= MESH_FLAGS_AE;

@@ -595,11 +596,12 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
if (iftype == NL80211_IFTYPE_MESH_POINT) {
if (mesh_flags == MESH_FLAGS_AE_A4)
return -1;
if (mesh_flags == MESH_FLAGS_AE_A5_A6) {
skb_copy_bits(skb, hdrlen +
offsetof(struct ieee80211s_hdr, eaddr1),
tmp.h_dest, 2 * ETH_ALEN);
}
if (mesh_flags == MESH_FLAGS_AE_A5_A6 &&
skb_copy_bits(skb, hdrlen +
offsetof(struct ieee80211s_hdr, eaddr1),
tmp.h_dest, 2 * ETH_ALEN) < 0)
return -1;

hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags);
}
break;
@@ -613,10 +615,11 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
if (iftype == NL80211_IFTYPE_MESH_POINT) {
if (mesh_flags == MESH_FLAGS_AE_A5_A6)
return -1;
if (mesh_flags == MESH_FLAGS_AE_A4)
skb_copy_bits(skb, hdrlen +
offsetof(struct ieee80211s_hdr, eaddr1),
tmp.h_source, ETH_ALEN);
if (mesh_flags == MESH_FLAGS_AE_A4 &&
skb_copy_bits(skb, hdrlen +
offsetof(struct ieee80211s_hdr, eaddr1),
tmp.h_source, ETH_ALEN) < 0)
return -1;
hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags);
}
break;
@@ -628,16 +631,15 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
break;
}

skb_copy_bits(skb, hdrlen, &payload, sizeof(payload));
tmp.h_proto = payload.proto;

if (likely((!is_amsdu && ether_addr_equal(payload.hdr, rfc1042_header) &&
tmp.h_proto != htons(ETH_P_AARP) &&
tmp.h_proto != htons(ETH_P_IPX)) ||
ether_addr_equal(payload.hdr, bridge_tunnel_header))) {
if (likely(skb_copy_bits(skb, hdrlen, &payload, sizeof(payload)) == 0 &&
((!is_amsdu && ether_addr_equal(payload.hdr, rfc1042_header) &&
payload.proto != htons(ETH_P_AARP) &&
payload.proto != htons(ETH_P_IPX)) ||
ether_addr_equal(payload.hdr, bridge_tunnel_header)))) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and
* replace EtherType */
hdrlen += ETH_ALEN + 2;
tmp.h_proto = payload.proto;
skb_postpull_rcsum(skb, &payload, ETH_ALEN + 2);
} else {
tmp.h_proto = htons(skb->len - hdrlen);
@@ -468,6 +468,7 @@ void wireless_send_event(struct net_device * dev,
struct __compat_iw_event *compat_event;
struct compat_iw_point compat_wrqu;
struct sk_buff *compskb;
int ptr_len;
#endif

/*
@@ -582,6 +583,9 @@ void wireless_send_event(struct net_device * dev,
nlmsg_end(skb, nlh);
#ifdef CONFIG_COMPAT
hdr_len = compat_event_type_size[descr->header_type];

/* ptr_len is remaining size in event header apart from LCP */
ptr_len = hdr_len - IW_EV_COMPAT_LCP_LEN;
event_len = hdr_len + extra_len;

compskb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
@@ -612,16 +616,15 @@ void wireless_send_event(struct net_device * dev,
if (descr->header_type == IW_HEADER_TYPE_POINT) {
compat_wrqu.length = wrqu->data.length;
compat_wrqu.flags = wrqu->data.flags;
memcpy(&compat_event->pointer,
((char *) &compat_wrqu) + IW_EV_COMPAT_POINT_OFF,
hdr_len - IW_EV_COMPAT_LCP_LEN);
memcpy(compat_event->ptr_bytes,
((char *)&compat_wrqu) + IW_EV_COMPAT_POINT_OFF,
ptr_len);
if (extra_len)
memcpy(((char *) compat_event) + hdr_len,
extra, extra_len);
memcpy(&compat_event->ptr_bytes[ptr_len],
extra, extra_len);
} else {
/* extra_len must be zero, so no if (extra) needed */
memcpy(&compat_event->pointer, wrqu,
hdr_len - IW_EV_COMPAT_LCP_LEN);
memcpy(compat_event->ptr_bytes, wrqu, ptr_len);
}

nlmsg_end(compskb, nlh);
@@ -1223,6 +1223,11 @@ ipv4_fcnal()
log_test $rc 0 "Delete nexthop route warning"
run_cmd "$IP route delete 172.16.101.1/32 nhid 12"
run_cmd "$IP nexthop del id 12"

run_cmd "$IP nexthop add id 21 via 172.16.1.6 dev veth1"
run_cmd "$IP ro add 172.16.101.0/24 nhid 21"
run_cmd "$IP ro del 172.16.101.0/24 nexthop via 172.16.1.7 dev veth1 nexthop via 172.16.1.8 dev veth1"
log_test $? 2 "Delete multipath route with only nh id based entry"
}

ipv4_grp_fcnal()
@@ -6,7 +6,7 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
nft_concat_range.sh nft_conntrack_helper.sh \
nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
conntrack_vrf.sh nft_synproxy.sh
conntrack_vrf.sh nft_synproxy.sh rpath.sh

CFLAGS += $(shell pkg-config --cflags libmnl 2>/dev/null || echo "-I/usr/include/libmnl")
LDLIBS = -lmnl
@@ -188,6 +188,7 @@ test_ping() {
ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
ip netns exec ${nsrouter} sysctl net.ipv4.conf.all.rp_filter=0 > /dev/null
ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.rp_filter=0 > /dev/null

sleep 3
tools/testing/selftests/netfilter/rpath.sh (new executable file, 147 lines)
@@ -0,0 +1,147 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0

# return code to signal skipped test
ksft_skip=4

# search for legacy iptables (it uses the xtables extensions)
if iptables-legacy --version >/dev/null 2>&1; then
iptables='iptables-legacy'
elif iptables --version >/dev/null 2>&1; then
iptables='iptables'
else
iptables=''
fi

if ip6tables-legacy --version >/dev/null 2>&1; then
ip6tables='ip6tables-legacy'
elif ip6tables --version >/dev/null 2>&1; then
ip6tables='ip6tables'
else
ip6tables=''
fi

if nft --version >/dev/null 2>&1; then
nft='nft'
else
nft=''
fi

if [ -z "$iptables$ip6tables$nft" ]; then
echo "SKIP: Test needs iptables, ip6tables or nft"
exit $ksft_skip
fi
sfx=$(mktemp -u "XXXXXXXX")
|
||||
ns1="ns1-$sfx"
|
||||
ns2="ns2-$sfx"
|
||||
trap "ip netns del $ns1; ip netns del $ns2" EXIT
|
||||
|
||||
# create two netns, disable rp_filter in ns2 and
|
||||
# keep IPv6 address when moving into VRF
|
||||
ip netns add "$ns1"
|
||||
ip netns add "$ns2"
|
||||
ip netns exec "$ns2" sysctl -q net.ipv4.conf.all.rp_filter=0
|
||||
ip netns exec "$ns2" sysctl -q net.ipv4.conf.default.rp_filter=0
|
||||
ip netns exec "$ns2" sysctl -q net.ipv6.conf.all.keep_addr_on_down=1
|
||||
|
||||
# a standard connection between the netns, should not trigger rp filter
|
||||
ip -net "$ns1" link add v0 type veth peer name v0 netns "$ns2"
|
||||
ip -net "$ns1" link set v0 up; ip -net "$ns2" link set v0 up
|
||||
ip -net "$ns1" a a 192.168.23.2/24 dev v0
|
||||
ip -net "$ns2" a a 192.168.23.1/24 dev v0
|
||||
ip -net "$ns1" a a fec0:23::2/64 dev v0 nodad
|
||||
ip -net "$ns2" a a fec0:23::1/64 dev v0 nodad
|
||||
|
||||
# rp filter testing: ns1 sends packets via v0 which ns2 would route back via d0
|
||||
ip -net "$ns2" link add d0 type dummy
|
||||
ip -net "$ns2" link set d0 up
|
||||
ip -net "$ns1" a a 192.168.42.2/24 dev v0
|
||||
ip -net "$ns2" a a 192.168.42.1/24 dev d0
|
||||
ip -net "$ns1" a a fec0:42::2/64 dev v0 nodad
|
||||
ip -net "$ns2" a a fec0:42::1/64 dev d0 nodad
|
||||
|
||||
# firewall matches to test
|
||||
ip netns exec "$ns2" "$iptables" -t raw -A PREROUTING -s 192.168.0.0/16 -m rpfilter
|
||||
ip netns exec "$ns2" "$ip6tables" -t raw -A PREROUTING -s fec0::/16 -m rpfilter
|
||||
ip netns exec "$ns2" nft -f - <<EOF
|
||||
table inet t {
|
||||
chain c {
|
||||
type filter hook prerouting priority raw;
|
||||
ip saddr 192.168.0.0/16 fib saddr . iif oif exists counter
|
||||
ip6 saddr fec0::/16 fib saddr . iif oif exists counter
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
die() {
echo "FAIL: $*"
#ip netns exec "$ns2" "$iptables" -t raw -vS
#ip netns exec "$ns2" "$ip6tables" -t raw -vS
#ip netns exec "$ns2" nft list ruleset
exit 1
}

# check rule counters, return true if rule did not match
ipt_zero_rule() { # (command)
[ -n "$1" ] || return 0
ip netns exec "$ns2" "$1" -t raw -vS | grep -q -- "-m rpfilter -c 0 0"
}
nft_zero_rule() { # (family)
[ -n "$nft" ] || return 0
ip netns exec "$ns2" "$nft" list chain inet t c | \
grep -q "$1 saddr .* counter packets 0 bytes 0"
}

netns_ping() { # (netns, args...)
local netns="$1"
shift
ip netns exec "$netns" ping -q -c 1 -W 1 "$@" >/dev/null
}

testrun() {
# clear counters first
[ -n "$iptables" ] && ip netns exec "$ns2" "$iptables" -t raw -Z
[ -n "$ip6tables" ] && ip netns exec "$ns2" "$ip6tables" -t raw -Z
if [ -n "$nft" ]; then
(
echo "delete table inet t";
ip netns exec "$ns2" nft -s list table inet t;
) | ip netns exec "$ns2" nft -f -
fi

# test 1: martian traffic should fail rpfilter matches
netns_ping "$ns1" -I v0 192.168.42.1 && \
die "martian ping 192.168.42.1 succeeded"
netns_ping "$ns1" -I v0 fec0:42::1 && \
die "martian ping fec0:42::1 succeeded"

ipt_zero_rule "$iptables" || die "iptables matched martian"
ipt_zero_rule "$ip6tables" || die "ip6tables matched martian"
nft_zero_rule ip || die "nft IPv4 matched martian"
nft_zero_rule ip6 || die "nft IPv6 matched martian"

# test 2: rpfilter match should pass for regular traffic
netns_ping "$ns1" 192.168.23.1 || \
die "regular ping 192.168.23.1 failed"
netns_ping "$ns1" fec0:23::1 || \
die "regular ping fec0:23::1 failed"

ipt_zero_rule "$iptables" && die "iptables match not effective"
ipt_zero_rule "$ip6tables" && die "ip6tables match not effective"
nft_zero_rule ip && die "nft IPv4 match not effective"
nft_zero_rule ip6 && die "nft IPv6 match not effective"
}

testrun

# repeat test with vrf device in $ns2
ip -net "$ns2" link add vrf0 type vrf table 10
ip -net "$ns2" link set vrf0 up
ip -net "$ns2" link set v0 master vrf0

testrun

echo "PASS: netfilter reverse path match works as intended"
exit 0