mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 22:21:40 +00:00
Networking fixes for 5.16-final, including fixes from bpf and WiFi.

Merge tag 'net-5.16-final' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Networking fixes, including fixes from bpf and WiFi. One last pull
  request, turns out some of the recent fixes did more harm than good.

  Current release - regressions:

   - Revert "xsk: Do not sleep in poll() when need_wakeup set", made
     the problem worse

   - Revert "net: phy: fixed_phy: Fix NULL vs IS_ERR() checking in
     __fixed_phy_register", broke EPROBE_DEFER handling

   - Revert "net: usb: r8152: Add MAC pass-through support for more
     Lenovo Docks", broke setups without a Lenovo dock

  Current release - new code bugs:

   - selftests: set amt.sh executable

  Previous releases - regressions:

   - batman-adv: mcast: don't send link-local multicast to mcast
     routers

  Previous releases - always broken:

   - ipv4/ipv6: check attribute length for RTA_FLOW / RTA_GATEWAY

   - sctp: hold endpoint before calling cb in
     sctp_transport_lookup_process

   - mac80211: mesh: embed mesh_paths and mpp_paths into
     ieee80211_if_mesh to avoid complicated handling of sub-object
     allocation failures

   - seg6: fix traceroute in the presence of SRv6

   - tipc: fix a kernel-infoleak in __tipc_sendmsg()"

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

* tag 'net-5.16-final' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (36 commits)
  selftests: set amt.sh executable
  Revert "net: usb: r8152: Add MAC passthrough support for more Lenovo Docks"
  sfc: The RX page_ring is optional
  iavf: Fix limit of total number of queues to active queues of VF
  i40e: Fix incorrect netdev's real number of RX/TX queues
  i40e: Fix for displaying message regarding NVM version
  i40e: fix use-after-free in i40e_sync_filters_subtask()
  i40e: Fix to not show opcode msg on unsuccessful VF MAC change
  ieee802154: atusb: fix uninit value in atusb_set_extended_addr
  mac80211: mesh: embedd mesh_paths and mpp_paths into ieee80211_if_mesh
  mac80211: initialize variable have_higher_than_11mbit
  sch_qfq: prevent shift-out-of-bounds in qfq_init_qdisc
  netrom: fix copying in user data in nr_setsockopt
  udp6: Use Segment Routing Header for dest address if present
  icmp: ICMPV6: Examine invoking packet for Segment Route Headers.
  seg6: export get_srh() for ICMP handling
  Revert "net: phy: fixed_phy: Fix NULL vs IS_ERR() checking in __fixed_phy_register"
  ipv6: Do cleanup if attribute validation fails in multipath route
  ipv6: Continue processing multipath route even if gateway attribute is invalid
  net/fsl: Remove leftover definition in xgmac_mdio
  ...
This commit is contained in: commit 75acfdb6fd
drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1288,26 +1288,22 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
 
 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
 {
-	struct ena_tx_buffer *tx_info = NULL;
+	struct ena_tx_buffer *tx_info;
 
-	if (likely(req_id < tx_ring->ring_size)) {
-		tx_info = &tx_ring->tx_buffer_info[req_id];
-		if (likely(tx_info->skb))
-			return 0;
-	}
+	tx_info = &tx_ring->tx_buffer_info[req_id];
+	if (likely(tx_info->skb))
+		return 0;
 
 	return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
 }
 
 static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
 {
-	struct ena_tx_buffer *tx_info = NULL;
+	struct ena_tx_buffer *tx_info;
 
-	if (likely(req_id < xdp_ring->ring_size)) {
-		tx_info = &xdp_ring->tx_buffer_info[req_id];
-		if (likely(tx_info->xdpf))
-			return 0;
-	}
+	tx_info = &xdp_ring->tx_buffer_info[req_id];
+	if (likely(tx_info->xdpf))
+		return 0;
 
 	return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
 }
@@ -1332,9 +1328,14 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
 
 		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
 						&req_id);
-		if (rc)
+		if (rc) {
+			if (unlikely(rc == -EINVAL))
+				handle_invalid_req_id(tx_ring, req_id, NULL,
+						      false);
 			break;
+		}
 
 		/* validate that the request id points to a valid skb */
 		rc = validate_tx_req_id(tx_ring, req_id);
 		if (rc)
 			break;
@@ -1427,6 +1428,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 				  u16 *next_to_clean)
 {
 	struct ena_rx_buffer *rx_info;
+	struct ena_adapter *adapter;
 	u16 len, req_id, buf = 0;
 	struct sk_buff *skb;
 	void *page_addr;
@@ -1439,8 +1441,14 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 	rx_info = &rx_ring->rx_buffer_info[req_id];
 
 	if (unlikely(!rx_info->page)) {
-		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
-			  "Page is NULL\n");
+		adapter = rx_ring->adapter;
+		netif_err(adapter, rx_err, rx_ring->netdev,
+			  "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
+		ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
+		adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+		/* Make sure reset reason is set before triggering the reset */
+		smp_mb__before_atomic();
+		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
 		return NULL;
 	}
 
@@ -1896,9 +1904,14 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
 
 		rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
 						&req_id);
-		if (rc)
+		if (rc) {
+			if (unlikely(rc == -EINVAL))
+				handle_invalid_req_id(xdp_ring, req_id, NULL,
+						      true);
 			break;
+		}
 
 		/* validate that the request id points to a valid xdp_frame */
 		rc = validate_xdp_req_id(xdp_ring, req_id);
 		if (rc)
 			break;
@@ -4013,10 +4026,6 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
 	max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
 	/* 1 IRQ for mgmnt and 1 IRQs for each IO direction */
 	max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
-	if (unlikely(!max_num_io_queues)) {
-		dev_err(&pdev->dev, "The device doesn't have io queues\n");
-		return -EFAULT;
-	}
 
 	return max_num_io_queues;
 }
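The shape of the ENA change is worth calling out: the request-id bounds check moves into the completion helper (which reports -EINVAL), so the validator only checks that the slot is in use. Below is a minimal userspace sketch of that pattern, not driver code; all names (ring, tx_slot, comp_req_id_get) are illustrative stand-ins.

#include <stdio.h>

#define RING_SIZE 8

struct tx_slot { void *skb; };

struct ring {
	struct tx_slot slots[RING_SIZE];
	unsigned int size;
};

/* Bounds check lives here, mirroring where ena_com_tx_comp_req_id_get()
 * reports -EINVAL in the patched driver. */
static int comp_req_id_get(const struct ring *r, unsigned short *req_id)
{
	if (*req_id >= r->size)
		return -22; /* -EINVAL */
	return 0;
}

/* With the range already checked, the validator only asks whether the
 * slot actually holds a packet. */
static int validate_req_id(struct ring *r, unsigned short req_id)
{
	struct tx_slot *slot = &r->slots[req_id];

	if (slot->skb)
		return 0;

	fprintf(stderr, "invalid req_id %u: slot empty\n", req_id);
	return -1; /* the driver would trigger a device reset here */
}

int main(void)
{
	struct ring r = { .size = RING_SIZE };
	unsigned short id = 3;

	r.slots[3].skb = &r; /* pretend slot 3 holds a packet */

	if (comp_req_id_get(&r, &id) == 0 && validate_req_id(&r, id) == 0)
		puts("req_id 3 valid");
	return 0;
}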
drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -47,7 +47,6 @@ struct tgec_mdio_controller {
 #define MDIO_CTL_READ		BIT(15)
 
 #define MDIO_DATA(x)		(x & 0xffff)
-#define MDIO_DATA_BSY		BIT(31)
 
 struct mdio_fsl_priv {
 	struct tgec_mdio_controller __iomem *mdio_base;
drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -99,6 +99,24 @@ MODULE_LICENSE("GPL v2");
 
 static struct workqueue_struct *i40e_wq;
 
+static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
+				  struct net_device *netdev, int delta)
+{
+	struct netdev_hw_addr *ha;
+
+	if (!f || !netdev)
+		return;
+
+	netdev_for_each_mc_addr(ha, netdev) {
+		if (ether_addr_equal(ha->addr, f->macaddr)) {
+			ha->refcount += delta;
+			if (ha->refcount <= 0)
+				ha->refcount = 1;
+			break;
+		}
+	}
+}
+
 /**
  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
  * @hw:   pointer to the HW structure
@@ -2036,6 +2054,7 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
 	hlist_for_each_entry_safe(new, h, from, hlist) {
 		/* We can simply free the wrapper structure */
 		hlist_del(&new->hlist);
+		netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
 		kfree(new);
 	}
 }
@@ -2383,6 +2402,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 							    &tmp_add_list,
 							    &tmp_del_list,
 							    vlan_filters);
+
+		hlist_for_each_entry(new, &tmp_add_list, hlist)
+			netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
+
 		if (retval)
 			goto err_no_memory_locked;
 
@@ -2515,6 +2538,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 			if (new->f->state == I40E_FILTER_NEW)
 				new->f->state = new->state;
 			hlist_del(&new->hlist);
+			netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
 			kfree(new);
 		}
 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -8716,6 +8740,27 @@ int i40e_open(struct net_device *netdev)
 	return 0;
 }
 
+/**
+ * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
+ * @vsi: vsi structure
+ *
+ * This updates netdev's number of tx/rx queues
+ *
+ * Returns status of setting tx/rx queues
+ **/
+static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
+{
+	int ret;
+
+	ret = netif_set_real_num_rx_queues(vsi->netdev,
+					   vsi->num_queue_pairs);
+	if (ret)
+		return ret;
+
+	return netif_set_real_num_tx_queues(vsi->netdev,
+					    vsi->num_queue_pairs);
+}
+
 /**
  * i40e_vsi_open -
  * @vsi: the VSI to open
@@ -8752,13 +8797,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
 		goto err_setup_rx;
 
 	/* Notify the stack of the actual queue counts. */
-	err = netif_set_real_num_tx_queues(vsi->netdev,
-					   vsi->num_queue_pairs);
-	if (err)
-		goto err_set_queues;
-
-	err = netif_set_real_num_rx_queues(vsi->netdev,
-					   vsi->num_queue_pairs);
+	err = i40e_netif_set_realnum_tx_rx_queues(vsi);
 	if (err)
 		goto err_set_queues;
 
@@ -14149,6 +14188,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 	case I40E_VSI_MAIN:
 	case I40E_VSI_VMDQ2:
 		ret = i40e_config_netdev(vsi);
+		if (ret)
+			goto err_netdev;
+		ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
 		if (ret)
 			goto err_netdev;
 		ret = register_netdev(vsi->netdev);
@@ -15451,8 +15493,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
 	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
-		dev_info(&pdev->dev,
-			 "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
+		dev_dbg(&pdev->dev,
+			"The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
			 hw->aq.api_maj_ver,
			 hw->aq.api_min_ver,
			 I40E_FW_API_VERSION_MAJOR,
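The use-after-free fix above hinges on keeping the netdev's multicast refcounts in step with the driver's temporary filter lists, and on never letting a refcount hit zero while the address is still in use. A minimal userspace sketch of that guard follows; the types and names are illustrative, not the driver's.

#include <stdio.h>
#include <string.h>

struct hw_addr { unsigned char addr[6]; int refcount; };

/* Adjust the matching entry's refcount by delta, clamping at 1 so an
 * over-aggressive "undo" cannot free an address that is still live. */
static void hw_addr_refcnt(struct hw_addr *list, int n,
			   const unsigned char *mac, int delta)
{
	for (int i = 0; i < n; i++) {
		if (!memcmp(list[i].addr, mac, 6)) {
			list[i].refcount += delta;
			if (list[i].refcount <= 0)
				list[i].refcount = 1; /* clamp: avoid premature free */
			break;
		}
	}
}

int main(void)
{
	struct hw_addr list[1] = { { {1, 2, 3, 4, 5, 6}, 1 } };
	unsigned char mac[6] = {1, 2, 3, 4, 5, 6};

	hw_addr_refcnt(list, 1, mac, 1);   /* filter queued for add */
	hw_addr_refcnt(list, 1, mac, -1);  /* add failed, undo */
	printf("refcount = %d\n", list[0].refcount); /* stays >= 1 */
	return 0;
}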
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1877,17 +1877,19 @@ sriov_configure_out:
 /***********************virtual channel routines******************/
 
 /**
- * i40e_vc_send_msg_to_vf
+ * i40e_vc_send_msg_to_vf_ex
  * @vf: pointer to the VF info
  * @v_opcode: virtual channel opcode
  * @v_retval: virtual channel return value
  * @msg: pointer to the msg buffer
  * @msglen: msg length
+ * @is_quiet: true for not printing unsuccessful return values, false otherwise
  *
  * send msg to VF
 **/
-static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
-				  u32 v_retval, u8 *msg, u16 msglen)
+static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
+				     u32 v_retval, u8 *msg, u16 msglen,
+				     bool is_quiet)
 {
 	struct i40e_pf *pf;
 	struct i40e_hw *hw;
@@ -1903,7 +1905,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
 	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
 
 	/* single place to detect unsuccessful return values */
-	if (v_retval) {
+	if (v_retval && !is_quiet) {
 		vf->num_invalid_msgs++;
 		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
 			 vf->vf_id, v_opcode, v_retval);
@@ -1933,6 +1935,23 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
 	return 0;
 }
 
+/**
+ * i40e_vc_send_msg_to_vf
+ * @vf: pointer to the VF info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to VF
+ **/
+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+				  u32 v_retval, u8 *msg, u16 msglen)
+{
+	return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval,
+					 msg, msglen, false);
+}
+
 /**
  * i40e_vc_send_resp_to_vf
  * @vf: pointer to the VF info
@@ -2695,6 +2714,7 @@ error_param:
  * i40e_check_vf_permission
  * @vf: pointer to the VF info
  * @al: MAC address list from virtchnl
+ * @is_quiet: set true for printing msg without opcode info, false otherwise
 *
 * Check that the given list of MAC addresses is allowed. Will return -EPERM
 * if any address in the list is not valid. Checks the following conditions:
@@ -2709,13 +2729,15 @@ error_param:
 * addresses might not be accurate.
 **/
 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
-					   struct virtchnl_ether_addr_list *al)
+					   struct virtchnl_ether_addr_list *al,
+					   bool *is_quiet)
 {
 	struct i40e_pf *pf = vf->pf;
 	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
 	int mac2add_cnt = 0;
 	int i;
 
+	*is_quiet = false;
 	for (i = 0; i < al->num_elements; i++) {
 		struct i40e_mac_filter *f;
 		u8 *addr = al->list[i].addr;
@@ -2739,6 +2761,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
 		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
 			dev_err(&pf->pdev->dev,
 				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+			*is_quiet = true;
 			return -EPERM;
 		}
 
@@ -2775,6 +2798,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 		(struct virtchnl_ether_addr_list *)msg;
 	struct i40e_pf *pf = vf->pf;
 	struct i40e_vsi *vsi = NULL;
+	bool is_quiet = false;
 	i40e_status ret = 0;
 	int i;
 
@@ -2791,7 +2815,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 	 */
 	spin_lock_bh(&vsi->mac_filter_hash_lock);
 
-	ret = i40e_check_vf_permission(vf, al);
+	ret = i40e_check_vf_permission(vf, al, &is_quiet);
 	if (ret) {
 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
 		goto error_param;
@@ -2829,8 +2853,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 
 error_param:
 	/* send the response to the VF */
-	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
-				       ret);
+	return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+				       ret, NULL, 0, is_quiet);
 }
 
 /**
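The virtchnl change uses a common extension idiom: add an "_ex" variant that carries the new flag, and keep the old function as a thin wrapper so every existing caller is untouched. A hedged userspace sketch of that pattern (names are illustrative, not the driver's API):

#include <stdio.h>
#include <stdbool.h>

static int send_msg_ex(int vf_id, int opcode, int retval, bool is_quiet)
{
	/* log only the failures the caller did not declare as expected */
	if (retval && !is_quiet)
		fprintf(stderr, "VF %d failed opcode %d, retval: %d\n",
			vf_id, opcode, retval);
	return 0; /* deliver the message either way */
}

/* legacy entry point: old behavior, never quiet */
static int send_msg(int vf_id, int opcode, int retval)
{
	return send_msg_ex(vf_id, opcode, retval, false);
}

int main(void)
{
	send_msg(0, 42, -1);           /* logs, as before */
	send_msg_ex(0, 42, -1, true);  /* expected failure: stays quiet */
	return 0;
}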
drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2708,8 +2708,11 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
 		total_max_rate += tx_rate;
 		num_qps += mqprio_qopt->qopt.count[i];
 	}
-	if (num_qps > IAVF_MAX_REQ_QUEUES)
+	if (num_qps > adapter->num_active_queues) {
+		dev_err(&adapter->pdev->dev,
+			"Cannot support requested number of queues\n");
 		return -EINVAL;
+	}
 
 	ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
 	return ret;
drivers/net/ethernet/sfc/falcon/rx.c
@@ -110,6 +110,8 @@ static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
 	struct ef4_rx_page_state *state;
 	unsigned index;
 
+	if (unlikely(!rx_queue->page_ring))
+		return NULL;
 	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
 	page = rx_queue->page_ring[index];
 	if (page == NULL)
@@ -293,6 +295,9 @@ static void ef4_recycle_rx_pages(struct ef4_channel *channel,
 {
 	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
 
+	if (unlikely(!rx_queue->page_ring))
+		return;
+
 	do {
 		ef4_recycle_rx_page(channel, rx_buf);
 		rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
drivers/net/ethernet/sfc/rx_common.c
@@ -45,6 +45,8 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
 	unsigned int index;
 	struct page *page;
 
+	if (unlikely(!rx_queue->page_ring))
+		return NULL;
 	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
 	page = rx_queue->page_ring[index];
 	if (page == NULL)
@@ -114,6 +116,9 @@ void efx_recycle_rx_pages(struct efx_channel *channel,
 {
 	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
 
+	if (unlikely(!rx_queue->page_ring))
+		return;
+
 	do {
 		efx_recycle_rx_page(channel, rx_buf);
 		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
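Both sfc hunks make the page-recycle ring optional: its allocation failure is now tolerated rather than fatal, so every reader must guard against a NULL ring and fall back to fresh allocation. A minimal userspace model of that guard (illustrative types, not the driver's):

#include <stdio.h>

struct rx_queue {
	void **page_ring;     /* may legitimately be NULL now */
	unsigned int mask;
	unsigned int remove;
};

static void *reuse_page(struct rx_queue *q)
{
	if (!q->page_ring)    /* ring is optional: caller allocates fresh */
		return NULL;
	return q->page_ring[q->remove & q->mask];
}

int main(void)
{
	struct rx_queue q = { 0 };   /* ring allocation "failed" */

	if (!reuse_page(&q))
		puts("no recycled page, allocate a new one");
	return 0;
}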
drivers/net/ieee802154/atusb.c
@@ -93,7 +93,9 @@ static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
 
 	ret = usb_control_msg(usb_dev, pipe, request, requesttype,
 			      value, index, data, size, timeout);
-	if (ret < 0) {
+	if (ret < size) {
+		ret = ret < 0 ? ret : -ENODATA;
+
 		atusb->err = ret;
 		dev_err(&usb_dev->dev,
 			"%s: req 0x%02x val 0x%x idx 0x%x, error %d\n",
@@ -861,9 +863,9 @@ static int atusb_get_and_show_build(struct atusb *atusb)
 	if (!build)
 		return -ENOMEM;
 
-	ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
-				ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
-				build, ATUSB_BUILD_SIZE, 1000);
+	/* We cannot call atusb_control_msg() here, since this request may read various length data */
+	ret = usb_control_msg(atusb->usb_dev, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD,
+			      ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000);
 	if (ret >= 0) {
 		build[ret] = 0;
 		dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
drivers/net/phy/fixed_phy.c
@@ -239,8 +239,8 @@ static struct phy_device *__fixed_phy_register(unsigned int irq,
 	/* Check if we have a GPIO associated with this fixed phy */
 	if (!gpiod) {
 		gpiod = fixed_phy_get_gpiod(np);
-		if (!gpiod)
-			return ERR_PTR(-EINVAL);
+		if (IS_ERR(gpiod))
+			return ERR_CAST(gpiod);
 	}
 
 	/* Get the next available PHY address, up to PHY_MAX_ADDR */
drivers/net/usb/r8152.c
@@ -9638,9 +9638,12 @@ static int rtl8152_probe(struct usb_interface *intf,
 		netdev->hw_features &= ~NETIF_F_RXCSUM;
 	}
 
-	if (udev->parent &&
-	    le16_to_cpu(udev->parent->descriptor.idVendor) == VENDOR_ID_LENOVO) {
-		tp->lenovo_macpassthru = 1;
+	if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) {
+		switch (le16_to_cpu(udev->descriptor.idProduct)) {
+		case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
+		case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
+			tp->lenovo_macpassthru = 1;
+		}
 	}
 
 	if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
drivers/net/usb/rndis_host.c
@@ -608,6 +608,11 @@ static const struct usb_device_id products [] = {
 	USB_DEVICE_AND_INTERFACE_INFO(0x1630, 0x0042,
 				      USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
 	.driver_info = (unsigned long) &rndis_poll_status_info,
+}, {
+	/* Hytera Communications DMR radios' "Radio to PC Network" */
+	USB_VENDOR_AND_INTERFACE_INFO(0x238b,
+				      USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
+	.driver_info = (unsigned long)&rndis_info,
 }, {
 	/* RNDIS is MSFT's un-official variant of CDC ACM */
 	USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
include/linux/ipv6.h
@@ -133,6 +133,7 @@ struct inet6_skb_parm {
 	__u16			dsthao;
 #endif
 	__u16			frag_max_size;
+	__u16			srhoff;
 
 #define IP6SKB_XFRM_TRANSFORMED	1
 #define IP6SKB_FORWARDED	2
@@ -142,6 +143,7 @@ struct inet6_skb_parm {
 #define IP6SKB_HOPBYHOP        32
 #define IP6SKB_L3SLAVE         64
 #define IP6SKB_JUMBOGRAM      128
+#define IP6SKB_SEG6	      256
 };
 
 #if defined(CONFIG_NET_L3_MASTER_DEV)
include/net/sctp/sctp.h
@@ -112,8 +112,7 @@ struct sctp_transport *sctp_transport_get_next(struct net *net,
 			struct rhashtable_iter *iter);
 struct sctp_transport *sctp_transport_get_idx(struct net *net,
 			struct rhashtable_iter *iter, int pos);
-int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
-				  struct net *net,
+int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net,
 				  const union sctp_addr *laddr,
 				  const union sctp_addr *paddr, void *p);
 int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
include/net/seg6.h
@@ -58,9 +58,30 @@ extern int seg6_local_init(void);
 extern void seg6_local_exit(void);
 
 extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced);
+extern struct ipv6_sr_hdr *seg6_get_srh(struct sk_buff *skb, int flags);
+extern void seg6_icmp_srh(struct sk_buff *skb, struct inet6_skb_parm *opt);
 extern int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
 			     int proto);
 extern int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh);
 extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
 			       u32 tbl_id);
+
+/* If the packet which invoked an ICMP error contains an SRH return
+ * the true destination address from within the SRH, otherwise use the
+ * destination address in the IP header.
+ */
+static inline const struct in6_addr *seg6_get_daddr(struct sk_buff *skb,
+						    struct inet6_skb_parm *opt)
+{
+	struct ipv6_sr_hdr *srh;
+
+	if (opt->flags & IP6SKB_SEG6) {
+		srh = (struct ipv6_sr_hdr *)(skb->data + opt->srhoff);
+		return &srh->segments[0];
+	}
+
+	return NULL;
+}
+
 #endif
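seg6_get_daddr() returns NULL when no SRH was recorded, which lets callers fall back with GNU C's "x ?: y" operator, exactly how the udp6 hunk later in this diff consumes it. A small userspace sketch of that fallback idiom, with simplified stand-ins for the kernel structures:

#include <stdio.h>

#define FLAG_SEG6 0x1

struct parm { unsigned flags; const char *srh_daddr; };

/* NULL means "no SRH found", so callers can chain a fallback */
static const char *seg6_daddr(const struct parm *opt)
{
	return (opt->flags & FLAG_SEG6) ? opt->srh_daddr : NULL;
}

int main(void)
{
	struct parm with = { FLAG_SEG6, "2001:db8::2" };
	struct parm without = { 0, NULL };
	const char *hdr_daddr = "2001:db8::1";

	/* same "x ?: y" GNU C extension the kernel uses */
	printf("%s\n", seg6_daddr(&with) ?: hdr_daddr);
	printf("%s\n", seg6_daddr(&without) ?: hdr_daddr);
	return 0;
}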
net/batman-adv/multicast.c
@@ -1339,6 +1339,7 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: The multicast packet to check
 * @orig: an originator to be set to forward the skb to
+ * @is_routable: stores whether the destination is routable
 *
 * Return: the forwarding mode as enum batadv_forw_mode and in case of
 * BATADV_FORW_SINGLE set the orig to the single originator the skb
@@ -1346,17 +1347,16 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
 */
 enum batadv_forw_mode
 batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
-		       struct batadv_orig_node **orig)
+		       struct batadv_orig_node **orig, int *is_routable)
 {
 	int ret, tt_count, ip_count, unsnoop_count, total_count;
 	bool is_unsnoopable = false;
 	unsigned int mcast_fanout;
 	struct ethhdr *ethhdr;
-	int is_routable = 0;
 	int rtr_count = 0;
 
 	ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
-					   &is_routable);
+					   is_routable);
 	if (ret == -ENOMEM)
 		return BATADV_FORW_NONE;
 	else if (ret < 0)
@@ -1369,7 +1369,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
 	ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
 	unsnoop_count = !is_unsnoopable ? 0 :
 			atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
-	rtr_count = batadv_mcast_forw_rtr_count(bat_priv, is_routable);
+	rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable);
 
 	total_count = tt_count + ip_count + unsnoop_count + rtr_count;
 
@@ -1689,6 +1689,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the multicast packet to transmit
 * @vid: the vlan identifier
+ * @is_routable: stores whether the destination is routable
 *
 * Sends copies of a frame with multicast destination to any node that signaled
 * interest in it, that is either via the translation table or the according
@@ -1701,7 +1702,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
 */
 int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
-			   unsigned short vid)
+			   unsigned short vid, int is_routable)
 {
 	int ret;
 
@@ -1717,12 +1718,16 @@ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
 		return ret;
 	}
 
+	if (!is_routable)
+		goto skip_mc_router;
+
 	ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
 	if (ret != NET_XMIT_SUCCESS) {
 		kfree_skb(skb);
 		return ret;
 	}
 
+skip_mc_router:
 	consume_skb(skb);
 	return ret;
 }
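The batman-adv fix computes "is the destination routable" once in the mode check, hands it back through an out-parameter, and then skips the multicast-router transmission entirely for link-local traffic. A hedged userspace model of that control flow (all names illustrative):

#include <stdio.h>

/* pretend link-local multicast is marked with a leading 'L' */
static int forw_mode(const char *dst, int *is_routable)
{
	*is_routable = (dst[0] != 'L');
	return 0;
}

static int forw_send(int is_routable)
{
	puts("send to TT/want-all listeners");
	if (!is_routable)
		goto skip_mc_router;   /* don't bother multicast routers */
	puts("send to multicast routers");
skip_mc_router:
	return 0;
}

int main(void)
{
	int routable;

	forw_mode("L:ff02::1", &routable);  /* link-local in this model */
	forw_send(routable);                /* router leg is skipped */
	return 0;
}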
net/batman-adv/multicast.h
@@ -43,7 +43,8 @@ enum batadv_forw_mode {
 
 enum batadv_forw_mode
 batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
-		       struct batadv_orig_node **mcast_single_orig);
+		       struct batadv_orig_node **mcast_single_orig,
+		       int *is_routable);
 
 int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
 				struct sk_buff *skb,
@@ -51,7 +52,7 @@ int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
 			 struct batadv_orig_node *orig_node);
 
 int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
-			   unsigned short vid);
+			   unsigned short vid, int is_routable);
 
 void batadv_mcast_init(struct batadv_priv *bat_priv);
 
@@ -68,7 +69,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node);
 
 static inline enum batadv_forw_mode
 batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
-		       struct batadv_orig_node **mcast_single_orig)
+		       struct batadv_orig_node **mcast_single_orig,
+		       int *is_routable)
 {
 	return BATADV_FORW_ALL;
 }
@@ -85,7 +87,7 @@ batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
 
 static inline int
 batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
-		       unsigned short vid)
+		       unsigned short vid, int is_routable)
 {
 	kfree_skb(skb);
 	return NET_XMIT_DROP;
net/batman-adv/soft-interface.c
@@ -198,6 +198,7 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
 	int gw_mode;
 	enum batadv_forw_mode forw_mode = BATADV_FORW_SINGLE;
 	struct batadv_orig_node *mcast_single_orig = NULL;
+	int mcast_is_routable = 0;
 	int network_offset = ETH_HLEN;
 	__be16 proto;
 
@@ -300,7 +301,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
 send:
 	if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
 		forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
-						   &mcast_single_orig);
+						   &mcast_single_orig,
+						   &mcast_is_routable);
 		if (forw_mode == BATADV_FORW_NONE)
 			goto dropped;
 
@@ -359,7 +361,8 @@ send:
 			ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid,
 							  mcast_single_orig);
 		} else if (forw_mode == BATADV_FORW_SOME) {
-			ret = batadv_mcast_forw_send(bat_priv, skb, vid);
+			ret = batadv_mcast_forw_send(bat_priv, skb, vid,
+						     mcast_is_routable);
 		} else {
 			if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
 								  skb))
net/core/lwtunnel.c
@@ -197,6 +197,10 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining,
 		nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
 
 		if (nla_entype) {
+			if (nla_len(nla_entype) < sizeof(u16)) {
+				NL_SET_ERR_MSG(extack, "Invalid RTA_ENCAP_TYPE");
+				return -EINVAL;
+			}
 			encap_type = nla_get_u16(nla_entype);
 
 			if (lwtunnel_valid_encap_type(encap_type,
net/ipv4/fib_semantics.c
@@ -662,6 +662,19 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining,
 	return nhs;
 }
 
+static int fib_gw_from_attr(__be32 *gw, struct nlattr *nla,
+			    struct netlink_ext_ack *extack)
+{
+	if (nla_len(nla) < sizeof(*gw)) {
+		NL_SET_ERR_MSG(extack, "Invalid IPv4 address in RTA_GATEWAY");
+		return -EINVAL;
+	}
+
+	*gw = nla_get_in_addr(nla);
+
+	return 0;
+}
+
 /* only called when fib_nh is integrated into fib_info */
 static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
 		       int remaining, struct fib_config *cfg,
@@ -704,7 +717,11 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
 				return -EINVAL;
 			}
 			if (nla) {
-				fib_cfg.fc_gw4 = nla_get_in_addr(nla);
+				ret = fib_gw_from_attr(&fib_cfg.fc_gw4, nla,
+						       extack);
+				if (ret)
+					goto errout;
+
 				if (fib_cfg.fc_gw4)
 					fib_cfg.fc_gw_family = AF_INET;
 			} else if (nlav) {
@@ -714,10 +731,18 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
 			}
 
 			nla = nla_find(attrs, attrlen, RTA_FLOW);
-			if (nla)
+			if (nla) {
+				if (nla_len(nla) < sizeof(u32)) {
+					NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
+					return -EINVAL;
+				}
 				fib_cfg.fc_flow = nla_get_u32(nla);
+			}
 
 			fib_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
+			/* RTA_ENCAP_TYPE length checked in
+			 * lwtunnel_valid_encap_type_attr
+			 */
 			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
 			if (nla)
 				fib_cfg.fc_encap_type = nla_get_u16(nla);
@@ -902,6 +927,7 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
 		attrlen = rtnh_attrlen(rtnh);
 		if (attrlen > 0) {
 			struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh);
+			int err;
 
 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
 			nlav = nla_find(attrs, attrlen, RTA_VIA);
@@ -912,12 +938,17 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
 			}
 
 			if (nla) {
+				__be32 gw;
+
+				err = fib_gw_from_attr(&gw, nla, extack);
+				if (err)
+					return err;
+
 				if (nh->fib_nh_gw_family != AF_INET ||
-				    nla_get_in_addr(nla) != nh->fib_nh_gw4)
+				    gw != nh->fib_nh_gw4)
 					return 1;
 			} else if (nlav) {
 				struct fib_config cfg2;
-				int err;
 
 				err = fib_gw_from_via(&cfg2, nlav, extack);
 				if (err)
@@ -940,8 +971,14 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
 			nla = nla_find(attrs, attrlen, RTA_FLOW);
-			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
-				return 1;
+			if (nla) {
+				if (nla_len(nla) < sizeof(u32)) {
+					NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
+					return -EINVAL;
+				}
+				if (nla_get_u32(nla) != nh->nh_tclassid)
+					return 1;
+			}
 #endif
 		}
 
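The common thread in the lwtunnel and fib hunks is netlink attribute hardening: never read a fixed-size payload out of a variable-length attribute without first checking its length, or a malicious short attribute causes an out-of-bounds read. A userspace sketch of the fib_gw_from_attr() pattern over a simplified TLV (this is not the real netlink API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct attr { uint16_t len; const void *data; };

static int gw_from_attr(uint32_t *gw, const struct attr *nla)
{
	/* reject instead of reading past the attribute's end */
	if (nla->len < sizeof(*gw)) {
		fprintf(stderr, "Invalid IPv4 address in RTA_GATEWAY\n");
		return -1; /* -EINVAL */
	}
	memcpy(gw, nla->data, sizeof(*gw));
	return 0;
}

int main(void)
{
	uint32_t payload = 0x0100007f, gw; /* 127.0.0.1, little-endian */
	struct attr ok = { sizeof(payload), &payload };
	struct attr truncated = { 2, &payload }; /* malicious short attr */

	printf("ok: %d\n", gw_from_attr(&gw, &ok));
	printf("truncated: %d\n", gw_from_attr(&gw, &truncated));
	return 0;
}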
net/ipv6/icmp.c
@@ -57,6 +57,7 @@
 #include <net/protocol.h>
 #include <net/raw.h>
 #include <net/rawv6.h>
+#include <net/seg6.h>
 #include <net/transp_v6.h>
 #include <net/ip6_route.h>
 #include <net/addrconf.h>
@@ -820,6 +821,7 @@ out_bh_enable:
 
 void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
 {
+	struct inet6_skb_parm *opt = IP6CB(skb);
 	const struct inet6_protocol *ipprot;
 	int inner_offset;
 	__be16 frag_off;
@@ -829,6 +831,8 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
 		goto out;
 
+	seg6_icmp_srh(skb, opt);
+
 	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
 	if (ipv6_ext_hdr(nexthdr)) {
 		/* now skip over extension headers */
@@ -853,7 +857,7 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
 
 	ipprot = rcu_dereference(inet6_protos[nexthdr]);
 	if (ipprot && ipprot->err_handler)
-		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
+		ipprot->err_handler(skb, opt, type, code, inner_offset, info);
 
 	raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
 	return;
net/ipv6/route.c
@@ -5224,6 +5224,19 @@ out:
 	return should_notify;
 }
 
+static int fib6_gw_from_attr(struct in6_addr *gw, struct nlattr *nla,
+			     struct netlink_ext_ack *extack)
+{
+	if (nla_len(nla) < sizeof(*gw)) {
+		NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_GATEWAY");
+		return -EINVAL;
+	}
+
+	*gw = nla_get_in6_addr(nla);
+
+	return 0;
+}
+
 static int ip6_route_multipath_add(struct fib6_config *cfg,
 				   struct netlink_ext_ack *extack)
 {
@@ -5264,10 +5277,18 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
 
 		nla = nla_find(attrs, attrlen, RTA_GATEWAY);
 		if (nla) {
-			r_cfg.fc_gateway = nla_get_in6_addr(nla);
+			err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
+						extack);
+			if (err)
+				goto cleanup;
+
 			r_cfg.fc_flags |= RTF_GATEWAY;
 		}
 		r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
+
+		/* RTA_ENCAP_TYPE length checked in
+		 * lwtunnel_valid_encap_type_attr
+		 */
 		nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
 		if (nla)
 			r_cfg.fc_encap_type = nla_get_u16(nla);
@@ -5434,7 +5455,13 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
 
 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
 			if (nla) {
-				nla_memcpy(&r_cfg.fc_gateway, nla, 16);
+				err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
+							extack);
+				if (err) {
+					last_err = err;
+					goto next_rtnh;
+				}
+
 				r_cfg.fc_flags |= RTF_GATEWAY;
 			}
 		}
@@ -5442,6 +5469,7 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
 		if (err)
 			last_err = err;
 
+next_rtnh:
 		rtnh = rtnh_next(rtnh, &remaining);
 	}
 
net/ipv6/seg6.c
@@ -75,6 +75,65 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced)
 	return true;
 }
 
+struct ipv6_sr_hdr *seg6_get_srh(struct sk_buff *skb, int flags)
+{
+	struct ipv6_sr_hdr *srh;
+	int len, srhoff = 0;
+
+	if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, &flags) < 0)
+		return NULL;
+
+	if (!pskb_may_pull(skb, srhoff + sizeof(*srh)))
+		return NULL;
+
+	srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
+
+	len = (srh->hdrlen + 1) << 3;
+
+	if (!pskb_may_pull(skb, srhoff + len))
+		return NULL;
+
+	/* note that pskb_may_pull may change pointers in header;
+	 * for this reason it is necessary to reload them when needed.
+	 */
+	srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
+
+	if (!seg6_validate_srh(srh, len, true))
+		return NULL;
+
+	return srh;
+}
+
+/* Determine if an ICMP invoking packet contains a segment routing
+ * header.  If it does, extract the offset to the true destination
+ * address, which is in the first segment address.
+ */
+void seg6_icmp_srh(struct sk_buff *skb, struct inet6_skb_parm *opt)
+{
+	__u16 network_header = skb->network_header;
+	struct ipv6_sr_hdr *srh;
+
+	/* Update network header to point to the invoking packet
+	 * inside the ICMP packet, so we can use the seg6_get_srh()
+	 * helper.
+	 */
+	skb_reset_network_header(skb);
+
+	srh = seg6_get_srh(skb, 0);
+	if (!srh)
+		goto out;
+
+	if (srh->type != IPV6_SRCRT_TYPE_4)
+		goto out;
+
+	opt->flags |= IP6SKB_SEG6;
+	opt->srhoff = (unsigned char *)srh - skb->data;
+
+out:
+	/* Restore the network header back to the ICMP packet */
+	skb->network_header = network_header;
+}
+
 static struct genl_family seg6_genl_family;
 
 static const struct nla_policy seg6_genl_policy[SEG6_ATTR_MAX + 1] = {
net/ipv6/seg6_local.c
@@ -150,40 +150,11 @@ static struct seg6_local_lwt *seg6_local_lwtunnel(struct lwtunnel_state *lwt)
 	return (struct seg6_local_lwt *)lwt->data;
 }
 
-static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb, int flags)
-{
-	struct ipv6_sr_hdr *srh;
-	int len, srhoff = 0;
-
-	if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, &flags) < 0)
-		return NULL;
-
-	if (!pskb_may_pull(skb, srhoff + sizeof(*srh)))
-		return NULL;
-
-	srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
-
-	len = (srh->hdrlen + 1) << 3;
-
-	if (!pskb_may_pull(skb, srhoff + len))
-		return NULL;
-
-	/* note that pskb_may_pull may change pointers in header;
-	 * for this reason it is necessary to reload them when needed.
-	 */
-	srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
-
-	if (!seg6_validate_srh(srh, len, true))
-		return NULL;
-
-	return srh;
-}
-
 static struct ipv6_sr_hdr *get_and_validate_srh(struct sk_buff *skb)
 {
 	struct ipv6_sr_hdr *srh;
 
-	srh = get_srh(skb, IP6_FH_F_SKIP_RH);
+	srh = seg6_get_srh(skb, IP6_FH_F_SKIP_RH);
 	if (!srh)
 		return NULL;
 
@@ -200,7 +171,7 @@ static bool decap_and_validate(struct sk_buff *skb, int proto)
 	struct ipv6_sr_hdr *srh;
 	unsigned int off = 0;
 
-	srh = get_srh(skb, 0);
+	srh = seg6_get_srh(skb, 0);
 	if (srh && srh->segments_left > 0)
 		return false;
 
net/ipv6/udp.c
@@ -40,6 +40,7 @@
 #include <net/transp_v6.h>
 #include <net/ip6_route.h>
 #include <net/raw.h>
+#include <net/seg6.h>
 #include <net/tcp_states.h>
 #include <net/ip6_checksum.h>
 #include <net/ip6_tunnel.h>
@@ -561,7 +562,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	struct ipv6_pinfo *np;
 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 	const struct in6_addr *saddr = &hdr->saddr;
-	const struct in6_addr *daddr = &hdr->daddr;
+	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
 	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
 	bool tunnel = false;
 	struct sock *sk;
net/mac80211/ieee80211_i.h
@@ -647,6 +647,26 @@ struct mesh_csa_settings {
 	struct cfg80211_csa_settings settings;
 };
 
+/**
+ * struct mesh_table
+ *
+ * @known_gates: list of known mesh gates and their mpaths by the station. The
+ * gate's mpath may or may not be resolved and active.
+ * @gates_lock: protects updates to known_gates
+ * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @walk_head: linked list containing all mesh_path objects
+ * @walk_lock: lock protecting walk_head
+ * @entries: number of entries in the table
+ */
+struct mesh_table {
+	struct hlist_head known_gates;
+	spinlock_t gates_lock;
+	struct rhashtable rhead;
+	struct hlist_head walk_head;
+	spinlock_t walk_lock;
+	atomic_t entries;		/* Up to MAX_MESH_NEIGHBOURS */
+};
+
 struct ieee80211_if_mesh {
 	struct timer_list housekeeping_timer;
 	struct timer_list mesh_path_timer;
@@ -721,8 +741,8 @@ struct ieee80211_if_mesh {
 	/* offset from skb->data while building IE */
 	int meshconf_offset;
 
-	struct mesh_table *mesh_paths;
-	struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
+	struct mesh_table mesh_paths;
+	struct mesh_table mpp_paths; /* Store paths for MPP&MAP */
 	int mesh_paths_generation;
 	int mpp_paths_generation;
 };
net/mac80211/mesh.h
@@ -127,26 +127,6 @@ struct mesh_path {
 	u32 path_change_count;
 };
 
-/**
- * struct mesh_table
- *
- * @known_gates: list of known mesh gates and their mpaths by the station. The
- * gate's mpath may or may not be resolved and active.
- * @gates_lock: protects updates to known_gates
- * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
- * @walk_head: linked list containing all mesh_path objects
- * @walk_lock: lock protecting walk_head
- * @entries: number of entries in the table
- */
-struct mesh_table {
-	struct hlist_head known_gates;
-	spinlock_t gates_lock;
-	struct rhashtable rhead;
-	struct hlist_head walk_head;
-	spinlock_t walk_lock;
-	atomic_t entries;		/* Up to MAX_MESH_NEIGHBOURS */
-};
-
 /* Recent multicast cache */
 /* RMC_BUCKETS must be a power of 2, maximum 256 */
 #define RMC_BUCKETS		256
@@ -308,7 +288,7 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
 void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
 void mesh_path_flush_pending(struct mesh_path *mpath);
 void mesh_path_tx_pending(struct mesh_path *mpath);
-int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
+void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
 void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata);
 int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr);
 void mesh_path_timer(struct timer_list *t);
net/mac80211/mesh_pathtbl.c
@@ -47,32 +47,24 @@ static void mesh_path_rht_free(void *ptr, void *tblptr)
 	mesh_path_free_rcu(tbl, mpath);
 }
 
-static struct mesh_table *mesh_table_alloc(void)
+static void mesh_table_init(struct mesh_table *tbl)
 {
-	struct mesh_table *newtbl;
+	INIT_HLIST_HEAD(&tbl->known_gates);
+	INIT_HLIST_HEAD(&tbl->walk_head);
+	atomic_set(&tbl->entries, 0);
+	spin_lock_init(&tbl->gates_lock);
+	spin_lock_init(&tbl->walk_lock);
 
-	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
-	if (!newtbl)
-		return NULL;
-
-	INIT_HLIST_HEAD(&newtbl->known_gates);
-	INIT_HLIST_HEAD(&newtbl->walk_head);
-	atomic_set(&newtbl->entries,  0);
-	spin_lock_init(&newtbl->gates_lock);
-	spin_lock_init(&newtbl->walk_lock);
-	if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
-		kfree(newtbl);
-		return NULL;
-	}
-
-	return newtbl;
+	/* rhashtable_init() may fail only in case of wrong
+	 * mesh_rht_params
+	 */
+	WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
 }
 
 static void mesh_table_free(struct mesh_table *tbl)
 {
 	rhashtable_free_and_destroy(&tbl->rhead,
 				    mesh_path_rht_free, tbl);
-	kfree(tbl);
 }
 
 /**
@@ -238,13 +230,13 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
 struct mesh_path *
 mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 {
-	return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
+	return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
 }
 
 struct mesh_path *
 mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 {
-	return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
+	return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
 }
 
 static struct mesh_path *
@@ -281,7 +273,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
 struct mesh_path *
 mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 {
-	return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
+	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
 }
 
 /**
@@ -296,7 +288,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 struct mesh_path *
 mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 {
-	return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
+	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
 }
 
 /**
@@ -309,7 +301,7 @@ int mesh_path_add_gate(struct mesh_path *mpath)
 	int err;
 
 	rcu_read_lock();
-	tbl = mpath->sdata->u.mesh.mesh_paths;
+	tbl = &mpath->sdata->u.mesh.mesh_paths;
 
 	spin_lock_bh(&mpath->state_lock);
 	if (mpath->is_gate) {
@@ -418,7 +410,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
 	if (!new_mpath)
 		return ERR_PTR(-ENOMEM);
 
-	tbl = sdata->u.mesh.mesh_paths;
+	tbl = &sdata->u.mesh.mesh_paths;
 	spin_lock_bh(&tbl->walk_lock);
 	mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
 						  &new_mpath->rhash,
@@ -460,7 +452,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 		return -ENOMEM;
 
 	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
-	tbl = sdata->u.mesh.mpp_paths;
+	tbl = &sdata->u.mesh.mpp_paths;
 
 	spin_lock_bh(&tbl->walk_lock);
 	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
@@ -489,7 +481,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 void mesh_plink_broken(struct sta_info *sta)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
+	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
 	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 	struct mesh_path *mpath;
 
@@ -548,7 +540,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
 void mesh_path_flush_by_nexthop(struct sta_info *sta)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
+	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
 	struct mesh_path *mpath;
 	struct hlist_node *n;
 
@@ -563,7 +555,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
 			       const u8 *proxy)
 {
-	struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
+	struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
 	struct mesh_path *mpath;
 	struct hlist_node *n;
 
@@ -597,8 +589,8 @@ static void table_flush_by_iface(struct mesh_table *tbl)
 */
 void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
 {
-	table_flush_by_iface(sdata->u.mesh.mesh_paths);
-	table_flush_by_iface(sdata->u.mesh.mpp_paths);
+	table_flush_by_iface(&sdata->u.mesh.mesh_paths);
+	table_flush_by_iface(&sdata->u.mesh.mpp_paths);
 }
 
 /**
@@ -644,7 +636,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
 	/* flush relevant mpp entries first */
 	mpp_flush_by_proxy(sdata, addr);
 
-	err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
+	err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
 	sdata->u.mesh.mesh_paths_generation++;
 	return err;
 }
@@ -682,7 +674,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
 	struct mesh_path *gate;
 	bool copy = false;
 
-	tbl = sdata->u.mesh.mesh_paths;
+	tbl = &sdata->u.mesh.mesh_paths;
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
@@ -762,29 +754,10 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
 	mesh_path_tx_pending(mpath);
 }
 
-int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
+void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
 {
-	struct mesh_table *tbl_path, *tbl_mpp;
-	int ret;
-
-	tbl_path = mesh_table_alloc();
-	if (!tbl_path)
-		return -ENOMEM;
-
-	tbl_mpp = mesh_table_alloc();
-	if (!tbl_mpp) {
-		ret = -ENOMEM;
-		goto free_path;
-	}
-
-	sdata->u.mesh.mesh_paths = tbl_path;
-	sdata->u.mesh.mpp_paths = tbl_mpp;
-
-	return 0;
-
-free_path:
-	mesh_table_free(tbl_path);
-	return ret;
+	mesh_table_init(&sdata->u.mesh.mesh_paths);
+	mesh_table_init(&sdata->u.mesh.mpp_paths);
 }
 
 static
@@ -806,12 +779,12 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
 
 void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
 {
-	mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
-	mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
+	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
+	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
 }
 
 void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
 {
-	mesh_table_free(sdata->u.mesh.mesh_paths);
-	mesh_table_free(sdata->u.mesh.mpp_paths);
+	mesh_table_free(&sdata->u.mesh.mesh_paths);
+	mesh_table_free(&sdata->u.mesh.mpp_paths);
 }
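The whole mac80211 mesh series reduces to one structural move: embed the two tables in the parent struct instead of pointing at separately allocated ones, so a fallible two-step setup becomes infallible initialization and the error paths disappear. A minimal userspace sketch of that trade-off (illustrative types, not mac80211's):

#include <stdio.h>

struct table { int entries; };

struct iface {
	struct table mesh_paths;   /* embedded: no kmalloc, no NULL case */
	struct table mpp_paths;
};

static void table_init(struct table *t)
{
	t->entries = 0;
}

/* returns void: with embedded tables there is nothing left to fail,
 * mirroring the int -> void change to mesh_pathtbl_init() above */
static void pathtbl_init(struct iface *ifc)
{
	table_init(&ifc->mesh_paths);
	table_init(&ifc->mpp_paths);
}

int main(void)
{
	struct iface ifc;

	pathtbl_init(&ifc);
	printf("tables ready: %d %d\n",
	       ifc.mesh_paths.entries, ifc.mpp_paths.entries);
	return 0;
}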
net/mac80211/mlme.c
@@ -5265,7 +5265,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 	 */
 	if (new_sta) {
 		u32 rates = 0, basic_rates = 0;
-		bool have_higher_than_11mbit;
+		bool have_higher_than_11mbit = false;
 		int min_rate = INT_MAX, min_rate_index = -1;
 		const struct cfg80211_bss_ies *ies;
 		int shift = ieee80211_vif_get_shift(&sdata->vif);
net/mctp/neigh.c
@@ -85,8 +85,8 @@ void mctp_neigh_remove_dev(struct mctp_dev *mdev)
 	mutex_unlock(&net->mctp.neigh_lock);
 }
 
-// TODO: add a "source" flag so netlink can only delete static neighbours?
-static int mctp_neigh_remove(struct mctp_dev *mdev, mctp_eid_t eid)
+static int mctp_neigh_remove(struct mctp_dev *mdev, mctp_eid_t eid,
+			     enum mctp_neigh_source source)
 {
 	struct net *net = dev_net(mdev->dev);
 	struct mctp_neigh *neigh, *tmp;
@@ -94,7 +94,8 @@ static int mctp_neigh_remove(struct mctp_dev *mdev, mctp_eid_t eid)
 
 	mutex_lock(&net->mctp.neigh_lock);
 	list_for_each_entry_safe(neigh, tmp, &net->mctp.neighbours, list) {
-		if (neigh->dev == mdev && neigh->eid == eid) {
+		if (neigh->dev == mdev && neigh->eid == eid &&
+		    neigh->source == source) {
 			list_del_rcu(&neigh->list);
 			/* TODO: immediate RTM_DELNEIGH */
 			call_rcu(&neigh->rcu, __mctp_neigh_free);
@@ -202,7 +203,7 @@ static int mctp_rtm_delneigh(struct sk_buff *skb, struct nlmsghdr *nlh,
 	if (!mdev)
 		return -ENODEV;
 
-	return mctp_neigh_remove(mdev, eid);
+	return mctp_neigh_remove(mdev, eid, MCTP_NEIGH_STATIC);
 }
 
 static int mctp_fill_neigh(struct sk_buff *skb, u32 portid, u32 seq, int event,
net/netrom/af_netrom.c
@@ -306,7 +306,7 @@ static int nr_setsockopt(struct socket *sock, int level, int optname,
 	if (optlen < sizeof(unsigned int))
 		return -EINVAL;
 
-	if (copy_from_sockptr(&opt, optval, sizeof(unsigned int)))
+	if (copy_from_sockptr(&opt, optval, sizeof(unsigned long)))
 		return -EFAULT;
 
 	switch (optname) {
net/sched/sch_qfq.c
@@ -1421,10 +1421,8 @@ static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
 	if (err < 0)
 		return err;
 
-	if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
-		max_classes = QFQ_MAX_AGG_CLASSES;
-	else
-		max_classes = qdisc_dev(sch)->tx_queue_len + 1;
+	max_classes = min_t(u64, (u64)qdisc_dev(sch)->tx_queue_len + 1,
+			    QFQ_MAX_AGG_CLASSES);
 	/* max_cl_shift = floor(log_2(max_classes)) */
 	max_cl_shift = __fls(max_classes);
 	q->max_agg_classes = 1<<max_cl_shift;
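The qfq bug is an integer-width trap: tx_queue_len is a 32-bit value, so "len + 1" can wrap to 0, slip past the comparison, and feed __fls(0), whose result is undefined, hence the shift-out-of-bounds. Widening to u64 before clamping keeps the comparison honest. A userspace demonstration of the two forms:

#include <stdint.h>
#include <stdio.h>

#define QFQ_MAX_AGG_CLASSES 8

int main(void)
{
	uint32_t tx_queue_len = UINT32_MAX;   /* pathological but settable */

	/* buggy form: 32-bit "len + 1" wraps to 0, and 0 > 8 is false,
	 * so max_classes would end up 0 and __fls(0) is undefined */
	uint32_t wrapped = tx_queue_len + 1;

	/* fixed form: widen first, then clamp, as min_t(u64, ...) does */
	uint64_t max_classes = (uint64_t)tx_queue_len + 1;
	if (max_classes > QFQ_MAX_AGG_CLASSES)
		max_classes = QFQ_MAX_AGG_CLASSES;

	printf("wrapped=%u clamped=%llu\n",
	       wrapped, (unsigned long long)max_classes);
	return 0;
}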
net/sctp/diag.c
@@ -245,48 +245,44 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
 	       + 64;
 }
 
-static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
+static int sctp_sock_dump_one(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
 {
 	struct sctp_association *assoc = tsp->asoc;
-	struct sock *sk = tsp->asoc->base.sk;
 	struct sctp_comm_param *commp = p;
-	struct sk_buff *in_skb = commp->skb;
+	struct sock *sk = ep->base.sk;
 	const struct inet_diag_req_v2 *req = commp->r;
-	const struct nlmsghdr *nlh = commp->nlh;
-	struct net *net = sock_net(in_skb->sk);
+	struct sk_buff *skb = commp->skb;
 	struct sk_buff *rep;
 	int err;
 
 	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
 	if (err)
-		goto out;
+		return err;
 
-	err = -ENOMEM;
 	rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
 	if (!rep)
-		goto out;
+		return -ENOMEM;
 
 	lock_sock(sk);
-	if (sk != assoc->base.sk) {
-		release_sock(sk);
-		sk = assoc->base.sk;
-		lock_sock(sk);
-	}
-	err = inet_sctp_diag_fill(sk, assoc, rep, req,
-				  sk_user_ns(NETLINK_CB(in_skb).sk),
-				  NETLINK_CB(in_skb).portid,
-				  nlh->nlmsg_seq, 0, nlh,
-				  commp->net_admin);
-	release_sock(sk);
+	if (ep != assoc->ep) {
+		err = -EAGAIN;
+		goto out;
+	}
+
+	err = inet_sctp_diag_fill(sk, assoc, rep, req, sk_user_ns(NETLINK_CB(skb).sk),
+				  NETLINK_CB(skb).portid, commp->nlh->nlmsg_seq, 0,
+				  commp->nlh, commp->net_admin);
 	if (err < 0) {
 		WARN_ON(err == -EMSGSIZE);
-		kfree_skb(rep);
 		goto out;
 	}
+	release_sock(sk);
 
-	err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+	return nlmsg_unicast(sock_net(skb->sk)->diag_nlsk, rep, NETLINK_CB(skb).portid);
 
 out:
+	release_sock(sk);
+	kfree_skb(rep);
 	return err;
 }
 
@@ -429,15 +425,15 @@ static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 static int sctp_diag_dump_one(struct netlink_callback *cb,
 			      const struct inet_diag_req_v2 *req)
 {
-	struct sk_buff *in_skb = cb->skb;
-	struct net *net = sock_net(in_skb->sk);
+	struct sk_buff *skb = cb->skb;
+	struct net *net = sock_net(skb->sk);
 	const struct nlmsghdr *nlh = cb->nlh;
 	union sctp_addr laddr, paddr;
 	struct sctp_comm_param commp = {
-		.skb = in_skb,
+		.skb = skb,
 		.r = req,
 		.nlh = nlh,
-		.net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN),
+		.net_admin = netlink_net_capable(skb, CAP_NET_ADMIN),
 	};
 
 	if (req->sdiag_family == AF_INET) {
@@ -460,7 +456,7 @@ static int sctp_diag_dump_one(struct netlink_callback *cb,
 		paddr.v6.sin6_family = AF_INET6;
 	}
 
-	return sctp_transport_lookup_process(sctp_tsp_dump_one,
+	return sctp_transport_lookup_process(sctp_sock_dump_one,
 					     net, &laddr, &paddr, &commp);
 }
net/sctp/socket.c
@@ -5317,23 +5317,31 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
 }
 EXPORT_SYMBOL_GPL(sctp_for_each_endpoint);
 
-int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
-				  struct net *net,
+int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net,
 				  const union sctp_addr *laddr,
 				  const union sctp_addr *paddr, void *p)
 {
 	struct sctp_transport *transport;
-	int err;
+	struct sctp_endpoint *ep;
+	int err = -ENOENT;
 
 	rcu_read_lock();
 	transport = sctp_addrs_lookup_transport(net, laddr, paddr);
+	if (!transport) {
+		rcu_read_unlock();
+		return err;
+	}
+	ep = transport->asoc->ep;
+	if (!sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
+		sctp_transport_put(transport);
+		rcu_read_unlock();
+		return err;
+	}
 	rcu_read_unlock();
-	if (!transport)
-		return -ENOENT;
 
-	err = cb(transport, p);
+	err = cb(ep, transport, p);
+	sctp_endpoint_put(ep);
 	sctp_transport_put(transport);
 
 	return err;
 }
 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
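The sctp fix is a classic RCU lifetime rule: any object found under the read lock must have its reference taken before the lock is dropped, or the callback can run against memory that was freed in between. A single-threaded userspace sketch that shows only the ordering (illustrative types, not the SCTP API):

#include <stdbool.h>
#include <stdio.h>

struct endpoint { int refcnt; };

static bool endpoint_hold(struct endpoint *ep)
{
	if (ep->refcnt == 0)     /* already being torn down */
		return false;
	ep->refcnt++;
	return true;
}

static void endpoint_put(struct endpoint *ep)
{
	ep->refcnt--;
}

static int lookup_process(struct endpoint *ep,
			  int (*cb)(struct endpoint *, void *), void *p)
{
	/* rcu_read_lock() would start here */
	if (!endpoint_hold(ep))  /* take the reference under the lock */
		return -2;       /* -ENOENT */
	/* rcu_read_unlock() here: ep stays valid because we hold it */

	int err = cb(ep, p);
	endpoint_put(ep);
	return err;
}

static int dump_one(struct endpoint *ep, void *p)
{
	(void)ep; (void)p;
	return 0;
}

int main(void)
{
	struct endpoint ep = { .refcnt = 1 };

	printf("err=%d refcnt=%d\n",
	       lookup_process(&ep, dump_one, NULL), ep.refcnt);
	return 0;
}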
net/tipc/socket.c
@@ -1461,6 +1461,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
 		msg_set_syn(hdr, 1);
 	}
 
+	memset(&skaddr, 0, sizeof(skaddr));
+
 	/* Determine destination */
 	if (atype == TIPC_SERVICE_RANGE) {
 		return tipc_sendmcast(sock, ua, m, dlen, timeout);
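The tipc infoleak comes from a stack struct whose fields were assigned individually: field assignment does not initialize compiler-inserted padding, and those stale bytes were copied out with the message. memset() first is the standard cure. A small userspace illustration (the struct layout here is a simplified stand-in, not TIPC's):

#include <stdio.h>
#include <string.h>

struct sockaddr_like {
	unsigned short family;
	/* the compiler may insert padding here */
	unsigned long addr;
};

int main(void)
{
	struct sockaddr_like skaddr;

	/* without this, padding bytes hold stale stack data that a
	 * field-by-field init would leave intact (the kernel-infoleak) */
	memset(&skaddr, 0, sizeof(skaddr));

	skaddr.family = 30;    /* an illustrative value */
	skaddr.addr = 0x1234;

	printf("first byte: %u\n", ((unsigned char *)&skaddr)[0]);
	return 0;
}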
net/xdp/xsk.c
@@ -677,6 +677,8 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
 	struct xdp_sock *xs = xdp_sk(sk);
 	struct xsk_buff_pool *pool;
 
+	sock_poll_wait(file, sock, wait);
+
 	if (unlikely(!xsk_is_bound(xs)))
 		return mask;
 
@@ -688,8 +690,6 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
 		else
 			/* Poll needs to drive Tx also in copy mode */
 			__xsk_sendmsg(sk);
-	} else {
-		sock_poll_wait(file, sock, wait);
 	}
 
 	if (xs->rx && !xskq_prod_is_empty(xs->rx))
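The xsk revert moves sock_poll_wait() back to the top of the poll handler: the waitqueue has to be registered on every call, before any early return, or a poller is never woken when the socket's state later changes. A simplified event-loop model of why the ordering matters (not the socket API):

#include <stdbool.h>
#include <stdio.h>

struct waiter { bool registered; };

static void poll_wait(struct waiter *w)
{
	w->registered = true;   /* arm the wakeup */
}

static int xsk_poll(struct waiter *w, bool bound)
{
	poll_wait(w);           /* unconditionally, before early returns */
	if (!bound)
		return 0;       /* still woken later if we get bound */
	return 1;               /* pretend there is data */
}

int main(void)
{
	struct waiter w = { false };

	xsk_poll(&w, false);
	printf("registered=%d\n", w.registered);  /* 1: no missed wakeup */
	return 0;
}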
tools/testing/selftests/bpf/verifier/value_ptr_arith.c
@@ -1078,7 +1078,7 @@
 	.errstr_unpriv = "R0 pointer -= pointer prohibited",
 },
 {
-	"map access: trying to leak tained dst reg",
+	"map access: trying to leak tainted dst reg",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
tools/testing/selftests/net/amt.sh
Normal file → Executable file (0 lines changed)
@@ -193,7 +193,8 @@ for family in 4 6; do
 		SUFFIX="64 nodad"
 		VXDEV=vxlan6
 		IPT=ip6tables
-		PING="ping6"
+		# Use ping6 on systems where ping doesn't handle IPv6
+		ping -w 1 -c 1 ::1 > /dev/null 2>&1 || PING="ping6"
 	fi
 
 	echo "IPv$family"