Merge tag 'net-5.13-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Networking fixes for 5.13-rc7, including fixes from wireless, bpf,
  bluetooth, netfilter and can.

  Current release - regressions:

   - mlxsw: spectrum_qdisc: pass handle, not band number, to
     find_class() to fix modifying offloaded qdiscs

   - lantiq: net: fix duplicated skb in rx descriptor ring

   - rtnetlink: fix regression in bridge VLAN configuration; empty info
     is not an error, the bot-generated "fix" was not needed

   - libbpf: fix the s/rx/tx/ typo on umem->rx_ring_setup_done to fix
     umem creation

  Current release - new code bugs:

   - ethtool: fix NULL pointer dereference during module EEPROM dump
     via the new netlink API

   - mlx5e: don't update netdev RQs with PTP-RQ; the special-purpose
     queue should not be visible to the stack

   - mlx5e: select the special PTP queue only for SKBTX_HW_TSTAMP skbs

   - mlx5e: verify dev is present in the get devlink port ndo to avoid
     a panic

  Previous releases - regressions:

   - neighbour: allow NUD_NOARP entries to be force garbage-collected

   - further fixes for fallout from the reorg of WiFi locking (staging:
     rtl8723bs, mac80211, cfg80211)

   - skbuff: fix incorrect msg_zerocopy copy notifications

   - mac80211: fix NULL pointer dereference for injected rate info

   - Revert "net/mlx5: Arm only EQs with EQEs"; it may cause missed
     IRQs

  Previous releases - always broken:

   - bpf: more speculative execution fixes

   - netfilter: nft_fib_ipv6: skip IPv6 packets from any to link-local

   - udp: fix the race between close() and udp_abort() resulting in a
     panic

   - fix out-of-bounds access when parsing TCP options before packets
     are validated (in netfilter: synproxy, tc: sch_cake and mptcp)

   - mptcp: improve operation under memory pressure, add missing
     wake-ups

   - mptcp: fix double-lock/soft lockup in subflow_error_report()

   - bridge: fix races (NULL pointer dereference and use-after-free) in
     vlan tunnel egress

   - ena: fix DMA mapping function issues in XDP

   - rds: fix memory leak in rds_recvmsg

  Misc:

   - vrf: allow larger MTUs

   - icmp: don't send out ICMP messages with a source address of
     0.0.0.0

   - cdc_ncm: switch to eth%d interface naming"

* tag 'net-5.13-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (139 commits)
  net: ethernet: fix potential use-after-free in ec_bhf_remove
  selftests/net: Add icmp.sh for testing ICMP dummy address responses
  icmp: don't send out ICMP messages with a source address of 0.0.0.0
  net: ll_temac: Avoid ndo_start_xmit returning NETDEV_TX_BUSY
  net: ll_temac: Fix TX BD buffer overwrite
  net: ll_temac: Add memory-barriers for TX BD access
  net: ll_temac: Make sure to free skb when it is completely used
  MAINTAINERS: add Guvenc as SMC maintainer
  bnxt_en: Call bnxt_ethtool_free() in bnxt_init_one() error path
  bnxt_en: Fix TQM fastpath ring backing store computation
  bnxt_en: Rediscover PHY capabilities after firmware reset
  cxgb4: fix wrong shift.
  mac80211: handle various extensible elements correctly
  mac80211: reset profile_periodicity/ema_ap
  cfg80211: avoid double free of PMSR request
  cfg80211: make certificate generation more robust
  mac80211: minstrel_ht: fix sample time check
  net: qed: Fix memcpy() overflow of qed_dcbx_params()
  net: cdc_eem: fix tx fixup skb leak
  net: hamradio: fix memory leak in mkiss_close
  ...

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE6jPA+I1ugmIBA4hXMUZtbf5SIrsFAmDNP7EACgkQMUZtbf5S
IrvTmxAAgOAM9MdRl9wnYtqXKPXJ1JJtenozwt1yX6b6OG+Ns7cm6YYafU3KoZWR
KlzpvP90vRrER3RqksbMngHzvGjZKDS4LWRur7sRlJ1TBQoLrQCIbriAh07d7wlU
0nnS4J8mczTCKx78QCUYy1QBIX5TQrUbx0JQZDPoIPBjFeILW+Gx/Ghg5tUR4mhf
6icYqwIPocTXO37ZmWOzezZNVOXJF4kaQUZeuOHNe5hOtm6EeIpZbW1Xx3DIr5bd
80a/uNU7nVyos0n7jxnfVE/oelTnYbT5scZeV/PPVqZ4U113f7uex2QP23/XhGSX
lK1EhwPqPOyaNhQoihLM6Xzd4o7aZOcmF8NY96xqjC+DqdN+juvfJU+ClCZojGIj
H4bwCSaj3y2PiimfQdBiIKvYMc5d4zBdw/Dpk/gLDp4d5N638TAtuunK4Mj+TEuT
QF1qkBLIB4HFtLS0M35/twk93md/5GUdSTij2GB3fOkAWRu2m266P5m+4DigW/TB
Xm8FgKdetvxVP0Qv/p49nPEn24Ny8wCafH1x1wVTmoda2qi6j1EXMuSa0PlCdz70
Sl5FrlxdEkOpC4p+Aoc8APSoBXnOriAlpU+z/EVb8Co4JR/+Ge5zBWpsiZDVD0/K
Ay0FW3I87iyn9tw1H1Fzr9GBlVl5vWRauZFHjzl90fWakCrCzJE=
=xxUe
-----END PGP SIGNATURE-----
commit 9ed13a17e3
@@ -16560,6 +16560,7 @@ F: drivers/misc/sgi-xp/

SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
M:	Karsten Graul <kgraul@linux.ibm.com>
M:	Guvenc Gulce <guvenc@linux.ibm.com>
L:	linux-s390@vger.kernel.org
S:	Supported
W:	http://www.ibm.com/developerworks/linux/linux390/
@@ -350,6 +350,7 @@ static int ldisc_open(struct tty_struct *tty)
	rtnl_lock();
	result = register_netdevice(dev);
	if (result) {
		tty_kref_put(tty);
		rtnl_unlock();
		free_netdev(dev);
		return -ENODEV;

@@ -82,6 +82,8 @@ struct mcba_priv {
	bool can_ka_first_pass;
	bool can_speed_check;
	atomic_t free_ctx_cnt;
	void *rxbuf[MCBA_MAX_RX_URBS];
	dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
};

/* CAN frame */
@@ -633,6 +635,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
	for (i = 0; i < MCBA_MAX_RX_URBS; i++) {
		struct urb *urb = NULL;
		u8 *buf;
		dma_addr_t buf_dma;

		/* create a URB, and a buffer for it */
		urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -642,7 +645,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
		}

		buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
					 GFP_KERNEL, &urb->transfer_dma);
					 GFP_KERNEL, &buf_dma);
		if (!buf) {
			netdev_err(netdev, "No memory left for USB buffer\n");
			usb_free_urb(urb);
@@ -661,11 +664,14 @@ static int mcba_usb_start(struct mcba_priv *priv)
		if (err) {
			usb_unanchor_urb(urb);
			usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
					  buf, urb->transfer_dma);
					  buf, buf_dma);
			usb_free_urb(urb);
			break;
		}

		priv->rxbuf[i] = buf;
		priv->rxbuf_dma[i] = buf_dma;

		/* Drop reference, USB core will take care of freeing it */
		usb_free_urb(urb);
	}
@@ -708,7 +714,14 @@ static int mcba_usb_open(struct net_device *netdev)

static void mcba_urb_unlink(struct mcba_priv *priv)
{
	int i;

	usb_kill_anchored_urbs(&priv->rx_submitted);

	for (i = 0; i < MCBA_MAX_RX_URBS; ++i)
		usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
				  priv->rxbuf[i], priv->rxbuf_dma[i]);

	usb_kill_anchored_urbs(&priv->tx_submitted);
}
@@ -236,36 +236,48 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
				struct ena_tx_buffer *tx_info,
				struct xdp_frame *xdpf,
				void **push_hdr,
				u32 *push_len)
				struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_adapter *adapter = xdp_ring->adapter;
	struct ena_com_buf *ena_buf;
	dma_addr_t dma = 0;
	int push_len = 0;
	dma_addr_t dma;
	void *data;
	u32 size;

	tx_info->xdpf = xdpf;
	data = tx_info->xdpf->data;
	size = tx_info->xdpf->len;
	ena_buf = tx_info->bufs;

	/* llq push buffer */
	*push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
	*push_hdr = tx_info->xdpf->data;
	if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Designate part of the packet for LLQ */
		push_len = min_t(u32, size, xdp_ring->tx_max_header_size);

	if (size - *push_len > 0) {
		ena_tx_ctx->push_header = data;

		size -= push_len;
		data += push_len;
	}

		ena_tx_ctx->header_len = push_len;

	if (size > 0) {
		dma = dma_map_single(xdp_ring->dev,
				     *push_hdr + *push_len,
				     size - *push_len,
				     data,
				     size,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
			goto error_report_dma_error;

		tx_info->map_linear_data = 1;
		tx_info->num_of_bufs = 1;
	}
	tx_info->map_linear_data = 0;

	ena_buf->paddr = dma;
	ena_buf->len = size;
		ena_buf = tx_info->bufs;
		ena_buf->paddr = dma;
		ena_buf->len = size;

		ena_tx_ctx->ena_bufs = ena_buf;
		ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
	}

	return 0;

@@ -274,10 +286,6 @@ error_report_dma_error:
			  &xdp_ring->syncp);
	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");

	xdp_return_frame_rx_napi(tx_info->xdpf);
	tx_info->xdpf = NULL;
	tx_info->num_of_bufs = 0;

	return -EINVAL;
}

@@ -289,8 +297,6 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
	struct ena_com_tx_ctx ena_tx_ctx = {};
	struct ena_tx_buffer *tx_info;
	u16 next_to_use, req_id;
	void *push_hdr;
	u32 push_len;
	int rc;

	next_to_use = xdp_ring->next_to_use;
@@ -298,15 +304,11 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
	tx_info = &xdp_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;

	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
	if (unlikely(rc))
		return rc;

	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = push_len;

	rc = ena_xmit_common(dev,
			     xdp_ring,

@@ -1849,6 +1849,7 @@ out_free_netdev:
	free_netdev(netdev);
out_pci_release:
	pci_release_mem_regions(pdev);
	pci_disable_pcie_error_reporting(pdev);
out_pci_disable:
	pci_disable_device(pdev);
	return err;

@@ -7308,7 +7308,7 @@ skip_rdma:
	entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
		     2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
	entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
	entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
	entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
	entries = roundup(entries, ctx->tqm_entries_multiple);
	entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
@@ -11750,6 +11750,8 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
	bnxt_hwrm_coal_params_qcaps(bp);
}

static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);

static int bnxt_fw_init_one(struct bnxt *bp)
{
	int rc;
@@ -11764,6 +11766,9 @@ static int bnxt_fw_init_one(struct bnxt *bp)
		netdev_err(bp->dev, "Firmware init phase 2 failed\n");
		return rc;
	}
	rc = bnxt_probe_phy(bp, false);
	if (rc)
		return rc;
	rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
	if (rc)
		return rc;
@@ -13155,6 +13160,7 @@ init_err_pci_clean:
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_short_cmd_req(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_ethtool_free(bp);
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);

@@ -1337,13 +1337,27 @@ static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
		return ret;
	}

	spin_lock_bh(&adap->win0_lock);
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
	spin_unlock_bh(&adap->win0_lock);
	if (ret)
		dev_err(adap->pdev_dev, "Failed to load PHY FW\n");
	/* We have to RESET the chip/firmware because we need the
	 * chip in uninitialized state for loading new PHY image.
	 * Otherwise, the running firmware will only store the PHY
	 * image in local RAM which will be lost after next reset.
	 */
	ret = t4_fw_reset(adap, adap->mbox, PIORSTMODE_F | PIORST_F);
	if (ret < 0) {
		dev_err(adap->pdev_dev,
			"Set FW to RESET for flashing PHY FW failed. ret: %d\n",
			ret);
		return ret;
	}

	return ret;
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "Failed to load PHY FW. ret: %d\n",
			ret);
		return ret;
	}

	return 0;
}

static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
@@ -1610,16 +1624,14 @@ static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
						   u32 ftid)
{
	struct tid_info *t = &adap->tids;
	struct filter_entry *f;

	if (ftid < t->nhpftids)
		f = &adap->tids.hpftid_tab[ftid];
	else if (ftid < t->nftids)
		f = &adap->tids.ftid_tab[ftid - t->nhpftids];
	else
		f = lookup_tid(&adap->tids, ftid);
	if (ftid >= t->hpftid_base && ftid < t->hpftid_base + t->nhpftids)
		return &t->hpftid_tab[ftid - t->hpftid_base];

	return f;
	if (ftid >= t->ftid_base && ftid < t->ftid_base + t->nftids)
		return &t->ftid_tab[ftid - t->ftid_base];

	return lookup_tid(t, ftid);
}

static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
@@ -1826,6 +1838,11 @@ static int cxgb4_ntuple_del_filter(struct net_device *dev,
	filter_id = filter_info->loc_array[cmd->fs.location];
	f = cxgb4_get_filter_entry(adapter, filter_id);

	if (f->fs.prio)
		filter_id -= adapter->tids.hpftid_base;
	else if (!f->fs.hash)
		filter_id -= (adapter->tids.ftid_base - adapter->tids.nhpftids);

	ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
	if (ret)
		goto err;
@@ -1885,6 +1902,11 @@ static int cxgb4_ntuple_set_filter(struct net_device *netdev,

	filter_info = &adapter->ethtool_filters->port[pi->port_id];

	if (fs.prio)
		tid += adapter->tids.hpftid_base;
	else if (!fs.hash)
		tid += (adapter->tids.ftid_base - adapter->tids.nhpftids);

	filter_info->loc_array[cmd->fs.location] = tid;
	set_bit(cmd->fs.location, filter_info->bmap);
	filter_info->in_use++;

@@ -198,7 +198,7 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
			      WORD_MASK, f->fs.nat_lip[3] |
			      f->fs.nat_lip[2] << 8 |
			      f->fs.nat_lip[1] << 16 |
			      (u64)f->fs.nat_lip[0] << 25, 1);
			      (u64)f->fs.nat_lip[0] << 24, 1);
	}
}

@@ -4424,10 +4424,8 @@ static int adap_init0_phy(struct adapter *adap)

	/* Load PHY Firmware onto adapter.
	 */
	spin_lock_bh(&adap->win0_lock);
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
			     (u8 *)phyf->data, phyf->size);
	spin_unlock_bh(&adap->win0_lock);
	if (ret < 0)
		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
			-ret);
@@ -3060,16 +3060,19 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address. All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as byte stream
 *	(i.e. matches what on disk), otherwise in big-endian.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
			  unsigned int n, const u8 *data, bool byte_oriented)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;
	u32 buf[64];
	int ret;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;
@@ -3080,10 +3083,14 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
	for (left = n; left; left -= c, data += c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;
		for (val = 0, i = 0; i < c; ++i) {
			if (byte_oriented)
				val = (val << 8) + data[i];
			else
				val = (val << 8) + data[c - i - 1];
		}

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
@@ -3096,7 +3103,8 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
	t4_write_reg(adapter, SF_OP_A, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

@@ -3692,7 +3700,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
	if (ret)
		goto out;

@@ -3700,14 +3708,14 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
	ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
			     true);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
@@ -3812,9 +3820,11 @@ int t4_load_phy_fw(struct adapter *adap, int win,
	/* Copy the supplied PHY Firmware image to the adapter memory location
	 * allocated by the adapter firmware.
	 */
	spin_lock_bh(&adap->win0_lock);
	ret = t4_memory_rw(adap, win, mtype, maddr,
			   phy_fw_size, (__be32 *)phy_fw_data,
			   T4_MEMORY_WRITE);
	spin_unlock_bh(&adap->win0_lock);
	if (ret)
		return ret;

@@ -10208,7 +10218,7 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data);
		ret = t4_write_flash(adap, addr, n, cfg_data, true);
		if (ret)
			goto out;

@@ -10677,13 +10687,14 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data,
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data);
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
				     false);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
			     (const u8 *)header);
			     (const u8 *)header, false);

out:
	if (ret)
@@ -10758,7 +10769,7 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		n = min_t(u32, size - i, SF_PAGE_SIZE);

		ret = t4_write_flash(adap, addr, n, cfg_data);
		ret = t4_write_flash(adap, addr, n, cfg_data, false);
		if (ret)
			goto out;

@@ -10770,7 +10781,8 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
	for (i = 0; i < npad; i++) {
		u8 data = 0;

		ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data);
		ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
				     false);
		if (ret)
			goto out;
	}

@@ -576,10 +576,12 @@ static void ec_bhf_remove(struct pci_dev *dev)
	struct ec_bhf_priv *priv = netdev_priv(net_dev);

	unregister_netdev(net_dev);
	free_netdev(net_dev);

	pci_iounmap(dev, priv->dma_io);
	pci_iounmap(dev, priv->io);

	free_netdev(net_dev);

	pci_release_regions(dev);
	pci_clear_master(dev);
	pci_disable_device(dev);

@@ -5897,6 +5897,7 @@ drv_cleanup:
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	pci_disable_pcie_error_reporting(pdev);
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);

@@ -215,15 +215,13 @@ static u64 fec_ptp_read(const struct cyclecounter *cc)
{
	struct fec_enet_private *fep =
		container_of(cc, struct fec_enet_private, cc);
	const struct platform_device_id *id_entry =
		platform_get_device_id(fep->pdev);
	u32 tempval;

	tempval = readl(fep->hwp + FEC_ATIME_CTRL);
	tempval |= FEC_T_CTRL_CAPTURE;
	writel(tempval, fep->hwp + FEC_ATIME_CTRL);

	if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
	if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
		udelay(1);

	return readl(fep->hwp + FEC_ATIME);
@@ -604,6 +602,10 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
	fep->ptp_caps.enable = fec_ptp_enable;

	fep->cycle_speed = clk_get_rate(fep->clk_ptp);
	if (!fep->cycle_speed) {
		fep->cycle_speed = NSEC_PER_SEC;
		dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
	}
	fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;

	spin_lock_init(&fep->tmreg_lock);

@@ -1717,12 +1717,13 @@ setup_rings:
 * ice_vsi_cfg_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 * @rings: Tx ring array to be configured
 * @count: number of Tx ring array elements
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	u16 q_idx = 0;
@@ -1734,7 +1735,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)

	qg_buf->num_txqs = 1;

	for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
	for (q_idx = 0; q_idx < count; q_idx++) {
		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
		if (err)
			goto err_cfg_txqs;
@@ -1754,7 +1755,7 @@ err_cfg_txqs:
 */
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
}

/**
@@ -1769,7 +1770,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
	int ret;
	int i;

	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
	if (ret)
		return ret;

@@ -2009,17 +2010,18 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @rings: Tx ring array to be stopped
 * @count: number of Tx ring array elements
 */
static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		      u16 rel_vmvf_num, struct ice_ring **rings)
		      u16 rel_vmvf_num, struct ice_ring **rings, u16 count)
{
	u16 q_idx;

	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
		return -EINVAL;

	for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
	for (q_idx = 0; q_idx < count; q_idx++) {
		struct ice_txq_meta txq_meta = { };
		int status;

@@ -2047,7 +2049,7 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
			  u16 rel_vmvf_num)
{
	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
}

/**
@@ -2056,7 +2058,7 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 */
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
	return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
}

/**

@@ -2555,6 +2555,20 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
	return (ret || xdp_ring_err) ? -ENOMEM : 0;
}

/**
 * ice_xdp_safe_mode - XDP handler for safe mode
 * @dev: netdevice
 * @xdp: XDP command
 */
static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
			     struct netdev_bpf *xdp)
{
	NL_SET_ERR_MSG_MOD(xdp->extack,
			   "Please provide working DDP firmware package in order to use XDP\n"
			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
	return -EOPNOTSUPP;
}

/**
 * ice_xdp - implements XDP handler
 * @dev: netdevice
@@ -6937,6 +6951,7 @@ static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};

static const struct net_device_ops ice_netdev_ops = {
@@ -154,6 +154,7 @@ static int xrx200_close(struct net_device *net_dev)

static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
	struct sk_buff *skb = ch->skb[ch->dma.desc];
	dma_addr_t mapping;
	int ret = 0;

@@ -168,6 +169,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
				 XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
		dev_kfree_skb_any(ch->skb[ch->dma.desc]);
		ch->skb[ch->dma.desc] = skb;
		ret = -ENOMEM;
		goto skip;
	}
@@ -198,7 +200,6 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
	ch->dma.desc %= LTQ_DESC_NUM;

	if (ret) {
		ch->skb[ch->dma.desc] = skb;
		net_dev->stats.rx_dropped++;
		netdev_err(net_dev, "failed to allocate new rx buffer\n");
		return ret;
@@ -352,8 +353,8 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
	struct xrx200_chan *ch = ptr;

	if (napi_schedule_prep(&ch->napi)) {
		__napi_schedule(&ch->napi);
		ltq_dma_disable_irq(&ch->dma);
		__napi_schedule(&ch->napi);
	}

	ltq_dma_ack_irq(&ch->dma);

@@ -303,6 +303,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
	int ret = 0, i;

	mutex_lock(&mlx5_intf_mutex);
	priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
		if (!priv->adev[i]) {
			bool is_supported = false;
@@ -320,6 +321,16 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
			}
		} else {
			adev = &priv->adev[i]->adev;

			/* Pay attention that this is not PCI driver that
			 * mlx5_core_dev is connected, but auxiliary driver.
			 *
			 * Here we can race of module unload with devlink
			 * reload, but we don't need to take extra lock because
			 * we are holding global mlx5_intf_mutex.
			 */
			if (!adev->dev.driver)
				continue;
			adrv = to_auxiliary_drv(adev->dev.driver);

			if (adrv->resume)
@@ -350,6 +361,10 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
			continue;

		adev = &priv->adev[i]->adev;
		/* Auxiliary driver was unbind manually through sysfs */
		if (!adev->dev.driver)
			goto skip_suspend;

		adrv = to_auxiliary_drv(adev->dev.driver);

		if (adrv->suspend) {
@@ -357,9 +372,11 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
			continue;
		}

skip_suspend:
		del_adev(&priv->adev[i]->adev);
		priv->adev[i] = NULL;
	}
	priv->flags |= MLX5_PRIV_FLAGS_DETACH;
	mutex_unlock(&mlx5_intf_mutex);
}

@@ -448,6 +465,8 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
	struct mlx5_priv *priv = &dev->priv;

	lockdep_assert_held(&mlx5_intf_mutex);
	if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
		return 0;

	delete_drivers(dev);
	if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)

@@ -64,6 +64,8 @@ struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct devlink_port *port;

	if (!netif_device_present(dev))
		return NULL;
	port = mlx5e_devlink_get_dl_port(priv);
	if (port->registered)
		return port;

@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies

#include <linux/ptp_classify.h>
#include "en/ptp.h"
#include "en/txrx.h"
#include "en/params.h"

@@ -6,6 +6,7 @@

#include "en.h"
#include "en_stats.h"
#include <linux/ptp_classify.h>

struct mlx5e_ptpsq {
	struct mlx5e_txqsq txqsq;
@@ -43,6 +44,27 @@ struct mlx5e_ptp {
	DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES);
};

static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
{
	struct flow_keys fk;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return false;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return false;

	if (fk.basic.n_proto == htons(ETH_P_1588))
		return true;

	if (fk.basic.n_proto != htons(ETH_P_IP) &&
	    fk.basic.n_proto != htons(ETH_P_IPV6))
		return false;

	return (fk.basic.ip_proto == IPPROTO_UDP &&
		fk.ports.dst == htons(PTP_EV_PORT));
}

int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   u8 lag_port, struct mlx5e_ptp **cp);
void mlx5e_ptp_close(struct mlx5e_ptp *c);

@@ -129,10 +129,9 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
						     work);
	struct mlx5e_neigh_hash_entry *nhe = update_work->nhe;
	struct neighbour *n = update_work->n;
	struct mlx5e_encap_entry *e = NULL;
	bool neigh_connected, same_dev;
	struct mlx5e_encap_entry *e;
	unsigned char ha[ETH_ALEN];
	struct mlx5e_priv *priv;
	u8 nud_state, dead;

	rtnl_lock();
@@ -156,14 +155,12 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
	if (!same_dev)
		goto out;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		if (!mlx5e_encap_take(e))
			continue;
	/* mlx5e_get_next_init_encap() releases previous encap before returning
	 * the next one.
	 */
	while ((e = mlx5e_get_next_init_encap(nhe, e)) != NULL)
		mlx5e_rep_update_flows(netdev_priv(e->out_dev), e, neigh_connected, ha);

		priv = netdev_priv(e->out_dev);
		mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
		mlx5e_encap_put(priv, e);
	}
out:
	rtnl_unlock();
	mlx5e_release_neigh_update_work(update_work);

@@ -94,13 +94,9 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,

	ASSERT_RTNL();

	/* wait for encap to be fully initialized */
	wait_for_completion(&e->res_ready);

	mutex_lock(&esw->offloads.encap_tbl_lock);
	encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
	if (e->compl_result < 0 || (encap_connected == neigh_connected &&
				    ether_addr_equal(e->h_dest, ha)))
	if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha))
		goto unlock;

	mlx5e_take_all_encap_flows(e, &flow_list);

@@ -251,9 +251,12 @@ static void mlx5e_take_all_route_decap_flows(struct mlx5e_route_entry *r,
		mlx5e_take_tmp_flow(flow, flow_list, 0);
}

typedef bool (match_cb)(struct mlx5e_encap_entry *);

static struct mlx5e_encap_entry *
mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
			   struct mlx5e_encap_entry *e)
mlx5e_get_next_matching_encap(struct mlx5e_neigh_hash_entry *nhe,
			      struct mlx5e_encap_entry *e,
			      match_cb match)
{
	struct mlx5e_encap_entry *next = NULL;

@@ -288,7 +291,7 @@ retry:
	/* wait for encap to be fully initialized */
	wait_for_completion(&next->res_ready);
	/* continue searching if encap entry is not in valid state after completion */
	if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
	if (!match(next)) {
		e = next;
		goto retry;
	}
@@ -296,6 +299,30 @@ retry:
	return next;
}

static bool mlx5e_encap_valid(struct mlx5e_encap_entry *e)
{
	return e->flags & MLX5_ENCAP_ENTRY_VALID;
}

static struct mlx5e_encap_entry *
mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
			   struct mlx5e_encap_entry *e)
{
	return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_valid);
}

static bool mlx5e_encap_initialized(struct mlx5e_encap_entry *e)
{
	return e->compl_result >= 0;
}

struct mlx5e_encap_entry *
mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
			  struct mlx5e_encap_entry *e)
{
	return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_initialized);
}

void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;

@@ -532,9 +532,6 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
	struct mlx5_core_dev *mdev = priv->mdev;
	struct net_device *netdev = priv->netdev;

	if (!priv->ipsec)
		return;

	if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
	    !MLX5_CAP_ETH(mdev, swp)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");

@@ -356,7 +356,7 @@ err:

int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
	int err = 0;
	int err = -ENOMEM;
	int i;

	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))

@@ -2705,8 +2705,6 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
	nch = priv->channels.params.num_channels;
	ntc = priv->channels.params.num_tc;
	num_rxqs = nch * priv->profile->rq_groups;
	if (priv->channels.params.ptp_rx)
		num_rxqs++;

	mlx5e_netdev_set_tcs(netdev, nch, ntc);

@@ -4824,22 +4822,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
	}

	if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
					 NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
		netdev->vlan_features   |= NETIF_F_GSO_UDP_TUNNEL;
	}

	if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
		netdev->hw_features     |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
						NETIF_F_GSO_GRE_CSUM;
		netdev->hw_features     |= NETIF_F_GSO_GRE;
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
		netdev->gso_partial_features |= NETIF_F_GSO_GRE;
	}

	if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {

@@ -4765,7 +4765,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			hpe->hp->pair->peer_gone = true;
			mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);

		mlx5e_hairpin_put(priv, hpe);
	}

@@ -178,6 +178,9 @@ void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *f
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);

struct mlx5e_neigh_hash_entry;
struct mlx5e_encap_entry *
mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
			  struct mlx5e_encap_entry *e);
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);

void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
@@ -32,7 +32,6 @@

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/ptp_classify.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
@@ -67,24 +66,6 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
}
#endif

static bool mlx5e_use_ptpsq(struct sk_buff *skb)
{
	struct flow_keys fk;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return false;

	if (fk.basic.n_proto == htons(ETH_P_1588))
		return true;

	if (fk.basic.n_proto != htons(ETH_P_IP) &&
	    fk.basic.n_proto != htons(ETH_P_IPV6))
		return false;

	return (fk.basic.ip_proto == IPPROTO_UDP &&
		fk.ports.dst == htons(PTP_EV_PORT));
}

static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
@@ -145,9 +126,9 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
	}

	ptp_channel = READ_ONCE(priv->channels.ptp);
	if (unlikely(ptp_channel) &&
	    test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
	    mlx5e_use_ptpsq(skb))
	if (unlikely(ptp_channel &&
		     test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
		     mlx5e_use_ptpsq(skb)))
		return mlx5e_select_ptpsq(dev, skb);

	txq_ix = netdev_pick_tx(dev, skb, NULL);

@@ -136,7 +136,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,

	eqe = next_eqe_sw(eq);
	if (!eqe)
		return 0;
		goto out;

	do {
		struct mlx5_core_cq *cq;
@@ -161,6 +161,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
		++eq->cons_index;

	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));

out:
	eq_update_ci(eq, 1);

	if (cqn != -1)
@@ -248,9 +250,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
		++eq->cons_index;

	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
	eq_update_ci(eq, 1);

out:
	eq_update_ci(eq, 1);
	mlx5_eq_async_int_unlock(eq_async, recovery, &flags);

	return unlikely(recovery) ? num_eqes : 0;

@@ -1054,6 +1054,12 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
		goto err_vhca_mapping;
	}

	/* External controller host PF has factory programmed MAC.
	 * Read it from the device.
	 */
	if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
		mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);

	esw_vport_change_handle_locked(vport);

	esw->enabled_vports++;

@@ -1161,7 +1161,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
	err = mlx5_core_set_hca_defaults(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to set hca defaults\n");
		goto err_sriov;
		goto err_set_hca;
	}

	mlx5_vhca_event_start(dev);
@@ -1194,6 +1194,7 @@ err_ec:
	mlx5_sf_hw_table_destroy(dev);
err_vhca:
	mlx5_vhca_event_stop(dev);
err_set_hca:
	mlx5_cleanup_fs(dev);
err_fs:
	mlx5_accel_tls_cleanup(dev);

@@ -54,7 +54,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
	mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
	mkey->size = MLX5_GET64(mkc, mkc, len);
	mkey->key |= mlx5_idx_to_mkey(mkey_index);
	mkey->key = (u32)mlx5_mkey_variant(mkey->key) | mlx5_idx_to_mkey(mkey_index);
	mkey->pd = MLX5_GET(mkc, mkc, pd);
	init_waitqueue_head(&mkey->wait);

@@ -156,6 +156,9 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
{
	int err;

	if (!MLX5_CAP_GEN(dev, roce))
		return;

	err = mlx5_nic_vport_enable_roce(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);

@@ -163,6 +163,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
	sf_index = event->function_id - base_id;
	sf_dev = xa_load(&table->devices, sf_index);
	switch (event->new_vhca_state) {
	case MLX5_VHCA_STATE_INVALID:
	case MLX5_VHCA_STATE_ALLOCATED:
		if (sf_dev)
			mlx5_sf_dev_del(table->dev, sf_dev, sf_index);

@@ -694,7 +694,11 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
	if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
		return -EINVAL;

	memcpy(padded_data, data, data_sz);
	inline_data_sz =
		MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);

	/* Add an alignment padding */
	memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);

	/* Remove L2L3 outer headers */
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
@@ -706,32 +710,34 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
	hw_action += DR_STE_ACTION_DOUBLE_SZ;
	used_actions++; /* Remove and NOP are a single double action */

	inline_data_sz =
		MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
	/* Point to the last dword of the header */
	data_ptr += (data_sz / inline_data_sz) * inline_data_sz;

	/* Add the new header inline + 2 extra bytes */
	/* Add the new header using inline action 4Byte at a time, the header
	 * is added in reversed order to the beginning of the packet to avoid
	 * incorrect parsing by the HW. Since header is 14B or 18B an extra
	 * two bytes are padded and later removed.
	 */
	for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
		void *addr_inline;

		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
			 DR_STE_V1_ACTION_ID_INSERT_INLINE);
		/* The hardware expects here offset to words (2 bytes) */
		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset,
			 i * 2);
		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);

		/* Copy bytes one by one to avoid endianness problem */
		addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
					   hw_action, inline_data);
		memcpy(addr_inline, data_ptr, inline_data_sz);
		memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
		hw_action += DR_STE_ACTION_DOUBLE_SZ;
		data_ptr += inline_data_sz;
		used_actions++;
	}

	/* Remove 2 extra bytes */
	/* Remove first 2 extra bytes */
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, data_sz / 2);
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
	/* The hardware expects here size in words (2 bytes) */
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
	used_actions++;

@@ -124,10 +124,11 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
static inline bool
mlx5dr_is_supported(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
	       (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
		(MLX5_CAP_GEN(dev, steering_format_version) <=
		 MLX5_STEERING_FORMAT_CONNECTX_6DX));
	return MLX5_CAP_GEN(dev, roce) &&
	       (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
		(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
		 (MLX5_CAP_GEN(dev, steering_format_version) <=
		  MLX5_STEERING_FORMAT_CONNECTX_6DX)));
}

/* buddy functions & structure */

@@ -424,6 +424,15 @@ err_modify_sq:
	return err;
}

static void mlx5_hairpin_unpair_peer_sq(struct mlx5_hairpin *hp)
{
	int i;

	for (i = 0; i < hp->num_channels; i++)
		mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
				       MLX5_SQC_STATE_RST, 0, 0);
}

static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
{
	int i;
@@ -432,13 +441,9 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
	for (i = 0; i < hp->num_channels; i++)
		mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
				       MLX5_RQC_STATE_RST, 0, 0);

	/* unset peer SQs */
	if (hp->peer_gone)
		return;
	for (i = 0; i < hp->num_channels; i++)
		mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
				       MLX5_SQC_STATE_RST, 0, 0);
	if (!hp->peer_gone)
		mlx5_hairpin_unpair_peer_sq(hp);
}

struct mlx5_hairpin *
@@ -485,3 +490,16 @@ void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
	mlx5_hairpin_destroy_queues(hp);
	kfree(hp);
}

void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp)
{
	int i;

	mlx5_hairpin_unpair_peer_sq(hp);

	/* destroy peer SQ */
	for (i = 0; i < hp->num_channels; i++)
		mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);

	hp->peer_gone = true;
}
|
||||
void *in;
|
||||
int err;
|
||||
|
||||
if (!vport)
|
||||
return -EINVAL;
|
||||
if (!MLX5_CAP_GEN(mdev, vport_group_manager))
|
||||
return -EACCES;
|
||||
|
||||
|
@ -693,7 +693,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
|
||||
MLXSW_THERMAL_TRIP_MASK,
|
||||
module_tz,
|
||||
&mlxsw_thermal_module_ops,
|
||||
NULL, 0, 0);
|
||||
NULL, 0,
|
||||
module_tz->parent->polling_delay);
|
||||
if (IS_ERR(module_tz->tzdev)) {
|
||||
err = PTR_ERR(module_tz->tzdev);
|
||||
return err;
|
||||
@ -815,7 +816,8 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
|
||||
MLXSW_THERMAL_TRIP_MASK,
|
||||
gearbox_tz,
|
||||
&mlxsw_thermal_gearbox_ops,
|
||||
NULL, 0, 0);
|
||||
NULL, 0,
|
||||
gearbox_tz->parent->polling_delay);
|
||||
if (IS_ERR(gearbox_tz->tzdev))
|
||||
return PTR_ERR(gearbox_tz->tzdev);
|
||||
|
||||
|
@ -3907,7 +3907,7 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
|
||||
#define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS 25
|
||||
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1 5
|
||||
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11
|
||||
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 5
|
||||
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 11
|
||||
|
||||
static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
|
||||
enum mlxsw_reg_qeec_hr hr, u8 index,
|
||||
|
@ -1332,6 +1332,7 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
|
||||
u8 band, u32 child_handle)
|
||||
{
|
||||
struct mlxsw_sp_qdisc *old_qdisc;
|
||||
u32 parent;
|
||||
|
||||
if (band < mlxsw_sp_qdisc->num_classes &&
|
||||
mlxsw_sp_qdisc->qdiscs[band].handle == child_handle)
|
||||
@ -1352,7 +1353,9 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
|
||||
if (old_qdisc)
|
||||
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
|
||||
|
||||
mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc, band);
|
||||
parent = TC_H_MAKE(mlxsw_sp_qdisc->handle, band + 1);
|
||||
mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc,
|
||||
parent);
|
||||
if (!WARN_ON(!mlxsw_sp_qdisc))
|
||||
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
|
||||
|
||||
|
@ -379,6 +379,7 @@ static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
|
||||
|
||||
int ocelot_port_flush(struct ocelot *ocelot, int port)
|
||||
{
|
||||
unsigned int pause_ena;
|
||||
int err, val;
|
||||
|
||||
/* Disable dequeuing from the egress queues */
|
||||
@ -387,6 +388,7 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
|
||||
QSYS_PORT_MODE, port);
|
||||
|
||||
/* Disable flow control */
|
||||
ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
|
||||
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
|
||||
|
||||
/* Disable priority flow control */
|
||||
@ -422,6 +424,9 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
|
||||
/* Clear flushing again. */
|
||||
ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
|
||||
|
||||
/* Re-enable flow control */
|
||||
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(ocelot_port_flush);
|
||||
|
@ -1602,6 +1602,8 @@ err_out_free_netdev:
|
||||
free_netdev(netdev);
|
||||
|
||||
err_out_free_res:
|
||||
if (NX_IS_REVISION_P3(pdev->revision))
|
||||
pci_disable_pcie_error_reporting(pdev);
|
||||
pci_release_regions(pdev);
|
||||
|
||||
err_out_disable_pdev:
|
||||
|
@ -1266,9 +1266,11 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
|
||||
p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
|
||||
|
||||
p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
|
||||
BUILD_BUG_ON(sizeof(dcbx_info->operational.params) !=
|
||||
sizeof(p_hwfn->p_dcbx_info->set.config.params));
|
||||
memcpy(&p_hwfn->p_dcbx_info->set.config.params,
|
||||
&dcbx_info->operational.params,
|
||||
sizeof(struct qed_dcbx_admin_params));
|
||||
sizeof(p_hwfn->p_dcbx_info->set.config.params));
|
||||
p_hwfn->p_dcbx_info->set.config.valid = true;
|
||||
|
||||
memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
|
||||
|
@ -2690,6 +2690,7 @@ err_out_free_hw_res:
|
||||
kfree(ahw);
|
||||
|
||||
err_out_free_res:
|
||||
pci_disable_pcie_error_reporting(pdev);
|
||||
pci_release_regions(pdev);
|
||||
|
||||
err_out_disable_pdev:
|
||||
|
@ -126,24 +126,24 @@ static void rmnet_get_stats64(struct net_device *dev,
|
||||
struct rtnl_link_stats64 *s)
|
||||
{
|
||||
struct rmnet_priv *priv = netdev_priv(dev);
|
||||
struct rmnet_vnd_stats total_stats;
|
||||
struct rmnet_vnd_stats total_stats = { };
|
||||
struct rmnet_pcpu_stats *pcpu_ptr;
|
||||
struct rmnet_vnd_stats snapshot;
|
||||
unsigned int cpu, start;
|
||||
|
||||
memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
|
||||
|
||||
do {
|
||||
start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
|
||||
total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
|
||||
total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
|
||||
total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
|
||||
total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
|
||||
snapshot = pcpu_ptr->stats; /* struct assignment */
|
||||
} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
|
||||
|
||||
total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
|
||||
total_stats.rx_pkts += snapshot.rx_pkts;
|
||||
total_stats.rx_bytes += snapshot.rx_bytes;
|
||||
total_stats.tx_pkts += snapshot.tx_pkts;
|
||||
total_stats.tx_bytes += snapshot.tx_bytes;
|
||||
total_stats.tx_drops += snapshot.tx_drops;
|
||||
}
|
||||
|
||||
s->rx_packets = total_stats.rx_pkts;
|
||||
@ -354,4 +354,4 @@ int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
@ -1671,7 +1671,7 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
|
||||
{
|
||||
switch(stringset) {
|
||||
case ETH_SS_STATS:
|
||||
memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
|
||||
memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -2287,7 +2287,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
|
||||
{
|
||||
switch (stringset) {
|
||||
case ETH_SS_STATS:
|
||||
memcpy(data, *sh_eth_gstrings_stats,
|
||||
memcpy(data, sh_eth_gstrings_stats,
|
||||
sizeof(sh_eth_gstrings_stats));
|
||||
break;
|
||||
}
|
||||
|
@@ -76,10 +76,10 @@ enum power_event {
 #define LPI_CTRL_STATUS_TLPIEN	0x00000001	/* Transmit LPI Entry */
 
 /* GMAC HW ADDR regs */
-#define GMAC_ADDR_HIGH(reg)	(((reg > 15) ? 0x00000800 : 0x00000040) + \
-				(reg * 8))
-#define GMAC_ADDR_LOW(reg)	(((reg > 15) ? 0x00000804 : 0x00000044) + \
-				(reg * 8))
+#define GMAC_ADDR_HIGH(reg)	((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
+				 0x00000040 + (reg * 8))
+#define GMAC_ADDR_LOW(reg)	((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \
+				 0x00000044 + (reg * 8))
 #define GMAC_MAX_PERFECT_ADDRESSES	1
 
 #define GMAC_PCS_BASE		0x000000c0	/* PCS register base */
@@ -622,6 +622,8 @@ error_pclk_get:
 void stmmac_remove_config_dt(struct platform_device *pdev,
 			     struct plat_stmmacenet_data *plat)
 {
+	clk_disable_unprepare(plat->stmmac_clk);
+	clk_disable_unprepare(plat->pclk);
 	of_node_put(plat->phy_node);
 	of_node_put(plat->mdio_node);
 }
@@ -774,12 +774,15 @@ static void temac_start_xmit_done(struct net_device *ndev)
 	stat = be32_to_cpu(cur_p->app0);
 
 	while (stat & STS_CTRL_APP0_CMPLT) {
+		/* Make sure that the other fields are read after bd is
+		 * released by dma
+		 */
+		rmb();
 		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
 				 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
 		skb = (struct sk_buff *)ptr_from_txbd(cur_p);
 		if (skb)
 			dev_consume_skb_irq(skb);
-		cur_p->app0 = 0;
 		cur_p->app1 = 0;
 		cur_p->app2 = 0;
 		cur_p->app3 = 0;
@@ -788,6 +791,12 @@ static void temac_start_xmit_done(struct net_device *ndev)
 		ndev->stats.tx_packets++;
 		ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
 
+		/* app0 must be visible last, as it is used to flag
+		 * availability of the bd
+		 */
+		smp_mb();
+		cur_p->app0 = 0;
+
 		lp->tx_bd_ci++;
 		if (lp->tx_bd_ci >= lp->tx_bd_num)
 			lp->tx_bd_ci = 0;
@@ -814,6 +823,9 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
 		if (cur_p->app0)
 			return NETDEV_TX_BUSY;
 
+		/* Make sure to read next bd app0 after this one */
+		rmb();
+
 		tail++;
 		if (tail >= lp->tx_bd_num)
 			tail = 0;
@@ -849,7 +861,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		smp_mb();
 
 		/* Space might have just been freed - check again */
-		if (temac_check_tx_bd_space(lp, num_frag))
+		if (temac_check_tx_bd_space(lp, num_frag + 1))
 			return NETDEV_TX_BUSY;
 
 		netif_wake_queue(ndev);
@@ -876,7 +888,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		return NETDEV_TX_OK;
 	}
 	cur_p->phys = cpu_to_be32(skb_dma_addr);
-	ptr_to_txbd((void *)skb, cur_p);
 
 	for (ii = 0; ii < num_frag; ii++) {
 		if (++lp->tx_bd_tail >= lp->tx_bd_num)
@@ -915,6 +926,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
 
+	/* Mark last fragment with skb address, so it can be consumed
+	 * in temac_start_xmit_done()
+	 */
+	ptr_to_txbd((void *)skb, cur_p);
+
 	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 	lp->tx_bd_tail++;
 	if (lp->tx_bd_tail >= lp->tx_bd_num)
@@ -926,6 +942,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	wmb();
 	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
 
+	if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
+		netdev_info(ndev, "%s -> netif_stop_queue\n", __func__);
+		netif_stop_queue(ndev);
+	}
+
 	return NETDEV_TX_OK;
 }
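Note on the ll_temac barrier fixes: the app0 field doubles as the ownership flag between the DMA engine and the CPU, so every other descriptor access has to be ordered against it. A condensed sketch of the pairing, with a hypothetical descriptor layout (illustrative only, not the driver's exact code):

        /* consumer side: only touch the BD after seeing it completed */
        if (be32_to_cpu(bd->app0) & STS_CTRL_APP0_CMPLT) {
                rmb();                  /* order len/phys reads after the app0 read */
                dma_unmap_single(dev, be32_to_cpu(bd->phys),
                                 be32_to_cpu(bd->len), DMA_TO_DEVICE);
                smp_mb();               /* finish all reads and writes first */
                bd->app0 = 0;           /* then hand the BD back to the producer */
        }

The producer mirrors this with wmb() before kicking TX_TAILDESC_PTR, so the device never observes a partially written descriptor.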
@@ -799,6 +799,7 @@ static void mkiss_close(struct tty_struct *tty)
 	ax->tty = NULL;
 
 	unregister_netdev(ax->dev);
+	free_netdev(ax->dev);
 }
 
 /* Perform I/O control on an active ax25 channel. */
@@ -49,7 +49,7 @@ static int mhi_ndo_stop(struct net_device *ndev)
 	return 0;
 }
 
-static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
 	const struct mhi_net_proto *proto = mhi_netdev->proto;
@@ -826,16 +826,12 @@ static int dp83867_phy_reset(struct phy_device *phydev)
 {
 	int err;
 
-	err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
+	err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
 	if (err < 0)
 		return err;
 
 	usleep_range(10, 20);
 
-	/* After reset FORCE_LINK_GOOD bit is set. Although the
-	 * default value should be unset. Disable FORCE_LINK_GOOD
-	 * for the phy to work properly.
-	 */
 	return phy_modify(phydev, MII_DP83867_PHYCTRL,
 			  DP83867_PHYCR_FORCE_LINK_GOOD, 0);
 }
@@ -123,10 +123,10 @@ static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 	}
 
 	skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
+	dev_kfree_skb_any(skb);
 	if (!skb2)
 		return NULL;
 
-	dev_kfree_skb_any(skb);
 	skb = skb2;
 
 done:
@@ -1880,7 +1880,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
 static const struct driver_info cdc_ncm_info = {
 	.description = "CDC NCM",
 	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
-			| FLAG_LINK_INTR,
+			| FLAG_LINK_INTR | FLAG_ETHER,
 	.bind = cdc_ncm_bind,
 	.unbind = cdc_ncm_unbind,
 	.manage_power = usbnet_manage_power,
@@ -575,7 +575,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 
 	if (info->flags & QMI_WWAN_FLAG_PASS_THROUGH) {
 		skb->protocol = htons(ETH_P_MAP);
-		return (netif_rx(skb) == NET_RX_SUCCESS);
+		return 1;
 	}
 
 	switch (skb->data[0] & 0xf0) {
@@ -8678,7 +8678,7 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
 	switch (stringset) {
 	case ETH_SS_STATS:
-		memcpy(data, *rtl8152_gstrings, sizeof(rtl8152_gstrings));
+		memcpy(data, rtl8152_gstrings, sizeof(rtl8152_gstrings));
 		break;
 	}
 }
@@ -1483,7 +1483,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
 	ret = smsc75xx_wait_ready(dev, 0);
 	if (ret < 0) {
 		netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
-		goto err;
+		goto free_pdata;
 	}
 
 	smsc75xx_init_mac_address(dev);
@@ -1492,7 +1492,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
 	ret = smsc75xx_reset(dev);
 	if (ret < 0) {
 		netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
-		goto err;
+		goto cancel_work;
 	}
 
 	dev->net->netdev_ops = &smsc75xx_netdev_ops;
@@ -1503,8 +1503,11 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
 	dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
 	return 0;
 
-err:
+cancel_work:
+	cancel_work_sync(&pdata->set_multicast);
+free_pdata:
 	kfree(pdata);
+	dev->data[0] = 0;
 	return ret;
 }
 
@@ -1515,7 +1518,6 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
 		cancel_work_sync(&pdata->set_multicast);
 		netif_dbg(dev, ifdown, dev->net, "free pdata\n");
 		kfree(pdata);
-		pdata = NULL;
 		dev->data[0] = 0;
 	}
 }
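Note on the smsc75xx relabelling above: it restores the usual kernel unwind ladder, where each error label undoes exactly the work completed before the failing step, in reverse order. Generic shape of the pattern (hypothetical step names, sketch only):

        ret = step_a();                 /* allocates pdata */
        if (ret)
                return ret;
        ret = step_b();                 /* arms the multicast work item */
        if (ret)
                goto free_pdata;
        ret = step_c();
        if (ret)
                goto cancel_work;
        return 0;

        cancel_work:
                cancel_work_sync(&pdata->set_multicast);
        free_pdata:
                kfree(pdata);
                return ret;

A late failure cancels the pending work before freeing, while an early one only frees, so nothing is torn down that was never set up.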
@@ -1183,9 +1183,6 @@ static int vrf_dev_init(struct net_device *dev)
 
 	dev->flags = IFF_MASTER | IFF_NOARP;
 
-	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
-	dev->mtu = 64 * 1024;
-
 	/* similarly, oper state is irrelevant; set to up to avoid confusion */
 	dev->operstate = IF_OPER_UP;
 	netdev_lockdep_set_classes(dev);
@@ -1685,7 +1682,8 @@
 	 * which breaks networking.
 	 */
 	dev->min_mtu = IPV6_MIN_MTU;
-	dev->max_mtu = ETH_MAX_MTU;
+	dev->max_mtu = IP6_MAX_MTU;
+	dev->mtu = dev->max_mtu;
 }
 
 static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -1693,8 +1693,13 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
 static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
 {
 	struct mac80211_hwsim_data *data = hw->priv;
+
 	data->started = false;
 	hrtimer_cancel(&data->beacon_timer);
+
+	while (!skb_queue_empty(&data->pending))
+		ieee80211_free_txskb(hw, skb_dequeue(&data->pending));
+
 	wiphy_dbg(hw->wiphy, "%s\n", __func__);
 }
@@ -63,7 +63,7 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
 	spin_unlock_irqrestore(&queue->lock, flags);
 }
 
-s32 scaled_ppm_to_ppb(long ppm)
+long scaled_ppm_to_ppb(long ppm)
 {
 	/*
 	 * The 'freq' field in the 'struct timex' is in parts per
@@ -80,7 +80,7 @@ s32 scaled_ppm_to_ppb(long ppm)
 	s64 ppb = 1 + ppm;
 	ppb *= 125;
 	ppb >>= 13;
-	return (s32) ppb;
+	return (long) ppb;
 }
 EXPORT_SYMBOL(scaled_ppm_to_ppb);
 
@@ -138,7 +138,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
 		delta = ktime_to_ns(kt);
 		err = ops->adjtime(ops, delta);
 	} else if (tx->modes & ADJ_FREQUENCY) {
-		s32 ppb = scaled_ppm_to_ppb(tx->freq);
+		long ppb = scaled_ppm_to_ppb(tx->freq);
 		if (ppb > ops->max_adj || ppb < -ops->max_adj)
 			return -ERANGE;
 		if (ops->adjfine)
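Note on the arithmetic in the ptp hunks: the freq value is ppm with a 16-bit binary fraction, so 1 ppm = 65536 units, and ppb = scaled_ppm * 1000 / 2^16 = scaled_ppm * 125 / 2^13, which is exactly the *125 and >>13 above. Widening the return type to long matters because a clock with a very large max_adj can produce conversions past the s32 range (about 2.1e9 ppb). A quick userspace check of the math (illustrative program, not kernel code):

        #include <stdio.h>

        static long scaled_ppm_to_ppb(long ppm)
        {
                long long ppb = 1 + ppm;        /* +1 rounds like the kernel code */

                ppb *= 125;
                ppb >>= 13;
                return (long)ppb;
        }

        int main(void)
        {
                printf("%ld\n", scaled_ppm_to_ppb(65536));      /* 1 ppm   -> 1000 ppb */
                printf("%ld\n", scaled_ppm_to_ppb(32768000));   /* 500 ppm -> 500000 ppb */
                return 0;
        }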
@@ -2284,7 +2284,7 @@ static int rtw_cfg80211_add_monitor_if(struct adapter *padapter, char *name, str
 	mon_wdev->iftype = NL80211_IFTYPE_MONITOR;
 	mon_ndev->ieee80211_ptr = mon_wdev;
 
-	ret = register_netdevice(mon_ndev);
+	ret = cfg80211_register_netdevice(mon_ndev);
 	if (ret) {
 		goto out;
 	}
@@ -2360,7 +2360,7 @@ static int cfg80211_rtw_del_virtual_intf(struct wiphy *wiphy,
 	adapter = rtw_netdev_priv(ndev);
 	pwdev_priv = adapter_wdev_data(adapter);
 
-	unregister_netdevice(ndev);
+	cfg80211_unregister_netdevice(ndev);
 
 	if (ndev == pwdev_priv->pmon_ndev) {
 		pwdev_priv->pmon_ndev = NULL;
@@ -542,6 +542,10 @@ struct mlx5_core_roce {
 enum {
 	MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
 	MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
+	/* Set during device detach to block any further devices
+	 * creation/deletion on drivers rescan. Unset during device attach.
+	 */
+	MLX5_PRIV_FLAGS_DETACH = 1 << 2,
 };
 
 struct mlx5_adev {
@@ -85,4 +85,5 @@ mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
 			 struct mlx5_hairpin_params *params);
 
 void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp);
 #endif /* __TRANSOBJ_H__ */
@@ -235,7 +235,7 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
 * @ppm:    Parts per million, but with a 16 bit binary fractional field
 */
 
-extern s32 scaled_ppm_to_ppb(long ppm);
+extern long scaled_ppm_to_ppb(long ppm);
 
 /**
 * ptp_find_pin() - obtain the pin index of a given auxiliary function
@@ -438,6 +438,4 @@ extern int __sys_socketpair(int family, int type, int protocol,
 					 int __user *usockvec);
 extern int __sys_shutdown_sock(struct socket *sock, int how);
 extern int __sys_shutdown(int fd, int how);
-
-extern struct ns_common *get_net_ns(struct ns_common *ns);
 #endif /* _LINUX_SOCKET_H */
@@ -5537,7 +5537,7 @@ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
 *
 * This function iterates over the interfaces associated with a given
 * hardware that are currently active and calls the callback for them.
- * This version can only be used while holding the RTNL.
+ * This version can only be used while holding the wiphy mutex.
 *
 * @hw: the hardware struct of which the interfaces should be iterated over
 * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
@@ -6392,7 +6392,12 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
 
 /**
 * ieee80211_parse_tx_radiotap - Sanity-check and parse the radiotap header
- *				 of injected frames
+ *				 of injected frames.
+ *
+ * To accurately parse and take into account rate and retransmission fields,
+ * you must initialize the chandef field in the ieee80211_tx_info structure
+ * of the skb before calling this function.
+ *
 * @skb: packet injected by userspace
 * @dev: the &struct device of this 802.11 device
 */
@@ -184,6 +184,9 @@ struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
 void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
 
 void net_ns_barrier(void);
+
+struct ns_common *get_net_ns(struct ns_common *ns);
+struct net *get_net_ns_by_fd(int fd);
 #else /* CONFIG_NET_NS */
 
 #include <linux/sched.h>
 #include <linux/nsproxy.h>
@@ -203,13 +206,22 @@ static inline void net_ns_get_ownership(const struct net *net,
 }
 
 static inline void net_ns_barrier(void) {}
+
+static inline struct ns_common *get_net_ns(struct ns_common *ns)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline struct net *get_net_ns_by_fd(int fd)
+{
+	return ERR_PTR(-EINVAL);
+}
 #endif /* CONFIG_NET_NS */
 
 
 extern struct list_head net_namespace_list;
 
 struct net *get_net_ns_by_pid(pid_t pid);
-struct net *get_net_ns_by_fd(int fd);
 
 #ifdef CONFIG_SYSCTL
 void ipx_register_sysctl(void);
@@ -1934,7 +1934,8 @@ static inline u32 net_tx_rndhash(void)
 
 static inline void sk_set_txhash(struct sock *sk)
 {
-	sk->sk_txhash = net_tx_rndhash();
+	/* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
+	WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
 }
 
 static inline bool sk_rethink_txhash(struct sock *sk)
@@ -2206,9 +2207,12 @@ static inline void sock_poll_wait(struct file *filp, struct socket *sock,
 
 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
 {
-	if (sk->sk_txhash) {
+	/* This pairs with WRITE_ONCE() in sk_set_txhash() */
+	u32 txhash = READ_ONCE(sk->sk_txhash);
+
+	if (txhash) {
 		skb->l4_hash = 1;
-		skb->hash = sk->sk_txhash;
+		skb->hash = txhash;
 	}
 }
 
@@ -2266,8 +2270,13 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
 static inline int sock_error(struct sock *sk)
 {
 	int err;
-	if (likely(!sk->sk_err))
+
+	/* Avoid an atomic operation for the common case.
+	 * This is racy since another cpu/thread can change sk_err under us.
+	 */
+	if (likely(data_race(!sk->sk_err)))
 		return 0;
+
 	err = xchg(&sk->sk_err, 0);
 	return -err;
 }
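Note on the sock.h hunks: the pattern is to sample a racy field once with READ_ONCE() and use only the local copy, paired with WRITE_ONCE() on the writer side, so the test and the use cannot observe two different values and KCSAN sees an annotated, intentional race. A minimal kernel-style sketch of the idiom, with a hypothetical struct (not from this diff):

        struct bar { u32 txhash; };

        static void bar_set(struct bar *b, u32 h)
        {
                WRITE_ONCE(b->txhash, h);       /* pairs with READ_ONCE() below */
        }

        static void bar_apply(struct bar *b, struct sk_buff *skb)
        {
                u32 txhash = READ_ONCE(b->txhash);      /* single racy load */

                if (txhash) {
                        skb->l4_hash = 1;
                        skb->hash = txhash;     /* same value that was tested */
                }
        }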
@@ -289,6 +289,9 @@ struct sockaddr_in {
 /* Address indicating an error return. */
 #define	INADDR_NONE		((unsigned long int) 0xffffffff)
 
+/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
+#define	INADDR_DUMMY		((unsigned long int) 0xc0000008)
+
 /* Network number for local host loopback. */
 #define	IN_LOOPBACKNET		127
@@ -6483,6 +6483,27 @@ struct bpf_sanitize_info {
 	bool mask_to_left;
 };
 
+static struct bpf_verifier_state *
+sanitize_speculative_path(struct bpf_verifier_env *env,
+			  const struct bpf_insn *insn,
+			  u32 next_idx, u32 curr_idx)
+{
+	struct bpf_verifier_state *branch;
+	struct bpf_reg_state *regs;
+
+	branch = push_stack(env, next_idx, curr_idx, true);
+	if (branch && insn) {
+		regs = branch->frame[branch->curframe]->regs;
+		if (BPF_SRC(insn->code) == BPF_K) {
+			mark_reg_unknown(env, regs, insn->dst_reg);
+		} else if (BPF_SRC(insn->code) == BPF_X) {
+			mark_reg_unknown(env, regs, insn->dst_reg);
+			mark_reg_unknown(env, regs, insn->src_reg);
+		}
+	}
+	return branch;
+}
+
 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 			    struct bpf_insn *insn,
 			    const struct bpf_reg_state *ptr_reg,
@@ -6566,12 +6587,26 @@ do_sim:
 		tmp = *dst_reg;
 		*dst_reg = *ptr_reg;
 	}
-	ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
+	ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
+					env->insn_idx);
 	if (!ptr_is_dst_reg && ret)
 		*dst_reg = tmp;
 	return !ret ? REASON_STACK : 0;
 }
 
+static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
+{
+	struct bpf_verifier_state *vstate = env->cur_state;
+
+	/* If we simulate paths under speculation, we don't update the
+	 * insn as 'seen' such that when we verify unreachable paths in
+	 * the non-speculative domain, sanitize_dead_code() can still
+	 * rewrite/sanitize them.
+	 */
+	if (!vstate->speculative)
+		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+}
+
 static int sanitize_err(struct bpf_verifier_env *env,
 			const struct bpf_insn *insn, int reason,
 			const struct bpf_reg_state *off_reg,
@@ -8750,14 +8785,28 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		if (err)
 			return err;
 	}
+
 	if (pred == 1) {
-		/* only follow the goto, ignore fall-through */
+		/* Only follow the goto, ignore fall-through. If needed, push
+		 * the fall-through branch for simulation under speculative
+		 * execution.
+		 */
+		if (!env->bypass_spec_v1 &&
+		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
+					       *insn_idx))
+			return -EFAULT;
 		*insn_idx += insn->off;
 		return 0;
 	} else if (pred == 0) {
-		/* only follow fall-through branch, since
-		 * that's where the program will go
+		/* Only follow the fall-through branch, since that's where the
+		 * program will go. If needed, push the goto branch for
+		 * simulation under speculative execution.
 		 */
+		if (!env->bypass_spec_v1 &&
+		    !sanitize_speculative_path(env, insn,
+					       *insn_idx + insn->off + 1,
+					       *insn_idx))
+			return -EFAULT;
 		return 0;
 	}
@@ -10630,7 +10679,7 @@ static int do_check(struct bpf_verifier_env *env)
 		}
 
 		regs = cur_regs(env);
-		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+		sanitize_mark_insn_seen(env);
 		prev_insn_idx = env->insn_idx;
 
 		if (class == BPF_ALU || class == BPF_ALU64) {
@@ -10857,7 +10906,7 @@ process_bpf_exit:
 				return err;
 
 			env->insn_idx++;
-			env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+			sanitize_mark_insn_seen(env);
 		} else {
 			verbose(env, "invalid BPF_LD mode\n");
 			return -EINVAL;
@@ -11366,6 +11415,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
 {
 	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
 	struct bpf_insn *insn = new_prog->insnsi;
+	u32 old_seen = old_data[off].seen;
 	u32 prog_len;
 	int i;
 
@@ -11386,7 +11436,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
 	memcpy(new_data + off + cnt - 1, old_data + off,
 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
 	for (i = off; i < off + cnt - 1; i++) {
-		new_data[i].seen = env->pass_cnt;
+		/* Expand insni[off]'s seen count to the patched range. */
+		new_data[i].seen = old_seen;
 		new_data[i].zext_dst = insn_has_def32(env, insn + i);
 	}
 	env->insn_aux_data = new_data;
@@ -12710,6 +12761,9 @@ static void free_states(struct bpf_verifier_env *env)
 * insn_aux_data was touched. These variables are compared to clear temporary
 * data from failed pass. For testing and experiments do_check_common() can be
 * run multiple times even when prior attempt to verify is unsuccessful.
+ *
+ * Note that special handling is needed on !env->bypass_spec_v1 if this is
+ * ever called outside of error path with subsequent program rejection.
 */
 static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
 {
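Background for the verifier hunks: under Spectre v1 a conditional branch can be mispredicted, so the CPU may transiently execute the side the program logic excludes; the fix therefore also pushes the architecturally dead edge as a speculative verifier path with the compared registers scrubbed to unknown. The classic shape of the underlying problem, in illustrative userspace C (not kernel code, names hypothetical):

        unsigned char leak_shape(const unsigned char *arr, unsigned long idx,
                                 unsigned long size, const unsigned char *probe)
        {
                unsigned char v = 0;

                if (idx < size)                         /* may be predicted taken even when false */
                        v = probe[arr[idx] * 64];       /* transient OOB load leaves a cache footprint */
                return v;
        }

Verifying only the predicted path would miss the transient out-of-bounds access, which is why both edges now get simulated.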
@@ -768,7 +768,7 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev,
 		if (a && a->status & ATIF_PROBE) {
 			a->status |= ATIF_PROBE_FAIL;
 			/*
-			 * we do not respond to probe or request packets for
+			 * we do not respond to probe or request packets of
 			 * this address while we are probing this address
 			 */
 			goto unlock;
@@ -409,8 +409,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
 	if (WARN_ON(!forw_packet->if_outgoing))
 		return;
 
-	if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface))
+	if (forw_packet->if_outgoing->soft_iface != soft_iface) {
+		pr_warn("%s: soft interface switch for queued OGM\n", __func__);
 		return;
+	}
 
 	if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
 		return;
@@ -3229,7 +3229,7 @@ static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan)
 {
 	struct l2cap_chan *chan;
 
-	bt_dev_dbg(pchan->conn->hcon->hdev, "pchan %p", pchan);
+	BT_DBG("pchan %p", pchan);
 
 	chan = l2cap_chan_create();
 	if (!chan)
@@ -3250,7 +3250,7 @@ static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan)
 	 */
 	atomic_set(&chan->nesting, L2CAP_NESTING_SMP);
 
-	bt_dev_dbg(pchan->conn->hcon->hdev, "created chan %p", chan);
+	BT_DBG("created chan %p", chan);
 
 	return chan;
 }
@@ -3354,7 +3354,7 @@ static void smp_del_chan(struct l2cap_chan *chan)
 {
 	struct smp_dev *smp;
 
-	bt_dev_dbg(chan->conn->hcon->hdev, "chan %p", chan);
+	BT_DBG("chan %p", chan);
 
 	smp = chan->data;
 	if (smp) {
@@ -90,8 +90,8 @@ struct bridge_mcast_stats {
 #endif
 
 struct br_tunnel_info {
-	__be64			tunnel_id;
-	struct metadata_dst	*tunnel_dst;
+	__be64				tunnel_id;
+	struct metadata_dst __rcu	*tunnel_dst;
 };
 
 /* private vlan flags */
@@ -41,26 +41,33 @@ static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl,
 			      br_vlan_tunnel_rht_params);
 }
 
+static void vlan_tunnel_info_release(struct net_bridge_vlan *vlan)
+{
+	struct metadata_dst *tdst = rtnl_dereference(vlan->tinfo.tunnel_dst);
+
+	WRITE_ONCE(vlan->tinfo.tunnel_id, 0);
+	RCU_INIT_POINTER(vlan->tinfo.tunnel_dst, NULL);
+	dst_release(&tdst->dst);
+}
+
 void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
 			  struct net_bridge_vlan *vlan)
 {
-	if (!vlan->tinfo.tunnel_dst)
+	if (!rcu_access_pointer(vlan->tinfo.tunnel_dst))
 		return;
 	rhashtable_remove_fast(&vg->tunnel_hash, &vlan->tnode,
 			       br_vlan_tunnel_rht_params);
-	vlan->tinfo.tunnel_id = 0;
-	dst_release(&vlan->tinfo.tunnel_dst->dst);
-	vlan->tinfo.tunnel_dst = NULL;
+	vlan_tunnel_info_release(vlan);
 }
 
 static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
 				  struct net_bridge_vlan *vlan, u32 tun_id)
 {
-	struct metadata_dst *metadata = NULL;
+	struct metadata_dst *metadata = rtnl_dereference(vlan->tinfo.tunnel_dst);
 	__be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id));
 	int err;
 
-	if (vlan->tinfo.tunnel_dst)
+	if (metadata)
 		return -EEXIST;
 
 	metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
@@ -69,8 +76,8 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
 		return -EINVAL;
 
 	metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_BRIDGE;
-	vlan->tinfo.tunnel_dst = metadata;
-	vlan->tinfo.tunnel_id = key;
+	rcu_assign_pointer(vlan->tinfo.tunnel_dst, metadata);
+	WRITE_ONCE(vlan->tinfo.tunnel_id, key);
 
 	err = rhashtable_lookup_insert_fast(&vg->tunnel_hash, &vlan->tnode,
 					    br_vlan_tunnel_rht_params);
@@ -79,9 +86,7 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
 
 	return 0;
 out:
-	dst_release(&vlan->tinfo.tunnel_dst->dst);
-	vlan->tinfo.tunnel_dst = NULL;
-	vlan->tinfo.tunnel_id = 0;
+	vlan_tunnel_info_release(vlan);
 
 	return err;
 }
@@ -182,12 +187,15 @@ int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
 int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
 				 struct net_bridge_vlan *vlan)
 {
+	struct metadata_dst *tunnel_dst;
+	__be64 tunnel_id;
 	int err;
 
-	if (!vlan || !vlan->tinfo.tunnel_id)
+	if (!vlan)
 		return 0;
 
-	if (unlikely(!skb_vlan_tag_present(skb)))
+	tunnel_id = READ_ONCE(vlan->tinfo.tunnel_id);
+	if (!tunnel_id || unlikely(!skb_vlan_tag_present(skb)))
 		return 0;
 
 	skb_dst_drop(skb);
@@ -195,7 +203,9 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
 	if (err)
 		return err;
 
-	skb_dst_set(skb, dst_clone(&vlan->tinfo.tunnel_dst->dst));
+	tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
+	if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
+		skb_dst_set(skb, &tunnel_dst->dst);
 
 	return 0;
 }
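Note on the bridge vlan tunnel hunks: the races came from plain pointer stores that egress could observe half-torn while another CPU deleted the tunnel; the fix converts the field to an __rcu pointer so writers publish/retire under RTNL and readers take their own reference before use. A kernel-style sketch of the pattern, with hypothetical names (not the bridge code itself):

        struct blob {
                struct metadata_dst __rcu *md;
        };

        static void blob_publish(struct blob *b, struct metadata_dst *md)
        {
                rcu_assign_pointer(b->md, md);  /* orders init before publish */
        }

        static void blob_use(struct blob *b, struct sk_buff *skb)
        {
                struct metadata_dst *md;

                rcu_read_lock();
                md = rcu_dereference(b->md);
                if (md && dst_hold_safe(&md->dst))      /* skip if refcount hit zero */
                        skb_dst_set(skb, &md->dst);
                rcu_read_unlock();
        }

dst_hold_safe() is the key detail: it refuses to resurrect an entry whose refcount already dropped to zero, closing the use-after-free window.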
@@ -125,7 +125,7 @@ struct bcm_sock {
 	struct sock sk;
 	int bound;
 	int ifindex;
-	struct notifier_block notifier;
+	struct list_head notifier;
 	struct list_head rx_ops;
 	struct list_head tx_ops;
 	unsigned long dropped_usr_msgs;
@@ -133,6 +133,10 @@
 	char procname [32]; /* inode number in decimal with \0 */
 };
 
+static LIST_HEAD(bcm_notifier_list);
+static DEFINE_SPINLOCK(bcm_notifier_lock);
+static struct bcm_sock *bcm_busy_notifier;
+
 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
 {
 	return (struct bcm_sock *)sk;
@@ -402,6 +406,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
 	if (!op->count && (op->flags & TX_COUNTEVT)) {
 
 		/* create notification to user */
+		memset(&msg_head, 0, sizeof(msg_head));
 		msg_head.opcode  = TX_EXPIRED;
 		msg_head.flags   = op->flags;
 		msg_head.count   = op->count;
@@ -439,6 +444,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
 	/* this element is not throttled anymore */
 	data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
 
+	memset(&head, 0, sizeof(head));
 	head.opcode  = RX_CHANGED;
 	head.flags   = op->flags;
 	head.count   = op->count;
@@ -560,6 +566,7 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
 	}
 
 	/* create notification to user */
+	memset(&msg_head, 0, sizeof(msg_head));
 	msg_head.opcode  = RX_TIMEOUT;
 	msg_head.flags   = op->flags;
 	msg_head.count   = op->count;
@@ -1378,20 +1385,15 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 /*
 * notification handler for netdevice status changes
 */
-static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
-			void *ptr)
+static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
+		       struct net_device *dev)
 {
-	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-	struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
 	struct sock *sk = &bo->sk;
 	struct bcm_op *op;
 	int notify_enodev = 0;
 
 	if (!net_eq(dev_net(dev), sock_net(sk)))
-		return NOTIFY_DONE;
-
-	if (dev->type != ARPHRD_CAN)
-		return NOTIFY_DONE;
+		return;
 
 	switch (msg) {
@@ -1426,7 +1428,28 @@ static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
 			sk->sk_error_report(sk);
 		}
 	}
+}
+
+static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
+			void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	if (dev->type != ARPHRD_CAN)
+		return NOTIFY_DONE;
+	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+		return NOTIFY_DONE;
+	if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
+		return NOTIFY_DONE;
+
+	spin_lock(&bcm_notifier_lock);
+	list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
+		spin_unlock(&bcm_notifier_lock);
+		bcm_notify(bcm_busy_notifier, msg, dev);
+		spin_lock(&bcm_notifier_lock);
+	}
+	bcm_busy_notifier = NULL;
+	spin_unlock(&bcm_notifier_lock);
 	return NOTIFY_DONE;
 }
@@ -1446,9 +1469,9 @@ static int bcm_init(struct sock *sk)
 	INIT_LIST_HEAD(&bo->rx_ops);
 
 	/* set notifier */
-	bo->notifier.notifier_call = bcm_notifier;
-
-	register_netdevice_notifier(&bo->notifier);
+	spin_lock(&bcm_notifier_lock);
+	list_add_tail(&bo->notifier, &bcm_notifier_list);
+	spin_unlock(&bcm_notifier_lock);
 
 	return 0;
 }
@@ -1471,7 +1494,14 @@ static int bcm_release(struct socket *sock)
 
 	/* remove bcm_ops, timer, rx_unregister(), etc. */
 
-	unregister_netdevice_notifier(&bo->notifier);
+	spin_lock(&bcm_notifier_lock);
+	while (bcm_busy_notifier == bo) {
+		spin_unlock(&bcm_notifier_lock);
+		schedule_timeout_uninterruptible(1);
+		spin_lock(&bcm_notifier_lock);
+	}
+	list_del(&bo->notifier);
+	spin_unlock(&bcm_notifier_lock);
 
 	lock_sock(sk);
 
@@ -1692,6 +1722,10 @@ static struct pernet_operations canbcm_pernet_ops __read_mostly = {
 	.exit = canbcm_pernet_exit,
 };
 
+static struct notifier_block canbcm_notifier = {
+	.notifier_call = bcm_notifier
+};
+
 static int __init bcm_module_init(void)
 {
 	int err;
@@ -1705,12 +1739,14 @@
 	}
 
 	register_pernet_subsys(&canbcm_pernet_ops);
+	register_netdevice_notifier(&canbcm_notifier);
 	return 0;
 }
 
 static void __exit bcm_module_exit(void)
 {
 	can_proto_unregister(&bcm_can_proto);
+	unregister_netdevice_notifier(&canbcm_notifier);
 	unregister_pernet_subsys(&canbcm_pernet_ops);
 }
@@ -143,10 +143,14 @@ struct isotp_sock {
 	u32 force_tx_stmin;
 	u32 force_rx_stmin;
 	struct tpcon rx, tx;
-	struct notifier_block notifier;
+	struct list_head notifier;
 	wait_queue_head_t wait;
 };
 
+static LIST_HEAD(isotp_notifier_list);
+static DEFINE_SPINLOCK(isotp_notifier_lock);
+static struct isotp_sock *isotp_busy_notifier;
+
 static inline struct isotp_sock *isotp_sk(const struct sock *sk)
 {
 	return (struct isotp_sock *)sk;
@@ -1013,7 +1017,14 @@ static int isotp_release(struct socket *sock)
 	/* wait for complete transmission of current pdu */
 	wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
 
-	unregister_netdevice_notifier(&so->notifier);
+	spin_lock(&isotp_notifier_lock);
+	while (isotp_busy_notifier == so) {
+		spin_unlock(&isotp_notifier_lock);
+		schedule_timeout_uninterruptible(1);
+		spin_lock(&isotp_notifier_lock);
+	}
+	list_del(&so->notifier);
+	spin_unlock(&isotp_notifier_lock);
 
 	lock_sock(sk);
 
@@ -1317,21 +1328,16 @@ static int isotp_getsockopt(struct socket *sock, int level, int optname,
 	return 0;
 }
 
-static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
-			  void *ptr)
+static void isotp_notify(struct isotp_sock *so, unsigned long msg,
+			 struct net_device *dev)
 {
-	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-	struct isotp_sock *so = container_of(nb, struct isotp_sock, notifier);
 	struct sock *sk = &so->sk;
 
 	if (!net_eq(dev_net(dev), sock_net(sk)))
-		return NOTIFY_DONE;
-
-	if (dev->type != ARPHRD_CAN)
-		return NOTIFY_DONE;
+		return;
 
 	if (so->ifindex != dev->ifindex)
-		return NOTIFY_DONE;
+		return;
 
 	switch (msg) {
 	case NETDEV_UNREGISTER:
@@ -1357,7 +1363,28 @@ static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
 		sk->sk_error_report(sk);
 		break;
 	}
+}
+
+static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
+			  void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	if (dev->type != ARPHRD_CAN)
+		return NOTIFY_DONE;
+	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+		return NOTIFY_DONE;
+	if (unlikely(isotp_busy_notifier)) /* Check for reentrant bug. */
+		return NOTIFY_DONE;
+
+	spin_lock(&isotp_notifier_lock);
+	list_for_each_entry(isotp_busy_notifier, &isotp_notifier_list, notifier) {
+		spin_unlock(&isotp_notifier_lock);
+		isotp_notify(isotp_busy_notifier, msg, dev);
+		spin_lock(&isotp_notifier_lock);
+	}
+	isotp_busy_notifier = NULL;
+	spin_unlock(&isotp_notifier_lock);
 	return NOTIFY_DONE;
 }
 
@@ -1394,8 +1421,9 @@ static int isotp_init(struct sock *sk)
 
 	init_waitqueue_head(&so->wait);
 
-	so->notifier.notifier_call = isotp_notifier;
-	register_netdevice_notifier(&so->notifier);
+	spin_lock(&isotp_notifier_lock);
+	list_add_tail(&so->notifier, &isotp_notifier_list);
+	spin_unlock(&isotp_notifier_lock);
 
 	return 0;
 }
@@ -1442,6 +1470,10 @@ static const struct can_proto isotp_can_proto = {
 	.prot = &isotp_proto,
 };
 
+static struct notifier_block canisotp_notifier = {
+	.notifier_call = isotp_notifier
+};
+
 static __init int isotp_module_init(void)
 {
 	int err;
@@ -1451,6 +1483,8 @@
 	err = can_proto_register(&isotp_can_proto);
 	if (err < 0)
 		pr_err("can: registration of isotp protocol failed\n");
+	else
+		register_netdevice_notifier(&canisotp_notifier);
 
 	return err;
 }
@@ -1458,6 +1492,7 @@
 static __exit void isotp_module_exit(void)
 {
 	can_proto_unregister(&isotp_can_proto);
+	unregister_netdevice_notifier(&canisotp_notifier);
 }
 
 module_init(isotp_module_init);
@@ -330,6 +330,9 @@ static void j1939_session_skb_drop_old(struct j1939_session *session)
 
 		if ((do_skcb->offset + do_skb->len) < offset_start) {
 			__skb_unlink(do_skb, &session->skb_queue);
+			/* drop ref taken in j1939_session_skb_queue() */
+			skb_unref(do_skb);
+
 			kfree_skb(do_skb);
 		}
 	spin_unlock_irqrestore(&session->skb_queue.lock, flags);
@@ -349,12 +352,13 @@ void j1939_session_skb_queue(struct j1939_session *session,
 
 	skcb->flags |= J1939_ECU_LOCAL_SRC;
 
+	skb_get(skb);
 	skb_queue_tail(&session->skb_queue, skb);
 }
 
 static struct
-sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
-					  unsigned int offset_start)
+sk_buff *j1939_session_skb_get_by_offset(struct j1939_session *session,
+					 unsigned int offset_start)
 {
 	struct j1939_priv *priv = session->priv;
 	struct j1939_sk_buff_cb *do_skcb;
@@ -371,6 +375,10 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
 			skb = do_skb;
 		}
 	}
+
+	if (skb)
+		skb_get(skb);
+
 	spin_unlock_irqrestore(&session->skb_queue.lock, flags);
 
 	if (!skb)
@@ -381,12 +389,12 @@
 	return skb;
 }
 
-static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
+static struct sk_buff *j1939_session_skb_get(struct j1939_session *session)
 {
 	unsigned int offset_start;
 
 	offset_start = session->pkt.dpo * 7;
-	return j1939_session_skb_find_by_offset(session, offset_start);
+	return j1939_session_skb_get_by_offset(session, offset_start);
 }
 
 /* see if we are receiver
@@ -776,7 +784,7 @@ static int j1939_session_tx_dat(struct j1939_session *session)
 	int ret = 0;
 	u8 dat[8];
 
-	se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7);
+	se_skb = j1939_session_skb_get_by_offset(session, session->pkt.tx * 7);
 	if (!se_skb)
 		return -ENOBUFS;
 
@@ -801,7 +809,8 @@
 			netdev_err_once(priv->ndev,
 					"%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
 					__func__, session, skcb->offset, se_skb->len , session->pkt.tx);
-			return -EOVERFLOW;
+			ret = -EOVERFLOW;
+			goto out_free;
 		}
 
 		if (!len) {
@@ -835,6 +844,12 @@ static int j1939_session_tx_dat(struct j1939_session *session)
 	if (pkt_done)
 		j1939_tp_set_rxtimeout(session, 250);
 
+ out_free:
+	if (ret)
+		kfree_skb(se_skb);
+	else
+		consume_skb(se_skb);
+
 	return ret;
 }
 
@@ -1007,7 +1022,7 @@ static int j1939_xtp_txnext_receiver(struct j1939_session *session)
 static int j1939_simple_txnext(struct j1939_session *session)
 {
 	struct j1939_priv *priv = session->priv;
-	struct sk_buff *se_skb = j1939_session_skb_find(session);
+	struct sk_buff *se_skb = j1939_session_skb_get(session);
 	struct sk_buff *skb;
 	int ret;
 
@@ -1015,8 +1030,10 @@ static int j1939_simple_txnext(struct j1939_session *session)
 		return 0;
 
 	skb = skb_clone(se_skb, GFP_ATOMIC);
-	if (!skb)
-		return -ENOMEM;
+	if (!skb) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
 
 	can_skb_set_owner(skb, se_skb->sk);
 
@@ -1024,12 +1041,18 @@
 
 	ret = j1939_send_one(priv, skb);
 	if (ret)
-		return ret;
+		goto out_free;
 
 	j1939_sk_errqueue(session, J1939_ERRQUEUE_SCHED);
 	j1939_sk_queue_activate_next(session);
 
-	return 0;
+ out_free:
+	if (ret)
+		kfree_skb(se_skb);
+	else
+		consume_skb(se_skb);
+
+	return ret;
 }
 
 static bool j1939_session_deactivate_locked(struct j1939_session *session)
@@ -1170,9 +1193,10 @@ static void j1939_session_completed(struct j1939_session *session)
 	struct sk_buff *skb;
 
 	if (!session->transmission) {
-		skb = j1939_session_skb_find(session);
+		skb = j1939_session_skb_get(session);
 		/* distribute among j1939 receivers */
 		j1939_sk_recv(session->priv, skb);
+		consume_skb(skb);
 	}
 
 	j1939_session_deactivate_activate_next(session);
@@ -1744,7 +1768,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
 {
 	struct j1939_priv *priv = session->priv;
 	struct j1939_sk_buff_cb *skcb;
-	struct sk_buff *se_skb;
+	struct sk_buff *se_skb = NULL;
 	const u8 *dat;
 	u8 *tpdat;
 	int offset;
@@ -1786,7 +1810,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
 		goto out_session_cancel;
 	}
 
-	se_skb = j1939_session_skb_find_by_offset(session, packet * 7);
+	se_skb = j1939_session_skb_get_by_offset(session, packet * 7);
 	if (!se_skb) {
 		netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
 			    session);
@@ -1848,11 +1872,13 @@
 		j1939_tp_set_rxtimeout(session, 250);
 	}
 	session->last_cmd = 0xff;
+	consume_skb(se_skb);
 	j1939_session_put(session);
 
 	return;
 
  out_session_cancel:
+	kfree_skb(se_skb);
 	j1939_session_timers_cancel(session);
 	j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
 	j1939_session_put(session);
@@ -83,7 +83,7 @@ struct raw_sock {
 	struct sock sk;
 	int bound;
 	int ifindex;
-	struct notifier_block notifier;
+	struct list_head notifier;
 	int loopback;
 	int recv_own_msgs;
 	int fd_frames;
@@ -95,6 +95,10 @@
 	struct uniqframe __percpu *uniq;
 };
 
+static LIST_HEAD(raw_notifier_list);
+static DEFINE_SPINLOCK(raw_notifier_lock);
+static struct raw_sock *raw_busy_notifier;
+
 /* Return pointer to store the extra msg flags for raw_recvmsg().
 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
 * in skb->cb.
@@ -263,21 +267,16 @@ static int raw_enable_allfilters(struct net *net, struct net_device *dev,
 	return err;
 }
 
-static int raw_notifier(struct notifier_block *nb,
-			unsigned long msg, void *ptr)
+static void raw_notify(struct raw_sock *ro, unsigned long msg,
+		       struct net_device *dev)
 {
-	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-	struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
 	struct sock *sk = &ro->sk;
 
 	if (!net_eq(dev_net(dev), sock_net(sk)))
-		return NOTIFY_DONE;
-
-	if (dev->type != ARPHRD_CAN)
-		return NOTIFY_DONE;
+		return;
 
 	if (ro->ifindex != dev->ifindex)
-		return NOTIFY_DONE;
+		return;
 
 	switch (msg) {
 	case NETDEV_UNREGISTER:
@@ -305,7 +304,28 @@
 		sk->sk_error_report(sk);
 		break;
 	}
+}
+
+static int raw_notifier(struct notifier_block *nb, unsigned long msg,
+			void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	if (dev->type != ARPHRD_CAN)
+		return NOTIFY_DONE;
+	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+		return NOTIFY_DONE;
+	if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
+		return NOTIFY_DONE;
+
+	spin_lock(&raw_notifier_lock);
+	list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
+		spin_unlock(&raw_notifier_lock);
+		raw_notify(raw_busy_notifier, msg, dev);
+		spin_lock(&raw_notifier_lock);
+	}
+	raw_busy_notifier = NULL;
+	spin_unlock(&raw_notifier_lock);
 	return NOTIFY_DONE;
 }
 
@@ -334,9 +354,9 @@ static int raw_init(struct sock *sk)
 		return -ENOMEM;
 
 	/* set notifier */
-	ro->notifier.notifier_call = raw_notifier;
-
-	register_netdevice_notifier(&ro->notifier);
+	spin_lock(&raw_notifier_lock);
+	list_add_tail(&ro->notifier, &raw_notifier_list);
+	spin_unlock(&raw_notifier_lock);
 
 	return 0;
 }
@@ -351,7 +371,14 @@ static int raw_release(struct socket *sock)
 
 	ro = raw_sk(sk);
 
-	unregister_netdevice_notifier(&ro->notifier);
+	spin_lock(&raw_notifier_lock);
+	while (raw_busy_notifier == ro) {
+		spin_unlock(&raw_notifier_lock);
+		schedule_timeout_uninterruptible(1);
+		spin_lock(&raw_notifier_lock);
+	}
+	list_del(&ro->notifier);
+	spin_unlock(&raw_notifier_lock);
 
 	lock_sock(sk);
 
@@ -889,6 +916,10 @@ static const struct can_proto raw_can_proto = {
 	.prot = &raw_proto,
 };
 
+static struct notifier_block canraw_notifier = {
+	.notifier_call = raw_notifier
+};
+
 static __init int raw_module_init(void)
 {
 	int err;
@@ -898,6 +929,8 @@
 	err = can_proto_register(&raw_can_proto);
 	if (err < 0)
 		pr_err("can: registration of raw protocol failed\n");
+	else
+		register_netdevice_notifier(&canraw_notifier);
 
 	return err;
 }
@@ -905,6 +938,7 @@
 static __exit void raw_module_exit(void)
 {
 	can_proto_unregister(&raw_can_proto);
+	unregister_netdevice_notifier(&canraw_notifier);
 }
 
 module_init(raw_module_init);
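Note on the three CAN conversions above (bcm, isotp, raw): registering one netdevice notifier per socket is both slow and racy on release, so each module now registers a single module-wide notifier that walks a spinlock-protected list of sockets. The global busy cursor doubles as a guard: release() spins until a callback currently servicing its socket finishes before unlinking. Condensed shape of the release-side wait, with generic names (sketch only):

        spin_lock(&mod_notifier_lock);
        while (mod_busy_notifier == me) {       /* callback still using me */
                spin_unlock(&mod_notifier_lock);
                schedule_timeout_uninterruptible(1);
                spin_lock(&mod_notifier_lock);
        }
        list_del(&me->notifier);                /* now safe to unlink */
        spin_unlock(&mod_notifier_lock);

Dropping the lock inside the notifier's list walk is what makes this wait necessary; the cursor is the only record of which entry is in flight.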
@@ -238,6 +238,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
 
 			write_lock(&n->lock);
 			if ((n->nud_state == NUD_FAILED) ||
+			    (n->nud_state == NUD_NOARP) ||
 			    (tbl->is_multicast &&
 			     tbl->is_multicast(n->primary_key)) ||
 			    time_after(tref, n->updated))
@@ -641,6 +641,18 @@ void __put_net(struct net *net)
 }
 EXPORT_SYMBOL_GPL(__put_net);
 
+/**
+ * get_net_ns - increment the refcount of the network namespace
+ * @ns: common namespace (net)
+ *
+ * Returns the net's common namespace.
+ */
+struct ns_common *get_net_ns(struct ns_common *ns)
+{
+	return &get_net(container_of(ns, struct net, ns))->ns;
+}
+EXPORT_SYMBOL_GPL(get_net_ns);
+
 struct net *get_net_ns_by_fd(int fd)
 {
 	struct file *file;
@@ -660,14 +672,8 @@
 	fput(file);
 	return net;
 }
-
-#else
-struct net *get_net_ns_by_fd(int fd)
-{
-	return ERR_PTR(-EINVAL);
-}
-#endif
 EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
+#endif
 
 struct net *get_net_ns_by_pid(pid_t pid)
 {
@@ -4842,10 +4842,12 @@ static int rtnl_bridge_notify(struct net_device *dev)
 	if (err < 0)
 		goto errout;
 
-	if (!skb->len) {
-		err = -EINVAL;
+	/* Notification info is only filled for bridge ports, not the bridge
+	 * device itself. Therefore, a zero notification length is valid and
+	 * should not result in an error.
+	 */
+	if (!skb->len)
 		goto errout;
-	}
 
 	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
 	return 0;
@@ -1253,6 +1253,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
 	struct sock *sk = skb->sk;
 	struct sk_buff_head *q;
 	unsigned long flags;
+	bool is_zerocopy;
 	u32 lo, hi;
 	u16 len;
 
@@ -1267,6 +1268,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
 	len = uarg->len;
 	lo = uarg->id;
 	hi = uarg->id + len - 1;
+	is_zerocopy = uarg->zerocopy;
 
 	serr = SKB_EXT_ERR(skb);
 	memset(serr, 0, sizeof(*serr));
@@ -1274,7 +1276,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
 	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
 	serr->ee.ee_data = hi;
 	serr->ee.ee_info = lo;
-	if (!uarg->zerocopy)
+	if (!is_zerocopy)
 		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
 
 	q = &sk->sk_error_queue;
@@ -95,7 +95,7 @@ static int get_module_eeprom_by_page(struct net_device *dev,
 	if (dev->sfp_bus)
 		return sfp_get_module_eeprom_by_page(dev->sfp_bus, page_data, extack);
 
-	if (ops->get_module_info)
+	if (ops->get_module_eeprom_by_page)
 		return ops->get_module_eeprom_by_page(dev, page_data, extack);
 
 	return -EOPNOTSUPP;
@@ -1421,7 +1421,7 @@ static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr,
 	if (eeprom.offset + eeprom.len > total_len)
 		return -EINVAL;
 
-	data = kmalloc(PAGE_SIZE, GFP_USER);
+	data = kzalloc(PAGE_SIZE, GFP_USER);
 	if (!data)
 		return -ENOMEM;
 
@@ -1486,7 +1486,7 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
 	if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
 		return -EINVAL;
 
-	data = kmalloc(PAGE_SIZE, GFP_USER);
+	data = kzalloc(PAGE_SIZE, GFP_USER);
 	if (!data)
 		return -ENOMEM;
 
@@ -1765,7 +1765,7 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
 		return -EFAULT;
 
 	test.len = test_len;
-	data = kmalloc_array(test_len, sizeof(u64), GFP_USER);
+	data = kcalloc(test_len, sizeof(u64), GFP_USER);
 	if (!data)
 		return -ENOMEM;
 
@@ -2293,7 +2293,7 @@ static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr)
 	ret = ethtool_tunable_valid(&tuna);
 	if (ret)
 		return ret;
-	data = kmalloc(tuna.len, GFP_USER);
+	data = kzalloc(tuna.len, GFP_USER);
 	if (!data)
 		return -ENOMEM;
 	ret = ops->get_tunable(dev, &tuna, data);
@@ -2485,7 +2485,7 @@ static int get_phy_tunable(struct net_device *dev, void __user *useraddr)
 	ret = ethtool_phy_tunable_valid(&tuna);
 	if (ret)
 		return ret;
-	data = kmalloc(tuna.len, GFP_USER);
+	data = kzalloc(tuna.len, GFP_USER);
 	if (!data)
 		return -ENOMEM;
 	if (phy_drv_tunable) {
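Note on the kmalloc-to-kzalloc sweep: these ioctl handlers copy a whole buffer back to userspace, while the driver callback may legitimately fill less than was allocated; zero-initialising the allocation prevents leaking stale kernel heap bytes through the untouched tail. Condensed shape of the hazard (sketch, simplified from the handlers above):

        data = kzalloc(PAGE_SIZE, GFP_USER);    /* was kmalloc(): tail held stale bytes */
        if (!data)
                return -ENOMEM;
        ret = ops->get_eeprom(dev, &eeprom, data);      /* driver may write < eeprom.len */
        if (!ret && copy_to_user(userbuf, data, eeprom.len))    /* full length copied out */
                ret = -EFAULT;

The same reasoning applies to kcalloc() replacing kmalloc_array() for the self-test results.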
@@ -353,6 +353,8 @@ static int strset_reply_size(const struct ethnl_req_info *req_base,
 	int len = 0;
 	int ret;
 
+	len += nla_total_size(0); /* ETHTOOL_A_STRSET_STRINGSETS */
+
 	for (i = 0; i < ETH_SS_COUNT; i++) {
 		const struct strset_info *set_info = &data->sets[i];
 
@@ -575,7 +575,7 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
 		return err;
 	}
 
-	if (!inet_sk(sk)->inet_num && inet_autobind(sk))
+	if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
 		return -EAGAIN;
 	return sk->sk_prot->connect(sk, uaddr, addr_len);
 }
@@ -803,7 +803,7 @@ int inet_send_prepare(struct sock *sk)
 	sock_rps_record_flow(sk);
 
 	/* We may need to bind the socket. */
-	if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
+	if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&
 	    inet_autobind(sk))
 		return -EAGAIN;
 
@@ -472,6 +472,7 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
 		kfree(doi_def->map.std->lvl.local);
 		kfree(doi_def->map.std->cat.cipso);
 		kfree(doi_def->map.std->cat.local);
+		kfree(doi_def->map.std);
 		break;
 	}
 	kfree(doi_def);
@@ -1989,7 +1989,7 @@ static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla,
 		return -EAFNOSUPPORT;
 
 	if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
-		BUG();
+		return -EINVAL;
 
 	if (tb[IFLA_INET_CONF]) {
 		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
@@ -759,6 +759,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
 	icmp_param.data_len = room;
 	icmp_param.head_len = sizeof(struct icmphdr);
 
+	/* if we don't have a source address at this point, fall back to the
+	 * dummy address instead of sending out a packet with a source address
+	 * of 0.0.0.0
+	 */
+	if (!fl4.saddr)
+		fl4.saddr = htonl(INADDR_DUMMY);
+
 	icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
 ende:
 	ip_rt_put(rt);
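The constant reads back as a reserved address: 0xc0000008 splits into octets 0xc0.0x00.0x00.0x08 = 192.0.0.8, inside the 192.0.0.0/24 IETF protocol assignments block, and RFC 7600 designates 192.0.0.8 as the IPv4 dummy address. A quick userspace check of the conversion (illustrative only):

        #include <stdio.h>
        #include <arpa/inet.h>

        int main(void)
        {
                struct in_addr a = { .s_addr = htonl(0xc0000008) };

                printf("%s\n", inet_ntoa(a));   /* prints 192.0.0.8 */
                return 0;
        }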
@@ -1801,6 +1801,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
 	while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
 		in_dev->mc_list = i->next_rcu;
 		in_dev->mc_count--;
+		ip_mc_clear_src(i);
 		ip_ma_put(i);
 	}
 }
@@ -954,6 +954,7 @@ bool ping_rcv(struct sk_buff *skb)
 	struct sock *sk;
 	struct net *net = dev_net(skb->dev);
 	struct icmphdr *icmph = icmp_hdr(skb);
+	bool rc = false;
 
 	/* We assume the packet has already been checked by icmp_rcv */
 
@@ -968,14 +969,15 @@
 		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
 		pr_debug("rcv on socket %p\n", sk);
-		if (skb2)
-			ping_queue_rcv_skb(sk, skb2);
+		if (skb2 && !ping_queue_rcv_skb(sk, skb2))
+			rc = true;
 		sock_put(sk);
-		return true;
 	}
-	pr_debug("no socket, dropping\n");
 
-	return false;
+	if (!rc)
+		pr_debug("no socket, dropping\n");
+
+	return rc;
 }
 EXPORT_SYMBOL_GPL(ping_rcv);
@@ -2056,6 +2056,19 @@ martian_source:
 	return err;
 }
 
+/* get device for dst_alloc with local routes */
+static struct net_device *ip_rt_get_dev(struct net *net,
+					const struct fib_result *res)
+{
+	struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
+	struct net_device *dev = NULL;
+
+	if (nhc)
+		dev = l3mdev_master_dev_rcu(nhc->nhc_dev);
+
+	return dev ? : net->loopback_dev;
+}
+
 /*
 * NOTE. We drop all the packets that has local source
 * addresses, because every properly looped back packet
@@ -2212,7 +2225,7 @@ local_input:
 		}
 	}
 
-	rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
+	rth = rt_dst_alloc(ip_rt_get_dev(net, res),
 			   flags | RTCF_LOCAL, res->type,
 			   IN_DEV_ORCONF(in_dev, NOPOLICY), false);
 	if (!rth)
@@ -2607,6 +2607,9 @@ void udp_destroy_sock(struct sock *sk)
 {
 	struct udp_sock *up = udp_sk(sk);
 	bool slow = lock_sock_fast(sk);
+
+	/* protects from races with udp_abort() */
+	sock_set_flag(sk, SOCK_DEAD);
 	udp_flush_pending_frames(sk);
 	unlock_sock_fast(sk, slow);
 	if (static_branch_unlikely(&udp_encap_needed_key)) {
@@ -2857,10 +2860,17 @@ int udp_abort(struct sock *sk, int err)
 {
 	lock_sock(sk);
 
+	/* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing
+	 * with close()
+	 */
+	if (sock_flag(sk, SOCK_DEAD))
+		goto out;
+
 	sk->sk_err = err;
 	sk->sk_error_report(sk);
 	__udp_disconnect(sk, 0);
 
+out:
 	release_sock(sk);
 
 	return 0;
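Shape of the race these udp hunks close, condensed: close() tears the socket down under the socket lock while udp_abort(), invoked via sock_diag, also takes the lock; without a marker, abort can win the lock right after destroy dropped it and touch state that is being freed. Setting SOCK_DEAD under the lock in destroy and testing it first in abort serialises the two outcomes (sketch of the abort side, simplified from the diff above):

        lock_sock(sk);
        if (sock_flag(sk, SOCK_DEAD))   /* close() already ran destroy */
                goto out;
        sk->sk_err = err;
        sk->sk_error_report(sk);
        __udp_disconnect(sk, 0);
        out:
        release_sock(sk);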
@@ -5827,7 +5827,7 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
 		return -EAFNOSUPPORT;
 
 	if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
-		BUG();
+		return -EINVAL;
 
 	if (tb[IFLA_INET6_TOKEN]) {
 		err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
@@ -135,6 +135,17 @@ void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
 }
 EXPORT_SYMBOL_GPL(nft_fib6_eval_type);
 
+static bool nft_fib_v6_skip_icmpv6(const struct sk_buff *skb, u8 next, const struct ipv6hdr *iph)
+{
+	if (likely(next != IPPROTO_ICMPV6))
+		return false;
+
+	if (ipv6_addr_type(&iph->saddr) != IPV6_ADDR_ANY)
+		return false;
+
+	return ipv6_addr_type(&iph->daddr) & IPV6_ADDR_LINKLOCAL;
+}
+
 void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
 		   const struct nft_pktinfo *pkt)
 {
@@ -163,10 +174,13 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
 
 	lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif, iph);
 
-	if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
-	    nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
-		nft_fib_store_result(dest, priv, nft_in(pkt));
-		return;
+	if (nft_hook(pkt) == NF_INET_PRE_ROUTING ||
+	    nft_hook(pkt) == NF_INET_INGRESS) {
+		if (nft_fib_is_loopback(pkt->skb, nft_in(pkt)) ||
+		    nft_fib_v6_skip_icmpv6(pkt->skb, pkt->tprot, iph)) {
+			nft_fib_store_result(dest, priv, nft_in(pkt));
+			return;
+		}
 	}
 
 	*dest = 0;
@@ -1598,6 +1598,9 @@ void udpv6_destroy_sock(struct sock *sk)
 {
 	struct udp_sock *up = udp_sk(sk);
 	lock_sock(sk);
+
+	/* protects from races with udp_abort() */
+	sock_set_flag(sk, SOCK_DEAD);
 	udp_v6_flush_pending_frames(sk);
 	release_sock(sk);
 
Some files were not shown because too many files have changed in this diff.