Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Out of bounds access in xfrm IPSEC policy unlink, from Yue Haibing.

 2) Missing length check for esp4 UDP encap, from Sabrina Dubroca.

 3) Fix byte order of RX STBC access in mac80211, from Johannes Berg.

 4) Infinite loop in bpftool map create, from Alban Crequy.

 5) Register mark fix in ebpf verifier after pkt/null checks, from Paul Chaignon.

 6) Properly use rcu_dereference_sk_user_data in L2TP code, from Eric Dumazet.

 7) Buffer overrun in marvell phy driver, from Andrew Lunn.

 8) Several crash and statistics handling fixes to bnxt_en driver, from Michael Chan and Vasundhara Volam.

 9) Several fixes to the TLS layer from Jakub Kicinski (copying negative amounts of data in reencrypt, reencrypt frag copying, blind nskb->sk NULL deref, etc).

10) Several UDP GRO fixes, from Paolo Abeni and Eric Dumazet.

11) PID/UID checks on ipv6 flow labels are inverted, from Willem de Bruijn.

12) Use after free in l2tp, from Eric Dumazet.

13) IPV6 route destroy races, also from Eric Dumazet.

14) SCTP state machine can erroneously run recursively, fix from Xin Long.

15) Adjust AF_PACKET msg_name length checks, add padding bytes if necessary. From Willem de Bruijn.

16) Preserve skb_iif, so that forwarded packets have consistent values even if fragmentation is involved. From Shmulik Ladkani.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (69 commits)
  udp: fix GRO packet of death
  ipv6: A few fixes on dereferencing rt->from
  rds: ib: force endiannes annotation
  selftests: fib_rule_tests: print the result and return 1 if any tests failed
  ipv4: ip_do_fragment: Preserve skb_iif during fragmentation
  net/tls: avoid NULL pointer deref on nskb->sk in fallback
  selftests: fib_rule_tests: Fix icmp proto with ipv6
  packet: validate msg_namelen in send directly
  packet: in recvmsg msg_name return at least sizeof sockaddr_ll
  sctp: avoid running the sctp state machine recursively
  stmmac: pci: Fix typo in IOT2000 comment
  Documentation: fix netdev-FAQ.rst markup warning
  ipv6: fix races in ip6_dst_destroy()
  l2ip: fix possible use-after-free
  appletalk: Set error code if register_snap_client failed
  net: dsa: bcm_sf2: fix buffer overflow doing set_rxnfc
  rxrpc: Fix net namespace cleanup
  ipv6/flowlabel: wait rcu grace period before put_pid()
  vrf: Use orig netdev to count Ip6InNoRoutes and a fresh route lookup when sending dest unreach
  tcp: add sanity tests in tcp_add_backlog()
  ...
commit ea9866793d
@@ -1337,6 +1337,7 @@ tag - INTEGER
Default value is 0.

xfrm4_gc_thresh - INTEGER
(Obsolete since linux-4.14)
The threshold at which we will start garbage collecting for IPv4
destination cache entries. At twice this value the system will
refuse new allocations.

@@ -1920,6 +1921,7 @@ echo_ignore_all - BOOLEAN
Default: 0

xfrm6_gc_thresh - INTEGER
(Obsolete since linux-4.14)
The threshold at which we will start garbage collecting for IPv6
destination cache entries. At twice this value the system will
refuse new allocations.
@@ -132,7 +132,7 @@ version that should be applied. If there is any doubt, the maintainer
will reply and ask what should be done.

Q: I made changes to only a few patches in a patch series should I resend only those changed?
--------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------
A: No, please resend the entire patch series and make sure you do number your
patches such that it is clear this is the latest and greatest set of patches
that can be applied.
@@ -186,8 +186,9 @@ enum which_ebpf_reg {
* separate frame pointer, so BPF_REG_10 relative accesses are
* adjusted to be $sp relative.
*/
int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
enum which_ebpf_reg w)
static int ebpf_to_mips_reg(struct jit_ctx *ctx,
const struct bpf_insn *insn,
enum which_ebpf_reg w)
{
int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
insn->src_reg : insn->dst_reg;
@@ -886,6 +886,9 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
fs->m_ext.data[1]))
return -EINVAL;

if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
return -EINVAL;

if (fs->location != RX_CLS_LOC_ANY &&
test_bit(fs->location, priv->cfp.used))
return -EBUSY;

@@ -974,6 +977,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
struct cfp_rule *rule;
int ret;

if (loc >= CFP_NUM_RULES)
return -EINVAL;

/* Refuse deleting unused rules, and those that are not unique since
* that could leave IPv6 rules with one of the chained rule in the
* table.
@@ -1625,7 +1625,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
bnxt_sched_reset(bp, rxr);
}
goto next_rx;
goto next_rx_no_len;
}

len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;

@@ -1706,12 +1706,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = 1;

next_rx:
rxr->rx_prod = NEXT_RX(prod);
rxr->rx_next_cons = NEXT_RX(cons);

cpr->rx_packets += 1;
cpr->rx_bytes += len;

next_rx_no_len:
rxr->rx_prod = NEXT_RX(prod);
rxr->rx_next_cons = NEXT_RX(cons);

next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;

@@ -5135,10 +5136,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
u32 cmpl_ring_id;

cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);

hwrm_ring_free_send_msg(bp, ring,
RING_FREE_REQ_RING_TYPE_TX,
close_path ? cmpl_ring_id :

@@ -5151,10 +5152,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
u32 grp_idx = rxr->bnapi->index;
u32 cmpl_ring_id;

cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);

hwrm_ring_free_send_msg(bp, ring,
RING_FREE_REQ_RING_TYPE_RX,
close_path ? cmpl_ring_id :

@@ -5173,10 +5174,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
u32 grp_idx = rxr->bnapi->index;
u32 cmpl_ring_id;

cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);

hwrm_ring_free_send_msg(bp, ring, type,
close_path ? cmpl_ring_id :
INVALID_HW_RING_ID);

@@ -5315,17 +5316,16 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
req->num_tx_rings = cpu_to_le16(tx_rings);
if (BNXT_NEW_RM(bp)) {
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
enables |= tx_rings + ring_grps ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
enables |= rx_rings ?
FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
} else {
enables |= cp_rings ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
enables |= ring_grps ?
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;

@@ -5365,14 +5365,13 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
enables |= tx_rings + ring_grps ?
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
} else {
enables |= cp_rings ?
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
enables |= ring_grps ?
FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
}

@@ -6753,6 +6752,7 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
struct hwrm_port_qstats_ext_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
u32 tx_stat_size;
int rc;

if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))

@@ -6762,13 +6762,16 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
req.port_id = cpu_to_le16(pf->port_id);
req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
tx_stat_size = bp->hw_tx_port_stats_ext ?
sizeof(*bp->hw_tx_port_stats_ext) : 0;
req.tx_stat_size = cpu_to_le16(tx_stat_size);
req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
bp->fw_tx_stats_ext_size = tx_stat_size ?
le16_to_cpu(resp->tx_stat_size) / 8 : 0;
} else {
bp->fw_rx_stats_ext_size = 0;
bp->fw_tx_stats_ext_size = 0;

@@ -8961,8 +8964,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)

skip_uc:
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
if (rc && vnic->mc_list_count) {
netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
rc);
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
}
if (rc)
netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
rc);

return rc;

@@ -10685,6 +10695,7 @@ init_err_cleanup_tc:
bnxt_clear_int_mode(bp);

init_err_pci_clean:
bnxt_free_hwrm_short_cmd_req(bp);
bnxt_free_hwrm_resources(bp);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
@@ -333,6 +333,9 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
*/
dwmac->irq_pwr_wakeup = platform_get_irq_byname(pdev,
"stm32_pwr_wakeup");
if (dwmac->irq_pwr_wakeup == -EPROBE_DEFER)
return -EPROBE_DEFER;

if (!dwmac->clk_eth_ck && dwmac->irq_pwr_wakeup >= 0) {
err = device_init_wakeup(&pdev->dev, true);
if (err) {
@@ -160,7 +160,7 @@ static const struct dmi_system_id quark_pci_dmi[] = {
.driver_data = (void *)&galileo_stmmac_dmi_data,
},
/*
* There are 2 types of SIMATIC IOT2000: IOT20202 and IOT2040.
* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
* The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
* has only one pci network device while other asset tags are
* for IOT2040 which has two.
@@ -533,6 +533,8 @@ mcr20a_start(struct ieee802154_hw *hw)
dev_dbg(printdev(lp), "no slotted operation\n");
ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
DAR_PHY_CTRL1_SLOTTED, 0x0);
if (ret < 0)
return ret;

/* enable irq */
enable_irq(lp->spi->irq);

@@ -540,11 +542,15 @@ mcr20a_start(struct ieee802154_hw *hw)
/* Unmask SEQ interrupt */
ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
DAR_PHY_CTRL2_SEQMSK, 0x0);
if (ret < 0)
return ret;

/* Start the RX sequence */
dev_dbg(printdev(lp), "start the RX sequence\n");
ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
if (ret < 0)
return ret;

return 0;
}
@@ -1489,9 +1489,10 @@ static int marvell_get_sset_count(struct phy_device *phydev)

static void marvell_get_strings(struct phy_device *phydev, u8 *data)
{
int count = marvell_get_sset_count(phydev);
int i;

for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
for (i = 0; i < count; i++) {
strlcpy(data + i * ETH_GSTRING_LEN,
marvell_hw_stats[i].string, ETH_GSTRING_LEN);
}

@@ -1519,9 +1520,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
static void marvell_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data)
{
int count = marvell_get_sset_count(phydev);
int i;

for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++)
for (i = 0; i < count; i++)
data[i] = marvell_get_stat(phydev, i);
}
@@ -1122,9 +1122,16 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0846, 0x68d3, 8)},	/* Netgear Aircard 779S */
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)},	/* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},	/* Huawei E1820 */
{QMI_FIXED_INTF(0x1435, 0x0918, 3)},	/* Wistron NeWeb D16Q1 */
{QMI_FIXED_INTF(0x1435, 0x0918, 4)},	/* Wistron NeWeb D16Q1 */
{QMI_FIXED_INTF(0x1435, 0x0918, 5)},	/* Wistron NeWeb D16Q1 */
{QMI_FIXED_INTF(0x1435, 0x3185, 4)},	/* Wistron NeWeb M18Q5 */
{QMI_FIXED_INTF(0x1435, 0xd111, 4)},	/* M9615A DM11-1 D51QC */
{QMI_FIXED_INTF(0x1435, 0xd181, 3)},	/* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd181, 4)},	/* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd181, 5)},	/* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd182, 4)},	/* Wistron NeWeb D18 */
{QMI_FIXED_INTF(0x1435, 0xd182, 5)},	/* Wistron NeWeb D18 */
{QMI_FIXED_INTF(0x1435, 0xd191, 4)},	/* Wistron NeWeb D19Q1 */
{QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)},	/* Fibocom NL668 series */
{QMI_FIXED_INTF(0x16d8, 0x6003, 0)},	/* CMOTech 6003 */

@@ -1180,6 +1187,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x0265, 4)},	/* ONDA MT8205 4G LTE */
{QMI_FIXED_INTF(0x19d2, 0x0284, 4)},	/* ZTE MF880 */
{QMI_FIXED_INTF(0x19d2, 0x0326, 4)},	/* ZTE MF821D */
{QMI_FIXED_INTF(0x19d2, 0x0396, 3)},	/* ZTE ZM8620 */
{QMI_FIXED_INTF(0x19d2, 0x0412, 4)},	/* Telewell TW-LTE 4G */
{QMI_FIXED_INTF(0x19d2, 0x1008, 4)},	/* ZTE (Vodafone) K3570-Z */
{QMI_FIXED_INTF(0x19d2, 0x1010, 4)},	/* ZTE (Vodafone) K3571-Z */

@@ -1200,7 +1208,9 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)},	/* ZTE MF91 */
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)},	/* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x1432, 3)},	/* ZTE ME3620 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x2001, 0x7e16, 3)},	/* D-Link DWM-221 */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)},	/* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)},	/* D-Link DWM-222 */
{QMI_FIXED_INTF(0x2020, 0x2031, 4)},	/* Olicard 600 */
@@ -1855,7 +1855,7 @@ void ath10k_ce_dump_registers(struct ath10k *ar,
struct ath10k_ce_crash_data ce_data;
u32 addr, id;

lockdep_assert_held(&ar->data_lock);
lockdep_assert_held(&ar->dump_mutex);

ath10k_err(ar, "Copy Engine register dump:\n");

@@ -3119,6 +3119,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
goto err_free_wq;

mutex_init(&ar->conf_mutex);
mutex_init(&ar->dump_mutex);
spin_lock_init(&ar->data_lock);

INIT_LIST_HEAD(&ar->peers);

@@ -1063,6 +1063,9 @@ struct ath10k {
/* prevents concurrent FW reconfiguration */
struct mutex conf_mutex;

/* protects coredump data */
struct mutex dump_mutex;

/* protects shared structure data */
spinlock_t data_lock;

@@ -1102,7 +1102,7 @@ struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
{
struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;

lockdep_assert_held(&ar->data_lock);
lockdep_assert_held(&ar->dump_mutex);

if (ath10k_coredump_mask == 0)
/* coredump disabled */

@@ -1146,7 +1146,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
if (!buf)
return NULL;

spin_lock_bh(&ar->data_lock);
mutex_lock(&ar->dump_mutex);

dump_data = (struct ath10k_dump_file_data *)(buf);
strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",

@@ -1213,7 +1213,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
}

spin_unlock_bh(&ar->data_lock);
mutex_unlock(&ar->dump_mutex);

return dump_data;
}

@@ -5774,7 +5774,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
}

if (changed & BSS_CHANGED_MCAST_RATE &&
!WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) {
!ath10k_mac_vif_chan(arvif->vif, &def)) {
band = def.chan->band;
rateidx = vif->bss_conf.mcast_rate[band] - 1;

@@ -5812,7 +5812,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
}

if (changed & BSS_CHANGED_BASIC_RATES) {
if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) {
if (ath10k_mac_vif_chan(vif, &def)) {
mutex_unlock(&ar->conf_mutex);
return;
}

@@ -1441,7 +1441,7 @@ static void ath10k_pci_dump_registers(struct ath10k *ar,
__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
int i, ret;

lockdep_assert_held(&ar->data_lock);
lockdep_assert_held(&ar->dump_mutex);

ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
hi_failure_state,

@@ -1656,7 +1656,7 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
int ret, i;
u8 *buf;

lockdep_assert_held(&ar->data_lock);
lockdep_assert_held(&ar->dump_mutex);

if (!crash_data)
return;

@@ -1734,14 +1734,19 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
}
}

static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
static void ath10k_pci_fw_dump_work(struct work_struct *work)
{
struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci,
dump_work);
struct ath10k_fw_crash_data *crash_data;
struct ath10k *ar = ar_pci->ar;
char guid[UUID_STRING_LEN + 1];

spin_lock_bh(&ar->data_lock);
mutex_lock(&ar->dump_mutex);

spin_lock_bh(&ar->data_lock);
ar->stats.fw_crash_counter++;
spin_unlock_bh(&ar->data_lock);

crash_data = ath10k_coredump_new(ar);

@@ -1756,11 +1761,18 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
ath10k_ce_dump_registers(ar, crash_data);
ath10k_pci_dump_memory(ar, crash_data);

spin_unlock_bh(&ar->data_lock);
mutex_unlock(&ar->dump_mutex);

queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

queue_work(ar->workqueue, &ar_pci->dump_work);
}

void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
int force)
{

@@ -3442,6 +3454,8 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
spin_lock_init(&ar_pci->ps_lock);
mutex_init(&ar_pci->ce_diag_mutex);

INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work);

timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);

if (QCA_REV_6174(ar) || QCA_REV_9377(ar))

@@ -121,6 +121,8 @@ struct ath10k_pci {
/* For protecting ce_diag */
struct mutex ce_diag_mutex;

struct work_struct dump_work;

struct ath10k_ce ce;
struct timer_list rx_post_retry;
@@ -201,7 +201,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
#define IWL_DEVICE_AX210 \
IWL_DEVICE_AX200_COMMON, \
.device_family = IWL_DEVICE_FAMILY_AX210, \
.base_params = &iwl_22000_base_params, \
.base_params = &iwl_22560_base_params, \
.csr = &iwl_csr_v1, \
.min_txq_size = 128

@@ -1,7 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as

@@ -136,6 +136,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.ht_params = &iwl5000_ht_params,
.led_mode = IWL_LED_BLINK,
.internal_wimax_coex = true,
.csr = &iwl_csr_v1,
};

#define IWL_DEVICE_5150 \
@@ -93,7 +93,7 @@ struct iwl_ucode_header {
} u;
};

#define IWL_UCODE_INI_TLV_GROUP BIT(24)
#define IWL_UCODE_INI_TLV_GROUP 0x1000000

/*
* new TLV uCode file layout

@@ -148,11 +148,14 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54,
IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55,
IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP | 0x1,
IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP | 0x2,
IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP | 0x3,
IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP | 0x4,
IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP | 0x5,

IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP + 0x1,
IWL_UCODE_TLV_DEBUG_BASE = IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION,
IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP + 0x2,
IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP + 0x3,
IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP + 0x4,
IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP + 0x5,
IWL_UCODE_TLV_DEBUG_MAX = IWL_UCODE_TLV_TYPE_DEBUG_FLOW,

/* TLVs 0x1000-0x2000 are for internal driver usage */
IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000,

@@ -126,7 +126,8 @@ void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
len -= ALIGN(tlv_len, 4);
data += sizeof(*tlv) + ALIGN(tlv_len, 4);

if (!(tlv_type & IWL_UCODE_INI_TLV_GROUP))
if (tlv_type < IWL_UCODE_TLV_DEBUG_BASE ||
tlv_type > IWL_UCODE_TLV_DEBUG_MAX)
continue;

hdr = (void *)&tlv->data[0];
@@ -774,8 +774,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return;

mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);

if (!mvmvif->dbgfs_dir) {
if (IS_ERR_OR_NULL(mvmvif->dbgfs_dir)) {
IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n",
dbgfs_dir);
return;

@@ -1121,7 +1121,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_load_rt_fw(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
if (ret != -ERFKILL)
iwl_fw_dbg_error_collect(&mvm->fwrt,
FW_DBG_TRIGGER_DRIVER);
goto error;
}

@@ -834,7 +834,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex);
iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
err = iwl_run_init_mvm_ucode(mvm, true);
if (err)
if (err && err != -ERFKILL)
iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
if (!iwlmvm_mod_params.init_dbg || !err)
iwl_mvm_stop_device(mvm);
@@ -169,9 +169,9 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
}

/* iwl_mvm_create_skb Adds the rxb to a new skb */
static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
u16 len, u8 crypt_len,
struct iwl_rx_cmd_buffer *rxb)
static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_hdr *hdr, u16 len, u8 crypt_len,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;

@@ -204,6 +204,20 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
* present before copying packet data.
*/
hdrlen += crypt_len;

if (WARN_ONCE(headlen < hdrlen,
"invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
hdrlen, len, crypt_len)) {
/*
* We warn and trace because we want to be able to see
* it in trace-cmd as well.
*/
IWL_DEBUG_RX(mvm,
"invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
hdrlen, len, crypt_len);
return -EINVAL;
}

skb_put_data(skb, hdr, hdrlen);
skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);

@@ -216,6 +230,8 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
fraglen, rxb->truesize);
}

return 0;
}

static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,

@@ -1671,7 +1687,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->boottime_ns = ktime_get_boot_ns();
}

iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
kfree_skb(skb);
goto out;
}

if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue,
sta, csi);
@@ -3644,20 +3644,27 @@ out_no_pci:

void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
u32 inta_addr, sw_err_bit;

if (trans_pcie->msix_enabled) {
inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
} else {
inta_addr = CSR_INT;
sw_err_bit = CSR_INT_BIT_SW_ERR;
}

iwl_disable_interrupts(trans);
iwl_force_nmi(trans);
while (time_after(timeout, jiffies)) {
u32 inta_hw = iwl_read32(trans,
CSR_MSIX_HW_INT_CAUSES_AD);
u32 inta_hw = iwl_read32(trans, inta_addr);

/* Error detected by uCode */
if (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) {
if (inta_hw & sw_err_bit) {
/* Clear causes register */
iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD,
inta_hw &
MSIX_HW_INT_CAUSES_REG_SW_ERR);
iwl_write32(trans, inta_addr, inta_hw & sw_err_bit);
break;
}
@@ -181,7 +181,7 @@ static int mwifiex_sdio_resume(struct device *dev)

adapter = card->adapter;

if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, WARN,
"device already resumed\n");
return 0;
@@ -510,7 +510,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
} \
_out: \
rcu_read_unlock(); \
preempt_enable_no_resched(); \
preempt_enable(); \
_ret; \
})
@@ -105,7 +105,6 @@ enum sctp_verb {
SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
SCTP_CMD_SEND_MSG, /* Send the whole use message */
SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
SCTP_CMD_SET_ASOC, /* Restore association context */
SCTP_CMD_LAST
@@ -295,7 +295,8 @@ struct xfrm_replay {
};

struct xfrm_if_cb {
struct xfrm_if *(*decode_session)(struct sk_buff *skb);
struct xfrm_if *(*decode_session)(struct sk_buff *skb,
unsigned short family);
};

void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);

@@ -1404,6 +1405,23 @@ static inline int xfrm_state_kern(const struct xfrm_state *x)
return atomic_read(&x->tunnel_users);
}

static inline bool xfrm_id_proto_valid(u8 proto)
{
switch (proto) {
case IPPROTO_AH:
case IPPROTO_ESP:
case IPPROTO_COMP:
#if IS_ENABLED(CONFIG_IPV6)
case IPPROTO_ROUTING:
case IPPROTO_DSTOPTS:
#endif
return true;
default:
return false;
}
}

/* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
{
return (!userproto || proto == userproto ||
@@ -4138,15 +4138,35 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
return 0;
}

static void __find_good_pkt_pointers(struct bpf_func_state *state,
struct bpf_reg_state *dst_reg,
enum bpf_reg_type type, u16 new_range)
{
struct bpf_reg_state *reg;
int i;

for (i = 0; i < MAX_BPF_REG; i++) {
reg = &state->regs[i];
if (reg->type == type && reg->id == dst_reg->id)
/* keep the maximum range already checked */
reg->range = max(reg->range, new_range);
}

bpf_for_each_spilled_reg(i, state, reg) {
if (!reg)
continue;
if (reg->type == type && reg->id == dst_reg->id)
reg->range = max(reg->range, new_range);
}
}

static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
struct bpf_reg_state *dst_reg,
enum bpf_reg_type type,
bool range_right_open)
{
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs, *reg;
u16 new_range;
int i, j;
int i;

if (dst_reg->off < 0 ||
(dst_reg->off == 0 && range_right_open))

@@ -4211,20 +4231,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
* the range won't allow anything.
* dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
*/
for (i = 0; i < MAX_BPF_REG; i++)
if (regs[i].type == type && regs[i].id == dst_reg->id)
/* keep the maximum range already checked */
regs[i].range = max(regs[i].range, new_range);

for (j = 0; j <= vstate->curframe; j++) {
state = vstate->frame[j];
bpf_for_each_spilled_reg(i, state, reg) {
if (!reg)
continue;
if (reg->type == type && reg->id == dst_reg->id)
reg->range = max(reg->range, new_range);
}
}
for (i = 0; i <= vstate->curframe; i++)
__find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
new_range);
}

/* compute branch direction of the expression "if (reg opcode val) goto target;"

@@ -4698,6 +4707,22 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
}
}

static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
bool is_null)
{
struct bpf_reg_state *reg;
int i;

for (i = 0; i < MAX_BPF_REG; i++)
mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);

bpf_for_each_spilled_reg(i, state, reg) {
if (!reg)
continue;
mark_ptr_or_null_reg(state, reg, id, is_null);
}
}

/* The logic is similar to find_good_pkt_pointers(), both could eventually
* be folded together at some point.
*/

@@ -4705,10 +4730,10 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
bool is_null)
{
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *reg, *regs = state->regs;
struct bpf_reg_state *regs = state->regs;
u32 ref_obj_id = regs[regno].ref_obj_id;
u32 id = regs[regno].id;
int i, j;
int i;

if (ref_obj_id && ref_obj_id == id && is_null)
/* regs[regno] is in the " == NULL" branch.

@@ -4717,17 +4742,8 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
*/
WARN_ON_ONCE(release_reference_state(state, id));

for (i = 0; i < MAX_BPF_REG; i++)
mark_ptr_or_null_reg(state, &regs[i], id, is_null);

for (j = 0; j <= vstate->curframe; j++) {
state = vstate->frame[j];
bpf_for_each_spilled_reg(i, state, reg) {
if (!reg)
continue;
mark_ptr_or_null_reg(state, reg, id, is_null);
}
}
for (i = 0; i <= vstate->curframe; i++)
__mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
}

static bool try_match_pkt_pointers(const struct bpf_insn *insn,
@@ -1920,6 +1920,7 @@ static int __init atalk_init(void)
ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
if (!ddp_dl) {
pr_crit("Unable to register DDP with SNAP.\n");
rc = -ENOMEM;
goto out_sock;
}
@@ -226,7 +226,7 @@ static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
tail[plen - 1] = proto;
}

static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
static int esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
int encap_type;
struct udphdr *uh;

@@ -234,6 +234,7 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
__be16 sport, dport;
struct xfrm_encap_tmpl *encap = x->encap;
struct ip_esp_hdr *esph = esp->esph;
unsigned int len;

spin_lock_bh(&x->lock);
sport = encap->encap_sport;

@@ -241,11 +242,14 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
encap_type = encap->encap_type;
spin_unlock_bh(&x->lock);

len = skb->len + esp->tailen - skb_transport_offset(skb);
if (len + sizeof(struct iphdr) >= IP_MAX_MTU)
return -EMSGSIZE;

uh = (struct udphdr *)esph;
uh->source = sport;
uh->dest = dport;
uh->len = htons(skb->len + esp->tailen
- skb_transport_offset(skb));
uh->len = htons(len);
uh->check = 0;

switch (encap_type) {

@@ -262,6 +266,8 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru

*skb_mac_header(skb) = IPPROTO_UDP;
esp->esph = esph;

return 0;
}

int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)

@@ -275,8 +281,12 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
int tailen = esp->tailen;

/* this is non-NULL only with UDP Encapsulation */
if (x->encap)
esp_output_udp_encap(x, skb, esp);
if (x->encap) {
int err = esp_output_udp_encap(x, skb, esp);

if (err < 0)
return err;
}

if (!skb_cloned(skb)) {
if (tailen <= skb_tailroom(skb)) {
@@ -52,13 +52,13 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
goto out;

if (sp->len == XFRM_MAX_DEPTH)
goto out;
goto out_reset;

x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
(xfrm_address_t *)&ip_hdr(skb)->daddr,
spi, IPPROTO_ESP, AF_INET);
if (!x)
goto out;
goto out_reset;

sp->xvec[sp->len++] = x;
sp->olen++;

@@ -66,7 +66,7 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
xo = xfrm_offload(skb);
if (!xo) {
xfrm_state_put(x);
goto out;
goto out_reset;
}
}

@@ -82,6 +82,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
xfrm_input(skb, IPPROTO_ESP, spi, -2);

return ERR_PTR(-EINPROGRESS);
out_reset:
secpath_reset(skb);
out:
skb_push(skb, offset);
NAPI_GRO_CB(skb)->same_flow = 0;
@@ -519,6 +519,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->pkt_type = from->pkt_type;
to->priority = from->priority;
to->protocol = from->protocol;
to->skb_iif = from->skb_iif;
skb_dst_drop(to);
skb_dst_copy(to, from);
to->dev = from->dev;
@@ -646,10 +646,8 @@ static int __init vti_init(void)

msg = "ipip tunnel";
err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
if (err < 0) {
pr_info("%s: cant't register tunnel\n",__func__);
if (err < 0)
goto xfrm_tunnel_failed;
}

msg = "netlink interface";
err = rtnl_link_register(&vti_link_ops);

@@ -659,9 +657,9 @@ static int __init vti_init(void)
return err;

rtnl_link_failed:
xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
xfrm_tunnel_failed:
xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
xfrm_tunnel_failed:
xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
xfrm_proto_comp_failed:
xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:

@@ -676,6 +674,7 @@ pernet_dev_failed:
static void __exit vti_fini(void)
{
rtnl_link_unregister(&vti_link_ops);
xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
@@ -1673,7 +1673,9 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
((TCP_SKB_CB(tail)->tcp_flags |
TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_URG) ||
TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
!((TCP_SKB_CB(tail)->tcp_flags &
TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
((TCP_SKB_CB(tail)->tcp_flags ^
TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
#ifdef CONFIG_TLS_DEVICE

@@ -1692,6 +1694,15 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;

/* We have to update both TCP_SKB_CB(tail)->tcp_flags and
* thtail->fin, so that the fast path in tcp_rcv_established()
* is not entered if we append a packet with a FIN.
* SYN, RST, URG are not present.
* ACK is set on both packets.
* PSH : we do not really care in TCP stack,
* at least for 'GRO' packets.
*/
thtail->fin |= th->fin;
TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;

if (TCP_SKB_CB(skb)->has_rxtstamp) {
@@ -352,6 +352,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
struct sk_buff *pp = NULL;
struct udphdr *uh2;
struct sk_buff *p;
unsigned int ulen;

/* requires non zero csum, for symmetry with GSO */
if (!uh->check) {

@@ -359,6 +360,12 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
return NULL;
}

/* Do not deal with padded or malicious packets, sorry ! */
ulen = ntohs(uh->len);
if (ulen <= sizeof(*uh) || ulen != skb_gro_len(skb)) {
NAPI_GRO_CB(skb)->flush = 1;
return NULL;
}
/* pull encapsulating udp header */
skb_gro_pull(skb, sizeof(struct udphdr));
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));

@@ -377,13 +384,14 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,

/* Terminate the flow on len mismatch or if it grow "too much".
* Under small packet flood GRO count could elsewhere grow a lot
* leading to execessive truesize values
* leading to excessive truesize values.
* On len mismatch merge the first packet shorter than gso_size,
* otherwise complete the GRO packet.
*/
if (!skb_gro_receive(p, skb) &&
if (ulen > ntohs(uh2->len) || skb_gro_receive(p, skb) ||
ulen != ntohs(uh2->len) ||
NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
pp = p;
else if (uh->len != uh2->len)
pp = p;

return pp;
}
@@ -111,7 +111,8 @@ static void
_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
{
const struct iphdr *iph = ip_hdr(skb);
u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
int ihl = iph->ihl;
u8 *xprth = skb_network_header(skb) + ihl * 4;
struct flowi4 *fl4 = &fl->u.ip4;
int oif = 0;

@@ -122,6 +123,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
fl4->flowi4_mark = skb->mark;
fl4->flowi4_oif = reverse ? skb->skb_iif : oif;

fl4->flowi4_proto = iph->protocol;
fl4->daddr = reverse ? iph->saddr : iph->daddr;
fl4->saddr = reverse ? iph->daddr : iph->saddr;
fl4->flowi4_tos = iph->tos;

if (!ip_is_fragment(iph)) {
switch (iph->protocol) {
case IPPROTO_UDP:

@@ -133,7 +139,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
pskb_may_pull(skb, xprth + 4 - skb->data)) {
__be16 *ports;

xprth = skb_network_header(skb) + iph->ihl * 4;
xprth = skb_network_header(skb) + ihl * 4;
ports = (__be16 *)xprth;

fl4->fl4_sport = ports[!!reverse];

@@ -146,7 +152,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
pskb_may_pull(skb, xprth + 2 - skb->data)) {
u8 *icmp;

xprth = skb_network_header(skb) + iph->ihl * 4;
xprth = skb_network_header(skb) + ihl * 4;
icmp = xprth;

fl4->fl4_icmp_type = icmp[0];

@@ -159,7 +165,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
pskb_may_pull(skb, xprth + 4 - skb->data)) {
__be32 *ehdr;

xprth = skb_network_header(skb) + iph->ihl * 4;
xprth = skb_network_header(skb) + ihl * 4;
ehdr = (__be32 *)xprth;

fl4->fl4_ipsec_spi = ehdr[0];

@@ -171,7 +177,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
pskb_may_pull(skb, xprth + 8 - skb->data)) {
__be32 *ah_hdr;

xprth = skb_network_header(skb) + iph->ihl * 4;
xprth = skb_network_header(skb) + ihl * 4;
ah_hdr = (__be32 *)xprth;

fl4->fl4_ipsec_spi = ah_hdr[1];

@@ -183,7 +189,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
pskb_may_pull(skb, xprth + 4 - skb->data)) {
__be16 *ipcomp_hdr;

xprth = skb_network_header(skb) + iph->ihl * 4;
xprth = skb_network_header(skb) + ihl * 4;
ipcomp_hdr = (__be16 *)xprth;

fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));

@@ -196,7 +202,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
__be16 *greflags;
__be32 *gre_hdr;

xprth = skb_network_header(skb) + iph->ihl * 4;
xprth = skb_network_header(skb) + ihl * 4;
greflags = (__be16 *)xprth;
gre_hdr = (__be32 *)xprth;

@@ -213,10 +219,6 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
break;
}
}
fl4->flowi4_proto = iph->protocol;
fl4->daddr = reverse ? iph->saddr : iph->daddr;
fl4->saddr = reverse ? iph->daddr : iph->saddr;
fl4->flowi4_tos = iph->tos;
}

static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
@@ -74,13 +74,13 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
goto out;

if (sp->len == XFRM_MAX_DEPTH)
goto out;
goto out_reset;

x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
(xfrm_address_t *)&ipv6_hdr(skb)->daddr,
spi, IPPROTO_ESP, AF_INET6);
if (!x)
goto out;
goto out_reset;

sp->xvec[sp->len++] = x;
sp->olen++;

@@ -88,7 +88,7 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
xo = xfrm_offload(skb);
if (!xo) {
xfrm_state_put(x);
goto out;
goto out_reset;
}
}

@@ -109,6 +109,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
xfrm_input(skb, IPPROTO_ESP, spi, -2);

return ERR_PTR(-EINPROGRESS);
out_reset:
secpath_reset(skb);
out:
skb_push(skb, offset);
NAPI_GRO_CB(skb)->same_flow = 0;
@@ -921,9 +921,7 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i,
if (pcpu_rt) {
struct fib6_info *from;

from = rcu_dereference_protected(pcpu_rt->from,
lockdep_is_held(&table->tb6_lock));
rcu_assign_pointer(pcpu_rt->from, NULL);
from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
fib6_info_release(from);
}
}
@@ -94,15 +94,21 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
return fl;
}

static void fl_free_rcu(struct rcu_head *head)
{
struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);

if (fl->share == IPV6_FL_S_PROCESS)
put_pid(fl->owner.pid);
kfree(fl->opt);
kfree(fl);
}

static void fl_free(struct ip6_flowlabel *fl)
{
if (fl) {
if (fl->share == IPV6_FL_S_PROCESS)
put_pid(fl->owner.pid);
kfree(fl->opt);
kfree_rcu(fl, rcu);
}
if (fl)
call_rcu(&fl->rcu, fl_free_rcu);
}

static void fl_release(struct ip6_flowlabel *fl)

@@ -633,9 +639,9 @@ recheck:
if (fl1->share == IPV6_FL_S_EXCL ||
fl1->share != fl->share ||
((fl1->share == IPV6_FL_S_PROCESS) &&
(fl1->owner.pid == fl->owner.pid)) ||
(fl1->owner.pid != fl->owner.pid)) ||
((fl1->share == IPV6_FL_S_USER) &&
uid_eq(fl1->owner.uid, fl->owner.uid)))
!uid_eq(fl1->owner.uid, fl->owner.uid)))
goto release;

err = -ENOMEM;
@@ -379,11 +379,8 @@ static void ip6_dst_destroy(struct dst_entry *dst)
in6_dev_put(idev);
}

rcu_read_lock();
from = rcu_dereference(rt->from);
rcu_assign_pointer(rt->from, NULL);
from = xchg((__force struct fib6_info **)&rt->from, NULL);
fib6_info_release(from);
rcu_read_unlock();
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,

@@ -1288,9 +1285,7 @@ static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
/* purge completely the exception to allow releasing the held resources:
* some [sk] cache may keep the dst around for unlimited time
*/
from = rcu_dereference_protected(rt6_ex->rt6i->from,
lockdep_is_held(&rt6_exception_lock));
rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
fib6_info_release(from);
dst_dev_put(&rt6_ex->rt6i->dst);

@@ -3397,11 +3392,8 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu

rcu_read_lock();
from = rcu_dereference(rt->from);
/* This fib6_info_hold() is safe here because we hold reference to rt
* and rt already holds reference to fib6_info.
*/
fib6_info_hold(from);
rcu_read_unlock();
if (!from)
goto out;

nrt = ip6_rt_cache_alloc(from, &msg->dest, NULL);
if (!nrt)

@@ -3413,10 +3405,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu

nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;

/* No need to remove rt from the exception table if rt is
* a cached route because rt6_insert_exception() will
* takes care of it
*/
/* rt6_insert_exception() will take care of duplicated exceptions */
if (rt6_insert_exception(nrt, from)) {
dst_release_immediate(&nrt->dst);
goto out;

@@ -3429,7 +3418,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);

out:
fib6_info_release(from);
rcu_read_unlock();
neigh_release(neigh);
}

@@ -3668,23 +3657,34 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)

static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
{
int type;
struct dst_entry *dst = skb_dst(skb);
struct net *net = dev_net(dst->dev);
struct inet6_dev *idev;
int type;

if (netif_is_l3_master(skb->dev) &&
dst->dev == net->loopback_dev)
idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
else
idev = ip6_dst_idev(dst);

switch (ipstats_mib_noroutes) {
case IPSTATS_MIB_INNOROUTES:
type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
if (type == IPV6_ADDR_ANY) {
IP6_INC_STATS(dev_net(dst->dev),
__in6_dev_get_safely(skb->dev),
IPSTATS_MIB_INADDRERRORS);
IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
break;
}
/* FALLTHROUGH */
case IPSTATS_MIB_OUTNOROUTES:
IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
ipstats_mib_noroutes);
IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
break;
}

/* Start over by dropping the dst for l3mdev case */
if (netif_is_l3_master(skb->dev))
skb_dst_drop(skb);

icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
kfree_skb(skb);
return 0;

@@ -5017,16 +5017,20 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,

rcu_read_lock();
from = rcu_dereference(rt->from);

if (fibmatch)
err = rt6_fill_node(net, skb, from, NULL, NULL, NULL, iif,
RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, 0);
else
err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
&fl6.saddr, iif, RTM_NEWROUTE,
NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
0);
if (from) {
if (fibmatch)
err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
iif, RTM_NEWROUTE,
NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, 0);
else
err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
&fl6.saddr, iif, RTM_NEWROUTE,
NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, 0);
} else {
err = -ENETUNREACH;
}
rcu_read_unlock();

if (err < 0) {
@@ -345,7 +345,7 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
unsigned int i;

xfrm_flush_gc();
xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
xfrm_state_flush(net, 0, false, true);

for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));

@@ -402,6 +402,10 @@ static void __exit xfrm6_tunnel_fini(void)
xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
/* Someone maybe has gotten the xfrm6_tunnel_spi.
* So need to wait it.
*/
rcu_barrier();
kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
}
@@ -1951,8 +1951,10 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)

if (rq->sadb_x_ipsecrequest_mode == 0)
return -EINVAL;
if (!xfrm_id_proto_valid(rq->sadb_x_ipsecrequest_proto))
return -EINVAL;

t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */
t->id.proto = rq->sadb_x_ipsecrequest_proto;
if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
return -EINVAL;
t->mode = mode;
@@ -169,8 +169,8 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)

rcu_read_lock_bh();
list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
if (tunnel->tunnel_id == tunnel_id) {
l2tp_tunnel_inc_refcount(tunnel);
if (tunnel->tunnel_id == tunnel_id &&
refcount_inc_not_zero(&tunnel->ref_count)) {
rcu_read_unlock_bh();

return tunnel;

@@ -190,8 +190,8 @@ struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)

rcu_read_lock_bh();
list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
if (++count > nth) {
l2tp_tunnel_inc_refcount(tunnel);
if (++count > nth &&
refcount_inc_not_zero(&tunnel->ref_count)) {
rcu_read_unlock_bh();
return tunnel;
}

@@ -909,7 +909,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct l2tp_tunnel *tunnel;

tunnel = l2tp_tunnel(sk);
tunnel = rcu_dereference_sk_user_data(sk);
if (tunnel == NULL)
goto pass_up;
@@ -841,7 +841,7 @@ void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)

dir = sdata->vif.debugfs_dir;

if (!dir)
if (IS_ERR_OR_NULL(dir))
return;

sprintf(buf, "netdev:%s", sdata->name);
@@ -112,8 +112,9 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
 			       IEEE80211_HT_CAP_TX_STBC);

 	/* Allow user to configure RX STBC bits */
-	if (ht_capa_mask->cap_info & IEEE80211_HT_CAP_RX_STBC)
-		ht_cap->cap |= ht_capa->cap_info & IEEE80211_HT_CAP_RX_STBC;
+	if (ht_capa_mask->cap_info & cpu_to_le16(IEEE80211_HT_CAP_RX_STBC))
+		ht_cap->cap |= le16_to_cpu(ht_capa->cap_info) &
+				IEEE80211_HT_CAP_RX_STBC;

 	/* Allow user to decrease AMPDU factor */
 	if (ht_capa_mask->ampdu_params_info &
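cap_info is a little-endian on-wire field (__le16) while ht_cap->cap holds host-order bits, so the mask must be converted before the AND and the field before the OR; on big-endian hosts the old code tested and merged the wrong bits. A self-contained sketch of the rule, with hand-rolled stand-ins for cpu_to_le16()/le16_to_cpu():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for cpu_to_le16(): identity on little-endian hosts,
 * a byte swap on big-endian ones. The swap is its own inverse,
 * so le16_to_cpu() is the same operation. */
static uint16_t cpu_to_le16(uint16_t v)
{
    uint8_t b[2] = { v & 0xff, v >> 8 };
    uint16_t le;

    memcpy(&le, b, 2);
    return le;
}
#define le16_to_cpu cpu_to_le16

#define HT_CAP_RX_STBC 0x0300   /* host-order mask, as in the header */

int main(void)
{
    uint16_t wire_cap_info = cpu_to_le16(0x0100);   /* on-wire __le16 */
    uint32_t cap = 0;

    /* Wrong on big-endian: masks wire bytes with a host-order constant.
     * cap |= wire_cap_info & HT_CAP_RX_STBC; */

    /* Right: bring the field to host order first */
    cap |= le16_to_cpu(wire_cap_info) & HT_CAP_RX_STBC;

    printf("cap = 0x%x\n", cap);
    return 0;
}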
@@ -1908,6 +1908,9 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
 	list_del_rcu(&sdata->list);
 	mutex_unlock(&sdata->local->iflist_mtx);

+	if (sdata->vif.txq)
+		ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq));
+
 	synchronize_rcu();

 	if (sdata->dev) {
@@ -362,8 +362,8 @@ int genl_register_family(struct genl_family *family)
 	} else
 		family->attrbuf = NULL;

-	family->id = idr_alloc(&genl_fam_idr, family,
-			       start, end + 1, GFP_KERNEL);
+	family->id = idr_alloc_cyclic(&genl_fam_idr, family,
+				      start, end + 1, GFP_KERNEL);
 	if (family->id < 0) {
 		err = family->id;
 		goto errout_free;
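idr_alloc() returns the lowest free id, so an id freed by a dying family can be handed to the next registrant immediately, and stale lookups keyed on the old id then silently resolve to the new family. idr_alloc_cyclic() instead starts scanning just past the last id it handed out. A toy model of the cyclic scan, assuming free slots are tracked in a plain array:

#include <stdbool.h>
#include <stdio.h>

#define RANGE 8

static bool used[RANGE];
static int next_hint;   /* where the next scan starts */

/* Cyclic allocator: scan from the hint, wrap once, prefer fresh ids. */
static int alloc_cyclic(void)
{
    for (int i = 0; i < RANGE; i++) {
        int id = (next_hint + i) % RANGE;

        if (!used[id]) {
            used[id] = true;
            next_hint = id + 1;     /* start after it next time */
            return id;
        }
    }
    return -1;  /* range exhausted */
}

int main(void)
{
    int a = alloc_cyclic();     /* 0 */
    int b = alloc_cyclic();     /* 1 */

    used[a] = false;            /* free id 0 */
    /* A lowest-free allocator would hand 0 back immediately;
     * the cyclic one moves on to 2 first. */
    printf("a=%d b=%d next=%d\n", a, b, alloc_cyclic());
    return 0;
}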
@@ -2602,8 +2602,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 	void *ph;
 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
 	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
+	unsigned char *addr = NULL;
 	int tp_len, size_max;
-	unsigned char *addr;
 	void *data;
 	int len_sum = 0;
 	int status = TP_STATUS_AVAILABLE;
@@ -2614,7 +2614,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 	if (likely(saddr == NULL)) {
 		dev = packet_cached_dev_get(po);
 		proto = po->num;
-		addr = NULL;
 	} else {
 		err = -EINVAL;
 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@ -2624,10 +2623,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
|
||||
sll_addr)))
|
||||
goto out;
|
||||
proto = saddr->sll_protocol;
|
||||
addr = saddr->sll_halen ? saddr->sll_addr : NULL;
|
||||
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
|
||||
if (addr && dev && saddr->sll_halen < dev->addr_len)
|
||||
goto out_put;
|
||||
if (po->sk.sk_socket->type == SOCK_DGRAM) {
|
||||
if (dev && msg->msg_namelen < dev->addr_len +
|
||||
offsetof(struct sockaddr_ll, sll_addr))
|
||||
goto out_put;
|
||||
addr = saddr->sll_addr;
|
||||
}
|
||||
}
|
||||
|
||||
err = -ENXIO;
|
||||
@@ -2799,7 +2801,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 	struct sk_buff *skb;
 	struct net_device *dev;
 	__be16 proto;
-	unsigned char *addr;
+	unsigned char *addr = NULL;
 	int err, reserve = 0;
 	struct sockcm_cookie sockc;
 	struct virtio_net_hdr vnet_hdr = { 0 };
@@ -2816,7 +2818,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 	if (likely(saddr == NULL)) {
 		dev = packet_cached_dev_get(po);
 		proto = po->num;
-		addr = NULL;
 	} else {
 		err = -EINVAL;
 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2824,10 +2825,13 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
 			goto out;
 		proto = saddr->sll_protocol;
-		addr = saddr->sll_halen ? saddr->sll_addr : NULL;
 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
-		if (addr && dev && saddr->sll_halen < dev->addr_len)
-			goto out_unlock;
+		if (sock->type == SOCK_DGRAM) {
+			if (dev && msg->msg_namelen < dev->addr_len +
+				   offsetof(struct sockaddr_ll, sll_addr))
+				goto out_unlock;
+			addr = saddr->sll_addr;
+		}
 	}

 	err = -ENXIO;
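Both send paths now take a destination address only for SOCK_DGRAM sockets, and only after verifying that msg_namelen really covers dev->addr_len bytes past sll_addr; the old check trusted the caller's sll_halen and could read past the supplied name. The check in isolation (struct abbreviated, layout and sizes hypothetical):

#include <stddef.h>
#include <stdio.h>

/* Abbreviated stand-in for struct sockaddr_ll: sll_addr is the final,
 * nominally 8-byte member, but some link layers need more bytes, which
 * the caller must supply past the end of the struct. */
struct sockaddr_ll {
    unsigned short sll_family;
    unsigned short sll_hatype;
    unsigned char  sll_halen;
    unsigned char  sll_addr[8];
};

/* Accept the address only if the caller's buffer really contains
 * dev_addr_len bytes of it. Returns the address or NULL. */
static const unsigned char *get_dest_addr(const struct sockaddr_ll *sa,
                                          size_t msg_namelen,
                                          size_t dev_addr_len)
{
    if (msg_namelen < sizeof(*sa))
        return NULL;    /* too short to even be a sockaddr_ll */
    if (msg_namelen < dev_addr_len + offsetof(struct sockaddr_ll, sll_addr))
        return NULL;    /* address would read past the buffer */
    return sa->sll_addr;
}

int main(void)
{
    struct sockaddr_ll sa = { .sll_halen = 6 };

    /* 16-byte InfiniBand-style address in a struct-sized name: rejected */
    printf("%s\n", get_dest_addr(&sa, sizeof(sa), 16) ? "ok" : "rejected");
    /* 6-byte Ethernet address in a struct-sized name: accepted */
    printf("%s\n", get_dest_addr(&sa, sizeof(sa), 6) ? "ok" : "rejected");
    return 0;
}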
@@ -3344,20 +3348,29 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	sock_recv_ts_and_drops(msg, sk, skb);

 	if (msg->msg_name) {
+		int copy_len;
+
 		/* If the address length field is there to be filled
 		 * in, we fill it in now.
 		 */
 		if (sock->type == SOCK_PACKET) {
 			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
 			msg->msg_namelen = sizeof(struct sockaddr_pkt);
+			copy_len = msg->msg_namelen;
 		} else {
 			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;

 			msg->msg_namelen = sll->sll_halen +
 				offsetof(struct sockaddr_ll, sll_addr);
+			copy_len = msg->msg_namelen;
+			if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
+				memset(msg->msg_name +
+				       offsetof(struct sockaddr_ll, sll_addr),
+				       0, sizeof(sll->sll_addr));
+				msg->msg_namelen = sizeof(struct sockaddr_ll);
+			}
 		}
-		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
-		       msg->msg_namelen);
+		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
 	}

 	if (pkt_sk(sk)->auxdata) {
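packet_recvmsg() has the inverse problem: a name computed from a short sll_halen is smaller than sizeof(struct sockaddr_ll), so the fix reports the full structure size, zero-pads the address tail, and copies only the copy_len bytes that actually carry data. A userspace model of that bookkeeping, reusing the abbreviated struct from the previous sketch:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct sockaddr_ll {    /* abbreviated stand-in, as above */
    unsigned short sll_family;
    unsigned char  sll_halen;
    unsigned char  sll_addr[8];
};

int main(void)
{
    struct sockaddr_ll sa = {
        .sll_family = 17, .sll_halen = 2, .sll_addr = { 0xaa, 0xbb },
    };
    unsigned char msg_name[64];
    size_t msg_namelen, copy_len;

    /* Length derived from the short hardware address */
    msg_namelen = sa.sll_halen + offsetof(struct sockaddr_ll, sll_addr);
    copy_len = msg_namelen;     /* only these bytes hold real data */

    if (msg_namelen < sizeof(struct sockaddr_ll)) {
        /* Zero the address tail in the caller's buffer so the
         * full-size name we report contains no stale bytes */
        memset(msg_name + offsetof(struct sockaddr_ll, sll_addr),
               0, sizeof(sa.sll_addr));
        msg_namelen = sizeof(struct sockaddr_ll);
    }
    memcpy(msg_name, &sa, copy_len);    /* never reads past the source */

    printf("reported %zu bytes, copied %zu\n", msg_namelen, copy_len);
    return 0;
}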
@@ -772,7 +772,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
 	unsigned long frag_off;
 	unsigned long to_copy;
 	unsigned long copied;
-	uint64_t uncongested = 0;
+	__le64 uncongested = 0;
 	void *addr;

 	/* catch completely corrupt packets */
@@ -789,7 +789,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
 	copied = 0;

 	while (copied < RDS_CONG_MAP_BYTES) {
-		uint64_t *src, *dst;
+		__le64 *src, *dst;
 		unsigned int k;

 		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
@@ -824,9 +824,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
 	}

 	/* the congestion map is in little endian order */
-	uncongested = le64_to_cpu(uncongested);
-
-	rds_cong_map_updated(map, uncongested);
+	rds_cong_map_updated(map, le64_to_cpu(uncongested));
 }

 static void rds_ib_process_recv(struct rds_connection *conn,
@@ -604,30 +604,30 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)

 	_enter("");

-	if (list_empty(&rxnet->calls))
-		return;
+	if (!list_empty(&rxnet->calls)) {
+		write_lock(&rxnet->call_lock);

-	write_lock(&rxnet->call_lock);
+		while (!list_empty(&rxnet->calls)) {
+			call = list_entry(rxnet->calls.next,
+					  struct rxrpc_call, link);
+			_debug("Zapping call %p", call);

-	while (!list_empty(&rxnet->calls)) {
-		call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
-		_debug("Zapping call %p", call);
+			rxrpc_see_call(call);
+			list_del_init(&call->link);

-		rxrpc_see_call(call);
-		list_del_init(&call->link);
+			pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
+			       call, atomic_read(&call->usage),
+			       rxrpc_call_states[call->state],
+			       call->flags, call->events);

-		pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
-		       call, atomic_read(&call->usage),
-		       rxrpc_call_states[call->state],
-		       call->flags, call->events);
+			write_unlock(&rxnet->call_lock);
+			cond_resched();
+			write_lock(&rxnet->call_lock);
+		}

-		write_unlock(&rxnet->call_lock);
-		cond_resched();
-		write_lock(&rxnet->call_lock);
+		write_unlock(&rxnet->call_lock);
 	}

-	write_unlock(&rxnet->call_lock);
-
 	atomic_dec(&rxnet->nr_calls);
 	wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
 }
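The zap loop keeps its drop-the-lock, reschedule, retake-it cadence, but both the loop and its lock acquisition now live inside the if (!list_empty(...)) block, so the trailing unlock always pairs with a lock that was actually taken. The general pattern, sketched with pthreads purely for illustration:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pending = 3;     /* stand-in for the rxnet->calls list */

/* Drain a shared list without hogging the lock: handle one item per
 * lock hold, yielding between items (the kernel uses cond_resched()). */
static void drain(void)
{
    pthread_mutex_lock(&lock);
    while (pending > 0) {
        pending--;          /* "zap" one entry under the lock */
        printf("zapped one, %d left\n", pending);

        pthread_mutex_unlock(&lock);
        sched_yield();      /* let other threads run / take the lock */
        pthread_mutex_lock(&lock);
    }
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    drain();
    return 0;
}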
@@ -1112,32 +1112,6 @@ static void sctp_cmd_send_msg(struct sctp_association *asoc,
 }


-/* Sent the next ASCONF packet currently stored in the association.
- * This happens after the ASCONF_ACK was succeffully processed.
- */
-static void sctp_cmd_send_asconf(struct sctp_association *asoc)
-{
-	struct net *net = sock_net(asoc->base.sk);
-
-	/* Send the next asconf chunk from the addip chunk
-	 * queue.
-	 */
-	if (!list_empty(&asoc->addip_chunk_list)) {
-		struct list_head *entry = asoc->addip_chunk_list.next;
-		struct sctp_chunk *asconf = list_entry(entry,
-						       struct sctp_chunk, list);
-		list_del_init(entry);
-
-		/* Hold the chunk until an ASCONF_ACK is received. */
-		sctp_chunk_hold(asconf);
-		if (sctp_primitive_ASCONF(net, asoc, asconf))
-			sctp_chunk_free(asconf);
-		else
-			asoc->addip_last_asconf = asconf;
-	}
-}
-
-
 /* These three macros allow us to pull the debugging code out of the
  * main flow of sctp_do_sm() to keep attention focused on the real
  * functionality there.
@@ -1783,9 +1757,6 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
 			}
 			sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
 			break;
-		case SCTP_CMD_SEND_NEXT_ASCONF:
-			sctp_cmd_send_asconf(asoc);
-			break;
 		case SCTP_CMD_PURGE_ASCONF_QUEUE:
 			sctp_asconf_queue_teardown(asoc);
 			break;
@@ -3824,6 +3824,29 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
 	return SCTP_DISPOSITION_CONSUME;
 }

+static enum sctp_disposition sctp_send_next_asconf(
+					struct net *net,
+					const struct sctp_endpoint *ep,
+					struct sctp_association *asoc,
+					const union sctp_subtype type,
+					struct sctp_cmd_seq *commands)
+{
+	struct sctp_chunk *asconf;
+	struct list_head *entry;
+
+	if (list_empty(&asoc->addip_chunk_list))
+		return SCTP_DISPOSITION_CONSUME;
+
+	entry = asoc->addip_chunk_list.next;
+	asconf = list_entry(entry, struct sctp_chunk, list);
+
+	list_del_init(entry);
+	sctp_chunk_hold(asconf);
+	asoc->addip_last_asconf = asconf;
+
+	return sctp_sf_do_prm_asconf(net, ep, asoc, type, asconf, commands);
+}
+
 /*
  * ADDIP Section 4.3 General rules for address manipulation
  * When building TLV parameters for the ASCONF Chunk that will add or
@@ -3915,14 +3938,10 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
 				SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));

 		if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
-					     asconf_ack)) {
-			/* Successfully processed ASCONF_ACK. We can
-			 * release the next asconf if we have one.
-			 */
-			sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
-					SCTP_NULL());
-			return SCTP_DISPOSITION_CONSUME;
-		}
+					     asconf_ack))
+			return sctp_send_next_asconf(net, ep,
+					(struct sctp_association *)asoc,
+						     type, commands);

 		abort = sctp_make_abort(asoc, asconf_ack,
 					sizeof(struct sctp_errhdr));
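The old flow queued SCTP_CMD_SEND_NEXT_ASCONF, whose side-effect handler re-entered the state machine through sctp_primitive_ASCONF(), so a peer feeding ASCONF_ACKs could nest sctp_do_sm() without bound; the new sctp_send_next_asconf() finishes the work as a tail call inside the current pass. A toy contrast of the two shapes (names hypothetical, not the SCTP code):

#include <stdio.h>

static int depth;   /* models nested state-machine invocations */

/* Recursive style: each processed ACK re-enters the dispatcher,
 * growing the stack once per queued chunk. */
static void handle_ack_recursive(int chunks_left)
{
    depth++;
    printf("dispatcher depth %d\n", depth);
    if (chunks_left > 0)
        handle_ack_recursive(chunks_left - 1);  /* unbounded nesting */
    depth--;
}

/* Tail-call style: the "send next" step completes within the current
 * pass, so the depth never exceeds one. */
static void handle_ack_flat(int chunks_left)
{
    depth++;
    printf("dispatcher depth %d\n", depth);
    depth--;
    while (chunks_left-- > 0)
        printf("send next asconf, depth still %d\n", depth);
}

int main(void)
{
    handle_ack_recursive(3);
    handle_ack_flat(3);
    return 0;
}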
@@ -597,7 +597,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
 {
 	struct strp_msg *rxm = strp_msg(skb);
-	int err = 0, offset = rxm->offset, copy, nsg;
+	int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
 	struct sk_buff *skb_iter, *unused;
 	struct scatterlist sg[1];
 	char *orig_buf, *buf;
@@ -628,27 +628,44 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
 	else
 		err = 0;

-	copy = min_t(int, skb_pagelen(skb) - offset,
-		     rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;

-	if (skb->decrypted)
-		skb_store_bits(skb, offset, buf, copy);
+	if (skb_pagelen(skb) > offset) {
+		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

-	offset += copy;
-	buf += copy;
+		if (skb->decrypted)
+			skb_store_bits(skb, offset, buf, copy);

-	skb_walk_frags(skb, skb_iter) {
-		copy = min_t(int, skb_iter->len,
-			     rxm->full_len - offset + rxm->offset -
-			     TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+		offset += copy;
+		buf += copy;
+	}

-		if (skb_iter->decrypted)
-			skb_store_bits(skb_iter, offset, buf, copy);
+	pos = skb_pagelen(skb);
+	skb_walk_frags(skb, skb_iter) {
+		int frag_pos;

-		offset += copy;
-		buf += copy;
+		/* Practically all frags must belong to msg if reencrypt
+		 * is needed with current strparser and coalescing logic,
+		 * but strparser may "get optimized", so let's be safe.
+		 */
+		if (pos + skb_iter->len <= offset)
+			goto done_with_frag;
+		if (pos >= data_len + rxm->offset)
+			break;
+
+		frag_pos = offset - pos;
+		copy = min_t(int, skb_iter->len - frag_pos,
+			     data_len + rxm->offset - offset);
+
+		if (skb_iter->decrypted)
+			skb_store_bits(skb_iter, frag_pos, buf, copy);
+
+		offset += copy;
+		buf += copy;
+done_with_frag:
+		pos += skb_iter->len;
 	}

 free_buf:
 	kfree(orig_buf);
 	return err;
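The rewritten copy-back runs two cursors: offset is the position in the decrypted stream written so far, pos is where the current fragment begins, and every copy is clamped so it neither starts before the fragment nor runs past data_len, which is what let the old arithmetic go negative. The same bookkeeping over plain arrays (a model, not the kernel code):

#include <stdio.h>
#include <string.h>

struct frag { unsigned char data[8]; int len; };

int main(void)
{
    struct frag frags[3] = { { .len = 8 }, { .len = 8 }, { .len = 8 } };
    unsigned char payload[10] = "0123456789";   /* decrypted bytes */
    int offset = 5;     /* payload starts 5 bytes into the stream */
    int data_len = 10;  /* bytes to copy back (auth tag excluded) */
    int end = offset + data_len;
    int pos = 0;        /* stream position of the current fragment */
    const unsigned char *buf = payload;

    for (int i = 0; i < 3; i++) {
        int frag_pos, copy;

        if (pos + frags[i].len <= offset)   /* fragment wholly before data */
            goto done_with_frag;
        if (pos >= end)                     /* fragment wholly after data */
            break;

        frag_pos = offset - pos;            /* clamp start within fragment */
        copy = frags[i].len - frag_pos;
        if (copy > end - offset)            /* clamp length to what's left */
            copy = end - offset;

        memcpy(frags[i].data + frag_pos, buf, copy);
        offset += copy;
        buf += copy;
done_with_frag:
        pos += frags[i].len;
    }
    printf("copied up to stream offset %d\n", offset);
    return 0;
}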
@@ -201,13 +201,14 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)

 	skb_put(nskb, skb->len);
 	memcpy(nskb->data, skb->data, headln);
-	update_chksum(nskb, headln);

 	nskb->destructor = skb->destructor;
 	nskb->sk = sk;
 	skb->destructor = NULL;
 	skb->sk = NULL;
+
+	update_chksum(nskb, headln);

 	delta = nskb->truesize - skb->truesize;
 	if (likely(delta < 0))
 		WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
@@ -3778,10 +3778,9 @@ void wiphy_regulatory_register(struct wiphy *wiphy)
 	/*
 	 * The last request may have been received before this
 	 * registration call. Call the driver notifier if
-	 * initiator is USER and user type is CELL_BASE.
+	 * initiator is USER.
 	 */
-	if (lr->initiator == NL80211_REGDOM_SET_BY_USER &&
-	    lr->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE)
+	if (lr->initiator == NL80211_REGDOM_SET_BY_USER)
 		reg_call_notifier(wiphy, lr);
 }
@@ -70,17 +70,28 @@ static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
 	return NULL;
 }

-static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
+static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
+					    unsigned short family)
 {
 	struct xfrmi_net *xfrmn;
-	int ifindex;
 	struct xfrm_if *xi;
+	int ifindex = 0;

 	if (!secpath_exists(skb) || !skb->dev)
 		return NULL;

+	switch (family) {
+	case AF_INET6:
+		ifindex = inet6_sdif(skb);
+		break;
+	case AF_INET:
+		ifindex = inet_sdif(skb);
+		break;
+	}
+	if (!ifindex)
+		ifindex = skb->dev->ifindex;
+
 	xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
-	ifindex = skb->dev->ifindex;

 	for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
 		if (ifindex == xi->dev->ifindex &&
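The decoder now prefers the L3 slave ifindex (sdif) recorded at receive time and falls back to skb->dev->ifindex when the packet did not arrive through an L3 master device; the address family is passed in so the right accessor is chosen. The selection logic in isolation (a sketch; only the accessor names follow the kernel):

#include <stdio.h>

#define AF_INET  2
#define AF_INET6 10

/* sdif stands in for inet_sdif()/inet6_sdif(): the slave device
 * ifindex recorded at receive time, or 0 when there is none. */
struct pkt { int family; int sdif; int dev_ifindex; };

static int decode_ifindex(const struct pkt *p)
{
    int ifindex = 0;

    switch (p->family) {
    case AF_INET6:
        ifindex = p->sdif;      /* inet6_sdif(skb) */
        break;
    case AF_INET:
        ifindex = p->sdif;      /* inet_sdif(skb) */
        break;
    }
    if (!ifindex)
        ifindex = p->dev_ifindex;   /* no L3 master: use skb->dev */
    return ifindex;
}

int main(void)
{
    struct pkt vrf   = { AF_INET,  .sdif = 7, .dev_ifindex = 3 };
    struct pkt plain = { AF_INET6, .sdif = 0, .dev_ifindex = 3 };

    printf("vrf -> %d, plain -> %d\n",
           decode_ifindex(&vrf), decode_ifindex(&plain));
    return 0;
}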
@@ -3313,7 +3313,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
 	ifcb = xfrm_if_get_cb();

 	if (ifcb) {
-		xi = ifcb->decode_session(skb);
+		xi = ifcb->decode_session(skb, family);
 		if (xi) {
 			if_id = xi->p.if_id;
 			net = xi->net;
@@ -2384,7 +2384,7 @@ void xfrm_state_fini(struct net *net)

 	flush_work(&net->xfrm.state_hash_work);
 	flush_work(&xfrm_state_gc_work);
-	xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
+	xfrm_state_flush(net, 0, false, true);

 	WARN_ON(!list_empty(&net->xfrm.state_all));
@@ -1424,7 +1424,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
 	ret = verify_policy_dir(p->dir);
 	if (ret)
 		return ret;
-	if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir))
+	if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
 		return -EINVAL;

 	return 0;
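XFRM_POLICY_MAX is 3, so the old mask inspected only two bits of the index, while per the kernel headers a policy id encodes its direction in the low three bits; xfrm_policy_id2dir() therefore also rejects ids whose encoded direction is out of range. A worked example of an input the old test wrongly accepted:

#include <stdio.h>

#define XFRM_POLICY_MAX 3   /* valid directions: 0, 1, 2 */

/* Per the kernel headers: the direction lives in the low 3 bits. */
static int xfrm_policy_id2dir(unsigned int index)
{
    return index & 7;
}

int main(void)
{
    unsigned int index = 6; /* encodes direction 6: out of range */
    int dir = 2;            /* attacker-chosen, in-range */

    /* Old check: 6 & 3 == 2 == dir -> accepted */
    printf("old check: %s\n",
           (index & XFRM_POLICY_MAX) != dir ? "rejected" : "accepted");
    /* New check: id2dir(6) == 6 != 2 -> rejected */
    printf("new check: %s\n",
           xfrm_policy_id2dir(index) != dir ? "rejected" : "accepted");
    return 0;
}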
@@ -1513,20 +1513,8 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
 			return -EINVAL;
 		}

-		switch (ut[i].id.proto) {
-		case IPPROTO_AH:
-		case IPPROTO_ESP:
-		case IPPROTO_COMP:
-#if IS_ENABLED(CONFIG_IPV6)
-		case IPPROTO_ROUTING:
-		case IPPROTO_DSTOPTS:
-#endif
-		case IPSEC_PROTO_ANY:
-			break;
-		default:
+		if (!xfrm_id_proto_valid(ut[i].id.proto))
 			return -EINVAL;
-		}
-
 	}

 	return 0;
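The open-coded switch moves into xfrm_id_proto_valid(), shared with the af_key and state-flush paths touched above (note those hunks also stop passing IPSEC_PROTO_ANY, which the old switch here still accepted). A reconstruction of such a predicate from the removed case list, not quoted from the header; in the kernel the IPv6 cases are conditional on CONFIG_IPV6:

#include <stdbool.h>
#include <netinet/in.h> /* IPPROTO_* */
#include <stdio.h>

/* Reconstruction of an xfrm_id_proto_valid()-style predicate from the
 * case list removed in validate_tmpl() above. */
static bool xfrm_id_proto_valid(unsigned char proto)
{
    switch (proto) {
    case IPPROTO_AH:
    case IPPROTO_ESP:
    case IPPROTO_COMP:
    case IPPROTO_ROUTING:   /* IPv6-only in the kernel */
    case IPPROTO_DSTOPTS:   /* IPv6-only in the kernel */
        return true;
    default:
        return false;
    }
}

int main(void)
{
    printf("ESP: %d, TCP: %d\n",
           xfrm_id_proto_valid(IPPROTO_ESP),
           xfrm_id_proto_valid(IPPROTO_TCP));
    return 0;
}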
@@ -1151,6 +1151,9 @@ static int do_create(int argc, char **argv)
 				return -1;
 			}
 			NEXT_ARG();
+		} else {
+			p_err("unknown arg %s", *argv);
+			return -1;
 		}
 	}

tools/lib/bpf/.gitignore (vendored)
@@ -1,3 +1,4 @@
 libbpf_version.h
 FEATURE-DUMP.libbpf
 test_libbpf
+libbpf.so.*
@@ -374,6 +374,31 @@
 	.prog_type = BPF_PROG_TYPE_XDP,
 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+	"calls: ptr null check in subprog",
+	.insns = {
+	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+	BPF_EXIT_INSN(),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_EXIT_INSN(),
+	},
+	.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+	.fixup_map_hash_48b = { 3 },
+	.result_unpriv = REJECT,
+	.result = ACCEPT,
+	.retval = 0,
+},
 {
 	"calls: two calls with args",
 	.insns = {
@@ -631,3 +631,25 @@
 	.errstr = "invalid access to packet",
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
+{
+	"direct packet access: test29 (reg > pkt_end in subprog)",
+	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+		    offsetof(struct __sk_buff, data)),
+	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+		    offsetof(struct __sk_buff, data_end)),
+	BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
@@ -27,6 +27,7 @@ log_test()
 		nsuccess=$((nsuccess+1))
 		printf "\n    TEST: %-50s  [ OK ]\n" "${msg}"
 	else
+		ret=1
 		nfail=$((nfail+1))
 		printf "\n    TEST: %-50s  [FAIL]\n" "${msg}"
 		if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
@@ -147,8 +148,8 @@ fib_rule6_test()

 	fib_check_iproute_support "ipproto" "ipproto"
 	if [ $? -eq 0 ]; then
-		match="ipproto icmp"
-		fib_rule6_test_match_n_redirect "$match" "$match" "ipproto icmp match"
+		match="ipproto ipv6-icmp"
+		fib_rule6_test_match_n_redirect "$match" "$match" "ipproto ipv6-icmp match"
 	fi
 }
@@ -245,4 +246,9 @@ setup
 run_fibrule_tests
 cleanup

+if [ "$TESTS" != "none" ]; then
+	printf "\nTests passed: %3d\n" ${nsuccess}
+	printf "Tests failed: %3d\n" ${nfail}
+fi
+
 exit $ret