Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix multiqueue in stmmac driver on PCI, from Andy Shevchenko.

 2) cdc_ncm doesn't actually fully zero out the padding area it allocates
    on TX, from Jim Baxter.

 3) Don't leak map addresses in BPF verifier, from Daniel Borkmann.

 4) If we randomize TCP timestamps, we have to do it everywhere including
    SYN cookies. From Eric Dumazet.

 5) Fix "ethtool -S" crash in aquantia driver, from Pavel Belous.

 6) Fix allocation size for ntp filter bitmap in bnxt_en driver, from
    Dan Carpenter.

 7) Add missing memory allocation return value check to DSA loop driver,
    from Christophe Jaillet.

 8) Fix XDP leak on driver unload in qed driver, from Suddarsana Reddy
    Kalluru.

 9) Don't inherit MC list from parent inet connection sockets, another
    syzkaller spotted gem. Fix from Eric Dumazet.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (43 commits)
  dccp/tcp: do not inherit mc_list from parent
  qede: Split PF/VF ndos.
  qed: Correct doorbell configuration for !4Kb pages
  qed: Tell QM the number of tasks
  qed: Fix VF removal sequence
  qede: Fix XDP memory leak on unload
  net/mlx4_core: Reduce harmless SRIOV error message to debug level
  net/mlx4_en: Avoid adding steering rules with invalid ring
  net/mlx4_en: Change the error print to debug print
  drivers: net: wimax: i2400m: i2400m-usb: Use time_after for time comparison
  DECnet: Use container_of() for embedded struct
  Revert "ipv4: restore rt->fi for reference counting"
  net: mdio-mux: bcm-iproc: call mdiobus_free() in error path
  net: ethernet: ti: cpsw: adjust cpsw fifos depth for fullduplex flow control
  ipv6: reorder ip6_route_dev_notifier after ipv6_dev_notf
  net: cdc_ncm: Fix TX zero padding
  stmmac: pci: split out common_default_data() helper
  stmmac: pci: RX queue routing configuration
  stmmac: pci: TX and RX queue priority configuration
  stmmac: pci: set default number of rx and tx queues
  ...
commit 50fb55d88c
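Several hunks below (bond_fill_info() and vti_fill_info()) apply the same netlink pattern: nla_put_*() can fail when the skb runs out of tailroom, so every call must be checked and the .fill_info() callback must return -EMSGSIZE instead of silently emitting a truncated message. The snippet below is a minimal illustration of that pattern, not code from this merge; the example_fill_info() name and the attribute numbers are hypothetical.

#include <net/netlink.h>	/* nla_put_u32(), nla_put_be32() */

/* Hypothetical rtnl fill_info()-style callback: each nla_put_*() return
 * value is checked, and a failure propagates -EMSGSIZE to the caller so
 * the dump can be retried with a larger buffer.
 */
static int example_fill_info(struct sk_buff *skb, u32 link, __be32 key)
{
	if (nla_put_u32(skb, 1 /* hypothetical LINK attribute */, link) ||
	    nla_put_be32(skb, 2 /* hypothetical KEY attribute */, key))
		return -EMSGSIZE;

	return 0;
}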
@@ -549,7 +549,8 @@ static int bond_fill_info(struct sk_buff *skb,
 		targets_added = 0;
 		for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
 			if (bond->params.arp_targets[i]) {
-				nla_put_be32(skb, i, bond->params.arp_targets[i]);
+				if (nla_put_be32(skb, i, bond->params.arp_targets[i]))
+					goto nla_put_failure;
 				targets_added = 1;
 			}
 		}
@@ -256,6 +256,9 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
 		return -ENOMEM;
 
 	ps = devm_kzalloc(&mdiodev->dev, sizeof(*ps), GFP_KERNEL);
+	if (!ps)
+		return -ENOMEM;
+
 	ps->netdev = dev_get_by_name(&init_net, pdata->netdev);
 	if (!ps->netdev)
 		return -EPROBE_DEFER;
@@ -755,7 +755,7 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
 	count = 0U;
 
 	for (i = 0U, aq_vec = self->aq_vec[0];
-	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+	     aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
 		data += count;
 		aq_vec_get_sw_stats(aq_vec, data, &count);
 	}
@@ -959,8 +959,10 @@ void aq_nic_free_hot_resources(struct aq_nic_s *self)
 		goto err_exit;
 
 	for (i = AQ_DIMOF(self->aq_vec); i--;) {
-		if (self->aq_vec[i])
+		if (self->aq_vec[i]) {
 			aq_vec_free(self->aq_vec[i]);
+			self->aq_vec[i] = NULL;
+		}
 	}
 
 err_exit:;
@@ -838,7 +838,7 @@ static int alx_enable_msix(struct alx_priv *alx)
 
 	err = pci_alloc_irq_vectors(alx->hw.pdev, num_vec, num_vec,
 			PCI_IRQ_MSIX);
-	if (err) {
+	if (err < 0) {
 		netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
 		return err;
 	}
@@ -904,7 +904,7 @@ static int alx_init_intr(struct alx_priv *alx)
 
 	ret = pci_alloc_irq_vectors(alx->hw.pdev, 1, 1,
 			PCI_IRQ_MSI | PCI_IRQ_LEGACY);
-	if (ret)
+	if (ret < 0)
 		return ret;
 
 	alx->num_vec = 1;
@@ -3000,7 +3000,8 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
 		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
 
 	bp->ntp_fltr_count = 0;
-	bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+				    sizeof(long),
 				    GFP_KERNEL);
 
 	if (!bp->ntp_fltr_bmap)
@@ -2845,7 +2845,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
 static void
 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
 {
-	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+	strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
 }
 
 static void
@@ -547,7 +547,7 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
 		for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
 			BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
 				   ETH_GSTRING_LEN));
-			memcpy(string, bnad_net_stats_strings[i],
+			strncpy(string, bnad_net_stats_strings[i],
 			       ETH_GSTRING_LEN);
 			string += ETH_GSTRING_LEN;
 		}
@@ -108,6 +108,12 @@ enum {
 	PAUSE_AUTONEG = 1 << 2
 };
 
+enum {
+	FEC_AUTO      = 1 << 0,	/* IEEE 802.3 "automatic" */
+	FEC_RS        = 1 << 1,	/* Reed-Solomon */
+	FEC_BASER_RS  = 1 << 2	/* BaseR/Reed-Solomon */
+};
+
 struct port_stats {
 	u64 tx_octets;            /* total # of octets in good frames */
 	u64 tx_frames;            /* all good frames */
@@ -432,6 +438,9 @@ struct link_config {
 	unsigned int   speed;            /* actual link speed */
 	unsigned char  requested_fc;     /* flow control user has requested */
 	unsigned char  fc;               /* actual link flow control */
+	unsigned char  auto_fec;         /* Forward Error Correction: */
+	unsigned char  requested_fec;    /* "automatic" (IEEE 802.3), */
+	unsigned char  fec;              /* requested, and actual in use */
 	unsigned char  autoneg;          /* autonegotiating? */
 	unsigned char  link_ok;          /* link up? */
 	unsigned char  link_down_rc;     /* link down reason */
@@ -3707,7 +3707,8 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
 		  struct link_config *lc)
 {
 	struct fw_port_cmd c;
-	unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
+	unsigned int mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
+	unsigned int fc = 0, fec = 0, fw_fec = 0;
 
 	lc->link_ok = 0;
 	if (lc->requested_fc & PAUSE_RX)
@@ -3715,6 +3716,13 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
 	if (lc->requested_fc & PAUSE_TX)
 		fc |= FW_PORT_CAP_FC_TX;
 
+	fec = lc->requested_fec & FEC_AUTO ? lc->auto_fec : lc->requested_fec;
+
+	if (fec & FEC_RS)
+		fw_fec |= FW_PORT_CAP_FEC_RS;
+	if (fec & FEC_BASER_RS)
+		fw_fec |= FW_PORT_CAP_FEC_BASER_RS;
+
 	memset(&c, 0, sizeof(c));
 	c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
 				     FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
|
|||||||
|
|
||||||
if (!(lc->supported & FW_PORT_CAP_ANEG)) {
|
if (!(lc->supported & FW_PORT_CAP_ANEG)) {
|
||||||
c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
|
c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
|
||||||
fc);
|
fc | fw_fec);
|
||||||
lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
|
lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
|
||||||
} else if (lc->autoneg == AUTONEG_DISABLE) {
|
} else if (lc->autoneg == AUTONEG_DISABLE) {
|
||||||
c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
|
c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc |
|
||||||
|
fw_fec | mdi);
|
||||||
lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
|
lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
|
||||||
} else
|
} else
|
||||||
c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
|
c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc |
|
||||||
|
fw_fec | mdi);
|
||||||
|
|
||||||
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
|
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
|
||||||
}
|
}
|
||||||
@@ -7407,13 +7417,26 @@ static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
 *	Initializes the SW state maintained for each link, including the link's
 *	capabilities and default speed/flow-control/autonegotiation settings.
 */
-static void init_link_config(struct link_config *lc, unsigned int caps)
+static void init_link_config(struct link_config *lc, unsigned int pcaps,
+			     unsigned int acaps)
 {
-	lc->supported = caps;
+	lc->supported = pcaps;
 	lc->lp_advertising = 0;
 	lc->requested_speed = 0;
 	lc->speed = 0;
 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+	lc->auto_fec = 0;
+
+	/* For Forward Error Control, we default to whatever the Firmware
+	 * tells us the Link is currently advertising.
+	 */
+	if (acaps & FW_PORT_CAP_FEC_RS)
+		lc->auto_fec |= FEC_RS;
+	if (acaps & FW_PORT_CAP_FEC_BASER_RS)
+		lc->auto_fec |= FEC_BASER_RS;
+	lc->requested_fec = FEC_AUTO;
+	lc->fec = lc->auto_fec;
+
 	if (lc->supported & FW_PORT_CAP_ANEG) {
 		lc->advertising = lc->supported & ADVERT_MASK;
 		lc->autoneg = AUTONEG_ENABLE;
@@ -7991,7 +8014,8 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
 	pi->port_type = FW_PORT_CMD_PTYPE_G(ret);
 	pi->mod_type = FW_PORT_MOD_TYPE_NA;
 
-	init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap));
+	init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap),
+			 be16_to_cpu(c.u.info.acap));
 	return 0;
 }
 
@@ -2263,9 +2263,9 @@ enum fw_port_cap {
 	FW_PORT_CAP_ANEG		= 0x0100,
 	FW_PORT_CAP_MDIX		= 0x0200,
 	FW_PORT_CAP_MDIAUTO		= 0x0400,
-	FW_PORT_CAP_FEC			= 0x0800,
-	FW_PORT_CAP_TECHKR		= 0x1000,
-	FW_PORT_CAP_TECHKX4		= 0x2000,
+	FW_PORT_CAP_FEC_RS		= 0x0800,
+	FW_PORT_CAP_FEC_BASER_RS	= 0x1000,
+	FW_PORT_CAP_FEC_RESERVED	= 0x2000,
 	FW_PORT_CAP_802_3_PAUSE		= 0x4000,
 	FW_PORT_CAP_802_3_ASM_DIR	= 0x8000,
 };
@@ -1789,9 +1789,17 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
 	}
 
 	if (err) {
-		if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
+		if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
+			if (vhcr->op == MLX4_CMD_ALLOC_RES &&
+			    (vhcr->in_modifier & 0xff) == RES_COUNTER &&
+			    err == -EDQUOT)
+				mlx4_dbg(dev,
+					 "Unable to allocate counter for slave %d (%d)\n",
+					 slave, err);
+			else
 			mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
 				  vhcr->op, slave, vhcr->errno, err);
+		}
 		vhcr_cmd->status = mlx4_errno_to_status(err);
 		goto out_status;
 	}
@@ -1562,6 +1562,11 @@ static int mlx4_en_flow_replace(struct net_device *dev,
 		qpn = priv->drop_qp.qpn;
 	else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
 		qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
+		if (qpn < priv->rss_map.base_qpn ||
+		    qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) {
+			en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn);
+			return -EINVAL;
+		}
 	} else {
 		if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
 			en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
@@ -997,7 +997,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
 	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
 	       eff_mtu, priv->num_frags);
 	for (i = 0; i < priv->num_frags; i++) {
-		en_err(priv,
+		en_dbg(DRV,
+		       priv,
 		       "  frag:%d - size:%d stride:%d\n",
 		       i,
 		       priv->frag_info[i].frag_size,
@@ -311,7 +311,7 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
-	int err = -EINVAL;
+	int err = -EDQUOT;
 	int allocated, free, reserved, guaranteed, from_free;
 	int from_rsvd;
 
@@ -1460,6 +1460,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	params.is_first_pf = p_hwfn->first_on_engine;
 	params.num_pf_cids = iids.cids;
 	params.num_vf_cids = iids.vf_cids;
+	params.num_tids = iids.tids;
 	params.start_pq = qm_info->start_pq;
 	params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
 	params.num_vf_pqs = qm_info->num_vf_pqs;
@@ -1370,7 +1370,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
						       NULL) +
		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
						   NULL);
-	norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096);
+	norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, PAGE_SIZE);
 	min_addr_reg1 = norm_regsize / 4096;
 	pwm_regsize = db_bar_size - norm_regsize;
 
@@ -1093,10 +1093,12 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
 		qed_free_stream_mem(cdev);
 		if (IS_QED_ETH_IF(cdev))
 			qed_sriov_disable(cdev, true);
+	}
 
-		qed_nic_stop(cdev);
+	qed_nic_stop(cdev);
+
+	if (IS_PF(cdev))
 		qed_slowpath_irq_free(cdev);
-	}
 
 	qed_disable_msix(cdev);
 
@@ -1028,11 +1028,6 @@ int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
 {
 	struct qede_dev *edev = netdev_priv(dev);
 
-	if (IS_VF(edev)) {
-		DP_NOTICE(edev, "VFs don't support XDP\n");
-		return -EOPNOTSUPP;
-	}
-
 	switch (xdp->command) {
 	case XDP_SETUP_PROG:
 		return qede_xdp_set(edev, xdp->prog);
@@ -563,6 +563,23 @@ static const struct net_device_ops qede_netdev_ops = {
 #endif
 };
 
+static const struct net_device_ops qede_netdev_vf_ops = {
+	.ndo_open = qede_open,
+	.ndo_stop = qede_close,
+	.ndo_start_xmit = qede_start_xmit,
+	.ndo_set_rx_mode = qede_set_rx_mode,
+	.ndo_set_mac_address = qede_set_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_change_mtu = qede_change_mtu,
+	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+	.ndo_set_features = qede_set_features,
+	.ndo_get_stats64 = qede_get_stats64,
+	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
+	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
+	.ndo_features_check = qede_features_check,
+};
+
 /* -------------------------------------------------------------------------
  * START OF PROBE / REMOVE
  * -------------------------------------------------------------------------
@@ -622,6 +639,9 @@ static void qede_init_ndev(struct qede_dev *edev)
 
 	ndev->watchdog_timeo = TX_TIMEOUT;
 
+	if (IS_VF(edev))
+		ndev->netdev_ops = &qede_netdev_vf_ops;
+	else
 		ndev->netdev_ops = &qede_netdev_ops;
 
 	qede_set_ethtool_ops(ndev);
@@ -1313,6 +1333,9 @@ static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
 	if (fp->type & QEDE_FASTPATH_RX)
 		qede_free_mem_rxq(edev, fp->rxq);
 
+	if (fp->type & QEDE_FASTPATH_XDP)
+		qede_free_mem_txq(edev, fp->xdp_tx);
+
 	if (fp->type & QEDE_FASTPATH_TX)
 		qede_free_mem_txq(edev, fp->txq);
 }
@@ -765,7 +765,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
			sizeof(struct mpi_coredump_global_header);
 	mpi_coredump->mpi_global_header.imageSize =
			sizeof(struct ql_mpi_coredump);
-	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+	strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
		sizeof(mpi_coredump->mpi_global_header.idString));
 
 	/* Get generic NIC reg dump */
@@ -1255,7 +1255,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
			sizeof(struct mpi_coredump_global_header);
 	mpi_coredump->mpi_global_header.imageSize =
			sizeof(struct ql_reg_dump);
-	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+	strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
		sizeof(mpi_coredump->mpi_global_header.idString));
 
 
@@ -70,11 +70,8 @@ static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
 	return -ENODEV;
 }
 
-static void stmmac_default_data(struct plat_stmmacenet_data *plat)
+static void common_default_data(struct plat_stmmacenet_data *plat)
 {
-	plat->bus_id = 1;
-	plat->phy_addr = 0;
-	plat->interface = PHY_INTERFACE_MODE_GMII;
 	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
 	plat->has_gmac = 1;
 	plat->force_sf_dma_mode = 1;
@@ -82,10 +79,6 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
 	plat->mdio_bus_data->phy_reset = NULL;
 	plat->mdio_bus_data->phy_mask = 0;
 
-	plat->dma_cfg->pbl = 32;
-	plat->dma_cfg->pblx8 = true;
-	/* TODO: AXI */
-
 	/* Set default value for multicast hash bins */
 	plat->multicast_filter_bins = HASH_TABLE_SIZE;
 
@@ -107,12 +100,29 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
 	plat->rx_queues_cfg[0].pkt_route = 0x0;
 }
 
+static void stmmac_default_data(struct plat_stmmacenet_data *plat)
+{
+	/* Set common default data first */
+	common_default_data(plat);
+
+	plat->bus_id = 1;
+	plat->phy_addr = 0;
+	plat->interface = PHY_INTERFACE_MODE_GMII;
+
+	plat->dma_cfg->pbl = 32;
+	plat->dma_cfg->pblx8 = true;
+	/* TODO: AXI */
+}
+
 static int quark_default_data(struct plat_stmmacenet_data *plat,
			      struct stmmac_pci_info *info)
 {
 	struct pci_dev *pdev = info->pdev;
 	int ret;
 
+	/* Set common default data first */
+	common_default_data(plat);
+
 	/*
	 * Refuse to load the driver and register net device if MAC controller
	 * does not connect to any PHY interface.
@@ -124,27 +134,12 @@ static int quark_default_data(struct plat_stmmacenet_data *plat,
 	plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
 	plat->phy_addr = ret;
 	plat->interface = PHY_INTERFACE_MODE_RMII;
-	plat->clk_csr = 2;
-	plat->has_gmac = 1;
-	plat->force_sf_dma_mode = 1;
-
-	plat->mdio_bus_data->phy_reset = NULL;
-	plat->mdio_bus_data->phy_mask = 0;
 
 	plat->dma_cfg->pbl = 16;
 	plat->dma_cfg->pblx8 = true;
 	plat->dma_cfg->fixed_burst = 1;
 	/* AXI (TODO) */
 
-	/* Set default value for multicast hash bins */
-	plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
-	/* Set default value for unicast filter entries */
-	plat->unicast_filter_entries = 1;
-
-	/* Set the maxmtu to a default of JUMBO_LEN */
-	plat->maxmtu = JUMBO_LEN;
-
 	return 0;
 }
 
@@ -287,6 +287,10 @@ struct cpsw_ss_regs {
 /* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
 #define CPSW_V1_SEQ_ID_OFS_SHIFT	16
 
+#define CPSW_MAX_BLKS_TX		15
+#define CPSW_MAX_BLKS_TX_SHIFT		4
+#define CPSW_MAX_BLKS_RX		5
+
 struct cpsw_host_regs {
 	u32	max_blks;
 	u32	blk_cnt;
@@ -1278,11 +1282,23 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 	switch (cpsw->version) {
 	case CPSW_VERSION_1:
 		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
+		/* Increase RX FIFO size to 5 for supporting fullduplex
+		 * flow control mode
+		 */
+		slave_write(slave,
+			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+			    CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
 		break;
 	case CPSW_VERSION_2:
 	case CPSW_VERSION_3:
 	case CPSW_VERSION_4:
 		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
+		/* Increase RX FIFO size to 5 for supporting fullduplex
+		 * flow control mode
+		 */
+		slave_write(slave,
+			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+			    CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
 		break;
 	}
 
@@ -976,12 +976,10 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	case SIOCYAMSMCS:
 		if (netif_running(dev))
 			return -EINVAL;		/* Cannot change this parameter when up */
-		if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL)
-			return -ENOBUFS;
-		if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) {
-			kfree(ym);
-			return -EFAULT;
-		}
+		ym = memdup_user(ifr->ifr_data,
+				 sizeof(struct yamdrv_ioctl_mcs));
+		if (IS_ERR(ym))
+			return PTR_ERR(ym);
 		if (ym->bitrate > YAM_MAXBITRATE) {
 			kfree(ym);
 			return -EINVAL;
@@ -1616,17 +1616,14 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
			return -EPERM;
		}
 
-		image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
-		oldimage = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
-		if (!image || !oldimage) {
-			error = -ENOMEM;
-			goto wf_out;
-		}
+		image = memdup_user(rq->ifr_data, EEPROM_BYTES);
+		if (IS_ERR(image))
+			return PTR_ERR(image);
 
-		error = copy_from_user(image, rq->ifr_data, EEPROM_BYTES);
-		if (error) {
-			error = -EFAULT;
-			goto wf_out;
+		oldimage = kmalloc(EEPROM_BYTES, GFP_KERNEL);
+		if (!oldimage) {
+			kfree(image);
+			return -ENOMEM;
		}
 
		if (rrpriv->fw_running){
@@ -203,11 +203,14 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
			    &md->mux_handle, md, md->mii_bus);
 	if (rc) {
 		dev_info(md->dev, "mdiomux initialization failed\n");
-		goto out;
+		goto out_register;
 	}
 
 	dev_info(md->dev, "iProc mdiomux registered\n");
 	return 0;
 
+out_register:
+	mdiobus_unregister(bus);
 out:
 	mdiobus_free(bus);
 	return rc;
@@ -1088,6 +1088,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
 	u16 n = 0, index, ndplen;
 	u8 ready2send = 0;
 	u32 delayed_ndp_size;
+	size_t padding_count;
 
 	/* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated
	 * accordingly. Otherwise, we should check here.
@@ -1244,11 +1245,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
	 * a ZLP after full sized NTBs.
	 */
 	if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
-	    skb_out->len > ctx->min_tx_pkt)
-		memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
-		       ctx->tx_max - skb_out->len);
-	else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
+	    skb_out->len > ctx->min_tx_pkt) {
+		padding_count = ctx->tx_max - skb_out->len;
+		memset(skb_put(skb_out, padding_count), 0, padding_count);
+	} else if (skb_out->len < ctx->tx_max &&
+		   (skb_out->len % dev->maxpacket) == 0) {
 		*skb_put(skb_out, 1) = 0;	/* force short packet */
+	}
 
 	/* set final frame length */
 	nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
@@ -131,7 +131,7 @@ static inline int edc_inc(struct edc *edc, u16 max_err, u16 timeframe)
 	unsigned long now;
 
 	now = jiffies;
-	if (now - edc->timestart > timeframe) {
+	if (time_after(now, edc->timestart + timeframe)) {
 		edc->errorcount = 1;
 		edc->timestart = now;
 	} else if (++edc->errorcount > max_err) {
@@ -522,7 +522,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
 	rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
 	rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
 	rxs->enc_flags |= (rxsp->status4 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0;
-	rxs->enc_flags |= (rxsp->status4 & AR_2040) ? RX_ENC_FLAG_40MHZ : 0;
+	rxs->bw = (rxsp->status4 & AR_2040) ? RATE_INFO_BW_40 : RATE_INFO_BW_20;
 
 	rxs->evm0 = rxsp->status6;
 	rxs->evm1 = rxsp->status7;
@@ -580,8 +580,8 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
 	/* directly mapped flags for ieee80211_rx_status */
 	rs->enc_flags |=
		(ads.ds_rxstatus3 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0;
-	rs->enc_flags |=
-		(ads.ds_rxstatus3 & AR_2040) ? RX_ENC_FLAG_40MHZ : 0;
+	rs->bw = (ads.ds_rxstatus3 & AR_2040) ? RATE_INFO_BW_40 :
+		 RATE_INFO_BW_20;
 	if (AR_SREV_9280_20_OR_LATER(ah))
 		rs->enc_flags |=
			(ads.ds_rxstatus3 & AR_STBC) ?
@@ -734,7 +734,9 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
 	if (rate_n_flags & RATE_MCS_HT_MSK)
 		rx_status.encoding = RX_ENC_HT;
 	if (rate_n_flags & RATE_MCS_HT40_MSK)
-		rx_status.enc_flags |= RX_ENC_FLAG_40MHZ;
+		rx_status.bw = RATE_INFO_BW_40;
+	else
+		rx_status.bw = RATE_INFO_BW_20;
 	if (rate_n_flags & RATE_MCS_SGI_MSK)
 		rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;
 
@@ -889,7 +889,9 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
 	if (rate_n_flags & RATE_MCS_HT_MSK)
 		rx_status.encoding = RX_ENC_HT;
 	if (rate_n_flags & RATE_MCS_HT40_MSK)
-		rx_status.enc_flags |= RX_ENC_FLAG_40MHZ;
+		rx_status.bw = RATE_INFO_BW_40;
+	else
+		rx_status.bw = RATE_INFO_BW_20;
 	if (rate_n_flags & RATE_MCS_SGI_MSK)
 		rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;
 	if (rate_n_flags & RATE_MCS_GF_MSK)
@@ -1201,7 +1201,13 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
 		rx_status.encoding = RX_ENC_HT;
 	}
 	if (info->control.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-		rx_status.enc_flags |= RX_ENC_FLAG_40MHZ;
+		rx_status.bw = RATE_INFO_BW_40;
+	else if (info->control.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+		rx_status.bw = RATE_INFO_BW_80;
+	else if (info->control.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+		rx_status.bw = RATE_INFO_BW_160;
+	else
+		rx_status.bw = RATE_INFO_BW_20;
 	if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
 		rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;
 	/* TODO: simulate real signal strength (and optional packet loss) */
@@ -20,6 +20,8 @@
 #define ADDRCONF_TIMER_FUZZ		(HZ / 4)
 #define ADDRCONF_TIMER_FUZZ_MAX		(HZ)
 
+#define ADDRCONF_NOTIFY_PRIORITY	0
+
 #include <linux/in.h>
 #include <linux/in6.h>
 
@@ -1666,7 +1666,7 @@ struct cfg80211_bss_select_adjust {
 *	(others are filtered out).
 *	If ommited, all results are passed.
 * @n_match_sets: number of match sets
- * @results_wk: worker for processing results notification.
+ * @report_results: indicates that results were reported for this request
 * @wiphy: the wiphy this was for
 * @dev: the interface
 * @scan_start: start time of the scheduled scan
@@ -1141,7 +1141,6 @@ enum mac80211_rx_flags {
 * enum mac80211_rx_encoding_flags - MCS & bandwidth flags
 *
 * @RX_ENC_FLAG_SHORTPRE: Short preamble was used for this frame
- * @RX_ENC_FLAG_40MHZ: HT40 (40 MHz) was used
 * @RX_ENC_FLAG_SHORT_GI: Short guard interval was used
 * @RX_ENC_FLAG_HT_GF: This frame was received in a HT-greenfield transmission,
 *	if the driver fills this value it should add
@@ -1153,7 +1152,6 @@ enum mac80211_rx_flags {
 */
 enum mac80211_rx_encoding_flags {
 	RX_ENC_FLAG_SHORTPRE		= BIT(0),
-	RX_ENC_FLAG_40MHZ		= BIT(1),
 	RX_ENC_FLAG_SHORT_GI		= BIT(2),
 	RX_ENC_FLAG_HT_GF		= BIT(3),
 	RX_ENC_FLAG_STBC_MASK		= BIT(4) | BIT(5),
@@ -6,10 +6,12 @@
 u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
			       __be16 dport);
-u32 secure_tcp_seq_and_tsoff(__be32 saddr, __be32 daddr,
-			     __be16 sport, __be16 dport, u32 *tsoff);
-u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr,
-			       __be16 sport, __be16 dport, u32 *tsoff);
+u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
+		   __be16 sport, __be16 dport);
+u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr);
+u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
+		     __be16 sport, __be16 dport);
+u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr);
 u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
				__be16 sport, __be16 dport);
 u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
@@ -470,7 +470,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
 /* From syncookies.c */
 struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
-				 struct dst_entry *dst);
+				 struct dst_entry *dst, u32 tsoff);
 int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
		      u32 cookie);
 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
@@ -1234,10 +1234,12 @@ void tcp_cwnd_restart(struct sock *sk, s32 delta);
 
 static inline void tcp_slow_start_after_idle_check(struct sock *sk)
 {
+	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
 	struct tcp_sock *tp = tcp_sk(sk);
 	s32 delta;
 
-	if (!sysctl_tcp_slow_start_after_idle || tp->packets_out)
+	if (!sysctl_tcp_slow_start_after_idle || tp->packets_out ||
+	    ca_ops->cong_control)
 		return;
 	delta = tcp_time_stamp - tp->lsndtime;
 	if (delta > inet_csk(sk)->icsk_rto)
@@ -1822,7 +1824,8 @@ struct tcp_request_sock_ops {
 #endif
 	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
				       const struct request_sock *req);
-	__u32 (*init_seq_tsoff)(const struct sk_buff *skb, u32 *tsoff);
+	u32 (*init_seq)(const struct sk_buff *skb);
+	u32 (*init_ts_off)(const struct sk_buff *skb);
 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
			   struct flowi *fl, struct request_sock *req,
			   struct tcp_fastopen_cookie *foc,
@@ -298,7 +298,8 @@ static const char *const bpf_jmp_string[16] = {
 	[BPF_EXIT >> 4] = "exit",
 };
 
-static void print_bpf_insn(struct bpf_insn *insn)
+static void print_bpf_insn(const struct bpf_verifier_env *env,
+			   const struct bpf_insn *insn)
 {
 	u8 class = BPF_CLASS(insn->code);
 
@@ -362,9 +363,19 @@ static void print_bpf_insn(struct bpf_insn *insn)
			insn->code,
			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
			insn->src_reg, insn->imm);
-	} else if (BPF_MODE(insn->code) == BPF_IMM) {
-		verbose("(%02x) r%d = 0x%x\n",
-			insn->code, insn->dst_reg, insn->imm);
+	} else if (BPF_MODE(insn->code) == BPF_IMM &&
+		   BPF_SIZE(insn->code) == BPF_DW) {
+		/* At this point, we already made sure that the second
+		 * part of the ldimm64 insn is accessible.
+		 */
+		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
+		bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
+
+		if (map_ptr && !env->allow_ptr_leaks)
+			imm = 0;
+
+		verbose("(%02x) r%d = 0x%llx\n", insn->code,
+			insn->dst_reg, (unsigned long long)imm);
 	} else {
 		verbose("BUG_ld_%02x\n", insn->code);
 		return;
@@ -2853,7 +2864,7 @@ static int do_check(struct bpf_verifier_env *env)
 
 		if (log_level) {
			verbose("%d: ", insn_idx);
-			print_bpf_insn(insn);
+			print_bpf_insn(env, insn);
		}
 
		err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
@@ -626,11 +626,18 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
 {
 	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 	netdev_features_t old_features = features;
+	netdev_features_t lower_features;
 
-	features = netdev_intersect_features(features, real_dev->vlan_features);
-	features |= NETIF_F_RXCSUM;
-	features = netdev_intersect_features(features, real_dev->features);
+	lower_features = netdev_intersect_features((real_dev->vlan_features |
+						    NETIF_F_RXCSUM),
+						   real_dev->features);
 
+	/* Add HW_CSUM setting to preserve user ability to control
+	 * checksum offload on the vlan device.
+	 */
+	if (lower_features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
+		lower_features |= NETIF_F_HW_CSUM;
+	features = netdev_intersect_features(features, lower_features);
 	features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_GSO_SOFTWARE);
 	features |= NETIF_F_LLTX;
 
@@ -133,6 +133,8 @@ static inline size_t br_port_info_size(void)
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
+		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_FLOOD */
+		+ nla_total_size(1)	/* IFLA_BRPORT_BCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
		+ nla_total_size(1)	/* IFLA_BRPORT_VLAN_TUNNEL */
@@ -633,6 +635,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
 	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
 	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
 	[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
+	[IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
+	[IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
 };
 
 /* Change the state of the port and notify spanning tree */
@@ -24,9 +24,13 @@ static siphash_key_t ts_secret __read_mostly;
 
 static __always_inline void net_secret_init(void)
 {
-	net_get_random_once(&ts_secret, sizeof(ts_secret));
 	net_get_random_once(&net_secret, sizeof(net_secret));
 }
+
+static __always_inline void ts_secret_init(void)
+{
+	net_get_random_once(&ts_secret, sizeof(ts_secret));
+}
 #endif
 
 #ifdef CONFIG_INET
@@ -47,7 +51,7 @@ static u32 seq_scale(u32 seq)
 #endif
 
 #if IS_ENABLED(CONFIG_IPV6)
-static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
+u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
 {
 	const struct {
		struct in6_addr saddr;
@@ -60,12 +64,14 @@ static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
 	if (sysctl_tcp_timestamps != 1)
		return 0;
 
+	ts_secret_init();
 	return siphash(&combined, offsetofend(typeof(combined), daddr),
		       &ts_secret);
 }
+EXPORT_SYMBOL(secure_tcpv6_ts_off);
 
-u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr,
-			       __be16 sport, __be16 dport, u32 *tsoff)
+u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
+		     __be16 sport, __be16 dport)
 {
 	const struct {
		struct in6_addr saddr;
@@ -78,14 +84,14 @@ u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr,
		.sport = sport,
		.dport = dport
 	};
-	u64 hash;
+	u32 hash;
 
 	net_secret_init();
 	hash = siphash(&combined, offsetofend(typeof(combined), dport),
		       &net_secret);
-	*tsoff = secure_tcpv6_ts_off(saddr, daddr);
 	return seq_scale(hash);
 }
-EXPORT_SYMBOL(secure_tcpv6_seq_and_tsoff);
+EXPORT_SYMBOL(secure_tcpv6_seq);
 
 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
			       __be16 dport)
@@ -107,11 +113,12 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
 #endif
 
 #ifdef CONFIG_INET
-static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
+u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
 {
 	if (sysctl_tcp_timestamps != 1)
		return 0;
 
+	ts_secret_init();
 	return siphash_2u32((__force u32)saddr, (__force u32)daddr,
			    &ts_secret);
 }
@@ -121,15 +128,15 @@ static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
 * it would be easy enough to have the former function use siphash_4u32, passing
 * the arguments as separate u32.
 */
-u32 secure_tcp_seq_and_tsoff(__be32 saddr, __be32 daddr,
-			     __be16 sport, __be16 dport, u32 *tsoff)
+u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
+		   __be16 sport, __be16 dport)
 {
-	u64 hash;
+	u32 hash;
 
 	net_secret_init();
 	hash = siphash_3u32((__force u32)saddr, (__force u32)daddr,
			    (__force u32)sport << 16 | (__force u32)dport,
			    &net_secret);
-	*tsoff = secure_tcp_ts_off(saddr, daddr);
 	return seq_scale(hash);
 }
 
@@ -110,7 +110,7 @@ struct neigh_table dn_neigh_table = {
 static int dn_neigh_construct(struct neighbour *neigh)
 {
 	struct net_device *dev = neigh->dev;
-	struct dn_neigh *dn = (struct dn_neigh *)neigh;
+	struct dn_neigh *dn = container_of(neigh, struct dn_neigh, n);
 	struct dn_dev *dn_db;
 	struct neigh_parms *parms;
 
@@ -339,7 +339,7 @@ int dn_to_neigh_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 	struct dst_entry *dst = skb_dst(skb);
 	struct dn_route *rt = (struct dn_route *) dst;
 	struct neighbour *neigh = rt->n;
-	struct dn_neigh *dn = (struct dn_neigh *)neigh;
+	struct dn_neigh *dn = container_of(neigh, struct dn_neigh, n);
 	struct dn_dev *dn_db;
 	bool use_long;
 
@@ -391,7 +391,7 @@ int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
 
 	neigh = __neigh_lookup(&dn_neigh_table, &src, skb->dev, 1);
 
-	dn = (struct dn_neigh *)neigh;
+	dn = container_of(neigh, struct dn_neigh, n);
 
 	if (neigh) {
		write_lock(&neigh->lock);
@@ -451,7 +451,7 @@ int dn_neigh_endnode_hello(struct net *net, struct sock *sk, struct sk_buff *skb
 
 	neigh = __neigh_lookup(&dn_neigh_table, &src, skb->dev, 1);
 
-	dn = (struct dn_neigh *)neigh;
+	dn = container_of(neigh, struct dn_neigh, n);
 
 	if (neigh) {
		write_lock(&neigh->lock);
@@ -510,7 +510,7 @@ static void neigh_elist_cb(struct neighbour *neigh, void *_info)
 	if (neigh->dev != s->dev)
		return;
 
-	dn = (struct dn_neigh *) neigh;
+	dn = container_of(neigh, struct dn_neigh, n);
 	if (!(dn->flags & (DN_NDFLAG_R1|DN_NDFLAG_R2)))
		return;
 
@@ -549,7 +549,7 @@ int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n)
 static inline void dn_neigh_format_entry(struct seq_file *seq,
					 struct neighbour *n)
 {
-	struct dn_neigh *dn = (struct dn_neigh *) n;
+	struct dn_neigh *dn = container_of(n, struct dn_neigh, n);
 	char buf[DN_ASCBUF_LEN];
 
 	read_lock(&n->lock);
@@ -794,6 +794,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
		/* listeners have SOCK_RCU_FREE, not the children */
		sock_reset_flag(newsk, SOCK_RCU_FREE);
 
+		inet_sk(newsk)->mc_list = NULL;
+
		newsk->sk_mark = inet_rsk(req)->ir_mark;
		atomic64_set(&newsk->sk_cookie,
			     atomic64_read(&inet_rsk(req)->ir_cookie));
@@ -546,12 +546,13 @@ static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	struct ip_tunnel *t = netdev_priv(dev);
 	struct ip_tunnel_parm *p = &t->parms;
 
-	nla_put_u32(skb, IFLA_VTI_LINK, p->link);
-	nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key);
-	nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key);
-	nla_put_in_addr(skb, IFLA_VTI_LOCAL, p->iph.saddr);
-	nla_put_in_addr(skb, IFLA_VTI_REMOTE, p->iph.daddr);
-	nla_put_u32(skb, IFLA_VTI_FWMARK, t->fwmark);
+	if (nla_put_u32(skb, IFLA_VTI_LINK, p->link) ||
+	    nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key) ||
+	    nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key) ||
+	    nla_put_in_addr(skb, IFLA_VTI_LOCAL, p->iph.saddr) ||
+	    nla_put_in_addr(skb, IFLA_VTI_REMOTE, p->iph.daddr) ||
+	    nla_put_u32(skb, IFLA_VTI_FWMARK, t->fwmark))
+		return -EMSGSIZE;
 
 	return 0;
 }
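vti_fill_info() previously ignored the return value of every nla_put_*() call, so a filled-up skb produced a silently truncated dump instead of an error. The new code chains the puts with || and bails out on the first failure. A small sketch of the idiom with stand-in put helpers (the msg/fill_str names are invented, not the netlink API):

/* Sketch of the "chain fallible puts with || and return on the first
 * failure" idiom from vti_fill_info(). fill_str() stands in for the
 * nla_put_*() helpers; it fails once the buffer is full.
 */
#include <stdio.h>
#include <string.h>

struct msg {
	char buf[32];
	size_t len;
};

static int fill_str(struct msg *m, const char *s)
{
	size_t n = strlen(s);

	if (m->len + n > sizeof(m->buf))
		return -1;		/* analogous to -EMSGSIZE */
	memcpy(m->buf + m->len, s, n);
	m->len += n;
	return 0;
}

static int fill_info(struct msg *m)
{
	/* || short-circuits, so nothing is attempted after the first failure. */
	if (fill_str(m, "link=1 ") ||
	    fill_str(m, "ikey=2 ") ||
	    fill_str(m, "okey=3 ") ||
	    fill_str(m, "fwmark=4"))
		return -1;
	return 0;
}

int main(void)
{
	struct msg m = { .len = 0 };
	int err = fill_info(&m);

	printf("err=%d payload=\"%.*s\"\n", err, (int)m.len, m.buf);
	return 0;
}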
@@ -16,6 +16,7 @@
 #include <linux/siphash.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
+#include <net/secure_seq.h>
 #include <net/tcp.h>
 #include <net/route.h>
 
@@ -203,7 +204,7 @@ EXPORT_SYMBOL_GPL(__cookie_v4_check);
 
 struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 				 struct request_sock *req,
-				 struct dst_entry *dst)
+				 struct dst_entry *dst, u32 tsoff)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sock *child;
@@ -213,6 +214,7 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 						 NULL, &own_req);
 	if (child) {
 		atomic_set(&req->rsk_refcnt, 1);
+		tcp_sk(child)->tsoffset = tsoff;
 		sock_rps_save_rxhash(child, skb);
 		inet_csk_reqsk_queue_add(sk, req, child);
 	} else {
@@ -292,6 +294,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 	struct rtable *rt;
 	__u8 rcv_wscale;
 	struct flowi4 fl4;
+	u32 tsoff = 0;
 
 	if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
 		goto out;
@@ -311,6 +314,11 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 	memset(&tcp_opt, 0, sizeof(tcp_opt));
 	tcp_parse_options(skb, &tcp_opt, 0, NULL);
 
+	if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
+		tsoff = secure_tcp_ts_off(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
+		tcp_opt.rcv_tsecr -= tsoff;
+	}
+
 	if (!cookie_timestamp_decode(&tcp_opt))
 		goto out;
 
@@ -381,7 +389,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 	ireq->rcv_wscale = rcv_wscale;
 	ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst);
 
-	ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst);
+	ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst, tsoff);
 	/* ip_queue_xmit() depends on our flow being setup
 	 * Normal sockets get it right from inet_csk_route_child_sock()
 	 */
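The syncookie and tcp_get_cookie_sock() changes exist so that the per-connection timestamp offset is handled the same way on the SYN-cookie path as on the normal one: the offset is derived from the address pair, subtracted from the echoed timestamp before validation, passed down, and finally stored in the child socket. A toy model of that bookkeeping; toy_ts_off() is an invented hash standing in for secure_tcp_ts_off():

/* Toy model of per-connection TCP timestamp offsets. The sender adds a
 * per-peer offset to its clock; when the peer echoes the value back, the
 * same offset is subtracted before use (the rcv_tsecr -= tsoff step above).
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t toy_ts_off(uint32_t saddr, uint32_t daddr)
{
	/* Deterministic per address pair, opaque to the peer; not the
	 * kernel's siphash-based secure_tcp_ts_off(). */
	return (saddr * 2654435761u) ^ (daddr * 40503u);
}

int main(void)
{
	uint32_t saddr = 0x0a000001, daddr = 0xc0a80001;
	uint32_t clock = 123456;		/* stands in for tcp_time_stamp */
	uint32_t tsoff = toy_ts_off(saddr, daddr);

	uint32_t tsval_on_wire = clock + tsoff;	/* what the peer observes */
	uint32_t echoed = tsval_on_wire;	/* peer echoes it in TSecr */
	uint32_t recovered = echoed - tsoff;	/* receiver removes the offset */

	printf("tsoff=%u recovered==clock: %s\n",
	       tsoff, recovered == clock ? "yes" : "no");
	return 0;
}

Because the peer only ever sees clock + offset, the offset randomizes what leaks onto the wire without disturbing round-trip measurements, which is why the same offset must also be applied on the SYN-cookie path.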
@@ -85,7 +85,6 @@ int sysctl_tcp_dsack __read_mostly = 1;
 int sysctl_tcp_app_win __read_mostly = 31;
 int sysctl_tcp_adv_win_scale __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
-EXPORT_SYMBOL(sysctl_tcp_timestamps);
 
 /* rfc5961 challenge ack rate limiting */
 int sysctl_tcp_challenge_ack_limit = 1000;
@@ -6347,8 +6346,8 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	if (security_inet_conn_request(sk, skb, req))
 		goto drop_and_free;
 
-	if (isn && tmp_opt.tstamp_ok)
-		af_ops->init_seq_tsoff(skb, &tcp_rsk(req)->ts_off);
+	if (tmp_opt.tstamp_ok)
+		tcp_rsk(req)->ts_off = af_ops->init_ts_off(skb);
 
 	if (!want_cookie && !isn) {
 		/* Kill the following clause, if you dislike this way. */
@@ -6368,7 +6367,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 			goto drop_and_release;
 		}
 
-		isn = af_ops->init_seq_tsoff(skb, &tcp_rsk(req)->ts_off);
+		isn = af_ops->init_seq(skb);
 	}
 	if (!dst) {
 		dst = af_ops->route_req(sk, &fl, req);
@@ -6380,7 +6379,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 
 	if (want_cookie) {
 		isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
-		tcp_rsk(req)->ts_off = 0;
 		req->cookie_ts = tmp_opt.tstamp_ok;
 		if (!tmp_opt.tstamp_ok)
 			inet_rsk(req)->ecn_ok = 0;
@@ -94,12 +94,18 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
 struct inet_hashinfo tcp_hashinfo;
 EXPORT_SYMBOL(tcp_hashinfo);
 
-static u32 tcp_v4_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff)
+static u32 tcp_v4_init_seq(const struct sk_buff *skb)
 {
-	return secure_tcp_seq_and_tsoff(ip_hdr(skb)->daddr,
-					ip_hdr(skb)->saddr,
-					tcp_hdr(skb)->dest,
-					tcp_hdr(skb)->source, tsoff);
+	return secure_tcp_seq(ip_hdr(skb)->daddr,
+			      ip_hdr(skb)->saddr,
+			      tcp_hdr(skb)->dest,
+			      tcp_hdr(skb)->source);
+}
+
+static u32 tcp_v4_init_ts_off(const struct sk_buff *skb)
+{
+	return secure_tcp_ts_off(ip_hdr(skb)->daddr,
+				 ip_hdr(skb)->saddr);
 }
 
 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -145,7 +151,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	struct flowi4 *fl4;
 	struct rtable *rt;
 	int err;
-	u32 seq;
 	struct ip_options_rcu *inet_opt;
 	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
 
@@ -232,13 +237,13 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	rt = NULL;
 
 	if (likely(!tp->repair)) {
-		seq = secure_tcp_seq_and_tsoff(inet->inet_saddr,
-					       inet->inet_daddr,
-					       inet->inet_sport,
-					       usin->sin_port,
-					       &tp->tsoffset);
-		if (!tp->write_seq)
-			tp->write_seq = seq;
+		if (!tp->write_seq)
+			tp->write_seq = secure_tcp_seq(inet->inet_saddr,
+						       inet->inet_daddr,
+						       inet->inet_sport,
+						       usin->sin_port);
+		tp->tsoffset = secure_tcp_ts_off(inet->inet_saddr,
+						 inet->inet_daddr);
 	}
 
 	inet->inet_id = tp->write_seq ^ jiffies;
@@ -1239,7 +1244,8 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
 	.cookie_init_seq = cookie_v4_init_sequence,
 #endif
 	.route_req = tcp_v4_route_req,
-	.init_seq_tsoff = tcp_v4_init_seq_and_tsoff,
+	.init_seq = tcp_v4_init_seq,
+	.init_ts_off = tcp_v4_init_ts_off,
 	.send_synack = tcp_v4_send_synack,
 };
 
@@ -1514,6 +1514,7 @@ static void tcp_cwnd_application_limited(struct sock *sk)
 
 static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
 {
+	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	/* Track the maximum number of outstanding packets in each
@@ -1536,7 +1537,8 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
 		tp->snd_cwnd_used = tp->packets_out;
 
 	if (sysctl_tcp_slow_start_after_idle &&
-	    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
+	    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
+	    !ca_ops->cong_control)
 		tcp_cwnd_application_limited(sk);
 
 	/* The following conditions together indicate the starvation
@@ -3548,6 +3548,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
  */
 static struct notifier_block ipv6_dev_notf = {
 	.notifier_call = addrconf_notify,
+	.priority = ADDRCONF_NOTIFY_PRIORITY,
 };
 
 static void addrconf_type_change(struct net_device *dev, unsigned long event)
@@ -3709,7 +3709,10 @@ static int ip6_route_dev_notify(struct notifier_block *this,
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct net *net = dev_net(dev);
 
-	if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
+	if (!(dev->flags & IFF_LOOPBACK))
+		return NOTIFY_OK;
+
+	if (event == NETDEV_REGISTER) {
 		net->ipv6.ip6_null_entry->dst.dev = dev;
 		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
@@ -3717,6 +3720,12 @@ static int ip6_route_dev_notify(struct notifier_block *this,
 		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
+#endif
+	} else if (event == NETDEV_UNREGISTER) {
+		in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
+#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+		in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
+		in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev);
 #endif
 	}
 
@@ -4024,7 +4033,7 @@ static struct pernet_operations ip6_route_net_late_ops = {
 
 static struct notifier_block ip6_route_dev_notifier = {
 	.notifier_call = ip6_route_dev_notify,
-	.priority = 0,
+	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
 };
 
 void __init ip6_route_init_special_entries(void)
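The priority change above only makes sense because netdev notifiers are invoked in descending priority order: giving ip6_route_dev_notifier a priority of ADDRCONF_NOTIFY_PRIORITY - 10 guarantees it runs after addrconf's notifier for the same event. A small sketch of priority-ordered callbacks; the dispatch here is a plain sort-then-walk, not the kernel's notifier-chain code, and the callback names are invented:

/* Priority-ordered callbacks: higher priority runs first, so the entry
 * registered with (NOTIFY_PRIORITY - 10) always sees the event second.
 * The kernel keeps its notifier chains sorted at registration time; this
 * sketch just sorts before dispatching.
 */
#include <stdio.h>
#include <stdlib.h>

#define NOTIFY_PRIORITY 10

struct notifier {
	const char *name;
	int priority;
	void (*call)(const char *event);
};

static void addrconf_cb(const char *event) { printf("addrconf handles %s\n", event); }
static void route_cb(const char *event)    { printf("route handles %s\n", event); }

static struct notifier chain[] = {
	{ "route",    NOTIFY_PRIORITY - 10, route_cb },		/* runs second */
	{ "addrconf", NOTIFY_PRIORITY,      addrconf_cb },	/* runs first */
};

static int by_priority_desc(const void *a, const void *b)
{
	return ((const struct notifier *)b)->priority -
	       ((const struct notifier *)a)->priority;
}

static void call_chain(const char *event)
{
	size_t n = sizeof(chain) / sizeof(chain[0]);

	qsort(chain, n, sizeof(chain[0]), by_priority_desc);
	for (size_t i = 0; i < n; i++)
		chain[i].call(event);
}

int main(void)
{
	call_chain("NETDEV_UNREGISTER");
	return 0;
}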
@@ -18,6 +18,7 @@
 #include <linux/random.h>
 #include <linux/siphash.h>
 #include <linux/kernel.h>
+#include <net/secure_seq.h>
 #include <net/ipv6.h>
 #include <net/tcp.h>
 
@@ -143,6 +144,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	int mss;
 	struct dst_entry *dst;
 	__u8 rcv_wscale;
+	u32 tsoff = 0;
 
 	if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
 		goto out;
@@ -162,6 +164,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	memset(&tcp_opt, 0, sizeof(tcp_opt));
 	tcp_parse_options(skb, &tcp_opt, 0, NULL);
 
+	if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
+		tsoff = secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32,
+					    ipv6_hdr(skb)->saddr.s6_addr32);
+		tcp_opt.rcv_tsecr -= tsoff;
+	}
+
 	if (!cookie_timestamp_decode(&tcp_opt))
 		goto out;
 
@@ -242,7 +250,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	ireq->rcv_wscale = rcv_wscale;
 	ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst);
 
-	ret = tcp_get_cookie_sock(sk, skb, req, dst);
+	ret = tcp_get_cookie_sock(sk, skb, req, dst, tsoff);
 out:
 	return ret;
 out_free:
@@ -101,12 +101,18 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
 	}
 }
 
-static u32 tcp_v6_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff)
+static u32 tcp_v6_init_seq(const struct sk_buff *skb)
 {
-	return secure_tcpv6_seq_and_tsoff(ipv6_hdr(skb)->daddr.s6_addr32,
-					  ipv6_hdr(skb)->saddr.s6_addr32,
-					  tcp_hdr(skb)->dest,
-					  tcp_hdr(skb)->source, tsoff);
+	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
+				ipv6_hdr(skb)->saddr.s6_addr32,
+				tcp_hdr(skb)->dest,
+				tcp_hdr(skb)->source);
+}
+
+static u32 tcp_v6_init_ts_off(const struct sk_buff *skb)
+{
+	return secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32,
+				   ipv6_hdr(skb)->saddr.s6_addr32);
 }
 
 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
@@ -122,7 +128,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	struct flowi6 fl6;
 	struct dst_entry *dst;
 	int addr_type;
-	u32 seq;
 	int err;
 	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
 
@@ -282,13 +287,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	sk_set_txhash(sk);
 
 	if (likely(!tp->repair)) {
-		seq = secure_tcpv6_seq_and_tsoff(np->saddr.s6_addr32,
-						 sk->sk_v6_daddr.s6_addr32,
-						 inet->inet_sport,
-						 inet->inet_dport,
-						 &tp->tsoffset);
-		if (!tp->write_seq)
-			tp->write_seq = seq;
+		if (!tp->write_seq)
+			tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
+							 sk->sk_v6_daddr.s6_addr32,
+							 inet->inet_sport,
+							 inet->inet_dport);
+		tp->tsoffset = secure_tcpv6_ts_off(np->saddr.s6_addr32,
+						   sk->sk_v6_daddr.s6_addr32);
 	}
 
 	if (tcp_fastopen_defer_connect(sk, &err))
@@ -749,7 +754,8 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 	.cookie_init_seq = cookie_v6_init_sequence,
 #endif
 	.route_req = tcp_v6_route_req,
-	.init_seq_tsoff = tcp_v6_init_seq_and_tsoff,
+	.init_seq = tcp_v6_init_seq,
+	.init_ts_off = tcp_v6_init_ts_off,
 	.send_synack = tcp_v6_send_synack,
 };
 
@@ -66,6 +66,8 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
 		    2 + (IEEE80211_MAX_SUPP_RATES - 8) +
 		    2 + sizeof(struct ieee80211_ht_cap) +
 		    2 + sizeof(struct ieee80211_ht_operation) +
+		    2 + sizeof(struct ieee80211_vht_cap) +
+		    2 + sizeof(struct ieee80211_vht_operation) +
 		    ifibss->ie_len;
 	presp = kzalloc(sizeof(*presp) + frame_len, GFP_KERNEL);
 	if (!presp)
@@ -4382,6 +4382,10 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 	if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
 		return -EINVAL;
 
+	/* If a reconfig is happening, bail out */
+	if (local->in_reconfig)
+		return -EBUSY;
+
 	if (assoc) {
 		rcu_read_lock();
 		have_sta = sta_info_get(sdata, cbss->bssid);
@@ -2764,8 +2764,8 @@ static int nl80211_parse_mon_options(struct cfg80211_registered_device *rdev,
 			nla_data(info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]);
 
 		/* bits 0 and 63 are reserved and must be zero */
-		if ((mumimo_groups[0] & BIT(7)) ||
-		    (mumimo_groups[VHT_MUMIMO_GROUPS_DATA_LEN - 1] & BIT(0)))
+		if ((mumimo_groups[0] & BIT(0)) ||
+		    (mumimo_groups[VHT_MUMIMO_GROUPS_DATA_LEN - 1] & BIT(7)))
 			return -EINVAL;
 
 		params->vht_mumimo_groups = mumimo_groups;
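The nl80211 fix is purely about which bit in which byte represents "bit 0" and "bit 63" of the 64-entry group-membership bitmap: bit N lives in byte N / 8, bit N % 8, so bit 0 is byte[0] & BIT(0) and bit 63 is byte[7] & BIT(7), which is exactly what the corrected check tests. A short sketch of that mapping (the 8-byte length mirrors VHT_MUMIMO_GROUPS_DATA_LEN but is hard-coded here, and the helper names are invented):

/* Bit N of a byte-array bitmap sits in byte N / 8 at position N % 8,
 * so the reserved bits 0 and 63 are byte 0 bit 0 and byte 7 bit 7.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

static int bitmap_test(const uint8_t *buf, unsigned int nr)
{
	return !!(buf[nr / 8] & BIT(nr % 8));
}

static int reserved_bits_clear(const uint8_t *groups)
{
	/* bits 0 and 63 are reserved and must be zero */
	return !(groups[0] & BIT(0)) &&
	       !(groups[7] & BIT(7));
}

int main(void)
{
	uint8_t ok[8]  = { 0x7e, 0, 0, 0, 0, 0, 0, 0x7f };	/* reserved bits clear */
	uint8_t bad[8] = { 0x01, 0, 0, 0, 0, 0, 0, 0x80 };	/* bits 0 and 63 set */

	printf("bit 63 set in bad? %d\n", bitmap_test(bad, 63));	/* 1 */
	printf("ok passes:  %d\n", reserved_bits_clear(ok));		/* 1 */
	printf("bad passes: %d\n", reserved_bits_clear(bad));		/* 0 */
	return 0;
}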