commit 4e84b496fd
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Documentation/networking/ip-sysctl.txt
@@ -56,6 +56,13 @@ ip_forward_use_pmtu - BOOLEAN
 	0 - disabled
 	1 - enabled
 
+fwmark_reflect - BOOLEAN
+	Controls the fwmark of kernel-generated IPv4 reply packets that are not
+	associated with a socket (for example, TCP RSTs or ICMP echo replies).
+	If unset, these packets have a fwmark of zero. If set, they have the
+	fwmark of the packet they are replying to.
+	Default: 0
+
 route/max_size - INTEGER
 	Maximum number of routes allowed in the kernel. Increase
 	this when using large numbers of interfaces and/or routes.
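Review note: the new knob sits alongside the other per-protocol sysctls, so it can be toggled through procfs. A minimal sketch in C, assuming the standard /proc/sys layout (equivalent to `sysctl -w net.ipv4.fwmark_reflect=1`):

	#include <stdio.h>

	int main(void)
	{
		/* fwmark_reflect lives under net.ipv4 (and net.ipv6) */
		FILE *f = fopen("/proc/sys/net/ipv4/fwmark_reflect", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fputs("1", f);	/* 1 = replies inherit the fwmark they answer */
		fclose(f);
		return 0;
	}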
arch/arm64/boot/dts/apm-storm.dtsi
@@ -599,7 +599,7 @@
 				compatible = "apm,xgene-enet";
 				status = "disabled";
 				reg = <0x0 0x17020000 0x0 0xd100>,
-				      <0x0 0X17030000 0x0 0X400>,
+				      <0x0 0X17030000 0x0 0Xc300>,
 				      <0x0 0X10000000 0x0 0X200>;
 				reg-names = "enet_csr", "ring_csr", "ring_cmd";
 				interrupts = <0x0 0x3c 0x4>;
@@ -624,9 +624,9 @@
 			sgenet0: ethernet@1f210000 {
 				compatible = "apm,xgene-enet";
 				status = "disabled";
-				reg = <0x0 0x1f210000 0x0 0x10000>,
-				      <0x0 0x1f200000 0x0 0X10000>,
-				      <0x0 0x1B000000 0x0 0X20000>;
+				reg = <0x0 0x1f210000 0x0 0xd100>,
+				      <0x0 0x1f200000 0x0 0Xc300>,
+				      <0x0 0x1B000000 0x0 0X200>;
 				reg-names = "enet_csr", "ring_csr", "ring_cmd";
 				interrupts = <0x0 0xA0 0x4>;
 				dma-coherent;
@@ -639,7 +639,7 @@
 				compatible = "apm,xgene-enet";
 				status = "disabled";
 				reg = <0x0 0x1f610000 0x0 0xd100>,
-				      <0x0 0x1f600000 0x0 0X400>,
+				      <0x0 0x1f600000 0x0 0Xc300>,
 				      <0x0 0x18000000 0x0 0X200>;
 				reg-names = "enet_csr", "ring_csr", "ring_cmd";
 				interrupts = <0x0 0x60 0x4>;
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -575,10 +575,24 @@ static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
 	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
 }
 
-static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
+{
+	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
+		return false;
+
+	if (ioread32(p->ring_csr_addr + SRST_ADDR))
+		return false;
+
+	return true;
+}
+
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
 {
 	u32 val;
 
+	if (!xgene_ring_mgr_init(pdata))
+		return -ENODEV;
+
 	clk_prepare_enable(pdata->clk);
 	clk_disable_unprepare(pdata->clk);
 	clk_prepare_enable(pdata->clk);
@@ -590,6 +604,8 @@ static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
 	val |= SCAN_AUTO_INCR;
 	MGMT_CLOCK_SEL_SET(&val, 1);
 	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);
+
+	return 0;
 }
 
 static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -104,6 +104,9 @@ enum xgene_enet_rm {
 #define BLOCK_ETH_MAC_OFFSET		0x0000
 #define BLOCK_ETH_MAC_CSR_OFFSET	0x2800
 
+#define CLKEN_ADDR			0xc208
+#define SRST_ADDR			0xc200
+
 #define MAC_ADDR_REG_OFFSET		0x00
 #define MAC_COMMAND_REG_OFFSET		0x04
 #define MAC_WRITE_REG_OFFSET		0x08
@@ -318,6 +321,7 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
 
 int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
 void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
 
 extern struct xgene_mac_ops xgene_gmac_ops;
 extern struct xgene_port_ops xgene_gport_ops;
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -639,9 +639,9 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
 	struct device *dev = ndev_to_dev(ndev);
 	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
 	struct xgene_enet_desc_ring *buf_pool = NULL;
-	u8 cpu_bufnum = 0, eth_bufnum = 0;
-	u8 bp_bufnum = 0x20;
-	u16 ring_id, ring_num = 0;
+	u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM;
+	u8 bp_bufnum = START_BP_BUFNUM;
+	u16 ring_id, ring_num = START_RING_NUM;
 	int ret;
 
 	/* allocate rx descriptor ring */
@@ -840,7 +840,9 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
 	u16 dst_ring_num;
 	int ret;
 
-	pdata->port_ops->reset(pdata);
+	ret = pdata->port_ops->reset(pdata);
+	if (ret)
+		return ret;
 
 	ret = xgene_enet_create_desc_rings(ndev);
 	if (ret) {
@@ -942,6 +944,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
 
 	return ret;
 err:
+	unregister_netdev(ndev);
 	free_netdev(ndev);
 	return ret;
 }
drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -38,6 +38,9 @@
 #define SKB_BUFFER_SIZE		(XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
 #define NUM_PKT_BUF	64
 #define NUM_BUFPOOL	32
+#define START_ETH_BUFNUM	2
+#define START_BP_BUFNUM		0x22
+#define START_RING_NUM		8
 
 #define PHY_POLL_LINK_ON	(10 * HZ)
 #define PHY_POLL_LINK_OFF	(PHY_POLL_LINK_ON / 5)
@@ -83,7 +86,7 @@ struct xgene_mac_ops {
 };
 
 struct xgene_port_ops {
-	void (*reset)(struct xgene_enet_pdata *pdata);
+	int (*reset)(struct xgene_enet_pdata *pdata);
 	void (*cle_bypass)(struct xgene_enet_pdata *pdata,
 			   u32 dst_ring_num, u16 bufpool_id);
 	void (*shutdown)(struct xgene_enet_pdata *pdata);
drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -311,14 +311,19 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
 	xgene_sgmac_rxtx(p, TX_EN, false);
 }
 
-static void xgene_enet_reset(struct xgene_enet_pdata *p)
+static int xgene_enet_reset(struct xgene_enet_pdata *p)
 {
+	if (!xgene_ring_mgr_init(p))
+		return -ENODEV;
+
 	clk_prepare_enable(p->clk);
 	clk_disable_unprepare(p->clk);
 	clk_prepare_enable(p->clk);
 
 	xgene_enet_ecc_init(p);
 	xgene_enet_config_ring_if_assoc(p);
+
+	return 0;
 }
 
 static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -252,14 +252,19 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
 	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
 }
 
-static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
 {
+	if (!xgene_ring_mgr_init(pdata))
+		return -ENODEV;
+
 	clk_prepare_enable(pdata->clk);
 	clk_disable_unprepare(pdata->clk);
 	clk_prepare_enable(pdata->clk);
 
 	xgene_enet_ecc_init(pdata);
 	xgene_enet_config_ring_if_assoc(pdata);
+
+	return 0;
 }
 
 static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
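Review note: the common thread in the xgene hunks above is converting the port_ops reset hook from void to int so probe can bail out with -ENODEV when the ring manager is not clocked or still in reset. A self-contained sketch of that pattern, with names simplified and the hardware check stubbed (this is an illustration, not the driver code):

	#include <errno.h>
	#include <stdbool.h>

	struct pdata { int dummy; };

	struct port_ops {
		int (*reset)(struct pdata *p);	/* was: void (*reset)(...) */
	};

	static bool ring_mgr_ready(struct pdata *p)
	{
		(void)p;
		return true;	/* stands in for the CLKEN/SRST register reads */
	}

	static int port_reset(struct pdata *p)
	{
		if (!ring_mgr_ready(p))
			return -ENODEV;	/* propagate failure instead of continuing */
		/* ... clock toggling and block reset would go here ... */
		return 0;
	}

	static const struct port_ops ops = { .reset = port_reset };

	int init_hw(struct pdata *p)
	{
		return ops.reset(p);	/* caller aborts probe on non-zero */
	}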
drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1110,7 +1110,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
 	/* We just need one DMA descriptor which is DMA-able, since writing to
 	 * the port will allocate a new descriptor in its internal linked-list
 	 */
-	p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL);
+	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
+				GFP_KERNEL);
 	if (!p) {
 		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
 		return -ENOMEM;
@@ -1174,6 +1175,13 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
 	if (!(reg & TDMA_DISABLED))
 		netdev_warn(priv->netdev, "TDMA not stopped!\n");
 
+	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
+	 * fail, so by checking this pointer we know whether the TX ring was
+	 * fully initialized or not.
+	 */
+	if (!ring->cbs)
+		return;
+
 	napi_disable(&ring->napi);
 	netif_napi_del(&ring->napi);
 
@@ -1183,7 +1191,8 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
 	ring->cbs = NULL;
 
 	if (ring->desc_dma) {
-		dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma);
+		dma_free_coherent(kdev, sizeof(struct dma_desc),
+				  ring->desc_cpu, ring->desc_dma);
 		ring->desc_dma = 0;
 	}
 	ring->size = 0;
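Review note: both bcmsysport hunks enforce the same rule: the size given to dma_free_coherent() must match what dma_zalloc_coherent() allocated, and one byte was never enough to back a struct dma_desc. The same invariant in a runnable userspace miniature (illustrative only):

	#include <stdlib.h>

	struct dma_desc { unsigned int addr_lo; unsigned int addr_status_len; };

	int main(void)
	{
		/* broken: an allocation of 1 byte used as a full descriptor;
		 * fixed: allocate (and later free) with the real size
		 */
		struct dma_desc *p = calloc(1, sizeof(struct dma_desc));

		if (!p)
			return 1;
		p->addr_status_len = 0;	/* would corrupt memory with malloc(1) */
		free(p);
		return 0;
	}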
drivers/net/ethernet/cisco/enic/enic_main.c
@@ -940,18 +940,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
 	struct vnic_rq_buf *buf = rq->to_use;
 
 	if (buf->os_buf) {
-		buf = buf->next;
-		rq->to_use = buf;
-		rq->ring.desc_avail--;
-		if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
-			/* Adding write memory barrier prevents compiler and/or
-			 * CPU reordering, thus avoiding descriptor posting
-			 * before descriptor is initialized. Otherwise, hardware
-			 * can read stale descriptor fields.
-			 */
-			wmb();
-			iowrite32(buf->index, &rq->ctrl->posted_index);
-		}
+		enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
+				   buf->len);
 
 		return 0;
 	}
@@ -1037,7 +1027,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 			enic->rq_truncated_pkts++;
 		}
 
+		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
+				 PCI_DMA_FROMDEVICE);
 		dev_kfree_skb_any(skb);
+		buf->os_buf = NULL;
 
 		return;
 	}
@@ -1088,7 +1081,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 		/* Buffer overflow
 		 */
 
+		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
+				 PCI_DMA_FROMDEVICE);
 		dev_kfree_skb_any(skb);
+		buf->os_buf = NULL;
 	}
 }
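Review note: the comment deleted above still states the contract that matters here: descriptor contents must be globally visible before the posted index that tells the device to read them. A userspace C11 analogue of that publish step, with a release store standing in for the kernel's wmb() (names are illustrative, not from enic):

	#include <stdatomic.h>

	struct desc { int addr; int len; };

	static struct desc ring[64];
	static _Atomic int posted_index;

	void post_desc(int idx, int addr, int len)
	{
		ring[idx].addr = addr;	/* initialize the descriptor fields... */
		ring[idx].len = len;
		/* ...then publish: the release store keeps the writes above from
		 * being reordered past the index update the consumer polls on.
		 */
		atomic_store_explicit(&posted_index, idx, memory_order_release);
	}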
drivers/net/ethernet/freescale/fec_main.c
@@ -3343,12 +3343,11 @@ static int __maybe_unused fec_suspend(struct device *dev)
 		netif_device_detach(ndev);
 		netif_tx_unlock_bh(ndev);
 		fec_stop(ndev);
+		fec_enet_clk_enable(ndev, false);
+		pinctrl_pm_select_sleep_state(&fep->pdev->dev);
 	}
 	rtnl_unlock();
 
-	fec_enet_clk_enable(ndev, false);
-	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
-
 	if (fep->reg_phy)
 		regulator_disable(fep->reg_phy);
 
@@ -3367,13 +3366,14 @@ static int __maybe_unused fec_resume(struct device *dev)
 		return ret;
 	}
 
-	pinctrl_pm_select_default_state(&fep->pdev->dev);
-	ret = fec_enet_clk_enable(ndev, true);
-	if (ret)
-		goto failed_clk;
-
 	rtnl_lock();
 	if (netif_running(ndev)) {
+		pinctrl_pm_select_default_state(&fep->pdev->dev);
+		ret = fec_enet_clk_enable(ndev, true);
+		if (ret) {
+			rtnl_unlock();
+			goto failed_clk;
+		}
 		fec_restart(ndev);
 		netif_tx_lock_bh(ndev);
 		netif_device_attach(ndev);
drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1047,7 +1047,6 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 		int tx_index;
 		struct tx_desc *desc;
 		u32 cmd_sts;
-		struct sk_buff *skb;
 
 		tx_index = txq->tx_used_desc;
 		desc = &txq->tx_desc_area[tx_index];
@@ -1066,19 +1065,22 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 		reclaimed++;
 		txq->tx_desc_count--;
 
-		skb = NULL;
-		if (cmd_sts & TX_LAST_DESC)
-			skb = __skb_dequeue(&txq->tx_skb);
+		if (!IS_TSO_HEADER(txq, desc->buf_ptr))
+			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
+					 desc->byte_cnt, DMA_TO_DEVICE);
+
+		if (cmd_sts & TX_ENABLE_INTERRUPT) {
+			struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
+
+			if (!WARN_ON(!skb))
+				dev_kfree_skb(skb);
+		}
 
 		if (cmd_sts & ERROR_SUMMARY) {
 			netdev_info(mp->dev, "tx error\n");
 			mp->dev->stats.tx_errors++;
 		}
 
-		if (!IS_TSO_HEADER(txq, desc->buf_ptr))
-			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
-					 desc->byte_cnt, DMA_TO_DEVICE);
-		dev_kfree_skb(skb);
 	}
 
 	__netif_tx_unlock_bh(nq);
drivers/net/ethernet/marvell/mvpp2.c
@@ -1692,6 +1692,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
 {
 	struct mvpp2_prs_entry *pe;
 	int tid_aux, tid;
+	int ret = 0;
 
 	pe = mvpp2_prs_vlan_find(priv, tpid, ai);
 
@@ -1723,8 +1724,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
 				break;
 		}
 
-		if (tid <= tid_aux)
-			return -EINVAL;
+		if (tid <= tid_aux) {
+			ret = -EINVAL;
+			goto error;
+		}
 
 		memset(pe, 0 , sizeof(struct mvpp2_prs_entry));
 		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1756,9 +1759,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
 
 	mvpp2_prs_hw_write(priv, pe);
 
+error:
 	kfree(pe);
 
-	return 0;
+	return ret;
 }
 
 /* Get first free double vlan ai number */
@@ -1821,7 +1825,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
 			      unsigned int port_map)
 {
 	struct mvpp2_prs_entry *pe;
-	int tid_aux, tid, ai;
+	int tid_aux, tid, ai, ret = 0;
 
 	pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
 
@@ -1838,8 +1842,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
 
 		/* Set ai value for new double vlan entry */
 		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
-		if (ai < 0)
-			return ai;
+		if (ai < 0) {
+			ret = ai;
+			goto error;
+		}
 
 		/* Get first single/triple vlan tid */
 		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
@@ -1859,8 +1865,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
 				break;
 		}
 
-		if (tid >= tid_aux)
-			return -ERANGE;
+		if (tid >= tid_aux) {
+			ret = -ERANGE;
+			goto error;
+		}
 
 		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
 		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1887,8 +1895,9 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
 	mvpp2_prs_tcam_port_map_set(pe, port_map);
 	mvpp2_prs_hw_write(priv, pe);
 
+error:
 	kfree(pe);
-	return 0;
+	return ret;
 }
 
 /* IPv4 header parsing for fragmentation and L4 offset */
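Review note: every mvpp2 hunk above is the same leak fix: pe is heap-allocated, so each early return has to become a goto to a single cleanup label that frees it. The pattern in miniature (illustrative, not driver code):

	#include <stdlib.h>

	#define EINVAL 22

	int vlan_add(int tid, int tid_aux)
	{
		int ret = 0;
		void *pe = malloc(64);	/* stands in for the parser entry */

		if (!pe)
			return -1;

		if (tid <= tid_aux) {
			ret = -EINVAL;	/* fail... */
			goto error;	/* ...but still free pe below */
		}
		/* ... program the entry ... */
	error:
		free(pe);
		return ret;
	}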
drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -374,15 +374,14 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 	snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
 		 name, pci_name(dev->pdev));
 	eq->eqn = out.eq_number;
+	eq->irqn = vecidx;
+	eq->dev = dev;
+	eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
 	err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
 			  eq->name, eq);
 	if (err)
 		goto err_eq;
 
-	eq->irqn = vecidx;
-	eq->dev = dev;
-	eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
-
 	err = mlx5_debug_eq_add(dev, eq);
 	if (err)
 		goto err_irq;
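Review note: the eq.c reorder exists because an interrupt can fire as soon as request_irq() returns, so every field the handler touches must be set first. The same rule in a pthread sketch, where thread creation plays the role of request_irq (illustrative names):

	#include <pthread.h>
	#include <stddef.h>

	struct eq { int irqn; void *doorbell; };

	static void *handler(void *arg)
	{
		struct eq *eq = arg;	/* may run immediately after creation */
		(void)eq->doorbell;	/* safe only because it was set first */
		return NULL;
	}

	int create_eq(struct eq *eq, void *uar_map)
	{
		pthread_t t;

		eq->irqn = 0;		/* initialize everything the handler uses... */
		eq->doorbell = uar_map;
		/* ...before registering the handler (the request_irq analogue) */
		return pthread_create(&t, NULL, handler, eq);
	}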
drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -864,14 +864,14 @@ static int init_one(struct pci_dev *pdev,
 	dev->profile = &profile[prof_sel];
 	dev->event = mlx5_core_event;
 
+	INIT_LIST_HEAD(&priv->ctx_list);
+	spin_lock_init(&priv->ctx_lock);
 	err = mlx5_dev_init(dev, pdev);
 	if (err) {
 		dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
 		goto out;
 	}
 
-	INIT_LIST_HEAD(&priv->ctx_list);
-	spin_lock_init(&priv->ctx_lock);
 	err = mlx5_register_device(dev);
 	if (err) {
 		dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2762,7 +2762,8 @@ netxen_fw_poll_work(struct work_struct *work)
 	if (test_bit(__NX_RESETTING, &adapter->state))
 		goto reschedule;
 
-	if (test_bit(__NX_DEV_UP, &adapter->state)) {
+	if (test_bit(__NX_DEV_UP, &adapter->state) &&
+	    !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) {
 		if (!adapter->has_link_events) {
 
 			netxen_nic_handle_phy_intr(adapter);
drivers/net/ethernet/sfc/ef10.c
@@ -180,7 +180,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
 				  EFX_MAX_CHANNELS,
 				  resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
 				  (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
-	BUG_ON(efx->max_channels == 0);
+	if (WARN_ON(efx->max_channels == 0))
+		return -EIO;
 
 	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
 	if (!nic_data)
drivers/net/ethernet/smsc/smc91x.c
@@ -2243,9 +2243,10 @@ static int smc_drv_probe(struct platform_device *pdev)
 	const struct of_device_id *match = NULL;
 	struct smc_local *lp;
 	struct net_device *ndev;
-	struct resource *res, *ires;
+	struct resource *res;
 	unsigned int __iomem *addr;
 	unsigned long irq_flags = SMC_IRQ_FLAGS;
+	unsigned long irq_resflags;
 	int ret;
 
 	ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2337,16 +2338,19 @@ static int smc_drv_probe(struct platform_device *pdev)
 		goto out_free_netdev;
 	}
 
-	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (!ires) {
+	ndev->irq = platform_get_irq(pdev, 0);
+	if (ndev->irq <= 0) {
 		ret = -ENODEV;
 		goto out_release_io;
 	}
-
-	ndev->irq = ires->start;
-
-	if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
-		irq_flags = ires->flags & IRQF_TRIGGER_MASK;
+	/*
+	 * If this platform does not specify any special irqflags, or if
+	 * the resource supplies a trigger, override the irqflags with
+	 * the trigger flags from the resource.
+	 */
+	irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
+	if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
+		irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
 
 	ret = smc_request_attrib(pdev, ndev);
 	if (ret)
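Review note: the smc91x rework keeps the old precedence rule, just sourced from irqd_get_trigger_type() instead of the resource flags: board-specific irq_flags win unless they are unset (-1) or the firmware-described IRQ carries its own trigger. The decision in isolation (illustrative, with the mask as a parameter):

	static unsigned long pick_irq_flags(long board_flags,
					    unsigned long resflags,
					    unsigned long trigger_mask)
	{
		/* no special board flags, or the IRQ resource supplies a
		 * trigger: take the trigger bits from the resource
		 */
		if (board_flags == -1 || (resflags & trigger_mask))
			return resflags & trigger_mask;
		return (unsigned long)board_flags;
	}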
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -276,6 +276,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
 bool stmmac_eee_init(struct stmmac_priv *priv)
 {
 	char *phy_bus_name = priv->plat->phy_bus_name;
+	unsigned long flags;
 	bool ret = false;
 
 	/* Using PCS we cannot dial with the phy registers at this stage
@@ -300,6 +301,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 			 * changed).
 			 * In that case the driver disable own timers.
 			 */
+			spin_lock_irqsave(&priv->lock, flags);
 			if (priv->eee_active) {
 				pr_debug("stmmac: disable EEE\n");
 				del_timer_sync(&priv->eee_ctrl_timer);
@@ -307,9 +309,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 						     tx_lpi_timer);
 			}
 			priv->eee_active = 0;
+			spin_unlock_irqrestore(&priv->lock, flags);
 			goto out;
 		}
 		/* Activate the EEE and start timers */
+		spin_lock_irqsave(&priv->lock, flags);
 		if (!priv->eee_active) {
 			priv->eee_active = 1;
 			init_timer(&priv->eee_ctrl_timer);
@@ -325,9 +329,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 		/* Set HW EEE according to the speed */
 		priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
 
-		pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
-
 		ret = true;
+		spin_unlock_irqrestore(&priv->lock, flags);
+
+		pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
 	}
 out:
 	return ret;
@@ -760,12 +765,12 @@ static void stmmac_adjust_link(struct net_device *dev)
 	if (new_state && netif_msg_link(priv))
 		phy_print_status(phydev);
 
+	spin_unlock_irqrestore(&priv->lock, flags);
+
 	/* At this stage, it could be needed to setup the EEE or adjust some
 	 * MAC related HW registers.
 	 */
 	priv->eee_enabled = stmmac_eee_init(priv);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 /**
@@ -959,12 +964,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
 }
 
 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
-				  int i)
+				  int i, gfp_t flags)
 {
 	struct sk_buff *skb;
 
 	skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
-				 GFP_KERNEL);
+				 flags);
 	if (!skb) {
 		pr_err("%s: Rx init fails; skb is NULL\n", __func__);
 		return -ENOMEM;
@@ -1006,7 +1011,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
  * and allocates the socket buffers. It suppors the chained and ring
  * modes.
  */
-static int init_dma_desc_rings(struct net_device *dev)
+static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
 {
 	int i;
 	struct stmmac_priv *priv = netdev_priv(dev);
@@ -1041,7 +1046,7 @@ static int init_dma_desc_rings(struct net_device *dev)
 		else
 			p = priv->dma_rx + i;
 
-		ret = stmmac_init_rx_buffers(priv, p, i);
+		ret = stmmac_init_rx_buffers(priv, p, i, flags);
 		if (ret)
 			goto err_init_rx_buffers;
 
@@ -1647,11 +1652,6 @@ static int stmmac_hw_setup(struct net_device *dev)
 	struct stmmac_priv *priv = netdev_priv(dev);
 	int ret;
 
-	ret = init_dma_desc_rings(dev);
-	if (ret < 0) {
-		pr_err("%s: DMA descriptors initialization failed\n", __func__);
-		return ret;
-	}
 	/* DMA initialization and SW reset */
 	ret = stmmac_init_dma_engine(priv);
 	if (ret < 0) {
@@ -1705,10 +1705,6 @@ static int stmmac_hw_setup(struct net_device *dev)
 	}
 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
 
-	priv->eee_enabled = stmmac_eee_init(priv);
-
-	stmmac_init_tx_coalesce(priv);
-
 	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
 		priv->rx_riwt = MAX_DMA_RIWT;
 		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
@@ -1761,12 +1757,20 @@ static int stmmac_open(struct net_device *dev)
 		goto dma_desc_error;
 	}
 
+	ret = init_dma_desc_rings(dev, GFP_KERNEL);
+	if (ret < 0) {
+		pr_err("%s: DMA descriptors initialization failed\n", __func__);
+		goto init_error;
+	}
+
 	ret = stmmac_hw_setup(dev);
 	if (ret < 0) {
 		pr_err("%s: Hw setup failed\n", __func__);
 		goto init_error;
 	}
 
+	stmmac_init_tx_coalesce(priv);
+
 	if (priv->phydev)
 		phy_start(priv->phydev);
 
@@ -1894,7 +1898,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int nopaged_len = skb_headlen(skb);
 	unsigned int enh_desc = priv->plat->enh_desc;
 
+	spin_lock(&priv->tx_lock);
+
 	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
+		spin_unlock(&priv->tx_lock);
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 			/* This is a hard error, log it. */
@@ -1903,8 +1910,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock(&priv->tx_lock);
-
 	if (priv->tx_path_in_lpi_mode)
 		stmmac_disable_eee_mode(priv);
 
@@ -2025,6 +2030,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 
 dma_map_err:
+	spin_unlock(&priv->tx_lock);
 	dev_err(priv->device, "Tx dma map failed\n");
 	dev_kfree_skb(skb);
 	priv->dev->stats.tx_dropped++;
@@ -2281,9 +2287,7 @@ static void stmmac_set_rx_mode(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 
-	spin_lock(&priv->lock);
 	priv->hw->mac->set_filter(priv->hw, dev);
-	spin_unlock(&priv->lock);
 }
 
 /**
@@ -2950,7 +2954,7 @@ int stmmac_suspend(struct net_device *ndev)
 		stmmac_set_mac(priv->ioaddr, false);
 		pinctrl_pm_select_sleep_state(priv->device);
 		/* Disable clock in case of PWM is off */
-		clk_disable_unprepare(priv->stmmac_clk);
+		clk_disable(priv->stmmac_clk);
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -2982,7 +2986,7 @@ int stmmac_resume(struct net_device *ndev)
 	} else {
 		pinctrl_pm_select_default_state(priv->device);
 		/* enable the clk prevously disabled */
-		clk_prepare_enable(priv->stmmac_clk);
+		clk_enable(priv->stmmac_clk);
 		/* reset the phy so that it's ready */
 		if (priv->mii)
 			stmmac_mdio_reset(priv->mii);
@@ -2990,7 +2994,9 @@ int stmmac_resume(struct net_device *ndev)
 
 	netif_device_attach(ndev);
 
+	init_dma_desc_rings(ndev, GFP_ATOMIC);
 	stmmac_hw_setup(ndev);
+	stmmac_init_tx_coalesce(priv);
 
 	napi_enable(&priv->napi);
 
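Review note: the stmmac hunks move priv->lock acquisition into stmmac_eee_init() itself and make callers such as stmmac_adjust_link() drop the lock first; the same function must never be entered with the lock already held. A pthread miniature of that calling convention (illustrative only):

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int eee_enabled;

	static int eee_init(void)
	{
		pthread_mutex_lock(&lock);	/* the helper now locks itself */
		int ret = 1;			/* ... configure EEE state ... */
		pthread_mutex_unlock(&lock);
		return ret;
	}

	static void adjust_link(void)
	{
		pthread_mutex_lock(&lock);
		/* ... update link state under the lock ... */
		pthread_mutex_unlock(&lock);

		eee_enabled = eee_init();	/* only after unlocking: no deadlock */
	}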
drivers/net/ethernet/ti/cpsw_ale.c
@@ -785,7 +785,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale)
 {
 	if (!ale)
 		return -EINVAL;
-	cpsw_ale_stop(ale);
 	cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
 	kfree(ale);
 	return 0;
drivers/net/macvtap.c
@@ -629,6 +629,8 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 		vnet_hdr->csum_start = skb_checksum_start_offset(skb);
+		if (vlan_tx_tag_present(skb))
+			vnet_hdr->csum_start += VLAN_HLEN;
 		vnet_hdr->csum_offset = skb->csum_offset;
 	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
drivers/net/tun.c
@@ -1235,12 +1235,20 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 	struct tun_pi pi = { 0, skb->protocol };
 	ssize_t total = 0;
 	int vlan_offset = 0, copied;
+	int vlan_hlen = 0;
+	int vnet_hdr_sz = 0;
+
+	if (vlan_tx_tag_present(skb))
+		vlan_hlen = VLAN_HLEN;
+
+	if (tun->flags & TUN_VNET_HDR)
+		vnet_hdr_sz = tun->vnet_hdr_sz;
 
 	if (!(tun->flags & TUN_NO_PI)) {
 		if ((len -= sizeof(pi)) < 0)
 			return -EINVAL;
 
-		if (len < skb->len) {
+		if (len < skb->len + vlan_hlen + vnet_hdr_sz) {
 			/* Packet will be striped */
 			pi.flags |= TUN_PKT_STRIP;
 		}
@@ -1250,9 +1258,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 		total += sizeof(pi);
 	}
 
-	if (tun->flags & TUN_VNET_HDR) {
+	if (vnet_hdr_sz) {
 		struct virtio_net_hdr gso = { 0 }; /* no info leak */
-		if ((len -= tun->vnet_hdr_sz) < 0)
+		if ((len -= vnet_hdr_sz) < 0)
 			return -EINVAL;
 
 		if (skb_is_gso(skb)) {
@@ -1284,7 +1292,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-			gso.csum_start = skb_checksum_start_offset(skb);
+			gso.csum_start = skb_checksum_start_offset(skb) +
+					 vlan_hlen;
 			gso.csum_offset = skb->csum_offset;
 		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 			gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
@@ -1293,14 +1302,13 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 		if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
 					       sizeof(gso))))
 			return -EFAULT;
-		total += tun->vnet_hdr_sz;
+		total += vnet_hdr_sz;
 	}
 
 	copied = total;
-	total += skb->len;
-	if (!vlan_tx_tag_present(skb)) {
-		len = min_t(int, skb->len, len);
-	} else {
+	len = min_t(int, skb->len + vlan_hlen, len);
+	total += skb->len + vlan_hlen;
+	if (vlan_hlen) {
 		int copy, ret;
 		struct {
 			__be16 h_vlan_proto;
@@ -1311,8 +1319,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
 
 		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
-		len = min_t(int, skb->len + VLAN_HLEN, len);
-		total += VLAN_HLEN;
 
 		copy = min_t(int, vlan_offset, len);
 		ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
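Review note: the tun change decides once, up front, how many bytes of user buffer a packet really needs: payload plus an optional VLAN tag plus the optional virtio header. That sizing rule, extracted (illustrative types):

	#define VLAN_HLEN 4	/* 802.1Q tag the kernel may have to reinsert */

	struct pkt { int len; int has_vlan_tag; };

	static int required_len(const struct pkt *p, int vnet_hdr_sz)
	{
		int vlan_hlen = p->has_vlan_tag ? VLAN_HLEN : 0;

		/* compare against the iovec length before flagging TUN_PKT_STRIP */
		return p->len + vlan_hlen + vnet_hdr_sz;
	}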
include/linux/socket.h
@@ -256,7 +256,7 @@ struct ucred {
 #define MSG_EOF         MSG_FIN
 
 #define MSG_FASTOPEN	0x20000000	/* Send data in TCP SYN */
-#define MSG_CMSG_CLOEXEC 0x40000000	/* Set close_on_exit for file
+#define MSG_CMSG_CLOEXEC 0x40000000	/* Set close_on_exec for file
 					   descriptor received through
 					   SCM_RIGHTS */
 #if defined(CONFIG_COMPAT)
include/net/9p/transport.h
@@ -34,7 +34,6 @@
  * @list: used to maintain a list of currently available transports
  * @name: the human-readable name of the transport
  * @maxsize: transport provided maximum packet size
- * @pref: Preferences of this transport
  * @def: set if this transport should be considered the default
  * @create: member function to create a new connection on this transport
  * @close: member function to discard a connection on this transport
include/uapi/linux/Kbuild
@@ -125,6 +125,7 @@ header-y += filter.h
 header-y += firewire-cdev.h
 header-y += firewire-constants.h
 header-y += flat.h
+header-y += fou.h
 header-y += fs.h
 header-y += fsl_hypervisor.h
 header-y += fuse.h
@@ -141,6 +142,7 @@ header-y += hid.h
 header-y += hiddev.h
 header-y += hidraw.h
 header-y += hpet.h
+header-y += hsr_netlink.h
 header-y += hyperv.h
 header-y += hysdn_if.h
 header-y += i2c-dev.h
@@ -251,6 +253,7 @@ header-y += mii.h
 header-y += minix_fs.h
 header-y += mman.h
 header-y += mmtimer.h
+header-y += mpls.h
 header-y += mqueue.h
 header-y += mroute.h
 header-y += mroute6.h
@@ -424,6 +427,7 @@ header-y += virtio_net.h
 header-y += virtio_pci.h
 header-y += virtio_ring.h
 header-y += virtio_rng.h
+header=y += vm_sockets.h
 header-y += vt.h
 header-y += wait.h
 header-y += wanrouter.h
include/uapi/linux/if_bridge.h
@@ -15,6 +15,7 @@
 
 #include <linux/types.h>
 #include <linux/if_ether.h>
+#include <linux/in6.h>
 
 #define SYSFS_BRIDGE_ATTR	"bridge"
 #define SYSFS_BRIDGE_FDB	"brforward"
net/bridge/netfilter/nft_reject_bridge.c
@@ -18,6 +18,7 @@
 #include <net/netfilter/ipv6/nf_reject.h>
 #include <linux/ip.h>
 #include <net/ip.h>
+#include <net/ip6_checksum.h>
 #include <linux/netfilter_bridge.h>
 #include "../br_private.h"
 
net/dsa/slave.c
@@ -553,11 +553,14 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p,
 	/* We could not connect to a designated PHY, so use the switch internal
 	 * MDIO bus instead
 	 */
-	if (!p->phy)
+	if (!p->phy) {
 		p->phy = ds->slave_mii_bus->phy_map[p->port];
-	else
+		phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+				   p->phy_interface);
+	} else {
 		pr_info("attached PHY at address %d [%s]\n",
 			p->phy->addr, p->phy->drv->name);
+	}
 }
 
 int dsa_slave_suspend(struct net_device *slave_dev)
net/ipv4/geneve.c
@@ -144,6 +144,8 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
 	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
 	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
 
+	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+
 	return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst,
 				   tos, ttl, df, src_port, dst_port, xnet);
 }
@@ -364,6 +366,7 @@ late_initcall(geneve_init_module);
 static void __exit geneve_cleanup_module(void)
 {
 	destroy_workqueue(geneve_wq);
+	unregister_pernet_subsys(&geneve_net_ops);
 }
 module_exit(geneve_cleanup_module);
 
net/ipv4/tcp_input.c
@@ -2316,6 +2316,35 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
 
 /* Undo procedures. */
 
+/* We can clear retrans_stamp when there are no retransmissions in the
+ * window. It would seem that it is trivially available for us in
+ * tp->retrans_out, however, that kind of assumptions doesn't consider
+ * what will happen if errors occur when sending retransmission for the
+ * second time. ...It could the that such segment has only
+ * TCPCB_EVER_RETRANS set at the present time. It seems that checking
+ * the head skb is enough except for some reneging corner cases that
+ * are not worth the effort.
+ *
+ * Main reason for all this complexity is the fact that connection dying
+ * time now depends on the validity of the retrans_stamp, in particular,
+ * that successive retransmissions of a segment must not advance
+ * retrans_stamp under any conditions.
+ */
+static bool tcp_any_retrans_done(const struct sock *sk)
+{
+	const struct tcp_sock *tp = tcp_sk(sk);
+	struct sk_buff *skb;
+
+	if (tp->retrans_out)
+		return true;
+
+	skb = tcp_write_queue_head(sk);
+	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
+		return true;
+
+	return false;
+}
+
 #if FASTRETRANS_DEBUG > 1
 static void DBGUNDO(struct sock *sk, const char *msg)
 {
@@ -2411,6 +2440,8 @@ static bool tcp_try_undo_recovery(struct sock *sk)
 		 * is ACKed. For Reno it is MUST to prevent false
 		 * fast retransmits (RFC2582). SACK TCP is safe. */
 		tcp_moderate_cwnd(tp);
+		if (!tcp_any_retrans_done(sk))
+			tp->retrans_stamp = 0;
 		return true;
 	}
 	tcp_set_ca_state(sk, TCP_CA_Open);
@@ -2431,35 +2462,6 @@ static bool tcp_try_undo_dsack(struct sock *sk)
 	return false;
 }
 
-/* We can clear retrans_stamp when there are no retransmissions in the
- * window. It would seem that it is trivially available for us in
- * tp->retrans_out, however, that kind of assumptions doesn't consider
- * what will happen if errors occur when sending retransmission for the
- * second time. ...It could the that such segment has only
- * TCPCB_EVER_RETRANS set at the present time. It seems that checking
- * the head skb is enough except for some reneging corner cases that
- * are not worth the effort.
- *
- * Main reason for all this complexity is the fact that connection dying
- * time now depends on the validity of the retrans_stamp, in particular,
- * that successive retransmissions of a segment must not advance
- * retrans_stamp under any conditions.
- */
-static bool tcp_any_retrans_done(const struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	struct sk_buff *skb;
-
-	if (tp->retrans_out)
-		return true;
-
-	skb = tcp_write_queue_head(sk);
-	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
-		return true;
-
-	return false;
-}
-
 /* Undo during loss recovery after partial ACK or using F-RTO. */
 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 {
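Review note: tcp_any_retrans_done() is moved wholesale above its new caller in tcp_try_undo_recovery() because C needs the definition (or at least a declaration) before first use within the file. The equivalent alternative the patch did not take, sketched for contrast (illustrative stubs):

	#include <stdbool.h>

	struct sock { int retrans_out; };

	static bool tcp_any_retrans_done(const struct sock *sk); /* forward decl */

	static bool try_undo_recovery_stub(const struct sock *sk)
	{
		return !tcp_any_retrans_done(sk);	/* caller may sit above */
	}

	static bool tcp_any_retrans_done(const struct sock *sk)
	{
		return sk->retrans_out != 0;	/* stub of the real check */
	}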
net/ipv6/ip6_gre.c
@@ -961,8 +961,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
 	else
 		dev->flags &= ~IFF_POINTOPOINT;
 
-	dev->iflink = p->link;
-
 	/* Precalculate GRE options length */
 	if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
 		if (t->parms.o_flags&GRE_CSUM)
@@ -1272,6 +1270,7 @@ static int ip6gre_tunnel_init(struct net_device *dev)
 		u64_stats_init(&ip6gre_tunnel_stats->syncp);
 	}
 
+	dev->iflink = tunnel->parms.link;
+
 	return 0;
 }
@@ -1481,6 +1480,8 @@ static int ip6gre_tap_init(struct net_device *dev)
 	if (!dev->tstats)
 		return -ENOMEM;
 
+	dev->iflink = tunnel->parms.link;
+
 	return 0;
 }
net/ipv6/ip6_tunnel.c
@@ -289,9 +289,6 @@ static int ip6_tnl_create2(struct net_device *dev)
 	int err;
 
 	t = netdev_priv(dev);
-	err = ip6_tnl_dev_init(dev);
-	if (err < 0)
-		goto out;
 
 	err = register_netdevice(dev);
 	if (err < 0)
@@ -1526,6 +1523,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 
 
 static const struct net_device_ops ip6_tnl_netdev_ops = {
+	.ndo_init	= ip6_tnl_dev_init,
 	.ndo_uninit	= ip6_tnl_dev_uninit,
 	.ndo_start_xmit = ip6_tnl_xmit,
 	.ndo_do_ioctl	= ip6_tnl_ioctl,
@@ -1610,16 +1608,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct net *net = dev_net(dev);
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
-	int err = ip6_tnl_dev_init_gen(dev);
-
-	if (err)
-		return err;
 
 	t->parms.proto = IPPROTO_IPV6;
 	dev_hold(dev);
 
-	ip6_tnl_link_config(t);
-
 	rcu_assign_pointer(ip6n->tnls_wc[0], t);
 	return 0;
 }
net/ipv6/ip6_vti.c
@@ -172,10 +172,6 @@ static int vti6_tnl_create2(struct net_device *dev)
 	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
 	int err;
 
-	err = vti6_dev_init(dev);
-	if (err < 0)
-		goto out;
-
 	err = register_netdevice(dev);
 	if (err < 0)
 		goto out;
@@ -789,6 +785,7 @@ static int vti6_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 static const struct net_device_ops vti6_netdev_ops = {
+	.ndo_init	= vti6_dev_init,
 	.ndo_uninit	= vti6_dev_uninit,
 	.ndo_start_xmit = vti6_tnl_xmit,
 	.ndo_do_ioctl	= vti6_ioctl,
@@ -858,16 +855,10 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct net *net = dev_net(dev);
 	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
-	int err = vti6_dev_init_gen(dev);
-
-	if (err)
-		return err;
 
 	t->parms.proto = IPPROTO_IPV6;
 	dev_hold(dev);
 
-	vti6_link_config(t);
-
 	rcu_assign_pointer(ip6n->tnls_wc[0], t);
 	return 0;
 }
net/ipv6/sit.c
@@ -195,10 +195,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
 	struct sit_net *sitn = net_generic(net, sit_net_id);
 	int err;
 
-	err = ipip6_tunnel_init(dev);
-	if (err < 0)
-		goto out;
-	ipip6_tunnel_clone_6rd(dev, sitn);
+	memcpy(dev->dev_addr, &t->parms.iph.saddr, 4);
+	memcpy(dev->broadcast, &t->parms.iph.daddr, 4);
 
 	if ((__force u16)t->parms.i_flags & SIT_ISATAP)
 		dev->priv_flags |= IFF_ISATAP;
@@ -207,7 +205,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
 	if (err < 0)
 		goto out;
 
 	strcpy(t->parms.name, dev->name);
+	ipip6_tunnel_clone_6rd(dev, sitn);
 
 	dev->rtnl_link_ops = &sit_link_ops;
 
 	dev_hold(dev);
@@ -1330,6 +1329,7 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 static const struct net_device_ops ipip6_netdev_ops = {
+	.ndo_init	= ipip6_tunnel_init,
 	.ndo_uninit	= ipip6_tunnel_uninit,
 	.ndo_start_xmit	= sit_tunnel_xmit,
 	.ndo_do_ioctl	= ipip6_tunnel_ioctl,
@@ -1378,9 +1378,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
 
 	tunnel->dev = dev;
 	tunnel->net = dev_net(dev);
 
-	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
-	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
 	strcpy(tunnel->parms.name, dev->name);
 
 	ipip6_tunnel_bind_dev(dev);
 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -1405,7 +1403,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
 
 	tunnel->dev = dev;
 	tunnel->net = dev_net(dev);
-	strcpy(tunnel->parms.name, dev->name);
 
 	iph->version		= 4;
 	iph->protocol		= IPPROTO_IPV6;
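Review note: the ip6_tunnel, ip6_vti and sit hunks all adopt the same shape: the per-device init helper becomes .ndo_init, so register_netdevice() invokes it (and unwinds on failure) instead of each create path calling it by hand beforehand. The contract in a userspace miniature (illustrative):

	struct dev_ops {
		int (*init)(void);	/* the .ndo_init analogue */
	};

	static int tnl_init(void)
	{
		/* allocate stats, set device fields, etc. */
		return 0;
	}

	static const struct dev_ops tnl_netdev_ops = { .init = tnl_init };

	int register_device(const struct dev_ops *ops)
	{
		if (ops->init) {
			int err = ops->init();	/* core calls init... */
			if (err)
				return err;	/* ...and unwinds on failure */
		}
		return 0;
	}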