Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (21 commits)
  niu: VLAN_ETH_HLEN should be used to make sure that the whole MAC header was copied to the head buffer in the Vlan packets case
  KS8851: Fix ks8851_set_rx_mode() for IFF_MULTICAST
  KS8851: Fix MAC address write order
  KS8851: Add soft reset at probe time
  net: fix section mismatch in fec.c
  net: Fix struct inet_timewait_sock bitfield annotation
  tcp: Try to catch MSG_PEEK bug
  net: Fix IP_MULTICAST_IF
  bluetooth: static lock key fix
  bluetooth: scheduling while atomic bug fix
  tcp: fix TCP_DEFER_ACCEPT retrans calculation
  tcp: reduce SYN-ACK retrans for TCP_DEFER_ACCEPT
  tcp: accept socket after TCP_DEFER_ACCEPT period
  Revert "tcp: fix tcp_defer_accept to consider the timeout"
  AF_UNIX: Fix deadlock on connecting to shutdown socket
  ethoc: clear only pending irqs
  ethoc: inline regs access
  vmxnet3: use dev_dbg, fix build for CONFIG_BLOCK=n
  virtio_net: use dev_kfree_skb_any() in free_old_xmit_skbs()
  be2net: fix support for PCI hot plug
  ...
commit 4848490c50
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -243,15 +243,26 @@ static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
 
 int be_cmd_POST(struct be_adapter *adapter)
 {
-	u16 stage, error;
+	u16 stage;
+	int status, timeout = 0;
 
-	error = be_POST_stage_get(adapter, &stage);
-	if (error || stage != POST_STAGE_ARMFW_RDY) {
-		dev_err(&adapter->pdev->dev, "POST failed.\n");
-		return -1;
-	}
+	do {
+		status = be_POST_stage_get(adapter, &stage);
+		if (status) {
+			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
+				stage);
+			return -1;
+		} else if (stage != POST_STAGE_ARMFW_RDY) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(2 * HZ);
+			timeout += 2;
+		} else {
+			return 0;
+		}
+	} while (timeout < 20);
 
-	return 0;
+	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
+	return -1;
 }
 
 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
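The new be_cmd_POST() is a bounded poll: query the stage, sleep two seconds between attempts, give up after roughly 20 s. A minimal userspace sketch of the same pattern — read_stage() and STAGE_READY are hypothetical stand-ins for be_POST_stage_get() and POST_STAGE_ARMFW_RDY:

    #include <stdio.h>
    #include <time.h>

    #define STAGE_READY 1

    /* hypothetical stand-in for the hardware query */
    static int read_stage(int *stage)
    {
            static int polls;
            *stage = (++polls >= 3) ? STAGE_READY : 0; /* "ready" on 3rd poll */
            return 0;                                  /* 0 = query succeeded */
    }

    int main(void)
    {
            const struct timespec two_sec = { 2, 0 };
            int stage, timeout = 0;

            do {
                    if (read_stage(&stage))
                            return fprintf(stderr, "query error\n"), 1;
                    if (stage == STAGE_READY)
                            return puts("POST done"), 0;
                    nanosleep(&two_sec, NULL);  /* like schedule_timeout(2 * HZ) */
                    timeout += 2;
            } while (timeout < 20);

            fprintf(stderr, "POST timeout; stage=0x%x\n", stage);
            return 1;
    }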
@@ -729,8 +740,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
 /* Create an rx filtering policy configuration on an i/f
  * Uses mbox
  */
-int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
-		bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_if_create *req;
@@ -746,8 +757,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
 
-	req->capability_flags = cpu_to_le32(flags);
-	req->enable_flags = cpu_to_le32(flags);
+	req->capability_flags = cpu_to_le32(cap_flags);
+	req->enable_flags = cpu_to_le32(en_flags);
 	req->pmac_invalid = pmac_invalid;
 	if (!pmac_invalid)
 		memcpy(req->mac_addr, mac, ETH_ALEN);
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -720,8 +720,9 @@ extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 			u32 if_id, u32 *pmac_id);
 extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
-extern int be_cmd_if_create(struct be_adapter *adapter, u32 if_flags, u8 *mac,
-			bool pmac_invalid, u32 *if_handle, u32 *pmac_id);
+extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
+			u32 en_flags, u8 *mac, bool pmac_invalid,
+			u32 *if_handle, u32 *pmac_id);
 extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
 extern int be_cmd_eq_create(struct be_adapter *adapter,
 		struct be_queue_info *eq, int eq_delay);
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1620,19 +1620,22 @@ static int be_open(struct net_device *netdev)
 static int be_setup(struct be_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	u32 if_flags;
+	u32 cap_flags, en_flags;
 	int status;
 
-	if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
-		BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
-		BE_IF_FLAGS_PASS_L3L4_ERRORS;
-	status = be_cmd_if_create(adapter, if_flags, netdev->dev_addr,
-			false/* pmac_invalid */, &adapter->if_handle,
-			&adapter->pmac_id);
+	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+			BE_IF_FLAGS_MCAST_PROMISCUOUS |
+			BE_IF_FLAGS_PROMISCUOUS |
+			BE_IF_FLAGS_PASS_L3L4_ERRORS;
+	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+			BE_IF_FLAGS_PASS_L3L4_ERRORS;
+
+	status = be_cmd_if_create(adapter, cap_flags, en_flags,
+			netdev->dev_addr, false/* pmac_invalid */,
+			&adapter->if_handle, &adapter->pmac_id);
 	if (status != 0)
 		goto do_none;
 
 	status = be_tx_queues_create(adapter);
 	if (status != 0)
 		goto if_destroy;
@@ -2055,6 +2058,10 @@ static int be_hw_up(struct be_adapter *adapter)
 	if (status)
 		return status;
 
+	status = be_cmd_reset_function(adapter);
+	if (status)
+		return status;
+
 	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
 	if (status)
 		return status;
@@ -2108,10 +2115,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
 	if (status)
 		goto free_netdev;
 
-	status = be_cmd_reset_function(adapter);
-	if (status)
-		goto ctrl_clean;
-
 	status = be_stats_init(adapter);
 	if (status)
 		goto ctrl_clean;
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -223,24 +223,25 @@ struct ethoc_bd {
 	u32 addr;
 };
 
-static u32 ethoc_read(struct ethoc *dev, loff_t offset)
+static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
 {
 	return ioread32(dev->iobase + offset);
 }
 
-static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
+static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
 {
 	iowrite32(data, dev->iobase + offset);
 }
 
-static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd)
+static inline void ethoc_read_bd(struct ethoc *dev, int index,
+		struct ethoc_bd *bd)
 {
 	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
 	bd->stat = ethoc_read(dev, offset + 0);
 	bd->addr = ethoc_read(dev, offset + 4);
 }
 
-static void ethoc_write_bd(struct ethoc *dev, int index,
+static inline void ethoc_write_bd(struct ethoc *dev, int index,
 		const struct ethoc_bd *bd)
 {
 	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
@@ -248,33 +249,33 @@ static void ethoc_write_bd(struct ethoc *dev, int index,
 	ethoc_write(dev, offset + 4, bd->addr);
 }
 
-static void ethoc_enable_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
 {
 	u32 imask = ethoc_read(dev, INT_MASK);
 	imask |= mask;
 	ethoc_write(dev, INT_MASK, imask);
 }
 
-static void ethoc_disable_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
 {
 	u32 imask = ethoc_read(dev, INT_MASK);
 	imask &= ~mask;
 	ethoc_write(dev, INT_MASK, imask);
 }
 
-static void ethoc_ack_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
 {
 	ethoc_write(dev, INT_SOURCE, mask);
 }
 
-static void ethoc_enable_rx_and_tx(struct ethoc *dev)
+static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
 {
 	u32 mode = ethoc_read(dev, MODER);
 	mode |= MODER_RXEN | MODER_TXEN;
 	ethoc_write(dev, MODER, mode);
 }
 
-static void ethoc_disable_rx_and_tx(struct ethoc *dev)
+static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
 {
 	u32 mode = ethoc_read(dev, MODER);
 	mode &= ~(MODER_RXEN | MODER_TXEN);
@@ -508,7 +509,7 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
 		return IRQ_NONE;
 	}
 
-	ethoc_ack_irq(priv, INT_MASK_ALL);
+	ethoc_ack_irq(priv, pending);
 
 	if (pending & INT_MASK_BUSY) {
 		dev_err(&dev->dev, "packet dropped\n");
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1654,7 +1654,7 @@ static const struct net_device_ops fec_netdev_ops = {
  *
  * index is only used in legacy code
  */
-int __init fec_enet_init(struct net_device *dev, int index)
+static int fec_enet_init(struct net_device *dev, int index)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
 	struct bufdesc *cbd_base;
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -170,6 +170,36 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
 		ks_err(ks, "spi_sync() failed\n");
 }
 
+/**
+ * ks8851_wrreg8 - write 8bit register value to chip
+ * @ks: The chip state
+ * @reg: The register address
+ * @val: The value to write
+ *
+ * Issue a write to put the value @val into the register specified in @reg.
+ */
+static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
+{
+	struct spi_transfer *xfer = &ks->spi_xfer1;
+	struct spi_message *msg = &ks->spi_msg1;
+	__le16 txb[2];
+	int ret;
+	int bit;
+
+	bit = 1 << (reg & 3);
+
+	txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR);
+	txb[1] = val;
+
+	xfer->tx_buf = txb;
+	xfer->rx_buf = NULL;
+	xfer->len = 3;
+
+	ret = spi_sync(ks->spidev, msg);
+	if (ret < 0)
+		ks_err(ks, "spi_sync() failed\n");
+}
+
 /**
  * ks8851_rx_1msg - select whether to use one or two messages for spi read
  * @ks: The device structure
@@ -322,13 +352,12 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
 static int ks8851_write_mac_addr(struct net_device *dev)
 {
 	struct ks8851_net *ks = netdev_priv(dev);
-	u16 *mcp = (u16 *)dev->dev_addr;
+	int i;
 
 	mutex_lock(&ks->lock);
 
-	ks8851_wrreg16(ks, KS_MARL, mcp[0]);
-	ks8851_wrreg16(ks, KS_MARM, mcp[1]);
-	ks8851_wrreg16(ks, KS_MARH, mcp[2]);
+	for (i = 0; i < ETH_ALEN; i++)
+		ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
 
 	mutex_unlock(&ks->lock);
 
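The write-order fix hinges on the KS_MAR() macro added in ks8851.h further down: 0x15 - (_m) means dev_addr[0], the most significant byte of the MAC, lands at the highest register address. A standalone check of the mapping (the MAC value itself is made up):

    #include <stdio.h>

    #define KS_MAR(_m)	(0x15 - (_m))	/* as defined in ks8851.h */

    int main(void)
    {
            const unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
            int i;

            for (i = 0; i < 6; i++)
                    printf("dev_addr[%d] = 0x%02x -> register 0x%02x\n",
                           i, mac[i], KS_MAR(i));
            return 0;
    }

Running it shows the bytes descending from register 0x15 to 0x10, which is the order the chip expects.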
@@ -951,7 +980,7 @@ static void ks8851_set_rx_mode(struct net_device *dev)
 			mcptr = mcptr->next;
 		}
 
-		rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA;
+		rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA;
 	} else {
 		/* just accept broadcast / unicast */
 		rxctrl.rxcr1 = RXCR1_RXPAFMA;
@@ -1239,6 +1268,9 @@ static int __devinit ks8851_probe(struct spi_device *spi)
 	ndev->netdev_ops = &ks8851_netdev_ops;
 	ndev->irq = spi->irq;
 
+	/* issue a global soft reset to reset the device. */
+	ks8851_soft_reset(ks, GRR_GSR);
+
 	/* simple check for a valid chip being connected to the bus */
 
 	if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
--- a/drivers/net/ks8851.h
+++ b/drivers/net/ks8851.h
@@ -16,6 +16,7 @@
 #define CCR_32PIN				(1 << 0)
 
 /* MAC address registers */
+#define KS_MAR(_m)				0x15 - (_m)
 #define KS_MARL					0x10
 #define KS_MARM					0x12
 #define KS_MARH					0x14
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3545,7 +3545,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
 	rp->rcr_index = index;
 
 	skb_reserve(skb, NET_IP_ALIGN);
-	__pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX));
+	__pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));
 
 	rp->rx_packets++;
 	rp->rx_bytes += skb->len;
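VLAN_ETH_HLEN is the full MAC header of an 802.1Q-tagged frame: the 14-byte Ethernet header plus the 4-byte VLAN tag, 18 bytes total. A trivial confirmation of that arithmetic, with the constants restated locally rather than pulled from kernel headers:

    #include <assert.h>
    #include <stdio.h>

    #define ETH_HLEN	14	/* dst MAC + src MAC + ethertype */
    #define VLAN_HLEN	4	/* 802.1Q TCI + encapsulated ethertype */
    #define VLAN_ETH_HLEN	(ETH_HLEN + VLAN_HLEN)

    int main(void)
    {
            assert(VLAN_ETH_HLEN == 18);
            printf("pull at least %d bytes to cover a tagged MAC header\n",
                   VLAN_ETH_HLEN);
            return 0;
    }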
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -454,7 +454,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
 		vi->dev->stats.tx_bytes += skb->len;
 		vi->dev->stats.tx_packets++;
 		tot_sgs += skb_vnet_hdr(skb)->num_sg;
-		kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 	}
 	return tot_sgs;
 }
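free_old_xmit_skbs() can be called with interrupts disabled, where plain kfree_skb() is not allowed. dev_kfree_skb_any() picks a safe variant at run time; roughly, per its kernel implementation of this era (paraphrased sketch, not verbatim source):

    /* sketch of what dev_kfree_skb_any() does */
    void dev_kfree_skb_any(struct sk_buff *skb)
    {
            if (in_irq() || irqs_disabled())
                    dev_kfree_skb_irq(skb);	/* defer free to softirq */
            else
                    dev_kfree_skb(skb);	/* free immediately */
    }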
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -481,7 +481,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 	}
 	rq->uncommitted[ring_idx] += num_allocated;
 
-	dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
+	dev_dbg(&adapter->netdev->dev,
+		"alloc_rx_buf: %d allocated, next2fill %u, next2comp "
 		"%u, uncommited %u\n", num_allocated, ring->next2fill,
 		ring->next2comp, rq->uncommitted[ring_idx]);
 
@@ -539,7 +540,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		tbi = tq->buf_info + tq->tx_ring.next2fill;
 		tbi->map_type = VMXNET3_MAP_NONE;
 
-		dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+		dev_dbg(&adapter->netdev->dev,
+			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
 			tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
 			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -572,7 +574,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		gdesc->dword[2] = dw2 | buf_size;
 		gdesc->dword[3] = 0;
 
-		dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+		dev_dbg(&adapter->netdev->dev,
+			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
 			tq->tx_ring.next2fill, gdesc->txd.addr,
 			gdesc->dword[2], gdesc->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -600,7 +603,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		gdesc->dword[2] = dw2 | frag->size;
 		gdesc->dword[3] = 0;
 
-		dprintk(KERN_ERR "txd[%u]: 0x%llu %u %u\n",
+		dev_dbg(&adapter->netdev->dev,
+			"txd[%u]: 0x%llu %u %u\n",
 			tq->tx_ring.next2fill, gdesc->txd.addr,
 			gdesc->dword[2], gdesc->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -697,7 +701,8 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	tdd = tq->data_ring.base + tq->tx_ring.next2fill;
 
 	memcpy(tdd->data, skb->data, ctx->copy_size);
-	dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n",
+	dev_dbg(&adapter->netdev->dev,
+		"copy %u bytes to dataRing[%u]\n",
 		ctx->copy_size, tq->tx_ring.next2fill);
 	return 1;
 
@@ -808,7 +813,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 
 	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
 		tq->stats.tx_ring_full++;
-		dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u"
+		dev_dbg(&adapter->netdev->dev,
+			"tx queue stopped on %s, next2comp %u"
 			" next2fill %u\n", adapter->netdev->name,
 			tq->tx_ring.next2comp, tq->tx_ring.next2fill);
 
@@ -853,7 +859,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 
 	/* finally flips the GEN bit of the SOP desc */
 	gdesc->dword[2] ^= VMXNET3_TXD_GEN;
-	dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
+	dev_dbg(&adapter->netdev->dev,
+		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
 		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
 		tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
 		gdesc->dword[3]);
@@ -990,7 +997,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 		if (unlikely(rcd->len == 0)) {
 			/* Pretend the rx buffer is skipped. */
 			BUG_ON(!(rcd->sop && rcd->eop));
-			dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n",
+			dev_dbg(&adapter->netdev->dev,
+				"rxRing[%u][%u] 0 length\n",
 				ring_idx, idx);
 			goto rcd_done;
 		}
@@ -1683,7 +1691,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
 	int err;
 	u32 ret;
 
-	dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
+	dev_dbg(&adapter->netdev->dev,
+		"%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
 		" %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
 		adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
 		adapter->rx_queue.rx_ring[0].size,
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -30,6 +30,7 @@
 #include <linux/types.h>
 #include <linux/ethtool.h>
 #include <linux/delay.h>
+#include <linux/device.h>
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/ethtool.h>
@@ -59,7 +60,6 @@
 #include <linux/if_vlan.h>
 #include <linux/if_arp.h>
 #include <linux/inetdevice.h>
-#include <linux/dst.h>
 
 #include "vmxnet3_defs.h"
 
|
||||
__u16 tw_num;
|
||||
kmemcheck_bitfield_begin(flags);
|
||||
/* And these are ours. */
|
||||
__u8 tw_ipv6only:1,
|
||||
tw_transparent:1;
|
||||
/* 14 bits hole, try to pack */
|
||||
unsigned int tw_ipv6only : 1,
|
||||
tw_transparent : 1,
|
||||
tw_pad : 14, /* 14 bits hole */
|
||||
tw_ipv6_offset : 16;
|
||||
kmemcheck_bitfield_end(flags);
|
||||
__u16 tw_ipv6_offset;
|
||||
unsigned long tw_ttd;
|
||||
struct inet_bind_bucket *tw_tb;
|
||||
struct hlist_node tw_death_node;
|
||||
|
@ -92,6 +92,8 @@ static void add_conn(struct work_struct *work)
|
||||
|
||||
dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
|
||||
|
||||
dev_set_drvdata(&conn->dev, conn);
|
||||
|
||||
if (device_add(&conn->dev) < 0) {
|
||||
BT_ERR("Failed to register connection device");
|
||||
return;
|
||||
@@ -144,8 +146,6 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
 	conn->dev.class = bt_class;
 	conn->dev.parent = &hdev->dev;
 
-	dev_set_drvdata(&conn->dev, conn);
-
 	device_initialize(&conn->dev);
 
 	INIT_WORK(&conn->work_add, add_conn);
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -555,12 +555,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
 
 	conn->feat_mask = 0;
 
+	setup_timer(&conn->info_timer, l2cap_info_timeout,
+						(unsigned long) conn);
+
 	spin_lock_init(&conn->lock);
 	rwlock_init(&conn->chan_list.lock);
 
-	setup_timer(&conn->info_timer, l2cap_info_timeout,
-						(unsigned long) conn);
-
 	conn->disc_reason = 0x13;
 
 	return conn;
@@ -783,6 +783,9 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
 		/* Default config options */
 		pi->conf_len = 0;
 		pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+		skb_queue_head_init(TX_QUEUE(sk));
+		skb_queue_head_init(SREJ_QUEUE(sk));
+		INIT_LIST_HEAD(SREJ_LIST(sk));
 	}
 
 static struct proto l2cap_proto = {
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -446,6 +446,28 @@ extern int sysctl_tcp_synack_retries;
 
 EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
 
+/* Decide when to expire the request and when to resend SYN-ACK */
+static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
+				  const int max_retries,
+				  const u8 rskq_defer_accept,
+				  int *expire, int *resend)
+{
+	if (!rskq_defer_accept) {
+		*expire = req->retrans >= thresh;
+		*resend = 1;
+		return;
+	}
+	*expire = req->retrans >= thresh &&
+		  (!inet_rsk(req)->acked || req->retrans >= max_retries);
+	/*
+	 * Do not resend while waiting for data after ACK,
+	 * start to resend on end of deferring period to give
+	 * last chance for data or ACK to create established socket.
+	 */
+	*resend = !inet_rsk(req)->acked ||
+		  req->retrans >= rskq_defer_accept - 1;
+}
+
 void inet_csk_reqsk_queue_prune(struct sock *parent,
 				const unsigned long interval,
 				const unsigned long timeout,
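The expire/resend split above is subtle enough to be worth testing in isolation. A standalone harness with the same logic transplanted onto simplified types — the struct below is a mock, not the kernel's request_sock:

    #include <stdio.h>

    struct req { int retrans; int acked; };	/* mock request */

    static void syn_ack_recalc(const struct req *req, int thresh,
                               int max_retries, int defer_accept,
                               int *expire, int *resend)
    {
            if (!defer_accept) {
                    *expire = req->retrans >= thresh;
                    *resend = 1;
                    return;
            }
            *expire = req->retrans >= thresh &&
                      (!req->acked || req->retrans >= max_retries);
            *resend = !req->acked || req->retrans >= defer_accept - 1;
    }

    int main(void)
    {
            struct req r = { .retrans = 2, .acked = 1 };
            int expire, resend;

            /* defer_accept = 4 retransmits, thresh = 5, max_retries = 5 */
            syn_ack_recalc(&r, 5, 5, 4, &expire, &resend);
            printf("retrans=2 acked=1: expire=%d resend=%d\n", expire, resend);

            r.retrans = 3;	/* end of deferring period: resend again */
            syn_ack_recalc(&r, 5, 5, 4, &expire, &resend);
            printf("retrans=3 acked=1: expire=%d resend=%d\n", expire, resend);
            return 0;
    }

The first call reports resend=0 (bare ACK received, still deferring); the second reports resend=1, the "last chance" SYN-ACK at the end of the deferring period.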
@@ -501,9 +523,15 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
 		reqp=&lopt->syn_table[i];
 		while ((req = *reqp) != NULL) {
 			if (time_after_eq(now, req->expires)) {
-				if ((req->retrans < thresh ||
-				     (inet_rsk(req)->acked && req->retrans < max_retries))
-				    && !req->rsk_ops->rtx_syn_ack(parent, req)) {
+				int expire = 0, resend = 0;
+
+				syn_ack_recalc(req, thresh, max_retries,
+					       queue->rskq_defer_accept,
+					       &expire, &resend);
+				if (!expire &&
+				    (!resend ||
+				     !req->rsk_ops->rtx_syn_ack(parent, req) ||
+				     inet_rsk(req)->acked)) {
 					unsigned long timeo;
 
 					if (req->retrans++ == 0)
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -634,17 +634,16 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 				break;
 			}
 			dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
-			if (dev) {
+			if (dev)
 				mreq.imr_ifindex = dev->ifindex;
-				dev_put(dev);
-			}
 		} else
-			dev = __dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
+			dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
 
 
 		err = -EADDRNOTAVAIL;
 		if (!dev)
 			break;
+		dev_put(dev);
 
 		err = -EINVAL;
 		if (sk->sk_bound_dev_if &&
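For context, this is how userspace drives the option being fixed; supplying an address selects the interface via the ip_dev_find() path, supplying an index takes the dev_get_by_index() path. Interface name and address below are arbitrary examples:

    #include <arpa/inet.h>
    #include <net/if.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            struct ip_mreqn mreq;

            memset(&mreq, 0, sizeof(mreq));
            mreq.imr_ifindex = if_nametoindex("eth0");	/* example name */
            /* alternatively: inet_pton(AF_INET, "192.0.2.1", &mreq.imr_address); */

            if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, &mreq, sizeof(mreq)) < 0)
                    perror("IP_MULTICAST_IF");	/* EADDRNOTAVAIL if no such device */
            close(fd);
            return 0;
    }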
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -326,6 +326,43 @@ void tcp_enter_memory_pressure(struct sock *sk)
 
 EXPORT_SYMBOL(tcp_enter_memory_pressure);
 
+/* Convert seconds to retransmits based on initial and max timeout */
+static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
+{
+	u8 res = 0;
+
+	if (seconds > 0) {
+		int period = timeout;
+
+		res = 1;
+		while (seconds > period && res < 255) {
+			res++;
+			timeout <<= 1;
+			if (timeout > rto_max)
+				timeout = rto_max;
+			period += timeout;
+		}
+	}
+	return res;
+}
+
+/* Convert retransmits to seconds based on initial and max timeout */
+static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
+{
+	int period = 0;
+
+	if (retrans > 0) {
+		period = timeout;
+		while (--retrans) {
+			timeout <<= 1;
+			if (timeout > rto_max)
+				timeout = rto_max;
+			period += timeout;
+		}
+	}
+	return period;
+}
+
 /*
  * Wait for a TCP event.
  *
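Both helpers just sum exponentially backed-off timeouts capped at rto_max. Copying them into a standalone program makes the mapping easy to eyeball; the 3 s initial timeout and 120 s cap below mirror TCP_TIMEOUT_INIT/HZ and TCP_RTO_MAX/HZ of this era (an assumption — check your tree's values):

    #include <stdio.h>

    typedef unsigned char u8;

    static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
    {
            u8 res = 0;

            if (seconds > 0) {
                    int period = timeout;

                    res = 1;
                    while (seconds > period && res < 255) {
                            res++;
                            timeout <<= 1;
                            if (timeout > rto_max)
                                    timeout = rto_max;
                            period += timeout;
                    }
            }
            return res;
    }

    static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
    {
            int period = 0;

            if (retrans > 0) {
                    period = timeout;
                    while (--retrans) {
                            timeout <<= 1;
                            if (timeout > rto_max)
                                    timeout = rto_max;
                            period += timeout;
                    }
            }
            return period;
    }

    int main(void)
    {
            int secs;

            for (secs = 1; secs <= 60; secs *= 2) {
                    u8 r = secs_to_retrans(secs, 3, 120);
                    printf("%2d s -> %u retransmits -> %d s\n",
                           secs, r, retrans_to_secs(r, 3, 120));
            }
            return 0;
    }

For example, 4 s maps to 2 retransmits, which round-trips back to 9 s (3 + 6) — the granularity is whole backoff periods, not seconds.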
@@ -1405,7 +1442,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 				goto found_ok_skb;
 			if (tcp_hdr(skb)->fin)
 				goto found_fin_ok;
-			WARN_ON(!(flags & MSG_PEEK));
+			if (WARN_ON(!(flags & MSG_PEEK)))
+				printk(KERN_INFO "recvmsg bug 2: copied %X "
+				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
 		}
 
 		/* Well, if we have backlog, try to process it now yet. */
@@ -2163,16 +2202,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		break;
 
 	case TCP_DEFER_ACCEPT:
-		icsk->icsk_accept_queue.rskq_defer_accept = 0;
-		if (val > 0) {
-			/* Translate value in seconds to number of
-			 * retransmits */
-			while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
-			       val > ((TCP_TIMEOUT_INIT / HZ) <<
-				       icsk->icsk_accept_queue.rskq_defer_accept))
-				icsk->icsk_accept_queue.rskq_defer_accept++;
-			icsk->icsk_accept_queue.rskq_defer_accept++;
-		}
+		/* Translate value in seconds to number of retransmits */
+		icsk->icsk_accept_queue.rskq_defer_accept =
+					secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
+							TCP_RTO_MAX / HZ);
 		break;
 
 	case TCP_WINDOW_CLAMP:
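Userspace view of the same knob, for context: the value is passed in seconds and now travels through secs_to_retrans()/retrans_to_secs() instead of the open-coded loop. A typical listener would do:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);
            int secs = 10;	/* defer accept for ~10 s worth of retransmits */
            socklen_t len = sizeof(secs);

            if (setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
                           &secs, sizeof(secs)) < 0)
                    perror("TCP_DEFER_ACCEPT");

            /* getsockopt() now reports retrans_to_secs() of the stored value */
            getsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, &len);
            printf("effective deferral: %d s\n", secs);
            return 0;
    }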
@@ -2353,8 +2386,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 		val = (val ? : sysctl_tcp_fin_timeout) / HZ;
 		break;
 	case TCP_DEFER_ACCEPT:
-		val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
-			((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
+		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
+				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
 		break;
 	case TCP_WINDOW_CLAMP:
 		val = tp->window_clamp;
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -641,10 +641,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 	if (!(flg & TCP_FLAG_ACK))
 		return NULL;
 
-	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
-	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
+	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
+	if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
 	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
-		inet_csk(sk)->icsk_accept_queue.rskq_defer_accept--;
 		inet_rsk(req)->acked = 1;
 		return NULL;
 	}
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -496,13 +496,17 @@ done:
 			goto e_inval;
 
 		if (val) {
+			struct net_device *dev;
+
 			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
 				goto e_inval;
 
-			if (__dev_get_by_index(net, val) == NULL) {
+			dev = dev_get_by_index(net, val);
+			if (!dev) {
 				retv = -ENODEV;
 				break;
 			}
+			dev_put(dev);
 		}
 		np->mcast_oif = val;
 		retv = 0;
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1074,6 +1074,8 @@ restart:
 	err = -ECONNREFUSED;
 	if (other->sk_state != TCP_LISTEN)
 		goto out_unlock;
+	if (other->sk_shutdown & RCV_SHUTDOWN)
+		goto out_unlock;
 
 	if (unix_recvq_full(other)) {
 		err = -EAGAIN;
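A minimal repro sketch for the deadlock this hunk fixes, going by the commit subject: connecting to a listening AF_UNIX socket that has been shut down should fail with ECONNREFUSED rather than block forever. The socket path is arbitrary:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>

    int main(void)
    {
            struct sockaddr_un sa = { .sun_family = AF_UNIX };
            int srv = socket(AF_UNIX, SOCK_STREAM, 0);
            int cli = socket(AF_UNIX, SOCK_STREAM, 0);

            strcpy(sa.sun_path, "/tmp/repro.sock");
            unlink(sa.sun_path);
            bind(srv, (struct sockaddr *)&sa, sizeof(sa));
            listen(srv, 1);
            shutdown(srv, SHUT_RDWR);	/* listener shut down, never accepts */

            /* unfixed kernels can block here; fixed ones fail fast */
            if (connect(cli, (struct sockaddr *)&sa, sizeof(sa)) < 0)
                    perror("connect");
            return 0;
    }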