Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (27 commits)
  bnx2x: allow device properly initialize after hotplug
  bnx2x: fix DMAE timeout according to hw specifications
  bnx2x: properly handle CFC DEL in cnic flow
  bnx2x: call dev_kfree_skb_any instead of dev_kfree_skb
  net: filter: move forward declarations to avoid compile warnings
  pktgen: refactor pg_init() code
  pktgen: use vzalloc_node() instead of vmalloc_node() + memset()
  net: skb_trim explicitely check the linearity instead of data_len
  ipv4: Give backtrace in ip_rt_bug().
  net: avoid synchronize_rcu() in dev_deactivate_many
  net: remove synchronize_net() from netdev_set_master()
  rtnetlink: ignore NETDEV_RELEASE and NETDEV_JOIN event
  net: rename NETDEV_BONDING_DESLAVE to NETDEV_RELEASE
  bridge: call NETDEV_JOIN notifiers when add a slave
  netpoll: disable netpoll when enslave a device
  macvlan: Forward unicast frames in bridge mode to lowerdev
  net: Remove linux/prefetch.h include from linux/skbuff.h
  ipv4: Include linux/prefetch.h in fib_trie.c
  netlabel: Remove prefetches from list handlers.
  drivers/net: add prefetch header for prefetch users
  ...

Fixed up prefetch parts: removed a few duplicate prefetch.h includes,
fixed the location of the igb prefetch.h, took my version of the
skbuff.h code without the extra parentheses etc.
commit 53ee7569ce
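Note on a recurring change below: several bnx2x hunks replace dev_kfree_skb() with dev_kfree_skb_any(). dev_kfree_skb() may only be used where IRQs are enabled (process or softirq context), while dev_kfree_skb_any() selects the safe variant at runtime. Roughly, the helper looks like this (paraphrasing net/core/dev.c of this era; shown for context, not part of the diff):

	void dev_kfree_skb_any(struct sk_buff *skb)
	{
		if (in_irq() || irqs_disabled())
			dev_kfree_skb_irq(skb);	/* defer the actual free to softirq */
		else
			dev_kfree_skb(skb);	/* immediate free is safe here */
	}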
@@ -131,7 +131,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	/* release skb */
 	WARN_ON(!skb);
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	tx_buf->first_bd = 0;
 	tx_buf->skb = NULL;

@@ -465,7 +465,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	} else {
 		DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
 		   " - dropping packet!\n");
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 	}

@@ -840,7 +840,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 	mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
 				 DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		return -ENOMEM;
 	}

@@ -571,7 +571,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
 				      struct dmae_command *dmae)
 {
 	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
-	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
+	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
 	int rc = 0;

 	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
@@ -3666,7 +3666,8 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
 				     union event_ring_elem *elem)
 {
 	if (!bp->cnic_eth_dev.starting_cid ||
-	    cid < bp->cnic_eth_dev.starting_cid)
+	    (cid < bp->cnic_eth_dev.starting_cid &&
+	    cid != bp->cnic_eth_dev.iscsi_l2_cid))
 		return 1;

 	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
@@ -7287,51 +7288,35 @@ static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
 	msleep(MCP_ONE_TIMEOUT);
 }

-static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
+/*
+ * initializes bp->common.shmem_base and waits for validity signature to appear
+ */
+static int bnx2x_init_shmem(struct bnx2x *bp)
 {
-	u32 shmem, cnt, validity_offset, val;
-	int rc = 0;
+	int cnt = 0;
+	u32 val = 0;

-	msleep(100);
-
-	/* Get shmem offset */
-	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
-	if (shmem == 0) {
-		BNX2X_ERR("Shmem 0 return failure\n");
-		rc = -ENOTTY;
-		goto exit_lbl;
-	}
-
-	validity_offset = offsetof(struct shmem_region, validity_map[0]);
-
-	/* Wait for MCP to come up */
-	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
-		/* TBD: its best to check validity map of last port.
-		 * currently checks on port 0.
-		 */
-		val = REG_RD(bp, shmem + validity_offset);
-		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
-		   shmem + validity_offset, val);
-
-		/* check that shared memory is valid. */
-		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-			break;
+	do {
+		bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+		if (bp->common.shmem_base) {
+			val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+			if (val & SHR_MEM_VALIDITY_MB)
+				return 0;
+		}

 		bnx2x_mcp_wait_one(bp);
-	}

-	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
+	} while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));

-	/* Check that shared memory is valid. This indicates that MCP is up. */
-	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
-	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
-		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
-		rc = -ENOTTY;
-		goto exit_lbl;
-	}
+	BNX2X_ERR("BAD MCP validity signature\n");

-exit_lbl:
+	return -ENODEV;
+}
+
+static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
+{
+	int rc = bnx2x_init_shmem(bp);
+
 	/* Restore the `magic' bit value */
 	if (!CHIP_IS_E1(bp))
 		bnx2x_clp_reset_done(bp, magic_val);
@@ -7844,10 +7829,12 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
 		       bp->common.flash_size, bp->common.flash_size);

-	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+	bnx2x_init_shmem(bp);
+
 	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
 					MISC_REG_GENERIC_CR_1 :
 					MISC_REG_GENERIC_CR_0));
+
 	bp->link_params.shmem_base = bp->common.shmem_base;
 	bp->link_params.shmem2_base = bp->common.shmem2_base;
 	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
@@ -7859,11 +7846,6 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 		return;
 	}

-	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
-	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-		BNX2X_ERR("BAD MCP validity signature\n");
-
 	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
 	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

@@ -1640,6 +1640,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		}
 	}

+	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);
+
 	/* If this is the first slave, then we need to set the master's hardware
 	 * address to be the same as the slave's. */
 	if (is_zero_ether_addr(bond->dev->dev_addr))
@@ -1972,7 +1974,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 	}

 	block_netpoll_tx();
-	netdev_bonding_change(bond_dev, NETDEV_BONDING_DESLAVE);
+	netdev_bonding_change(bond_dev, NETDEV_RELEASE);
 	write_lock_bh(&bond->lock);

 	slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -238,10 +238,8 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)

 		dest = macvlan_hash_lookup(port, eth->h_dest);
 		if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
-			unsigned int length = skb->len + ETH_HLEN;
-			int ret = dest->forward(dest->dev, skb);
-			macvlan_count_rx(dest, length,
-					 ret == NET_RX_SUCCESS, 0);
+			/* send to lowerdev first for its network taps */
+			vlan->forward(vlan->lowerdev, skb);

 			return NET_XMIT_SUCCESS;
 		}
@@ -621,11 +621,10 @@ static int netconsole_netdev_event(struct notifier_block *this,
 	bool stopped = false;

 	if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER ||
-	      event == NETDEV_BONDING_DESLAVE || event == NETDEV_GOING_DOWN))
+	      event == NETDEV_RELEASE || event == NETDEV_JOIN))
 		goto done;

 	spin_lock_irqsave(&target_list_lock, flags);
-restart:
 	list_for_each_entry(nt, &target_list, list) {
 		netconsole_target_get(nt);
 		if (nt->np.dev == dev) {
@@ -633,6 +632,8 @@ restart:
 			case NETDEV_CHANGENAME:
 				strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ);
 				break;
+			case NETDEV_RELEASE:
+			case NETDEV_JOIN:
 			case NETDEV_UNREGISTER:
 				/*
 				 * rtnl_lock already held
@@ -647,11 +648,7 @@ restart:
 					dev_put(nt->np.dev);
 					nt->np.dev = NULL;
 					netconsole_target_put(nt);
-					goto restart;
 				}
-				/* Fall through */
-			case NETDEV_GOING_DOWN:
-			case NETDEV_BONDING_DESLAVE:
 				nt->enabled = 0;
 				stopped = true;
 				break;
@@ -660,10 +657,21 @@ restart:
 		netconsole_target_put(nt);
 	}
 	spin_unlock_irqrestore(&target_list_lock, flags);
-	if (stopped && (event == NETDEV_UNREGISTER || event == NETDEV_BONDING_DESLAVE))
+	if (stopped) {
 		printk(KERN_INFO "netconsole: network logging stopped on "
-		       "interface %s as it %s\n", dev->name,
-		       event == NETDEV_UNREGISTER ? "unregistered" : "released slaves");
+		       "interface %s as it ", dev->name);
+		switch (event) {
+		case NETDEV_UNREGISTER:
+			printk(KERN_CONT "unregistered\n");
+			break;
+		case NETDEV_RELEASE:
+			printk(KERN_CONT "released slaves\n");
+			break;
+		case NETDEV_JOIN:
+			printk(KERN_CONT "is joining a master device\n");
+			break;
+		}
+	}

 done:
 	return NOTIFY_DONE;
@@ -162,8 +162,8 @@ static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
 	rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);

 	if (netif_msg_tx_queued(rnet))
-		printk(KERN_INFO "%s: queued skb %8.8x len %8.8x\n", DRV_NAME,
-		       (u32) skb, skb->len);
+		printk(KERN_INFO "%s: queued skb len %8.8x\n", DRV_NAME,
+		       skb->len);

 	return 0;
 }
@@ -131,6 +131,10 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
 #define SKF_LL_OFF    (-0x200000)

 #ifdef __KERNEL__
+
+struct sk_buff;
+struct sock;
+
 struct sk_filter
 {
 	atomic_t		refcnt;
@@ -146,9 +150,6 @@ static inline unsigned int sk_filter_len(const struct sk_filter *fp)
 	return fp->len * sizeof(struct sock_filter) + sizeof(*fp);
 }

-struct sk_buff;
-struct sock;
-
 extern int sk_filter(struct sock *sk, struct sk_buff *skb);
 extern unsigned int sk_run_filter(const struct sk_buff *skb,
 				  const struct sock_filter *filter);
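The two filter.h hunks above move the struct sk_buff and struct sock forward declarations rather than delete them. The reasoning, per the commit summary: a struct tag that first appears inside a prototype's parameter list gets a scope limited to that prototype, which gcc flags with a "declared inside parameter list" warning. A minimal illustration (hypothetical standalone example, not kernel code):

	/* Declaring the tags at file scope first... */
	struct sk_buff;
	struct sock;

	/* ...means later prototypes all refer to the same incomplete types,
	 * and gcc no longer warns that the structs were declared inside a
	 * parameter list. */
	extern int sk_filter(struct sock *sk, struct sk_buff *skb);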
@@ -209,8 +209,9 @@ static inline int notifier_to_errno(int ret)
 #define NETDEV_POST_TYPE_CHANGE	0x000F
 #define NETDEV_POST_INIT	0x0010
 #define NETDEV_UNREGISTER_BATCH	0x0011
-#define NETDEV_BONDING_DESLAVE	0x0012
+#define NETDEV_RELEASE		0x0012
 #define NETDEV_NOTIFY_PEERS	0x0013
+#define NETDEV_JOIN		0x0014

 #define SYS_DOWN	0x0001	/* Notify of system down */
 #define SYS_RESTART	SYS_DOWN
@@ -1442,7 +1442,7 @@ extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
 {
-	if (unlikely(skb->data_len)) {
+	if (unlikely(skb_is_nonlinear(skb))) {
 		WARN_ON(1);
 		return;
 	}
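The __skb_trim hunk above is behavior-preserving: skb_is_nonlinear() is a readability wrapper around the same data_len test. Roughly, as defined in <linux/skbuff.h> (shown for reference, not part of the diff):

	static inline bool skb_is_nonlinear(const struct sk_buff *skb)
	{
		return skb->data_len;	/* non-zero when data lives in paged frags */
	}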
@@ -15,7 +15,6 @@ struct cfpktq;
 struct caif_payload_info;
 struct caif_packet_funcs;

-
 #define CAIF_LAYER_NAME_SZ 16

 /**
@@ -33,7 +32,6 @@ do {								\
 	}							\
 } while (0)

-
 /**
  * enum caif_ctrlcmd - CAIF Stack Control Signaling sent in layer.ctrlcmd().
  *
@@ -141,7 +139,7 @@ enum caif_direction {
  * - All layers must use this structure. If embedding it, then place this
  *   structure first in the layer specific structure.
  *
- * - Each layer should not depend on any others layer private data.
+ * - Each layer should not depend on any others layer's private data.
  *
  * - In order to send data upwards do
  *   layer->up->receive(layer->up, packet);
@@ -155,16 +153,23 @@ struct cflayer {
 	struct list_head node;

 	/*
-	 *  receive() - Receive Function.
+	 *  receive() - Receive Function (non-blocking).
 	 *  Contract: Each layer must implement a receive function passing the
 	 *  CAIF packets upwards in the stack.
 	 *	Packet handling rules:
-	 *	      - The CAIF packet (cfpkt) cannot be accessed after
-	 *		passing it to the next layer using up->receive().
+	 *	      - The CAIF packet (cfpkt) ownership is passed to the
+	 *		called receive function. This means that the the
+	 *		packet cannot be accessed after passing it to the
+	 *		above layer using up->receive().
+	 *
 	 *	      - If parsing of the packet fails, the packet must be
-	 *		destroyed and -1 returned from the function.
+	 *		destroyed and negative error code returned
+	 *		from the function.
+	 *		EXCEPTION: If the framing layer (cffrml) returns
+	 *			-EILSEQ, the packet is not freed.
+	 *
 	 *	      - If parsing succeeds (and above layers return OK) then
-	 *		the function must return a value > 0.
+	 *		the function must return a value >= 0.
 	 *
 	 *	Returns result < 0 indicates an error, 0 or positive value
 	 *	indicates success.
|
|||||||
int (*receive)(struct cflayer *layr, struct cfpkt *cfpkt);
|
int (*receive)(struct cflayer *layr, struct cfpkt *cfpkt);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* transmit() - Transmit Function.
|
* transmit() - Transmit Function (non-blocking).
|
||||||
* Contract: Each layer must implement a transmit function passing the
|
* Contract: Each layer must implement a transmit function passing the
|
||||||
* CAIF packet downwards in the stack.
|
* CAIF packet downwards in the stack.
|
||||||
* Packet handling rules:
|
* Packet handling rules:
|
||||||
@@ -185,15 +190,16 @@ struct cflayer {
 	 *		cannot be accessed after passing it to the below
 	 *		layer using dn->transmit().
 	 *
-	 *	      - If transmit fails, however, the ownership is returned
-	 *		to thecaller. The caller of "dn->transmit()" must
-	 *		destroy or resend packet.
+	 *	      - Upon error the packet ownership is still passed on,
+	 *		so the packet shall be freed where error is detected.
+	 *		Callers of the transmit function shall not free packets,
+	 *		but errors shall be returned.
 	 *
 	 *	      - Return value less than zero means error, zero or
 	 *		greater than zero means OK.
 	 *
-	 *	result < 0 indicates an error, 0 or positive value
-	 *	indicate success.
+	 *	Returns result < 0 indicates an error, 0 or positive value
+	 *	indicates success.
 	 *
 	 * @layr: Pointer to the current layer the receive function
 	 *	isimplemented for (this pointer).
|
|||||||
int (*transmit) (struct cflayer *layr, struct cfpkt *cfpkt);
|
int (*transmit) (struct cflayer *layr, struct cfpkt *cfpkt);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* cttrlcmd() - Control Function upwards in CAIF Stack.
|
* cttrlcmd() - Control Function upwards in CAIF Stack (non-blocking).
|
||||||
* Used for signaling responses (CAIF_CTRLCMD_*_RSP)
|
* Used for signaling responses (CAIF_CTRLCMD_*_RSP)
|
||||||
* and asynchronous events from the modem (CAIF_CTRLCMD_*_IND)
|
* and asynchronous events from the modem (CAIF_CTRLCMD_*_IND)
|
||||||
*
|
*
|
||||||
|
@@ -147,6 +147,7 @@ static void del_nbp(struct net_bridge_port *p)
 	dev->priv_flags &= ~IFF_BRIDGE_PORT;

 	netdev_rx_handler_unregister(dev);
+	synchronize_net();

 	netdev_set_master(dev, NULL);

@@ -338,6 +339,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 	if (IS_ERR(p))
 		return PTR_ERR(p);

+	call_netdevice_notifiers(NETDEV_JOIN, dev);
+
 	err = dev_set_promiscuity(dev, 1);
 	if (err)
 		goto put_back;
@@ -142,6 +142,7 @@ static int receive(struct sk_buff *skb, struct net_device *dev,
 {
 	struct cfpkt *pkt;
 	struct caif_device_entry *caifd;
+	int err;

 	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

@@ -159,7 +160,11 @@ static int receive(struct sk_buff *skb, struct net_device *dev,
 	caifd_hold(caifd);
 	rcu_read_unlock();

-	caifd->layer.up->receive(caifd->layer.up, pkt);
+	err = caifd->layer.up->receive(caifd->layer.up, pkt);
+
+	/* For -EILSEQ the packet is not freed so so it now */
+	if (err == -EILSEQ)
+		cfpkt_destroy(pkt);

 	/* Release reference to stack upwards */
 	caifd_put(caifd);
@@ -19,7 +19,7 @@
 #include <linux/uaccess.h>
 #include <linux/debugfs.h>
 #include <linux/caif/caif_socket.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
 #include <net/caif/caif_layer.h>
@@ -816,6 +816,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 		if (sk->sk_shutdown & SHUTDOWN_MASK) {
 			/* Allow re-connect after SHUTDOWN_IND */
 			caif_disconnect_client(sock_net(sk), &cf_sk->layer);
+			caif_free_client(&cf_sk->layer);
 			break;
 		}
 		/* No reconnect on a seqpacket socket */
@@ -926,7 +927,6 @@ static int caif_release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-	int res = 0;

 	if (!sk)
 		return 0;
@@ -953,10 +953,7 @@ static int caif_release(struct socket *sock)
 	sk->sk_state = CAIF_DISCONNECTED;
 	sk->sk_shutdown = SHUTDOWN_MASK;

-	if (cf_sk->sk.sk_socket->state == SS_CONNECTED ||
-		cf_sk->sk.sk_socket->state == SS_CONNECTING)
-		res = caif_disconnect_client(sock_net(sk), &cf_sk->layer);
+	caif_disconnect_client(sock_net(sk), &cf_sk->layer);

 	cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
 	wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);

@@ -964,7 +961,7 @@ static int caif_release(struct socket *sock)
 	sk_stream_kill_queues(&cf_sk->sk);
 	release_sock(sk);
 	sock_put(sk);
-	return res;
+	return 0;
 }

 /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
@@ -1120,7 +1117,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
 	set_rx_flow_on(cf_sk);

 	/* Set default options on configuration */
-	cf_sk->sk.sk_priority= CAIF_PRIO_NORMAL;
+	cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL;
 	cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
 	cf_sk->conn_req.protocol = protocol;
 	/* Increase the number of sockets created. */
@@ -182,39 +182,26 @@ static int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)

 int caif_disconnect_client(struct net *net, struct cflayer *adap_layer)
 {
-	u8 channel_id = 0;
-	int ret = 0;
-	struct cflayer *servl = NULL;
+	u8 channel_id;
 	struct cfcnfg *cfg = get_cfcnfg(net);

 	caif_assert(adap_layer != NULL);

-	channel_id = adap_layer->id;
-	if (adap_layer->dn == NULL || channel_id == 0) {
-		pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n");
-		ret = -ENOTCONN;
-		goto end;
-	}
-
-	servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
-	if (servl == NULL) {
-		pr_err("PROTOCOL ERROR - "
-		       "Error removing service_layer Channel_Id(%d)",
-		       channel_id);
-		ret = -EINVAL;
-		goto end;
-	}
-
-	ret = cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
-
-end:
 	cfctrl_cancel_req(cfg->ctrl, adap_layer);
+	channel_id = adap_layer->id;
+	if (channel_id != 0) {
+		struct cflayer *servl;
+		servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
+		if (servl != NULL)
+			layer_set_up(servl, NULL);
+	} else
+		pr_debug("nothing to disconnect\n");
+	cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);

 	/* Do RCU sync before initiating cleanup */
 	synchronize_rcu();
 	if (adap_layer->ctrlcmd != NULL)
 		adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
-	return ret;
+	return 0;

 }
 EXPORT_SYMBOL(caif_disconnect_client);
@@ -400,6 +387,14 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
 	struct cfcnfg_phyinfo *phyinfo;
 	struct net_device *netdev;

+	if (channel_id == 0) {
+		pr_warn("received channel_id zero\n");
+		if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
+			adapt_layer->ctrlcmd(adapt_layer,
+					     CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
+		return;
+	}
+
 	rcu_read_lock();

 	if (adapt_layer == NULL) {
@@ -523,7 +518,6 @@ got_phyid:
 	phyinfo->use_stx = stx;
 	phyinfo->use_fcs = fcs;

-	phy_layer->type = phy_type;
 	frml = cffrml_create(phyid, fcs);

 	if (!frml) {
@@ -178,20 +178,23 @@ static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
 void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
 {
 	struct cfctrl *cfctrl = container_obj(layer);
-	int ret;
 	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+	struct cflayer *dn = cfctrl->serv.layer.dn;
 	if (!pkt) {
 		pr_warn("Out of memory\n");
 		return;
 	}
+	if (!dn) {
+		pr_debug("not able to send enum request\n");
+		return;
+	}
 	caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
 	init_info(cfpkt_info(pkt), cfctrl);
 	cfpkt_info(pkt)->dev_info->id = physlinkid;
 	cfctrl->serv.dev_info.id = physlinkid;
 	cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
 	cfpkt_addbdy(pkt, physlinkid);
-	ret =
-	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+	dn->transmit(dn, pkt);
 }

 int cfctrl_linkup_request(struct cflayer *layer,
@@ -206,6 +209,12 @@ int cfctrl_linkup_request(struct cflayer *layer,
 	int ret;
 	char utility_name[16];
 	struct cfpkt *pkt;
+	struct cflayer *dn = cfctrl->serv.layer.dn;
+
+	if (!dn) {
+		pr_debug("not able to send linkup request\n");
+		return -ENODEV;
+	}

 	if (cfctrl_cancel_req(layer, user_layer) > 0) {
 		/* Slight Paranoia, check if already connecting */
@@ -282,7 +291,7 @@ int cfctrl_linkup_request(struct cflayer *layer,
 	 */
 	cfpkt_info(pkt)->dev_info->id = param->phyid;
 	ret =
-	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+	    dn->transmit(dn, pkt);
 	if (ret < 0) {
 		int count;

@@ -301,15 +310,23 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
 	int ret;
 	struct cfctrl *cfctrl = container_obj(layer);
 	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+	struct cflayer *dn = cfctrl->serv.layer.dn;
+
 	if (!pkt) {
 		pr_warn("Out of memory\n");
 		return -ENOMEM;
 	}

+	if (!dn) {
+		pr_debug("not able to send link-down request\n");
+		return -ENODEV;
+	}
+
 	cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
 	cfpkt_addbdy(pkt, channelid);
 	init_info(cfpkt_info(pkt), cfctrl);
 	ret =
-	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+	    dn->transmit(dn, pkt);
 #ifndef CAIF_NO_LOOP
 	cfctrl->loop_linkused[channelid] = 0;
 #endif
@@ -351,7 +368,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
 	cfpkt_extr_head(pkt, &cmdrsp, 1);
 	cmd = cmdrsp & CFCTRL_CMD_MASK;
 	if (cmd != CFCTRL_CMD_LINK_ERR
-	    && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) {
+	    && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)
+	    && CFCTRL_ERR_BIT != (CFCTRL_ERR_BIT & cmdrsp)) {
 		if (handle_loop(cfctrl, cmd, pkt) != 0)
 			cmdrsp |= CFCTRL_ERR_BIT;
 	}
@@ -477,7 +495,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
 			cfpkt_extr_head(pkt, &param, len);
 			break;
 		default:
-			pr_warn("Request setup - invalid link type (%d)\n",
+			pr_warn("Request setup, invalid type (%d)\n",
 				serv);
 			goto error;
 		}
@@ -489,7 +507,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)

 	if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
 	    cfpkt_erroneous(pkt)) {
-		pr_err("Invalid O/E bit or parse error on CAIF control channel\n");
+		pr_err("Invalid O/E bit or parse error "
+				"on CAIF control channel\n");
 		cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
 				       0,
 				       req ? req->client_layer
@@ -550,9 +569,8 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 	case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
 	case CAIF_CTRLCMD_FLOW_OFF_IND:
 		spin_lock_bh(&this->info_list_lock);
-		if (!list_empty(&this->list)) {
+		if (!list_empty(&this->list))
 			pr_debug("Received flow off in control layer\n");
-		}
 		spin_unlock_bh(&this->info_list_lock);
 		break;
 	case _CAIF_CTRLCMD_PHYIF_DOWN_IND: {
@@ -587,16 +605,16 @@ static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
 	case CFCTRL_CMD_LINK_SETUP:
 		spin_lock_bh(&ctrl->loop_linkid_lock);
 		if (!dec) {
-			for (linkid = last_linkid + 1; linkid < 255; linkid++)
+			for (linkid = last_linkid + 1; linkid < 254; linkid++)
 				if (!ctrl->loop_linkused[linkid])
 					goto found;
 		}
 		dec = 1;
-		for (linkid = last_linkid - 1; linkid > 0; linkid--)
+		for (linkid = last_linkid - 1; linkid > 1; linkid--)
 			if (!ctrl->loop_linkused[linkid])
 				goto found;
 		spin_unlock_bh(&ctrl->loop_linkid_lock);
+		return -1;
 found:
 		if (linkid < 10)
 			dec = 0;
@@ -62,16 +62,6 @@ struct cflayer *cfmuxl_create(void)
 	return &this->layer;
 }

-int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
-{
-	struct cfmuxl *muxl = container_obj(layr);
-
-	spin_lock_bh(&muxl->receive_lock);
-	list_add_rcu(&up->node, &muxl->srvl_list);
-	spin_unlock_bh(&muxl->receive_lock);
-	return 0;
-}
-
 int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
 {
 	struct cfmuxl *muxl = (struct cfmuxl *) layr;
@@ -93,6 +83,24 @@ static struct cflayer *get_from_id(struct list_head *list, u16 id)
 	return NULL;
 }

+int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
+{
+	struct cfmuxl *muxl = container_obj(layr);
+	struct cflayer *old;
+
+	spin_lock_bh(&muxl->receive_lock);
+
+	/* Two entries with same id is wrong, so remove old layer from mux */
+	old = get_from_id(&muxl->srvl_list, linkid);
+	if (old != NULL)
+		list_del_rcu(&old->node);
+
+	list_add_rcu(&up->node, &muxl->srvl_list);
+	spin_unlock_bh(&muxl->receive_lock);
+
+	return 0;
+}
+
 struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
 {
 	struct cfmuxl *muxl = container_obj(layr);
@@ -146,6 +154,11 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
 	struct cfmuxl *muxl = container_obj(layr);
 	int idx = id % UP_CACHE_SIZE;

+	if (id == 0) {
+		pr_warn("Trying to remove control layer\n");
+		return NULL;
+	}
+
 	spin_lock_bh(&muxl->receive_lock);
 	up = get_from_id(&muxl->srvl_list, id);
 	if (up == NULL)
@@ -235,12 +248,26 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 {
 	struct cfmuxl *muxl = container_obj(layr);
 	struct cflayer *layer;
+	int idx;

 	rcu_read_lock();
 	list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
-		if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd)
+
+		if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) {
+
+			if ((ctrl == _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND ||
+				ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
+					layer->id != 0) {
+
+				idx = layer->id % UP_CACHE_SIZE;
+				spin_lock_bh(&muxl->receive_lock);
+				rcu_assign_pointer(muxl->up_cache[idx], NULL);
+				list_del_rcu(&layer->node);
+				spin_unlock_bh(&muxl->receive_lock);
+			}
 			/* NOTE: ctrlcmd is not allowed to block */
 			layer->ctrlcmd(layer, ctrl, phyid);
+		}
 	}
 	rcu_read_unlock();
 }
@@ -4294,10 +4294,8 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)

 	slave->master = master;

-	if (old) {
-		synchronize_net();
+	if (old)
 		dev_put(old);
-	}
 	return 0;
 }
 EXPORT_SYMBOL(netdev_set_master);
@@ -3544,13 +3544,12 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
 		return -ENOMEM;

 	strcpy(pkt_dev->odevname, ifname);
-	pkt_dev->flows = vmalloc_node(MAX_CFLOWS * sizeof(struct flow_state),
+	pkt_dev->flows = vzalloc_node(MAX_CFLOWS * sizeof(struct flow_state),
 				      node);
 	if (pkt_dev->flows == NULL) {
 		kfree(pkt_dev);
 		return -ENOMEM;
 	}
-	memset(pkt_dev->flows, 0, MAX_CFLOWS * sizeof(struct flow_state));

 	pkt_dev->removal_mark = 0;
 	pkt_dev->min_pkt_size = ETH_ZLEN;
@@ -3708,6 +3707,7 @@ static int __init pg_init(void)
 {
 	int cpu;
 	struct proc_dir_entry *pe;
+	int ret = 0;

 	pr_info("%s", version);

@@ -3718,11 +3718,10 @@ static int __init pg_init(void)
 	pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops);
 	if (pe == NULL) {
 		pr_err("ERROR: cannot create %s procfs entry\n", PGCTRL);
-		proc_net_remove(&init_net, PG_PROC_DIR);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto remove_dir;
 	}

-	/* Register us to receive netdevice events */
 	register_netdevice_notifier(&pktgen_notifier_block);

 	for_each_online_cpu(cpu) {
@@ -3736,13 +3735,18 @@ static int __init pg_init(void)

 	if (list_empty(&pktgen_threads)) {
 		pr_err("ERROR: Initialization failed for all threads\n");
-		unregister_netdevice_notifier(&pktgen_notifier_block);
-		remove_proc_entry(PGCTRL, pg_proc_dir);
-		proc_net_remove(&init_net, PG_PROC_DIR);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto unregister;
 	}

 	return 0;

+unregister:
+	unregister_netdevice_notifier(&pktgen_notifier_block);
+	remove_proc_entry(PGCTRL, pg_proc_dir);
+remove_dir:
+	proc_net_remove(&init_net, PG_PROC_DIR);
+	return ret;
 }

 static void __exit pg_cleanup(void)
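The two pg_init() hunks above convert duplicated inline cleanup into the kernel's usual goto-based unwind, so each failure path releases exactly what was set up before it. The generic shape of the idiom (illustrative sketch with hypothetical helpers, not pktgen code):

	int init_example(void)
	{
		int ret;

		ret = setup_a();
		if (ret)
			return ret;		/* nothing to unwind yet */

		ret = setup_b();
		if (ret)
			goto undo_a;		/* unwind in reverse order */

		return 0;

	undo_a:
		teardown_a();
		return ret;
	}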
@@ -1956,6 +1956,8 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
 	case NETDEV_GOING_DOWN:
 	case NETDEV_UNREGISTER:
 	case NETDEV_UNREGISTER_BATCH:
+	case NETDEV_RELEASE:
+	case NETDEV_JOIN:
 		break;
 	default:
 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
@@ -1665,6 +1665,7 @@ static int ip_rt_bug(struct sk_buff *skb)
 		&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
 		skb->dev ? skb->dev->name : "?");
 	kfree_skb(skb);
+	WARN_ON(1);
 	return 0;
 }

@@ -815,9 +815,17 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 	return false;
 }

+/**
+ * dev_deactivate_many - deactivate transmissions on several devices
+ * @head: list of devices to deactivate
+ *
+ * This function returns only when all outstanding transmissions
+ * have completed, unless all devices are in dismantle phase.
+ */
 void dev_deactivate_many(struct list_head *head)
 {
 	struct net_device *dev;
+	bool sync_needed = false;

 	list_for_each_entry(dev, head, unreg_list) {
 		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
@@ -827,10 +835,15 @@ void dev_deactivate_many(struct list_head *head)
 					     &noop_qdisc);

 		dev_watchdog_down(dev);
+		sync_needed |= !dev->dismantle;
 	}

-	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
-	synchronize_rcu();
+	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
+	 * This is avoided if all devices are in dismantle phase :
+	 * Caller will call synchronize_net() for us
+	 */
+	if (sync_needed)
+		synchronize_net();

 	/* Wait for outstanding qdisc_run calls. */
 	list_for_each_entry(dev, head, unreg_list)