[NET] CORE: Fix whitespace errors.
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9d6f229fc4
commit 4ec93edb14

@@ -860,7 +860,7 @@ int dev_open(struct net_device *dev)
clear_bit(__LINK_STATE_START, &dev->state);
}

/*
* If it went open OK then:
*/

@@ -1157,7 +1157,7 @@ void netif_device_attach(struct net_device *dev)
if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
netif_running(dev)) {
netif_wake_queue(dev);
__netdev_watchdog_up(dev);
}
}
EXPORT_SYMBOL(netif_device_attach);
@@ -1449,8 +1449,8 @@ int dev_queue_xmit(struct sk_buff *skb)
(!(dev->features & NETIF_F_GEN_CSUM) &&
(!(dev->features & NETIF_F_IP_CSUM) ||
skb->protocol != htons(ETH_P_IP))))
if (skb_checksum_help(skb))
goto out_kfree_skb;

gso:
spin_lock_prefetch(&dev->queue_lock);
@@ -2061,7 +2061,7 @@ static int dev_ifconf(char __user *arg)
total += done;
}
}
}

/*
* All done. Write the updated control block back to the caller.
@@ -2154,7 +2154,7 @@ static struct netif_rx_stats *softnet_get_online(loff_t *pos)
struct netif_rx_stats *rc = NULL;

while (*pos < NR_CPUS)
if (cpu_online(*pos)) {
rc = &per_cpu(netdev_rx_stat, *pos);
break;
} else
@@ -2319,7 +2319,7 @@ void dev_set_promiscuity(struct net_device *dev, int inc)
dev_mc_upload(dev);
printk(KERN_INFO "device %s %s promiscuous mode\n",
dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
"left");
audit_log(current->audit_context, GFP_ATOMIC,
AUDIT_ANOM_PROMISCUOUS,
"dev=%s prom=%d old_prom=%d auid=%u",
@@ -2816,7 +2816,7 @@ int dev_ioctl(unsigned int cmd, void __user *arg)
rtnl_unlock();
if (IW_IS_GET(cmd) &&
copy_to_user(arg, &ifr,
sizeof(struct ifreq)))
ret = -EFAULT;
return ret;
}
@@ -2923,9 +2923,9 @@ int register_netdevice(struct net_device *dev)
= hlist_entry(p, struct net_device, name_hlist);
if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
ret = -EEXIST;
goto out;
}
}

/* Fix illegal SG+CSUM combinations. */
if ((dev->features & NETIF_F_SG) &&
@@ -53,7 +53,7 @@ static void *__load_pointer(struct sk_buff *skb, int k)
}

static inline void *load_pointer(struct sk_buff *skb, int k,
unsigned int size, void *buffer)
{
if (k >= 0)
return skb_header_pointer(skb, k, size, buffer);
@@ -209,7 +209,7 @@ int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
if (partial_cnt) {
copy -= partial_cnt;
if (copy_from_user(kdata + copy, base + copy,
partial_cnt))
goto out_fault;
}
}
@@ -224,7 +224,7 @@ int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
kdata += copy + partial_cnt;
iov++;
}
*csump = csum;
out:
return err;

@@ -687,9 +687,9 @@ next_elt:
np = &n->next;
}

/* Cycle through all hash buckets every base_reachable_time/2 ticks.
* ARP entry timeouts range from 1/2 base_reachable_time to 3/2
* base_reachable_time.
*/
expire = tbl->parms.base_reachable_time >> 1;
expire /= (tbl->hash_mask + 1);
@@ -1129,7 +1129,7 @@ int neigh_compat_output(struct sk_buff *skb)

if (dev->hard_header &&
dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
skb->len) < 0 &&
dev->rebuild_header(skb))
return 0;

@@ -2663,7 +2663,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
t->neigh_vars[14].procname = NULL;
t->neigh_vars[15].procname = NULL;
} else {
dev_name_source = t->neigh_dev[0].procname;
t->neigh_vars[12].data = (int *)(p + 1);
t->neigh_vars[13].data = (int *)(p + 1) + 1;
t->neigh_vars[14].data = (int *)(p + 1) + 2;
@@ -2698,7 +2698,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
goto free;
}

t->neigh_dev[0].procname = dev_name;

t->neigh_neigh_dir[0].ctl_name = pdev_id;

@@ -237,13 +237,13 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
int status = NETDEV_TX_BUSY;
unsigned long tries;
struct net_device *dev = np->dev;
struct netpoll_info *npinfo = np->dev->npinfo;

if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
__kfree_skb(skb);
return;
}

/* don't get messages out of order, and no recursion */
if (skb_queue_len(&npinfo->txq) == 0 &&
@@ -676,7 +676,7 @@ int netpoll_setup(struct netpoll *np)
}

atleast = jiffies + HZ/10;
atmost = jiffies + 4*HZ;
while (!netif_carrier_ok(ndev)) {
if (time_after(jiffies, atmost)) {
printk(KERN_NOTICE
@@ -772,9 +772,9 @@ void netpoll_cleanup(struct netpoll *np)
np->dev->npinfo = NULL;
if (atomic_dec_and_test(&npinfo->refcnt)) {
skb_queue_purge(&npinfo->arp_tx);
skb_queue_purge(&npinfo->txq);
cancel_rearming_delayed_work(&npinfo->tx_work);
flush_scheduled_work();

kfree(npinfo);
}
@@ -874,7 +874,7 @@ void __init rtnetlink_init(void)
panic("rtnetlink_init: cannot allocate rta_buf\n");

rtnl = netlink_kernel_create(NETLINK_ROUTE, RTNLGRP_MAX, rtnetlink_rcv,
THIS_MODULE);
if (rtnl == NULL)
panic("rtnetlink_init: cannot initialize rtnetlink\n");
netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
@@ -88,7 +88,7 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
"data:%p tail:%p end:%p dev:%s\n",
here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
skb->dev ? skb->dev->name : "<NULL>");
BUG();
@@ -106,7 +106,7 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here)
void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
"data:%p tail:%p end:%p dev:%s\n",
here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
skb->dev ? skb->dev->name : "<NULL>");
BUG();
@@ -271,7 +271,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
struct sk_buff *skb;

skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
if (likely(skb)) {
skb_reserve(skb, NET_SKB_PAD);
skb->dev = dev;
@@ -361,18 +361,18 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
}
#endif

if(optlen<sizeof(int))
return(-EINVAL);

if (get_user(val, (int __user *)optval))
return -EFAULT;

valbool = val?1:0;

lock_sock(sk);

switch(optname)
{
case SO_DEBUG:
if(val && !capable(CAP_NET_ADMIN))
{
@@ -389,7 +389,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
case SO_TYPE:
case SO_ERROR:
ret = -ENOPROTOOPT;
break;
case SO_DONTROUTE:
if (valbool)
sock_set_flag(sk, SOCK_LOCALROUTE);
@@ -474,11 +474,11 @@ set_rcvbuf:
sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
break;

case SO_OOBINLINE:
sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
break;

case SO_NO_CHECK:
sk->sk_no_check = valbool;
break;

@@ -608,7 +608,7 @@ set_rcvbuf:
case SO_DETACH_FILTER:
rcu_read_lock_bh();
filter = rcu_dereference(sk->sk_filter);
if (filter) {
rcu_assign_pointer(sk->sk_filter, NULL);
sk_filter_release(sk, filter);
rcu_read_unlock_bh();
@@ -628,9 +628,9 @@ set_rcvbuf:
/* We implement the SO_SNDLOWAT etc to
not be settable (1003.1g 5.3) */
default:
ret = -ENOPROTOOPT;
break;
}
release_sock(sk);
return ret;
}
@@ -643,21 +643,21 @@ int sock_getsockopt(struct socket *sock, int level, int optname,

union
{
int val;
struct linger ling;
struct timeval tm;
} v;

unsigned int lv = sizeof(int);
int len;

if(get_user(len,optlen))
return -EFAULT;
if(len < 0)
return -EINVAL;

switch(optname)
{
case SO_DEBUG:
v.val = sock_flag(sk, SOCK_DBG);
break;
@@ -711,7 +711,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
case SO_LINGER:
lv = sizeof(v.ling);
v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
v.ling.l_linger = sk->sk_lingertime / HZ;
break;

case SO_BSDCOMPAT:
@@ -798,9 +798,9 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
if (copy_to_user(optval, &v, len))
return -EFAULT;
lenout:
if (put_user(len, optlen))
return -EFAULT;
return 0;
}

/*
@@ -1074,7 +1074,7 @@ void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
void *mem;
/* First do the add, to avoid the race if kmalloc
* might sleep.
*/
atomic_add(size, &sk->sk_omem_alloc);
mem = kmalloc(size, priority);
@@ -58,7 +58,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
if (copy > len)
copy = len;
cookie = dma_memcpy_to_iovec(chan, to, pinned_list,
skb->data + offset, copy);
if (cookie < 0)
goto fault;
len -= copy;
@@ -108,8 +108,8 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
if (copy > len)
copy = len;
cookie = dma_skb_copy_datagram_iovec(chan, list,
offset - start, to, copy,
pinned_list);
if (cookie < 0)
goto fault;
len -= copy;
@@ -128,5 +128,5 @@ end:
}

fault:
return -EFAULT;
}
@@ -2015,7 +2015,7 @@ void wireless_send_event(struct net_device * dev,
* The best the driver could do is to log an error message.
* We will do it ourselves instead...
*/
printk(KERN_ERR "%s (WE) : Invalid/Unknown Wireless Event (0x%04X)\n",
dev->name, cmd);
return;
}
@@ -2029,11 +2029,11 @@ void wireless_send_event(struct net_device * dev,
if(descr->header_type == IW_HEADER_TYPE_POINT) {
/* Check if number of token fits within bounds */
if(wrqu->data.length > descr->max_tokens) {
printk(KERN_ERR "%s (WE) : Wireless Event too big (%d)\n", dev->name, wrqu->data.length);
return;
}
if(wrqu->data.length < descr->min_tokens) {
printk(KERN_ERR "%s (WE) : Wireless Event too small (%d)\n", dev->name, wrqu->data.length);
return;
}
/* Calculate extra_len - extra is NULL for restricted events */