Merge branch 'ipv4-null-cmp'

Ian Morris says:

====================
ipv4: coding style - comparisons with NULL

Per the suggestion of Joe Perches, attached is a patch which aligns the
coding style in ipv4 for comparisons with NULL. The code uses multiple
different styles when comparing with NULL (i.e. "x == NULL" and "!x", as
well as "x != NULL" and "x"). The latter form is generally preferred in
netdev, so this change aligns the code to that style.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

commit 0bd6682722
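
The change is mechanical: a pointer used in boolean context is tested
directly instead of being compared against NULL. A minimal sketch of the
two styles (the surrounding code here is illustrative, not taken from
the patch):

	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (skb == NULL)	/* old style: explicit NULL comparison */
		return -ENOMEM;

	if (!skb)		/* preferred netdev style, same semantics */
		return -ENOMEM;

A few hunks below also grow by one line because the old code folded an
assignment into the NULL test (e.g. "if ((skb = skb_share_check(skb,
GFP_ATOMIC)) == NULL)"); the patch splits the assignment and the test
into separate statements.
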
@@ -217,7 +217,7 @@ int inet_listen(struct socket *sock, int backlog)
	 * shutdown() (rather than close()).
	 */
	if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
-	    inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) {
+	    !inet_csk(sk)->icsk_accept_queue.fastopenq) {
		if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
			err = fastopen_init_queue(sk, backlog);
		else if ((sysctl_tcp_fastopen &
@@ -314,11 +314,11 @@ lookup_protocol:
	answer_flags = answer->flags;
	rcu_read_unlock();

-	WARN_ON(answer_prot->slab == NULL);
+	WARN_ON(!answer_prot->slab);

	err = -ENOBUFS;
	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
-	if (sk == NULL)
+	if (!sk)
		goto out;

	err = 0;
@@ -1269,7 +1269,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
		if (udpfrag) {
			iph->id = htons(id);
			iph->frag_off = htons(offset >> 3);
-			if (skb->next != NULL)
+			if (skb->next)
				iph->frag_off |= htons(IP_MF);
			offset += skb->len - nhoff - ihl;
		} else {

@@ -228,7 +228,7 @@ static int arp_constructor(struct neighbour *neigh)

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
-	if (in_dev == NULL) {
+	if (!in_dev) {
		rcu_read_unlock();
		return -EINVAL;
	}
@@ -475,7 +475,7 @@ static inline int arp_fwd_pvlan(struct in_device *in_dev,
 */

/*
- *	Create an arp packet. If (dest_hw == NULL), we create a broadcast
+ *	Create an arp packet. If dest_hw is not set, we create a broadcast
 *	message.
 */
struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
@@ -495,7 +495,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
	 */

	skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC);
-	if (skb == NULL)
+	if (!skb)
		return NULL;

	skb_reserve(skb, hlen);
@@ -503,9 +503,9 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
	arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev));
	skb->dev = dev;
	skb->protocol = htons(ETH_P_ARP);
-	if (src_hw == NULL)
+	if (!src_hw)
		src_hw = dev->dev_addr;
-	if (dest_hw == NULL)
+	if (!dest_hw)
		dest_hw = dev->broadcast;

	/*
@@ -569,7 +569,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
		break;
#endif
	default:
-		if (target_hw != NULL)
+		if (target_hw)
			memcpy(arp_ptr, target_hw, dev->addr_len);
		else
			memset(arp_ptr, 0, dev->addr_len);
@@ -614,7 +614,7 @@ void arp_send(int type, int ptype, __be32 dest_ip,

	skb = arp_create(type, ptype, dest_ip, dev, src_ip,
			 dest_hw, src_hw, target_hw);
-	if (skb == NULL)
+	if (!skb)
		return;

	arp_xmit(skb);
@@ -644,7 +644,7 @@ static int arp_process(struct sk_buff *skb)
	 * is ARP'able.
	 */

-	if (in_dev == NULL)
+	if (!in_dev)
		goto out;

	arp = arp_hdr(skb);
@@ -808,7 +808,7 @@ static int arp_process(struct sk_buff *skb)
	is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip &&
		  inet_addr_type(net, sip) == RTN_UNICAST;

-	if (n == NULL &&
+	if (!n &&
	    ((arp->ar_op == htons(ARPOP_REPLY) &&
	      inet_addr_type(net, sip) == RTN_UNICAST) || is_garp))
		n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
@@ -900,7 +900,7 @@ out_of_mem:

static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
{
-	if (dev == NULL) {
+	if (!dev) {
		IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
		return 0;
	}
@@ -926,7 +926,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
			return -ENODEV;
	}
	if (mask) {
-		if (pneigh_lookup(&arp_tbl, net, &ip, dev, 1) == NULL)
+		if (!pneigh_lookup(&arp_tbl, net, &ip, dev, 1))
			return -ENOBUFS;
		return 0;
	}
@@ -947,7 +947,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
	ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
	if (r->arp_flags & ATF_PERM)
		r->arp_flags |= ATF_COM;
-	if (dev == NULL) {
+	if (!dev) {
		struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);

		if (IS_ERR(rt))
@@ -1067,7 +1067,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
		return arp_req_delete_public(net, r, dev);

	ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
-	if (dev == NULL) {
+	if (!dev) {
		struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
		if (IS_ERR(rt))
			return PTR_ERR(rt);
@@ -1116,7 +1116,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
	if (r.arp_dev[0]) {
		err = -ENODEV;
		dev = __dev_get_by_name(net, r.arp_dev);
-		if (dev == NULL)
+		if (!dev)
			goto out;

		/* Mmmm... It is wrong... ARPHRD_NETROM==0 */

@@ -255,7 +255,7 @@ static int __init cipso_v4_cache_init(void)
	cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS,
				 sizeof(struct cipso_v4_map_cache_bkt),
				 GFP_KERNEL);
-	if (cipso_v4_cache == NULL)
+	if (!cipso_v4_cache)
		return -ENOMEM;

	for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
@@ -339,7 +339,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
			secattr->cache = entry->lsm_data;
			secattr->flags |= NETLBL_SECATTR_CACHE;
			secattr->type = NETLBL_NLTYPE_CIPSOV4;
-			if (prev_entry == NULL) {
+			if (!prev_entry) {
				spin_unlock_bh(&cipso_v4_cache[bkt].lock);
				return 0;
			}
@@ -393,10 +393,10 @@ int cipso_v4_cache_add(const unsigned char *cipso_ptr,
	cipso_ptr_len = cipso_ptr[1];

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
-	if (entry == NULL)
+	if (!entry)
		return -ENOMEM;
	entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC);
-	if (entry->key == NULL) {
+	if (!entry->key) {
		ret_val = -ENOMEM;
		goto cache_add_failure;
	}
@@ -502,7 +502,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
	atomic_set(&doi_def->refcount, 1);

	spin_lock(&cipso_v4_doi_list_lock);
-	if (cipso_v4_doi_search(doi_def->doi) != NULL) {
+	if (cipso_v4_doi_search(doi_def->doi)) {
		spin_unlock(&cipso_v4_doi_list_lock);
		ret_val = -EEXIST;
		goto doi_add_return;
@@ -513,7 +513,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,

doi_add_return:
	audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
-	if (audit_buf != NULL) {
+	if (audit_buf) {
		const char *type_str;
		switch (doi_type) {
		case CIPSO_V4_MAP_TRANS:
@@ -547,7 +547,7 @@ doi_add_return:
 */
void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
{
-	if (doi_def == NULL)
+	if (!doi_def)
		return;

	switch (doi_def->type) {
@@ -598,7 +598,7 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)

	spin_lock(&cipso_v4_doi_list_lock);
	doi_def = cipso_v4_doi_search(doi);
-	if (doi_def == NULL) {
+	if (!doi_def) {
		spin_unlock(&cipso_v4_doi_list_lock);
		ret_val = -ENOENT;
		goto doi_remove_return;
@@ -617,7 +617,7 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)

doi_remove_return:
	audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info);
-	if (audit_buf != NULL) {
+	if (audit_buf) {
		audit_log_format(audit_buf,
				 " cipso_doi=%u res=%u",
				 doi, ret_val == 0 ? 1 : 0);
@@ -644,7 +644,7 @@ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi)

	rcu_read_lock();
	doi_def = cipso_v4_doi_search(doi);
-	if (doi_def == NULL)
+	if (!doi_def)
		goto doi_getdef_return;
	if (!atomic_inc_not_zero(&doi_def->refcount))
		doi_def = NULL;
@@ -664,7 +664,7 @@ doi_getdef_return:
 */
void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def)
{
-	if (doi_def == NULL)
+	if (!doi_def)
		return;

	if (!atomic_dec_and_test(&doi_def->refcount))
@@ -1642,7 +1642,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)

	rcu_read_lock();
	doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
-	if (doi_def == NULL) {
+	if (!doi_def) {
		err_offset = 2;
		goto validate_return_locked;
	}
@@ -1736,7 +1736,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
			 * not the loopback device drop the packet. Further,
			 * there is no legitimate reason for setting this from
			 * userspace so reject it if skb is NULL. */
-			if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
+			if (!skb || !(skb->dev->flags & IFF_LOOPBACK)) {
				err_offset = opt_iter;
				goto validate_return_locked;
			}
@@ -1897,7 +1897,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
	 * defined yet but it is not a problem as the only users of these
	 * "lite" PF_INET sockets are functions which do an accept() call
	 * afterwards so we will label the socket as part of the accept(). */
-	if (sk == NULL)
+	if (!sk)
		return 0;

	/* We allocate the maximum CIPSO option size here so we are probably
@@ -1905,7 +1905,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
	 * on and after all we are only talking about 40 bytes. */
	buf_len = CIPSO_V4_OPT_LEN_MAX;
	buf = kmalloc(buf_len, GFP_ATOMIC);
-	if (buf == NULL) {
+	if (!buf) {
		ret_val = -ENOMEM;
		goto socket_setattr_failure;
	}
@@ -1921,7 +1921,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
	 * set the IPOPT_CIPSO option. */
	opt_len = (buf_len + 3) & ~3;
	opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
-	if (opt == NULL) {
+	if (!opt) {
		ret_val = -ENOMEM;
		goto socket_setattr_failure;
	}
@@ -1981,7 +1981,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
	 * on and after all we are only talking about 40 bytes. */
	buf_len = CIPSO_V4_OPT_LEN_MAX;
	buf = kmalloc(buf_len, GFP_ATOMIC);
-	if (buf == NULL) {
+	if (!buf) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}
@@ -1997,7 +1997,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
	 * set the IPOPT_CIPSO option. */
	opt_len = (buf_len + 3) & ~3;
	opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
-	if (opt == NULL) {
+	if (!opt) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}
@@ -2102,7 +2102,7 @@ void cipso_v4_sock_delattr(struct sock *sk)

	sk_inet = inet_sk(sk);
	opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
-	if (opt == NULL || opt->opt.cipso == 0)
+	if (!opt || opt->opt.cipso == 0)
		return;

	hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
@@ -2128,7 +2128,7 @@ void cipso_v4_req_delattr(struct request_sock *req)

	req_inet = inet_rsk(req);
	opt = req_inet->opt;
-	if (opt == NULL || opt->opt.cipso == 0)
+	if (!opt || opt->opt.cipso == 0)
		return;

	cipso_v4_delopt(&req_inet->opt);
@@ -2157,7 +2157,7 @@ int cipso_v4_getattr(const unsigned char *cipso,
	doi = get_unaligned_be32(&cipso[2]);
	rcu_read_lock();
	doi_def = cipso_v4_doi_search(doi);
-	if (doi_def == NULL)
+	if (!doi_def)
		goto getattr_return;
	/* XXX - This code assumes only one tag per CIPSO option which isn't
	 * really a good assumption to make but since we only support the MAC

@@ -585,7 +585,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)

	ifm = nlmsg_data(nlh);
	in_dev = inetdev_by_index(net, ifm->ifa_index);
-	if (in_dev == NULL) {
+	if (!in_dev) {
		err = -ENODEV;
		goto errout;
	}
@@ -755,21 +755,21 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,

	ifm = nlmsg_data(nlh);
	err = -EINVAL;
-	if (ifm->ifa_prefixlen > 32 || tb[IFA_LOCAL] == NULL)
+	if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
		goto errout;

	dev = __dev_get_by_index(net, ifm->ifa_index);
	err = -ENODEV;
-	if (dev == NULL)
+	if (!dev)
		goto errout;

	in_dev = __in_dev_get_rtnl(dev);
	err = -ENOBUFS;
-	if (in_dev == NULL)
+	if (!in_dev)
		goto errout;

	ifa = inet_alloc_ifa();
-	if (ifa == NULL)
+	if (!ifa)
		/*
		 * A potential indev allocation can be left alive, it stays
		 * assigned to its device and is destroy with it.
@@ -780,7 +780,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
	neigh_parms_data_state_setall(in_dev->arp_parms);
	in_dev_hold(in_dev);

-	if (tb[IFA_ADDRESS] == NULL)
+	if (!tb[IFA_ADDRESS])
		tb[IFA_ADDRESS] = tb[IFA_LOCAL];

	INIT_HLIST_NODE(&ifa->hash);
@@ -1290,7 +1290,7 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
	__be32 addr = 0;
	struct net_device *dev;

-	if (in_dev != NULL)
+	if (in_dev)
		return confirm_addr_indev(in_dev, dst, local, scope);

	rcu_read_lock();
@@ -1340,7 +1340,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
		if (named++ == 0)
			goto skip;
		dot = strchr(old, ':');
-		if (dot == NULL) {
+		if (!dot) {
			sprintf(old, ":%d", named);
			dot = old;
		}
@@ -1509,7 +1509,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
	u32 preferred, valid;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
-	if (nlh == NULL)
+	if (!nlh)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
@@ -1628,7 +1628,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,

	net = dev_net(ifa->ifa_dev->dev);
	skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
-	if (skb == NULL)
+	if (!skb)
		goto errout;

	err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
@@ -1665,7 +1665,7 @@ static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
		return -ENODATA;

	nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
-	if (nla == NULL)
+	if (!nla)
		return -EMSGSIZE;

	for (i = 0; i < IPV4_DEVCONF_MAX; i++)
@@ -1754,7 +1754,7 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
			flags);
-	if (nlh == NULL)
+	if (!nlh)
		return -EMSGSIZE;

	ncm = nlmsg_data(nlh);
@@ -1796,7 +1796,7 @@ void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
	int err = -ENOBUFS;

	skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC);
-	if (skb == NULL)
+	if (!skb)
		goto errout;

	err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
@@ -1853,10 +1853,10 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
		break;
	default:
		dev = __dev_get_by_index(net, ifindex);
-		if (dev == NULL)
+		if (!dev)
			goto errout;
		in_dev = __in_dev_get_rtnl(dev);
-		if (in_dev == NULL)
+		if (!in_dev)
			goto errout;
		devconf = &in_dev->cnf;
		break;
@@ -1864,7 +1864,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,

	err = -ENOBUFS;
	skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC);
-	if (skb == NULL)
+	if (!skb)
		goto errout;

	err = inet_netconf_fill_devconf(skb, ifindex, devconf,
@@ -2215,7 +2215,7 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
{
	struct devinet_sysctl_table *t = cnf->sysctl;

-	if (t == NULL)
+	if (!t)
		return;

	cnf->sysctl = NULL;
@@ -2276,16 +2276,16 @@ static __net_init int devinet_init_net(struct net *net)

	if (!net_eq(net, &init_net)) {
		all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
-		if (all == NULL)
+		if (!all)
			goto err_alloc_all;

		dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
-		if (dflt == NULL)
+		if (!dflt)
			goto err_alloc_dflt;

#ifdef CONFIG_SYSCTL
		tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
-		if (tbl == NULL)
+		if (!tbl)
			goto err_alloc_ctl;

		tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
@@ -2305,7 +2305,7 @@ static __net_init int devinet_init_net(struct net *net)

	err = -ENOMEM;
	forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
-	if (forw_hdr == NULL)
+	if (!forw_hdr)
		goto err_reg_ctl;
	net->ipv4.forw_hdr = forw_hdr;
#endif

@@ -553,7 +553,7 @@ static int esp_init_authenc(struct xfrm_state *x)
	int err;

	err = -EINVAL;
-	if (x->ealg == NULL)
+	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

@@ -53,11 +53,11 @@ static int __net_init fib4_rules_init(struct net *net)
	struct fib_table *local_table, *main_table;

	main_table = fib_trie_table(RT_TABLE_MAIN, NULL);
-	if (main_table == NULL)
+	if (!main_table)
		return -ENOMEM;

	local_table = fib_trie_table(RT_TABLE_LOCAL, main_table);
-	if (local_table == NULL)
+	if (!local_table)
		goto fail;

	hlist_add_head_rcu(&local_table->tb_hlist,
@@ -486,7 +486,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
			for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
				if (strcmp(ifa->ifa_label, devname) == 0)
					break;
-			if (ifa == NULL)
+			if (!ifa)
				return -ENODEV;
			cfg->fc_prefsrc = ifa->ifa_local;
		}
@@ -514,7 +514,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
		int len = 0;

		mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
-		if (mx == NULL)
+		if (!mx)
			return -ENOMEM;

		if (rt->rt_flags & RTF_MTU)
@@ -676,7 +676,7 @@ static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
		goto errout;

	tb = fib_get_table(net, cfg.fc_table);
-	if (tb == NULL) {
+	if (!tb) {
		err = -ESRCH;
		goto errout;
	}
@@ -698,7 +698,7 @@ static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
		goto errout;

	tb = fib_new_table(net, cfg.fc_table);
-	if (tb == NULL) {
+	if (!tb) {
		err = -ENOBUFS;
		goto errout;
	}
@@ -779,7 +779,7 @@ static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifad
	else
		tb = fib_new_table(net, RT_TABLE_LOCAL);

-	if (tb == NULL)
+	if (!tb)
		return;

	cfg.fc_table = tb->tb_id;
@@ -806,7 +806,7 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)

	if (ifa->ifa_flags & IFA_F_SECONDARY) {
		prim = inet_ifa_byprefix(in_dev, prefix, mask);
-		if (prim == NULL) {
+		if (!prim) {
			pr_warn("%s: bug: prim == NULL\n", __func__);
			return;
		}
@@ -860,7 +860,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)

	if (ifa->ifa_flags & IFA_F_SECONDARY) {
		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
-		if (prim == NULL) {
+		if (!prim) {
			pr_warn("%s: bug: prim == NULL\n", __func__);
			return;
		}
@@ -1030,7 +1030,7 @@ static void nl_fib_input(struct sk_buff *skb)
		return;

	skb = netlink_skb_clone(skb, GFP_KERNEL);
-	if (skb == NULL)
+	if (!skb)
		return;
	nlh = nlmsg_hdr(skb);

@@ -1051,7 +1051,7 @@ static int __net_init nl_fib_lookup_init(struct net *net)
	};

	sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg);
-	if (sk == NULL)
+	if (!sk)
		return -EAFNOSUPPORT;
	net->ipv4.fibnl = sk;
	return 0;
@@ -1089,7 +1089,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
	case NETDEV_DOWN:
		fib_del_ifaddr(ifa, NULL);
		atomic_inc(&net->ipv4.dev_addr_genid);
-		if (ifa->ifa_dev->ifa_list == NULL) {
+		if (!ifa->ifa_dev->ifa_list) {
			/* Last address was deleted from this interface.
			 * Disable IP.
			 */
@@ -1157,7 +1157,7 @@ static int __net_init ip_fib_net_init(struct net *net)
	size = max_t(size_t, size, L1_CACHE_BYTES);

	net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL);
-	if (net->ipv4.fib_table_hash == NULL)
+	if (!net->ipv4.fib_table_hash)
		return -ENOMEM;

	err = fib4_rules_init(net);

@@ -153,7 +153,7 @@ static struct fib_table *fib_empty_table(struct net *net)
	u32 id;

	for (id = 1; id <= RT_TABLE_MAX; id++)
-		if (fib_get_table(net, id) == NULL)
+		if (!fib_get_table(net, id))
			return fib_new_table(net, id);
	return NULL;
}
@@ -184,7 +184,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
		struct fib_table *table;

		table = fib_empty_table(net);
-		if (table == NULL) {
+		if (!table) {
			err = -ENOBUFS;
			goto errout;
		}

@@ -390,7 +390,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
-	if (skb == NULL)
+	if (!skb)
		goto errout;

	err = fib_dump_info(skb, info->portid, seq, event, tb_id,
@@ -503,7 +503,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
-	if (cfg->fc_mp == NULL)
+	if (!cfg->fc_mp)
		return 0;

	rtnh = cfg->fc_mp;
@@ -646,7 +646,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
		rcu_read_lock();
		err = -ENODEV;
		in_dev = inetdev_by_index(net, nh->nh_oif);
-		if (in_dev == NULL)
+		if (!in_dev)
			goto out;
		err = -ENETDOWN;
		if (!(in_dev->dev->flags & IFF_UP))
@@ -803,7 +803,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
	}

	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
-	if (fi == NULL)
+	if (!fi)
		goto failure;
	fib_info_cnt++;
	if (cfg->fc_mx) {
@@ -921,7 +921,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
			nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
		err = -ENODEV;
-		if (nh->nh_dev == NULL)
+		if (!nh->nh_dev)
			goto failure;
	} else {
		change_nexthops(fi) {
@@ -995,7 +995,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
-	if (nlh == NULL)
+	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
@@ -1045,12 +1045,12 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
-		if (mp == NULL)
+		if (!mp)
			goto nla_put_failure;

		for_nexthops(fi) {
			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
-			if (rtnh == NULL)
+			if (!rtnh)
				goto nla_put_failure;

			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
@@ -1093,7 +1093,7 @@ int fib_sync_down_addr(struct net *net, __be32 local)
	struct hlist_head *head = &fib_info_laddrhash[hash];
	struct fib_info *fi;

-	if (fib_info_laddrhash == NULL || local == 0)
+	if (!fib_info_laddrhash || local == 0)
		return 0;

	hlist_for_each_entry(fi, head, fib_lhash) {
@@ -1182,7 +1182,7 @@ void fib_select_default(struct fib_result *res)

		fib_alias_accessed(fa);

-		if (fi == NULL) {
+		if (!fi) {
			if (next_fi != res->fi)
				break;
		} else if (!fib_detect_death(fi, order, &last_resort,
@@ -1195,7 +1195,7 @@ void fib_select_default(struct fib_result *res)
		order++;
	}

-	if (order <= 0 || fi == NULL) {
+	if (order <= 0 || !fi) {
		tb->tb_default = -1;
		goto out;
	}
@@ -1251,7 +1251,7 @@ int fib_sync_up(struct net_device *dev)
				alive++;
				continue;
			}
-			if (nexthop_nh->nh_dev == NULL ||
+			if (!nexthop_nh->nh_dev ||
			    !(nexthop_nh->nh_dev->flags & IFF_UP))
				continue;
			if (nexthop_nh->nh_dev != dev ||

@@ -391,9 +391,9 @@ static void put_child(struct key_vector *tn, unsigned long i,
	BUG_ON(i >= child_length(tn));

	/* update emptyChildren, overflow into fullChildren */
-	if (n == NULL && chi != NULL)
+	if (!n && chi)
		empty_child_inc(tn);
-	if (n != NULL && chi == NULL)
+	if (n && !chi)
		empty_child_dec(tn);

	/* update fullChildren */
@@ -528,7 +528,7 @@ static struct key_vector *inflate(struct trie *t,
		unsigned long j, k;

		/* An empty child */
-		if (inode == NULL)
+		if (!inode)
			continue;

		/* A leaf or an internal node with skipped bits */
@@ -1154,7 +1154,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
			}
			err = -ENOBUFS;
			new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
-			if (new_fa == NULL)
+			if (!new_fa)
				goto out;

			fi_drop = fa->fa_info;
@@ -1204,7 +1204,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)

	err = -ENOBUFS;
	new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
-	if (new_fa == NULL)
+	if (!new_fa)
		goto out;

	new_fa->fa_info = fi;
@@ -1975,7 +1975,7 @@ struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
		sz += sizeof(struct trie);

	tb = kzalloc(sz, GFP_KERNEL);
-	if (tb == NULL)
+	if (!tb)
		return NULL;

	tb->tb_id = id;

@@ -196,7 +196,7 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
-	if (ptype == NULL) {
+	if (!ptype) {
		flush = 1;
		goto out_unlock;
	}
@@ -230,7 +230,7 @@ static int geneve_gro_complete(struct sk_buff *skb, int nhoff,

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
-	if (ptype != NULL)
+	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);

	rcu_read_unlock();

@@ -149,7 +149,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
-	if (ptype == NULL)
+	if (!ptype)
		goto out_unlock;

	grehlen = GRE_HEADER_SECTION;
@@ -243,7 +243,7 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
-	if (ptype != NULL)
+	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);

	rcu_read_unlock();

@@ -399,7 +399,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
		return;

	sk = icmp_xmit_lock(net);
-	if (sk == NULL)
+	if (!sk)
		return;
	inet = inet_sk(sk);

@@ -609,7 +609,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
					  skb_in->data,
					  sizeof(_inner_type),
					  &_inner_type);
-		if (itp == NULL)
+		if (!itp)
			goto out;

		/*
@@ -627,7 +627,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
		return;

	sk = icmp_xmit_lock(net);
-	if (sk == NULL)
+	if (!sk)
		goto out_free;

	/*

@@ -692,7 +692,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
	hlen = LL_RESERVED_SPACE(dev);
	tlen = dev->needed_tailroom;
	skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
-	if (skb == NULL) {
+	if (!skb) {
		ip_rt_put(rt);
		return -1;
	}
@@ -981,7 +981,7 @@ int igmp_rcv(struct sk_buff *skb)
	int len = skb->len;
	bool dropped = true;

-	if (in_dev == NULL)
+	if (!in_dev)
		goto drop;

	if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
@@ -1888,7 +1888,7 @@ int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
	if (count >= sysctl_igmp_max_memberships)
		goto done;
	iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
-	if (iml == NULL)
+	if (!iml)
		goto done;

	memcpy(&iml->multi, imr, sizeof(*imr));
@@ -1909,7 +1909,7 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
	struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
	int err;

-	if (psf == NULL) {
+	if (!psf) {
		/* any-source empty exclude case */
		return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
				     iml->sfmode, 0, NULL, 0);
@@ -2360,7 +2360,7 @@ void ip_mc_drop_socket(struct sock *sk)
	struct ip_mc_socklist *iml;
	struct net *net = sock_net(sk);

-	if (inet->mc_list == NULL)
+	if (!inet->mc_list)
		return;

	rtnl_lock();
@@ -2370,7 +2370,7 @@ void ip_mc_drop_socket(struct sock *sk)
		inet->mc_list = iml->next_rcu;
		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
		(void) ip_mc_leave_src(sk, iml, in_dev);
-		if (in_dev != NULL)
+		if (in_dev)
			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
@@ -2587,13 +2587,13 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *idev;
		idev = __in_dev_get_rcu(state->dev);
-		if (unlikely(idev == NULL))
+		if (unlikely(!idev))
			continue;
		im = rcu_dereference(idev->mc_list);
-		if (likely(im != NULL)) {
+		if (likely(im)) {
			spin_lock_bh(&im->lock);
			psf = im->sources;
-			if (likely(psf != NULL)) {
+			if (likely(psf)) {
				state->im = im;
				state->idev = idev;
				break;
@@ -2663,7 +2663,7 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
-	if (likely(state->im != NULL)) {
+	if (likely(state->im)) {
		spin_unlock_bh(&state->im->lock);
		state->im = NULL;
	}

@@ -673,7 +673,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
{
	struct sock *newsk = sk_clone_lock(sk, priority);

-	if (newsk != NULL) {
+	if (newsk) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
@@ -843,7 +843,7 @@ void inet_csk_listen_stop(struct sock *sk)
		sk_acceptq_removed(sk);
		reqsk_put(req);
	}
-	if (queue->fastopenq != NULL) {
+	if (queue->fastopenq) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq->lock);
		acc_req = queue->fastopenq->rskq_rst_head;
@@ -875,7 +875,7 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

-	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
+	if (icsk->icsk_af_ops->compat_getsockopt)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
@@ -888,7 +888,7 @@ int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

-	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
+	if (icsk->icsk_af_ops->compat_setsockopt)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,

@@ -385,7 +385,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
	}

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
-	if (q == NULL)
+	if (!q)
		return NULL;

	q->net = nf;
@@ -406,7 +406,7 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
-	if (q == NULL)
+	if (!q)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);

@@ -64,7 +64,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

-	if (tb != NULL) {
+	if (tb) {
		write_pnet(&tb->ib_net, net);
		tb->port      = snum;
		tb->fastreuse = 0;

@@ -173,7 +173,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 GFP_ATOMIC);
-	if (tw != NULL) {
+	if (tw) {
		const struct inet_sock *inet = inet_sk(sk);

		kmemcheck_annotate_bitfield(tw, flags);

@@ -372,7 +372,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
		goto err;

	err = -ENOMEM;
-	if (pskb_pull(skb, ihl) == NULL)
+	if (!pskb_pull(skb, ihl))
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
@@ -537,7 +537,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
		qp->q.fragments = head;
	}

-	WARN_ON(head == NULL);
+	WARN_ON(!head);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
@@ -559,7 +559,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
		struct sk_buff *clone;
		int i, plen = 0;

-		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
+		clone = alloc_skb(0, GFP_ATOMIC);
+		if (!clone)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
@@ -638,7 +639,8 @@ int ip_defrag(struct sk_buff *skb, u32 user)
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);

	/* Lookup (or create) queue header */
-	if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
+	qp = ip_find(net, ip_hdr(skb), user);
+	if (qp) {
		int ret;

		spin_lock(&qp->q.lock);
@@ -754,7 +756,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
-		if (table == NULL)
+		if (!table)
			goto err_alloc;

		table[0].data = &net->ipv4.frags.high_thresh;
@@ -770,7 +772,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
	}

	hdr = register_net_sysctl(net, "net/ipv4", table);
-	if (hdr == NULL)
+	if (!hdr)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;

@@ -182,7 +182,7 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

-	if (t == NULL)
+	if (!t)
		return PACKET_REJECT;

	if (t->parms.iph.daddr == 0 ||
@@ -423,7 +423,7 @@ static int ipgre_open(struct net_device *dev)
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
-		if (__in_dev_get_rtnl(dev) == NULL)
+		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);

@@ -203,7 +203,7 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
		raw = raw_local_deliver(skb, protocol);

		ipprot = rcu_dereference(inet_protos[protocol]);
-		if (ipprot != NULL) {
+		if (ipprot) {
			int ret;

			if (!ipprot->no_policy) {
@@ -314,7 +314,7 @@ static int ip_rcv_finish(struct sk_buff *skb)
	const struct iphdr *iph = ip_hdr(skb);
	struct rtable *rt;

-	if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
+	if (sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk) {
		const struct net_protocol *ipprot;
		int protocol = iph->protocol;

@@ -387,7 +387,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,

	IP_UPD_PO_STATS_BH(dev_net(dev), IPSTATS_MIB_IN, skb->len);

-	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto out;
	}

@@ -264,7 +264,7 @@ int ip_options_compile(struct net *net,
	unsigned char *iph;
	int optlen, l;

-	if (skb != NULL) {
+	if (skb) {
		rt = skb_rtable(skb);
		optptr = (unsigned char *)&(ip_hdr(skb)[1]);
	} else

@@ -182,7 +182,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
-		if (skb2 == NULL) {
+		if (!skb2) {
			kfree_skb(skb);
			return -ENOMEM;
		}
@@ -257,7 +257,7 @@ static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
-	if (skb_dst(skb)->xfrm != NULL) {
+	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
@@ -376,12 +376,12 @@ int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
-	if (rt != NULL)
+	if (rt)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
-	if (rt == NULL) {
+	if (!rt) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
@@ -587,7 +587,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
				ip_options_fragment(frag);
			offset += skb->len - hlen;
			iph->frag_off = htons(offset>>3);
-			if (frag->next != NULL)
+			if (frag->next)
				iph->frag_off |= htons(IP_MF);
			/* Ready, complete checksum */
			ip_send_check(iph);
@@ -790,12 +790,13 @@ static inline int ip_ufo_append_data(struct sock *sk,
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
-	if ((skb = skb_peek_tail(queue)) == NULL) {
+	skb = skb_peek_tail(queue);
+	if (!skb) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

-		if (skb == NULL)
+		if (!skb)
			return err;

		/* reserve space for Hardware header */
@@ -961,10 +962,10 @@ alloc_new_skb:
				skb = sock_wmalloc(sk,
						   alloclen + hh_len + 15, 1,
						   sk->sk_allocation);
-				if (unlikely(skb == NULL))
+				if (unlikely(!skb))
					err = -ENOBUFS;
			}
-			if (skb == NULL)
+			if (!skb)
				goto error;

			/*
@@ -1088,10 +1089,10 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
	 */
	opt = ipc->opt;
	if (opt) {
-		if (cork->opt == NULL) {
+		if (!cork->opt) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
-			if (unlikely(cork->opt == NULL))
+			if (unlikely(!cork->opt))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
@@ -1198,7 +1199,8 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		return -EMSGSIZE;
	}

-	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
+	skb = skb_peek_tail(&sk->sk_write_queue);
+	if (!skb)
		return -EINVAL;

	cork->length += size;
@@ -1329,7 +1331,8 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
	__be16 df = 0;
	__u8 ttl;

-	if ((skb = __skb_dequeue(queue)) == NULL)
+	skb = __skb_dequeue(queue);
+	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

@@ -351,7 +351,7 @@ int ip_ra_control(struct sock *sk, unsigned char on,
			return 0;
		}
	}
-	if (new_ra == NULL) {
+	if (!new_ra) {
		spin_unlock_bh(&ip_ra_lock);
		return -ENOBUFS;
	}
@@ -387,7 +387,7 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
				   skb_network_header(skb);
	serr->port = port;

-	if (skb_pull(skb, payload - skb->data) != NULL) {
+	if (skb_pull(skb, payload - skb->data)) {
		skb_reset_transport_header(skb);
		if (sock_queue_err_skb(sk, skb) == 0)
			return;
@@ -482,7 +482,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)

	err = -EAGAIN;
	skb = sock_dequeue_err_skb(sk);
-	if (skb == NULL)
+	if (!skb)
		goto out;

	copied = skb->len;

@@ -654,7 +654,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
	if (dst == 0) {
		/* NBMA tunnel */

-		if (skb_dst(skb) == NULL) {
+		if (!skb_dst(skb)) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}
@@ -672,7 +672,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
-			if (neigh == NULL)
+			if (!neigh)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
@@ -843,7 +843,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
	case SIOCGETTUNNEL:
		if (dev == itn->fb_tunnel_dev) {
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
-			if (t == NULL)
+			if (!t)
				t = netdev_priv(dev);
		}
		memcpy(p, &t->parms, sizeof(*p));
@@ -876,7 +876,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
			break;
		}
		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
-			if (t != NULL) {
+			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
@@ -914,7 +914,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
		if (dev == itn->fb_tunnel_dev) {
			err = -ENOENT;
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
-			if (t == NULL)
+			if (!t)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(itn->fb_tunnel_dev))

@@ -60,7 +60,7 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,

	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
				  iph->saddr, iph->daddr, 0);
-	if (tunnel != NULL) {
+	if (tunnel) {
		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;

@@ -63,7 +63,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
	struct xfrm_state *t;

	t = xfrm_state_alloc(net);
-	if (t == NULL)
+	if (!t)
		goto out;

	t->id.proto = IPPROTO_IPIP;

@@ -504,7 +504,8 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
	if (!net_eq(dev_net(dev), &init_net))
		goto drop;

-	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
		return NET_RX_DROP;

	if (!pskb_may_pull(skb, sizeof(struct arphdr)))
@@ -958,7 +959,8 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto drop;

-	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
		return NET_RX_DROP;

	if (!pskb_may_pull(skb,

@@ -144,7 +144,7 @@ static int ipip_err(struct sk_buff *skb, u32 info)
	err = -ENOENT;
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
			     iph->daddr, iph->saddr, 0);
-	if (t == NULL)
+	if (!t)
		goto out;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {

@@ -189,7 +189,7 @@ static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
	}

	mrt = ipmr_get_table(rule->fr_net, rule->table);
-	if (mrt == NULL)
+	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
@@ -253,7 +253,7 @@ static int __net_init ipmr_rules_init(struct net *net)
	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
-	if (mrt == NULL) {
+	if (!mrt) {
		err = -ENOMEM;
		goto err1;
	}
@@ -316,11 +316,11 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
	unsigned int i;

	mrt = ipmr_get_table(net, id);
-	if (mrt != NULL)
+	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
-	if (mrt == NULL)
+	if (!mrt)
		return NULL;
	write_pnet(&mrt->net, net);
	mrt->id = id;
@@ -422,7 +422,7 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);
-			if (in_dev == NULL)
+			if (!in_dev)
				goto failure;

			ipv4_devconf_setall(in_dev);
@@ -506,7 +506,7 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);

-	if (dev == NULL)
+	if (!dev)
		return NULL;

	dev_net_set(dev, net);
@@ -762,7 +762,7 @@ static int vif_add(struct net *net, struct mr_table *mrt,
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
-			if (dev && __in_dev_get_rtnl(dev) == NULL) {
+			if (dev && !__in_dev_get_rtnl(dev)) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
@@ -1008,7 +1008,7 @@ static int ipmr_cache_report(struct mr_table *mrt,

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
-	if (mroute_sk == NULL) {
+	if (!mroute_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
@@ -1161,7 +1161,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
		return -EINVAL;

	c = ipmr_cache_alloc();
-	if (c == NULL)
+	if (!c)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
@@ -1283,7 +1283,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
-	if (mrt == NULL)
+	if (!mrt)
		return -ENOENT;

	if (optname != MRT_INIT) {
@@ -1446,7 +1446,7 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
-	if (mrt == NULL)
+	if (!mrt)
		return -ENOENT;

	if (optname != MRT_VERSION &&
@@ -1492,7 +1492,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
-	if (mrt == NULL)
+	if (!mrt)
		return -ENOENT;

	switch (cmd) {
@@ -1566,7 +1566,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
-	if (mrt == NULL)
+	if (!mrt)
		return -ENOENT;

	switch (cmd) {
@@ -1701,7 +1701,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
	struct flowi4 fl4;
	int    encap = 0;

-	if (vif->dev == NULL)
+	if (!vif->dev)
		goto out_free;

#ifdef CONFIG_IP_PIMSM
@@ -1992,7 +1992,7 @@ int ip_mr_input(struct sk_buff *skb)

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
-	if (cache == NULL) {
+	if (!cache) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
@@ -2003,13 +2003,13 @@ int ip_mr_input(struct sk_buff *skb)
	/*
	 *	No usable cache entry
	 */
-	if (cache == NULL) {
+	if (!cache) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
-			if (skb2 == NULL)
+			if (!skb2)
				return -ENOBUFS;
			skb = skb2;
		}
@@ -2068,7 +2068,7 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
	reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

-	if (reg_dev == NULL)
+	if (!reg_dev)
		return 1;

	skb->mac_header = skb->network_header;
@@ -2198,18 +2198,18 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
	int err;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
-	if (mrt == NULL)
+	if (!mrt)
		return -ENOENT;

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, saddr, daddr);
-	if (cache == NULL && skb->dev) {
+	if (!cache && skb->dev) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, daddr, vif);
	}
-	if (cache == NULL) {
+	if (!cache) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
@@ -2267,7 +2267,7 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
-	if (nlh == NULL)
+	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
@@ -2332,7 +2332,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,

	skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
			GFP_ATOMIC);
-	if (skb == NULL)
+	if (!skb)
		goto errout;

	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
@@ -2447,7 +2447,7 @@ static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
-	if (mrt == NULL)
+	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;
@@ -2566,7 +2566,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
-	if (mrt == NULL)
+	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;

@@ -516,7 +516,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
		 ntohs(icmph->un.echo.sequence));

	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
-	if (sk == NULL) {
+	if (!sk) {
		pr_debug("no socket, dropping\n");
		return;	/* No socket for error */
	}
@@ -971,7 +971,7 @@ bool ping_rcv(struct sk_buff *skb)
	skb_push(skb, skb->data - (u8 *)icmph);

	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
-	if (sk != NULL) {
+	if (sk) {
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		pr_debug("rcv on socket %p\n", sk);

@@ -293,7 +293,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)

	read_lock(&raw_v4_hashinfo.lock);
	raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
-	if (raw_sk != NULL) {
+	if (raw_sk) {
		iph = (const struct iphdr *)skb->data;
		net = dev_net(skb->dev);

@@ -363,7 +363,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
	skb = sock_alloc_send_skb(sk,
				  length + hlen + tlen + 15,
				  flags & MSG_DONTWAIT, &err);
-	if (skb == NULL)
+	if (!skb)
		goto error;
	skb_reserve(skb, hlen);

@@ -872,7 +872,7 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
-		if (skb != NULL)
+		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);

@@ -1056,7 +1056,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
		__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);

		rt = (struct rtable *)odst;
-		if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
+		if (odst->obsolete && !odst->ops->check(odst, 0)) {
			rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
			if (IS_ERR(rt))
				goto out;
@@ -1450,7 +1450,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,

	/* Primary sanity checks. */

-	if (in_dev == NULL)
+	if (!in_dev)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
@@ -1553,7 +1553,7 @@ static int __mkroute_input(struct sk_buff *skb,

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
-	if (out_dev == NULL) {
+	if (!out_dev) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}
@@ -1591,7 +1591,7 @@ static int __mkroute_input(struct sk_buff *skb,

	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
	if (do_cache) {
-		if (fnhe != NULL)
+		if (fnhe)
			rth = rcu_dereference(fnhe->fnhe_rth_input);
		else
			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
@@ -2054,7 +2054,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
		     ipv4_is_lbcast(fl4->daddr))) {
			/* It is equivalent to	inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, fl4->saddr, false);
-			if (dev_out == NULL)
+			if (!dev_out)
				goto out;

			/* Special hack: user can direct multicasts
@@ -2087,7 +2087,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
	if (fl4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
-		if (dev_out == NULL)
+		if (!dev_out)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
@@ -2299,7 +2299,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
	u32 metrics[RTAX_MAX];

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
-	if (nlh == NULL)
+	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
@@ -2421,7 +2421,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
-	if (skb == NULL) {
+	if (!skb) {
		err = -ENOBUFS;
		goto errout;
	}
@@ -2452,7 +2452,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
		struct net_device *dev;

		dev = __dev_get_by_index(net, iif);
-		if (dev == NULL) {
+		if (!dev) {
			err = -ENODEV;
			goto errout_free;
		}
@@ -2651,7 +2651,7 @@ static __net_init int sysctl_route_net_init(struct net *net)
	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
-		if (tbl == NULL)
+		if (!tbl)
			goto err_dup;

		/* Don't export sysctls to unprivileged users */
@@ -2661,7 +2661,7 @@ static __net_init int sysctl_route_net_init(struct net *net)
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
-	if (net->ipv4.route_hdr == NULL)
+	if (!net->ipv4.route_hdr)
		goto err_reg;
	return 0;

@ -909,7 +909,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 		int i;

 		table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
-		if (table == NULL)
+		if (!table)
 			goto err_alloc;

 		/* Update the variables to point into the current struct net */
@ -918,7 +918,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 	}

 	net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
-	if (net->ipv4.ipv4_hdr == NULL)
+	if (!net->ipv4.ipv4_hdr)
 		goto err_reg;

 	net->ipv4.sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
@ -956,7 +956,7 @@ static __init int sysctl_ipv4_init(void)
 	struct ctl_table_header *hdr;

 	hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
-	if (hdr == NULL)
+	if (!hdr)
 		return -ENOMEM;

 	if (register_pernet_subsys(&ipv4_sysctl_ops)) {
@ -496,7 +496,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)

 	/* Connected or passive Fast Open socket? */
 	if (sk->sk_state != TCP_SYN_SENT &&
-	    (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
+	    (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) {
 		int target = sock_rcvlowat(sk, 0, INT_MAX);

 		if (tp->urg_seq == tp->copied_seq &&
@ -1028,7 +1028,7 @@ static inline int select_size(const struct sock *sk, bool sg)

 void tcp_free_fastopen_req(struct tcp_sock *tp)
 {
-	if (tp->fastopen_req != NULL) {
+	if (tp->fastopen_req) {
 		kfree(tp->fastopen_req);
 		tp->fastopen_req = NULL;
 	}
@ -1042,12 +1042,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,

 	if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
 		return -EOPNOTSUPP;
-	if (tp->fastopen_req != NULL)
+	if (tp->fastopen_req)
 		return -EALREADY; /* Another Fast Open is in progress */

 	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
 				   sk->sk_allocation);
-	if (unlikely(tp->fastopen_req == NULL))
+	if (unlikely(!tp->fastopen_req))
 		return -ENOBUFS;
 	tp->fastopen_req->data = msg;
 	tp->fastopen_req->size = size;
@ -2138,7 +2138,7 @@ adjudge_to_death:
 	 * aborted (e.g., closed with unread data) before 3WHS
 	 * finishes.
 	 */
-	if (req != NULL)
+	if (req)
 		reqsk_fastopen_remove(sk, req, false);
 	inet_csk_destroy_sock(sk);
 }
@ -2776,7 +2776,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 		break;

 	case TCP_FASTOPEN:
-		if (icsk->icsk_accept_queue.fastopenq != NULL)
+		if (icsk->icsk_accept_queue.fastopenq)
 			val = icsk->icsk_accept_queue.fastopenq->max_qlen;
 		else
 			val = 0;
@ -2960,7 +2960,7 @@ void tcp_done(struct sock *sk)

 	tcp_set_state(sk, TCP_CLOSE);
 	tcp_clear_xmit_timers(sk);
-	if (req != NULL)
+	if (req)
 		reqsk_fastopen_remove(sk, req, false);

 	sk->sk_shutdown = SHUTDOWN_MASK;
@ -29,7 +29,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 		r->idiag_rqueue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
 		r->idiag_wqueue = tp->write_seq - tp->snd_una;
 	}
-	if (info != NULL)
+	if (info)
 		tcp_get_info(sk, info);
 }

@ -141,7 +141,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
 	req->sk = NULL;

 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
-	if (child == NULL)
+	if (!child)
 		return false;

 	spin_lock(&queue->fastopenq->lock);
@ -214,7 +214,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
 	sk->sk_data_ready(sk);
 	bh_unlock_sock(child);
 	sock_put(child);
-	WARN_ON(req->sk == NULL);
+	WARN_ON(!req->sk);
 	return true;
 }

@ -233,7 +233,7 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
 	 * temporarily vs a server not supporting Fast Open at all.
 	 */
 	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
-	if (fastopenq == NULL || fastopenq->max_qlen == 0)
+	if (!fastopenq || fastopenq->max_qlen == 0)
 		return false;

 	if (fastopenq->qlen >= fastopenq->max_qlen) {
@ -866,7 +866,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 /* This must be called before lost_out is incremented */
 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
 {
-	if ((tp->retransmit_skb_hint == NULL) ||
+	if (!tp->retransmit_skb_hint ||
 	    before(TCP_SKB_CB(skb)->seq,
 		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
 		tp->retransmit_skb_hint = skb;
@ -1256,7 +1256,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
 		fack_count += pcount;

 		/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
-		if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
+		if (!tcp_is_fack(tp) && tp->lost_skb_hint &&
 		    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
 			tp->lost_cnt_hint += pcount;

@ -1535,7 +1535,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 		if (!before(TCP_SKB_CB(skb)->seq, end_seq))
 			break;

-		if ((next_dup != NULL) &&
+		if (next_dup &&
 		    before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
 			in_sack = tcp_match_skb_to_sack(sk, skb,
 							next_dup->start_seq,
@ -1551,7 +1551,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 		if (in_sack <= 0) {
 			tmp = tcp_shift_skb_data(sk, skb, state,
 						 start_seq, end_seq, dup_sack);
-			if (tmp != NULL) {
+			if (tmp) {
 				if (tmp != skb) {
 					skb = tmp;
 					continue;
@ -1614,7 +1614,7 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
 					    struct tcp_sacktag_state *state,
 					    u32 skip_to_seq)
 {
-	if (next_dup == NULL)
+	if (!next_dup)
 		return skb;

 	if (before(next_dup->start_seq, skip_to_seq)) {
@ -1783,7 +1783,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 			if (tcp_highest_sack_seq(tp) == cache->end_seq) {
 				/* ...but better entrypoint exists! */
 				skb = tcp_highest_sack(sk);
-				if (skb == NULL)
+				if (!skb)
 					break;
 				state.fack_count = tp->fackets_out;
 				cache++;
@ -1798,7 +1798,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,

 		if (!before(start_seq, tcp_highest_sack_seq(tp))) {
 			skb = tcp_highest_sack(sk);
-			if (skb == NULL)
+			if (!skb)
 				break;
 			state.fack_count = tp->fackets_out;
 		}
@ -3698,7 +3698,7 @@ void tcp_parse_options(const struct sk_buff *skb,
 				 */
 				if (opsize < TCPOLEN_EXP_FASTOPEN_BASE ||
 				    get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC ||
-				    foc == NULL || !th->syn || (opsize & 1))
+				    !foc || !th->syn || (opsize & 1))
 					break;
 				foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE;
 				if (foc->len >= TCP_FASTOPEN_COOKIE_MIN &&
@ -4669,7 +4669,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 	struct sk_buff *head;
 	u32 start, end;

-	if (skb == NULL)
+	if (!skb)
 		return;

 	start = TCP_SKB_CB(skb)->seq;
@ -5124,7 +5124,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 {
 	struct tcp_sock *tp = tcp_sk(sk);

-	if (unlikely(sk->sk_rx_dst == NULL))
+	if (unlikely(!sk->sk_rx_dst))
 		inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
 	/*
 	 *	Header prediction.
@ -5321,7 +5321,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)

 	tcp_set_state(sk, TCP_ESTABLISHED);

-	if (skb != NULL) {
+	if (skb) {
 		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
 		security_inet_conn_established(sk, skb);
 	}
@ -5690,11 +5690,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	}

 	req = tp->fastopen_rsk;
-	if (req != NULL) {
+	if (req) {
 		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
 		    sk->sk_state != TCP_FIN_WAIT1);

-		if (tcp_check_req(sk, skb, req, true) == NULL)
+		if (!tcp_check_req(sk, skb, req, true))
 			goto discard;
 	}

@ -5780,7 +5780,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		 * ACK we have received, this would have acknowledged
 		 * our SYNACK so stop the SYNACK timer.
 		 */
-		if (req != NULL) {
+		if (req) {
 			/* Return RST if ack_seq is invalid.
 			 * Note that RFC793 only says to generate a
 			 * DUPACK for it but for TCP Fast Open it seems
@ -122,7 +122,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 	   and use initial timestamp retrieved from peer table.
 	 */
 	if (tcptw->tw_ts_recent_stamp &&
-	    (twp == NULL || (sysctl_tcp_tw_reuse &&
+	    (!twp || (sysctl_tcp_tw_reuse &&
 			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
 		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
 		if (tp->write_seq == 0)
@ -494,7 +494,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		/* Only in fast or simultaneous open. If a fast open socket is
 		 * is already accepted it is treated as a connected one below.
 		 */
-		if (fastopen && fastopen->sk == NULL)
+		if (fastopen && !fastopen->sk)
 			break;

 		if (!sock_owned_by_user(sk)) {
@ -1305,7 +1305,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	/* Copy over the MD5 key from the original socket */
 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
 				AF_INET);
-	if (key != NULL) {
+	if (key) {
 		/*
 		 * We're using one, so create a matching key
 		 * on the newsk structure. If we fail to get
@ -1390,7 +1390,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 		sk_mark_napi_id(sk, skb);
 		if (dst) {
 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
-			    dst->ops->check(dst, 0) == NULL) {
+			    !dst->ops->check(dst, 0)) {
 				dst_release(dst);
 				sk->sk_rx_dst = NULL;
 			}
@ -1797,7 +1797,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
 	if (inet_csk(sk)->icsk_bind_hash)
 		inet_put_port(sk);

-	BUG_ON(tp->fastopen_rsk != NULL);
+	BUG_ON(tp->fastopen_rsk);

 	/* If socket is aborted during connect operation */
 	tcp_free_fastopen_req(tp);
@ -505,7 +505,7 @@ void tcp_init_metrics(struct sock *sk)
 	struct tcp_metrics_block *tm;
 	u32 val, crtt = 0; /* cached RTT scaled by 8 */

-	if (dst == NULL)
+	if (!dst)
 		goto reset;

 	dst_confirm(dst);
@ -294,7 +294,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
 		tw = inet_twsk_alloc(sk, state);

-	if (tw != NULL) {
+	if (tw) {
 		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
 		struct inet_sock *inet = inet_sk(sk);
@ -332,7 +332,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 			struct tcp_md5sig_key *key;
 			tcptw->tw_md5_key = NULL;
 			key = tp->af_specific->md5_lookup(sk, sk);
-			if (key != NULL) {
+			if (key) {
 				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
 				if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
 					BUG();
@ -454,7 +454,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 {
 	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

-	if (newsk != NULL) {
+	if (newsk) {
 		const struct inet_request_sock *ireq = inet_rsk(req);
 		struct tcp_request_sock *treq = tcp_rsk(req);
 		struct inet_connection_sock *newicsk = inet_csk(newsk);
@ -763,7 +763,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 	 * socket is created, wait for troubles.
 	 */
 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
-	if (child == NULL)
+	if (!child)
 		goto listen_overflow;

 	inet_csk_reqsk_queue_unlink(sk, req);
@ -565,7 +565,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 	opts->mss = tcp_advertise_mss(sk);
 	remaining -= TCPOLEN_MSS_ALIGNED;

-	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
+	if (likely(sysctl_tcp_timestamps && !*md5)) {
 		opts->options |= OPTION_TS;
 		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
 		opts->tsecr = tp->rx_opt.ts_recent;
@ -641,7 +641,7 @@ static unsigned int tcp_synack_options(struct sock *sk,
 		if (unlikely(!ireq->tstamp_ok))
 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 	}
-	if (foc != NULL && foc->len >= 0) {
+	if (foc && foc->len >= 0) {
 		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
 		need = (need + 3) & ~3U;  /* Align to 32 bits */
 		if (remaining >= need) {
@ -1148,7 +1148,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,

 	/* Get a new skb... force flag on. */
 	buff = sk_stream_alloc_skb(sk, nsize, gfp);
-	if (buff == NULL)
+	if (!buff)
 		return -ENOMEM; /* We'll just try again later. */

 	sk->sk_wmem_queued += buff->truesize;
@ -1707,7 +1707,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 		return tcp_fragment(sk, skb, len, mss_now, gfp);

 	buff = sk_stream_alloc_skb(sk, 0, gfp);
-	if (unlikely(buff == NULL))
+	if (unlikely(!buff))
 		return -ENOMEM;

 	sk->sk_wmem_queued += buff->truesize;
@ -1925,7 +1925,8 @@ static int tcp_mtu_probe(struct sock *sk)
 	}

 	/* We're allowed to probe.  Build it now. */
-	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
+	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC);
+	if (!nskb)
 		return -1;
 	sk->sk_wmem_queued += nskb->truesize;
 	sk_mem_charge(sk, nskb->truesize);
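The tcp_mtu_probe() hunk above does a little more than drop "== NULL": it also hoists the assignment out of the if-condition, which is why the new-side count in the hunk header grows from 7 to 8 lines (tcp_write_wakeup() further down hoists its assignment the same way). A minimal sketch of that transformation, with alloc_buf() as a hypothetical stand-in for sk_stream_alloc_skb():

	#include <stdlib.h>

	static void *alloc_buf(size_t len)
	{
		return malloc(len);
	}

	/* Before: assignment buried inside the condition. */
	static int probe_old(size_t len)
	{
		void *buf;

		if ((buf = alloc_buf(len)) == NULL)
			return -1;
		free(buf);
		return 0;
	}

	/* After: assignment on its own line, shorter truth test. */
	static int probe_new(size_t len)
	{
		void *buf;

		buf = alloc_buf(len);
		if (!buf)
			return -1;
		free(buf);
		return 0;
	}

Behaviour is unchanged either way; the split form simply reads better and avoids the assignment-in-condition pattern.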
@ -2223,7 +2224,7 @@ void tcp_send_loss_probe(struct sock *sk)
 	int mss = tcp_current_mss(sk);
 	int err = -1;

-	if (tcp_send_head(sk) != NULL) {
+	if (tcp_send_head(sk)) {
 		err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
 		goto rearm_timer;
 	}
@ -2733,7 +2734,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		if (skb == tcp_send_head(sk))
 			break;
 		/* we could do better than to assign each time */
-		if (hole == NULL)
+		if (!hole)
 			tp->retransmit_skb_hint = skb;

 		/* Assume this retransmit will generate
@ -2757,7 +2758,7 @@ begin_fwd:
 			if (!tcp_can_forward_retransmit(sk))
 				break;
 			/* Backtrack if necessary to non-L'ed skb */
-			if (hole != NULL) {
+			if (hole) {
 				skb = hole;
 				hole = NULL;
 			}
@ -2765,7 +2766,7 @@ begin_fwd:
 			goto begin_fwd;

 		} else if (!(sacked & TCPCB_LOST)) {
-			if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
+			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
 				hole = skb;
 			continue;

@ -2810,7 +2811,7 @@ void tcp_send_fin(struct sock *sk)
 	 */
 	mss_now = tcp_current_mss(sk);

-	if (tcp_send_head(sk) != NULL) {
+	if (tcp_send_head(sk)) {
 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
 		TCP_SKB_CB(skb)->end_seq++;
 		tp->write_seq++;
@ -2868,14 +2869,14 @@ int tcp_send_synack(struct sock *sk)
 	struct sk_buff *skb;

 	skb = tcp_write_queue_head(sk);
-	if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
+	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
 		pr_debug("%s: wrong queue state\n", __func__);
 		return -EFAULT;
 	}
 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
 		if (skb_cloned(skb)) {
 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
-			if (nskb == NULL)
+			if (!nskb)
 				return -ENOMEM;
 			tcp_unlink_write_queue(skb, sk);
 			__skb_header_release(nskb);
@ -3014,7 +3015,7 @@ static void tcp_connect_init(struct sock *sk)
 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);

 #ifdef CONFIG_TCP_MD5SIG
-	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
+	if (tp->af_specific->md5_lookup(sk, sk))
 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
 #endif

@ -3300,7 +3301,7 @@ void tcp_send_ack(struct sock *sk)
 	 * sock.
 	 */
 	buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
-	if (buff == NULL) {
+	if (!buff) {
 		inet_csk_schedule_ack(sk);
 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
@ -3344,7 +3345,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)

 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
 	skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
-	if (skb == NULL)
+	if (!skb)
 		return -1;

 	/* Reserve space for headers and set control bits. */
@ -3375,8 +3376,8 @@ int tcp_write_wakeup(struct sock *sk)
 	if (sk->sk_state == TCP_CLOSE)
 		return -1;

-	if ((skb = tcp_send_head(sk)) != NULL &&
-	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
+	skb = tcp_send_head(sk);
+	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
 		int err;
 		unsigned int mss = tcp_current_mss(sk);
 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
@ -633,7 +633,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)

 	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
 			iph->saddr, uh->source, skb->dev->ifindex, udptable);
-	if (sk == NULL) {
+	if (!sk) {
 		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 		return;	/* No socket for error */
 	}
@ -1011,7 +1011,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	if (connected)
 		rt = (struct rtable *)sk_dst_check(sk, 0);

-	if (rt == NULL) {
+	if (!rt) {
 		struct net *net = sock_net(sk);

 		fl4 = &fl4_stack;
@ -1522,7 +1522,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)

 		/* if we're overly short, let UDP handle it */
 		encap_rcv = ACCESS_ONCE(up->encap_rcv);
-		if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
+		if (skb->len > sizeof(struct udphdr) && encap_rcv) {
 			int ret;

 			/* Verify checksum before giving to encap */
@ -1619,7 +1619,7 @@ static void flush_stack(struct sock **stack, unsigned int count,

 	for (i = 0; i < count; i++) {
 		sk = stack[i];
-		if (likely(skb1 == NULL))
+		if (likely(!skb1))
 			skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);

 		if (!skb1) {
@ -1802,7 +1802,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 				saddr, daddr, udptable, proto);

 	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
-	if (sk != NULL) {
+	if (sk) {
 		int ret;

 		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
@ -58,7 +58,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
 		goto out_nosk;

 	err = -ENOENT;
-	if (sk == NULL)
+	if (!sk)
 		goto out_nosk;

 	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
@ -285,7 +285,7 @@ void udp_del_offload(struct udp_offload *uo)
 	pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
 unlock:
 	spin_unlock(&udp_offload_lock);
-	if (uo_priv != NULL)
+	if (uo_priv)
 		call_rcu(&uo_priv->rcu, udp_offload_free_routine);
 }
 EXPORT_SYMBOL(udp_del_offload);
@ -394,7 +394,7 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
 			break;
 		}

-	if (uo_priv != NULL) {
+	if (uo_priv) {
 		NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
 		err = uo_priv->offload->callbacks.gro_complete(skb,
 				nhoff + sizeof(struct udphdr),
@ -24,7 +24,7 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb)

 static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb)
 {
-	if (skb_dst(skb) == NULL) {
+	if (!skb_dst(skb)) {
 		const struct iphdr *iph = ip_hdr(skb);

 		if (ip_route_input_noref(skb, iph->daddr, iph->saddr,
@ -298,7 +298,7 @@ static void __net_exit xfrm4_net_exit(struct net *net)
 {
 	struct ctl_table *table;

-	if (net->ipv4.xfrm4_hdr == NULL)
+	if (!net->ipv4.xfrm4_hdr)
 		return;

 	table = net->ipv4.xfrm4_hdr->ctl_table_arg;
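These conversions line up with what checkpatch flags: if memory serves, scripts/checkpatch.pl in --strict mode emits "CHECK: Comparison to NULL could be written ..." for both the == NULL and != NULL forms. A hedged example invocation against one of the files touched by this series (assumes a kernel source tree as the working directory):

	# -f/--file treats the argument as a source file rather than a patch.
	./scripts/checkpatch.pl --strict -f net/ipv4/route.c

Running the same command over the patch itself instead of with -f is the usual pre-submission check for netdev.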