Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
The following patchset contains Netfilter/IPVS fixes for your net
tree; they are:

* Fix BUG_ON splat due to malformed TCP packets seen by synproxy, from
  Patrick McHardy.

* Fix possible weight overflow in lblc and lblcr schedulers due to
  32-bit arithmetic, from Simon Kirby (a short userspace sketch of the
  overflow follows the commit metadata below).

* Fix possible memory access race in the lblc and lblcr schedulers,
  introduced when they were converted to use RCU, two patches from
  Julian Anastasov.

* Fix hard dependency on CPU 0 when reading per-cpu stats in the
  rate estimator, from Julian Anastasov.

* Fix race that may lead to object use after release when invoking
  ipvsadm -C && ipvsadm -R, introduced when adding RCU, from Julian
  Anastasov.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller, 2013-10-01 12:39:35 -04:00
commit e024bdc051
13 changed files with 145 additions and 160 deletions
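The weight overflow fixed below comes from the schedulers comparing
loh * dest->weight against doh * least->weight in 32-bit arithmetic. A
minimal userspace sketch of the wraparound (the connection count and the
weight are made-up illustrative values; only the 64-bit widening mirrors
the actual (__s64) casts in the patches):

    /* overflow_demo.c: build with gcc -o overflow_demo overflow_demo.c */
    #include <stdio.h>

    int main(void)
    {
        /* Roughly what ip_vs_dest_conn_overhead() can return for a busy
         * real server (activeconns << 8), and a large configured weight. */
        unsigned int loh = 50000u << 8;               /* 12,800,000 */
        unsigned int weight = 20000;

        unsigned int wrapped = loh * weight;          /* truncated to 32 bits */
        long long widened = (long long)loh * weight;  /* what the patched code computes */

        printf("32-bit product: %u\n", wrapped);
        printf("64-bit product: %lld\n", widened);
        return 0;
    }

The hunks below apply the same widening in the lblc, lblcr, nq, sed and
wlc schedulers, and switch the overhead helpers from unsigned int to int.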

View File

@ -723,8 +723,6 @@ struct ip_vs_dest_dst {
struct rcu_head rcu_head;
};
/* In grace period after removing */
#define IP_VS_DEST_STATE_REMOVING 0x01
/*
* The real server destination forwarding entry
* with ip address, port number, and so on.
@ -742,7 +740,7 @@ struct ip_vs_dest {
atomic_t refcnt; /* reference counter */
struct ip_vs_stats stats; /* statistics */
unsigned long state; /* state flags */
unsigned long idle_start; /* start time, jiffies */
/* connection counters and thresholds */
atomic_t activeconns; /* active connections */
@ -756,14 +754,13 @@ struct ip_vs_dest {
struct ip_vs_dest_dst __rcu *dest_dst; /* cached dst info */
/* for virtual service */
struct ip_vs_service *svc; /* service it belongs to */
struct ip_vs_service __rcu *svc; /* service it belongs to */
__u16 protocol; /* which protocol (TCP/UDP) */
__be16 vport; /* virtual port number */
union nf_inet_addr vaddr; /* virtual IP address */
__u32 vfwmark; /* firewall mark of service */
struct list_head t_list; /* in dest_trash */
struct rcu_head rcu_head;
unsigned int in_rs_table:1; /* we are in rs_table */
};
@ -1649,7 +1646,7 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
/* CONFIG_IP_VS_NFCT */
#endif
static inline unsigned int
static inline int
ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{
/*

View File

@ -56,7 +56,7 @@ struct synproxy_options {
struct tcphdr;
struct xt_synproxy_info;
extern void synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
extern bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
const struct tcphdr *th,
struct synproxy_options *opts);
extern unsigned int synproxy_options_size(const struct synproxy_options *opts);

View File

@ -267,7 +267,8 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
if (th == NULL)
return NF_DROP;
synproxy_parse_options(skb, par->thoff, th, &opts);
if (!synproxy_parse_options(skb, par->thoff, th, &opts))
return NF_DROP;
if (th->syn && !(th->ack || th->fin || th->rst)) {
/* Initial SYN from client */
@ -350,7 +351,8 @@ static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
/* fall through */
case TCP_CONNTRACK_SYN_SENT:
synproxy_parse_options(skb, thoff, th, &opts);
if (!synproxy_parse_options(skb, thoff, th, &opts))
return NF_DROP;
if (!th->syn && th->ack &&
CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
@ -373,7 +375,9 @@ static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
if (!th->syn || !th->ack)
break;
synproxy_parse_options(skb, thoff, th, &opts);
if (!synproxy_parse_options(skb, thoff, th, &opts))
return NF_DROP;
if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
synproxy->tsoff = opts.tsval - synproxy->its;

View File

@ -282,7 +282,8 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
if (th == NULL)
return NF_DROP;
synproxy_parse_options(skb, par->thoff, th, &opts);
if (!synproxy_parse_options(skb, par->thoff, th, &opts))
return NF_DROP;
if (th->syn && !(th->ack || th->fin || th->rst)) {
/* Initial SYN from client */
@ -372,7 +373,8 @@ static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
/* fall through */
case TCP_CONNTRACK_SYN_SENT:
synproxy_parse_options(skb, thoff, th, &opts);
if (!synproxy_parse_options(skb, thoff, th, &opts))
return NF_DROP;
if (!th->syn && th->ack &&
CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
@ -395,7 +397,9 @@ static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
if (!th->syn || !th->ack)
break;
synproxy_parse_options(skb, thoff, th, &opts);
if (!synproxy_parse_options(skb, thoff, th, &opts))
return NF_DROP;
if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
synproxy->tsoff = opts.tsval - synproxy->its;

View File

@ -116,6 +116,7 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
struct ip_vs_cpu_stats *s;
struct ip_vs_service *svc;
s = this_cpu_ptr(dest->stats.cpustats);
s->ustats.inpkts++;
@ -123,11 +124,14 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
s->ustats.inbytes += skb->len;
u64_stats_update_end(&s->syncp);
s = this_cpu_ptr(dest->svc->stats.cpustats);
rcu_read_lock();
svc = rcu_dereference(dest->svc);
s = this_cpu_ptr(svc->stats.cpustats);
s->ustats.inpkts++;
u64_stats_update_begin(&s->syncp);
s->ustats.inbytes += skb->len;
u64_stats_update_end(&s->syncp);
rcu_read_unlock();
s = this_cpu_ptr(ipvs->tot_stats.cpustats);
s->ustats.inpkts++;
@ -146,6 +150,7 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
struct ip_vs_cpu_stats *s;
struct ip_vs_service *svc;
s = this_cpu_ptr(dest->stats.cpustats);
s->ustats.outpkts++;
@ -153,11 +158,14 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
s->ustats.outbytes += skb->len;
u64_stats_update_end(&s->syncp);
s = this_cpu_ptr(dest->svc->stats.cpustats);
rcu_read_lock();
svc = rcu_dereference(dest->svc);
s = this_cpu_ptr(svc->stats.cpustats);
s->ustats.outpkts++;
u64_stats_update_begin(&s->syncp);
s->ustats.outbytes += skb->len;
u64_stats_update_end(&s->syncp);
rcu_read_unlock();
s = this_cpu_ptr(ipvs->tot_stats.cpustats);
s->ustats.outpkts++;
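The hunks above make the hot path fetch dest->svc through
rcu_dereference() inside an rcu_read_lock() section before touching the
per-service counters, matching the new __rcu annotation on the field. A
userspace sketch of that reader/updater pattern using liburcu (the struct
names and the packet counter are invented for illustration):

    /* svc_rcu_demo.c: build with gcc svc_rcu_demo.c -lurcu -o svc_rcu_demo */
    #include <urcu.h>               /* userspace RCU, default (memb) flavour */
    #include <stdio.h>
    #include <stdlib.h>

    struct svc {
        unsigned long inpkts;       /* stand-in for svc->stats */
    };

    static struct svc *cur_svc;     /* RCU-protected pointer, like dest->svc */

    static void account_packet(void)
    {
        struct svc *svc;

        rcu_read_lock();                    /* reader: pin the current svc */
        svc = rcu_dereference(cur_svc);
        if (svc)
            svc->inpkts++;                  /* svc cannot be freed while the lock is held */
        rcu_read_unlock();
    }

    int main(void)
    {
        struct svc *old = calloc(1, sizeof(*old));
        struct svc *new = calloc(1, sizeof(*new));

        rcu_register_thread();              /* liburcu readers must register */

        rcu_assign_pointer(cur_svc, old);
        account_packet();

        rcu_assign_pointer(cur_svc, new);   /* updater replaces the service */
        synchronize_rcu();                  /* wait until no reader can still see 'old' */
        printf("old svc counted %lu packet(s)\n", old->inpkts);
        free(old);

        account_packet();
        rcu_unregister_thread();
        free(new);
        return 0;
    }

In the patch itself the updater side is __ip_vs_svc_put(), which defers
the free with call_rcu() on svc->rcu_head instead of blocking in
synchronize_rcu().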

View File

@ -460,7 +460,7 @@ static inline void
__ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
atomic_inc(&svc->refcnt);
dest->svc = svc;
rcu_assign_pointer(dest->svc, svc);
}
static void ip_vs_service_free(struct ip_vs_service *svc)
@ -470,18 +470,25 @@ static void ip_vs_service_free(struct ip_vs_service *svc)
kfree(svc);
}
static void
__ip_vs_unbind_svc(struct ip_vs_dest *dest)
static void ip_vs_service_rcu_free(struct rcu_head *head)
{
struct ip_vs_service *svc = dest->svc;
struct ip_vs_service *svc;
dest->svc = NULL;
svc = container_of(head, struct ip_vs_service, rcu_head);
ip_vs_service_free(svc);
}
static void __ip_vs_svc_put(struct ip_vs_service *svc, bool do_delay)
{
if (atomic_dec_and_test(&svc->refcnt)) {
IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n",
svc->fwmark,
IP_VS_DBG_ADDR(svc->af, &svc->addr),
ntohs(svc->port));
ip_vs_service_free(svc);
if (do_delay)
call_rcu(&svc->rcu_head, ip_vs_service_rcu_free);
else
ip_vs_service_free(svc);
}
}
@ -667,11 +674,6 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
IP_VS_DBG_ADDR(svc->af, &dest->addr),
ntohs(dest->port),
atomic_read(&dest->refcnt));
/* We can not reuse dest while in grace period
* because conns still can use dest->svc
*/
if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state))
continue;
if (dest->af == svc->af &&
ip_vs_addr_equal(svc->af, &dest->addr, daddr) &&
dest->port == dport &&
@ -697,8 +699,10 @@ out:
static void ip_vs_dest_free(struct ip_vs_dest *dest)
{
struct ip_vs_service *svc = rcu_dereference_protected(dest->svc, 1);
__ip_vs_dst_cache_reset(dest);
__ip_vs_unbind_svc(dest);
__ip_vs_svc_put(svc, false);
free_percpu(dest->stats.cpustats);
kfree(dest);
}
@ -771,6 +775,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
struct ip_vs_dest_user_kern *udest, int add)
{
struct netns_ipvs *ipvs = net_ipvs(svc->net);
struct ip_vs_service *old_svc;
struct ip_vs_scheduler *sched;
int conn_flags;
@ -792,13 +797,14 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
atomic_set(&dest->conn_flags, conn_flags);
/* bind the service */
if (!dest->svc) {
old_svc = rcu_dereference_protected(dest->svc, 1);
if (!old_svc) {
__ip_vs_bind_svc(dest, svc);
} else {
if (dest->svc != svc) {
__ip_vs_unbind_svc(dest);
if (old_svc != svc) {
ip_vs_zero_stats(&dest->stats);
__ip_vs_bind_svc(dest, svc);
__ip_vs_svc_put(old_svc, true);
}
}
@ -998,16 +1004,6 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
return 0;
}
static void ip_vs_dest_wait_readers(struct rcu_head *head)
{
struct ip_vs_dest *dest = container_of(head, struct ip_vs_dest,
rcu_head);
/* End of grace period after unlinking */
clear_bit(IP_VS_DEST_STATE_REMOVING, &dest->state);
}
/*
* Delete a destination (must be already unlinked from the service)
*/
@ -1023,20 +1019,16 @@ static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest,
*/
ip_vs_rs_unhash(dest);
if (!cleanup) {
set_bit(IP_VS_DEST_STATE_REMOVING, &dest->state);
call_rcu(&dest->rcu_head, ip_vs_dest_wait_readers);
}
spin_lock_bh(&ipvs->dest_trash_lock);
IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n",
IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
atomic_read(&dest->refcnt));
if (list_empty(&ipvs->dest_trash) && !cleanup)
mod_timer(&ipvs->dest_trash_timer,
jiffies + IP_VS_DEST_TRASH_PERIOD);
jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
/* dest lives in trash without reference */
list_add(&dest->t_list, &ipvs->dest_trash);
dest->idle_start = 0;
spin_unlock_bh(&ipvs->dest_trash_lock);
ip_vs_dest_put(dest);
}
@ -1108,24 +1100,30 @@ static void ip_vs_dest_trash_expire(unsigned long data)
struct net *net = (struct net *) data;
struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_dest *dest, *next;
unsigned long now = jiffies;
spin_lock(&ipvs->dest_trash_lock);
list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) {
/* Skip if dest is in grace period */
if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state))
continue;
if (atomic_read(&dest->refcnt) > 0)
continue;
if (dest->idle_start) {
if (time_before(now, dest->idle_start +
IP_VS_DEST_TRASH_PERIOD))
continue;
} else {
dest->idle_start = max(1UL, now);
continue;
}
IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u from trash\n",
dest->vfwmark,
IP_VS_DBG_ADDR(dest->svc->af, &dest->addr),
IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port));
list_del(&dest->t_list);
ip_vs_dest_free(dest);
}
if (!list_empty(&ipvs->dest_trash))
mod_timer(&ipvs->dest_trash_timer,
jiffies + IP_VS_DEST_TRASH_PERIOD);
jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
spin_unlock(&ipvs->dest_trash_lock);
}
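With the IP_VS_DEST_STATE_REMOVING bit gone, trash expiry above keys off
dest->idle_start instead: the first pass that finds an unreferenced dest
only stamps it, and a later pass frees it once a full trash period has
elapsed, with the timer now rearmed at half the period. A toy model of
that two-step check (the types, the 120-second period and the plain time
arithmetic are simplifications; the kernel works in jiffies with
time_before()):

    /* trash_expire_demo.c: build with gcc -o trash_expire_demo trash_expire_demo.c */
    #include <stdbool.h>
    #include <stdio.h>

    #define TRASH_PERIOD 120                /* seconds, illustrative */

    struct dest {
        int refcnt;
        long idle_start;                    /* 0 means "not yet seen idle" */
    };

    /* Returns true once the entry may really be freed. */
    static bool trash_expire_one(struct dest *d, long now)
    {
        if (d->refcnt > 0)                  /* still referenced: keep it around */
            return false;
        if (!d->idle_start) {               /* first idle sighting: start the grace period */
            d->idle_start = now ? now : 1;
            return false;
        }
        return now - d->idle_start >= TRASH_PERIOD;
    }

    int main(void)
    {
        struct dest d = { .refcnt = 0, .idle_start = 0 };

        printf("t=0:   free? %d\n", trash_expire_one(&d, 0));    /* stamps idle_start */
        printf("t=60:  free? %d\n", trash_expire_one(&d, 60));   /* not a full period yet */
        printf("t=130: free? %d\n", trash_expire_one(&d, 130));  /* eligible now */
        return 0;
    }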
@ -1320,14 +1318,6 @@ out:
return ret;
}
static void ip_vs_service_rcu_free(struct rcu_head *head)
{
struct ip_vs_service *svc;
svc = container_of(head, struct ip_vs_service, rcu_head);
ip_vs_service_free(svc);
}
/*
* Delete a service from the service list
* - The service must be unlinked, unlocked and not referenced!
@ -1376,13 +1366,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup)
/*
* Free the service if nobody refers to it
*/
if (atomic_dec_and_test(&svc->refcnt)) {
IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n",
svc->fwmark,
IP_VS_DBG_ADDR(svc->af, &svc->addr),
ntohs(svc->port));
call_rcu(&svc->rcu_head, ip_vs_service_rcu_free);
}
__ip_vs_svc_put(svc, true);
/* decrease the module use count */
ip_vs_use_count_dec();

View File

@ -59,12 +59,13 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
struct ip_vs_cpu_stats __percpu *stats)
{
int i;
bool add = false;
for_each_possible_cpu(i) {
struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i);
unsigned int start;
__u64 inbytes, outbytes;
if (i) {
if (add) {
sum->conns += s->ustats.conns;
sum->inpkts += s->ustats.inpkts;
sum->outpkts += s->ustats.outpkts;
@ -76,6 +77,7 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
sum->inbytes += inbytes;
sum->outbytes += outbytes;
} else {
add = true;
sum->conns = s->ustats.conns;
sum->inpkts = s->ustats.inpkts;
sum->outpkts = s->ustats.outpkts;
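The estimator hunk above replaces the old "if (i)" test, which assumed
that CPU 0 is always possible and always iterated first, with an "add"
flag so that whichever possible CPU comes first seeds the sums. A toy
model of that loop (the possible-CPU mask, the stats layout and the stale
previous total are invented; the kernel additionally protects the 64-bit
byte counters with u64_stats_fetch_begin/retry):

    /* percpu_sum_demo.c: build with gcc -o percpu_sum_demo percpu_sum_demo.c */
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 4

    struct cpu_stats {
        unsigned long long inpkts;
    };

    /* Stand-in for for_each_possible_cpu(): CPU 0 is not in the mask,
     * which is exactly the situation the patch has to handle. */
    static const int possible_cpus[] = { 2, 3 };

    static void sum_stats(unsigned long long *sum, const struct cpu_stats *stats)
    {
        bool add = false;           /* set once the first present CPU seeded *sum */
        unsigned int i;

        for (i = 0; i < sizeof(possible_cpus) / sizeof(possible_cpus[0]); i++) {
            const struct cpu_stats *s = &stats[possible_cpus[i]];

            if (add) {
                *sum += s->inpkts;  /* later CPUs accumulate */
            } else {
                add = true;
                *sum = s->inpkts;   /* first present CPU overwrites any stale total */
            }
        }
    }

    int main(void)
    {
        struct cpu_stats stats[NR_CPUS] = { { 0 }, { 0 }, { 100 }, { 42 } };
        unsigned long long total = 999999;      /* stale result of a previous read */

        sum_stats(&total, stats);
        printf("total inpkts: %llu\n", total);  /* 142, not 999999 plus something */
        return 0;
    }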

View File

@ -93,7 +93,7 @@ struct ip_vs_lblc_entry {
struct hlist_node list;
int af; /* address family */
union nf_inet_addr addr; /* destination IP address */
struct ip_vs_dest __rcu *dest; /* real server (cache) */
struct ip_vs_dest *dest; /* real server (cache) */
unsigned long lastuse; /* last used time */
struct rcu_head rcu_head;
};
@ -130,20 +130,21 @@ static struct ctl_table vs_vars_table[] = {
};
#endif
static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
static void ip_vs_lblc_rcu_free(struct rcu_head *head)
{
struct ip_vs_dest *dest;
struct ip_vs_lblc_entry *en = container_of(head,
struct ip_vs_lblc_entry,
rcu_head);
hlist_del_rcu(&en->list);
/*
* We don't kfree dest because it is referred either by its service
* or the trash dest list.
*/
dest = rcu_dereference_protected(en->dest, 1);
ip_vs_dest_put(dest);
kfree_rcu(en, rcu_head);
ip_vs_dest_put(en->dest);
kfree(en);
}
static inline void ip_vs_lblc_del(struct ip_vs_lblc_entry *en)
{
hlist_del_rcu(&en->list);
call_rcu(&en->rcu_head, ip_vs_lblc_rcu_free);
}
/*
* Returns hash value for IPVS LBLC entry
@ -203,30 +204,23 @@ ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
struct ip_vs_lblc_entry *en;
en = ip_vs_lblc_get(dest->af, tbl, daddr);
if (!en) {
en = kmalloc(sizeof(*en), GFP_ATOMIC);
if (!en)
return NULL;
en->af = dest->af;
ip_vs_addr_copy(dest->af, &en->addr, daddr);
en->lastuse = jiffies;
ip_vs_dest_hold(dest);
RCU_INIT_POINTER(en->dest, dest);
ip_vs_lblc_hash(tbl, en);
} else {
struct ip_vs_dest *old_dest;
old_dest = rcu_dereference_protected(en->dest, 1);
if (old_dest != dest) {
ip_vs_dest_put(old_dest);
ip_vs_dest_hold(dest);
/* No ordering constraints for refcnt */
RCU_INIT_POINTER(en->dest, dest);
}
if (en) {
if (en->dest == dest)
return en;
ip_vs_lblc_del(en);
}
en = kmalloc(sizeof(*en), GFP_ATOMIC);
if (!en)
return NULL;
en->af = dest->af;
ip_vs_addr_copy(dest->af, &en->addr, daddr);
en->lastuse = jiffies;
ip_vs_dest_hold(dest);
en->dest = dest;
ip_vs_lblc_hash(tbl, en);
return en;
}
@ -246,7 +240,7 @@ static void ip_vs_lblc_flush(struct ip_vs_service *svc)
tbl->dead = 1;
for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
ip_vs_lblc_free(en);
ip_vs_lblc_del(en);
atomic_dec(&tbl->entries);
}
}
@ -281,7 +275,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
sysctl_lblc_expiration(svc)))
continue;
ip_vs_lblc_free(en);
ip_vs_lblc_del(en);
atomic_dec(&tbl->entries);
}
spin_unlock(&svc->sched_lock);
@ -335,7 +329,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
continue;
ip_vs_lblc_free(en);
ip_vs_lblc_del(en);
atomic_dec(&tbl->entries);
goal--;
}
@ -443,8 +437,8 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
continue;
doh = ip_vs_dest_conn_overhead(dest);
if (loh * atomic_read(&dest->weight) >
doh * atomic_read(&least->weight)) {
if ((__s64)loh * atomic_read(&dest->weight) >
(__s64)doh * atomic_read(&least->weight)) {
least = dest;
loh = doh;
}
@ -511,7 +505,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
* free up entries from the trash at any time.
*/
dest = rcu_dereference(en->dest);
dest = en->dest;
if ((dest->flags & IP_VS_DEST_F_AVAILABLE) &&
atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
goto out;
@ -631,7 +625,7 @@ static void __exit ip_vs_lblc_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
unregister_pernet_subsys(&ip_vs_lblc_ops);
synchronize_rcu();
rcu_barrier();
}

View File

@ -89,7 +89,7 @@
*/
struct ip_vs_dest_set_elem {
struct list_head list; /* list link */
struct ip_vs_dest __rcu *dest; /* destination server */
struct ip_vs_dest *dest; /* destination server */
struct rcu_head rcu_head;
};
@ -107,11 +107,7 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
if (check) {
list_for_each_entry(e, &set->list, list) {
struct ip_vs_dest *d;
d = rcu_dereference_protected(e->dest, 1);
if (d == dest)
/* already existed */
if (e->dest == dest)
return;
}
}
@ -121,7 +117,7 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
return;
ip_vs_dest_hold(dest);
RCU_INIT_POINTER(e->dest, dest);
e->dest = dest;
list_add_rcu(&e->list, &set->list);
atomic_inc(&set->size);
@ -129,22 +125,27 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
set->lastmod = jiffies;
}
static void ip_vs_lblcr_elem_rcu_free(struct rcu_head *head)
{
struct ip_vs_dest_set_elem *e;
e = container_of(head, struct ip_vs_dest_set_elem, rcu_head);
ip_vs_dest_put(e->dest);
kfree(e);
}
static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
struct ip_vs_dest_set_elem *e;
list_for_each_entry(e, &set->list, list) {
struct ip_vs_dest *d;
d = rcu_dereference_protected(e->dest, 1);
if (d == dest) {
if (e->dest == dest) {
/* HIT */
atomic_dec(&set->size);
set->lastmod = jiffies;
ip_vs_dest_put(dest);
list_del_rcu(&e->list);
kfree_rcu(e, rcu_head);
call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
break;
}
}
@ -155,16 +156,8 @@ static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
struct ip_vs_dest_set_elem *e, *ep;
list_for_each_entry_safe(e, ep, &set->list, list) {
struct ip_vs_dest *d;
d = rcu_dereference_protected(e->dest, 1);
/*
* We don't kfree dest because it is referred either
* by its service or by the trash dest list.
*/
ip_vs_dest_put(d);
list_del_rcu(&e->list);
kfree_rcu(e, rcu_head);
call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
}
}
@ -175,12 +168,9 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
struct ip_vs_dest *dest, *least;
int loh, doh;
if (set == NULL)
return NULL;
/* select the first destination server, whose weight > 0 */
list_for_each_entry_rcu(e, &set->list, list) {
least = rcu_dereference(e->dest);
least = e->dest;
if (least->flags & IP_VS_DEST_F_OVERLOAD)
continue;
@ -195,13 +185,13 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
/* find the destination with the weighted least load */
nextstage:
list_for_each_entry_continue_rcu(e, &set->list, list) {
dest = rcu_dereference(e->dest);
dest = e->dest;
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
doh = ip_vs_dest_conn_overhead(dest);
if ((loh * atomic_read(&dest->weight) >
doh * atomic_read(&least->weight))
if (((__s64)loh * atomic_read(&dest->weight) >
(__s64)doh * atomic_read(&least->weight))
&& (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
least = dest;
loh = doh;
@ -232,7 +222,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
/* select the first destination server, whose weight > 0 */
list_for_each_entry(e, &set->list, list) {
most = rcu_dereference_protected(e->dest, 1);
most = e->dest;
if (atomic_read(&most->weight) > 0) {
moh = ip_vs_dest_conn_overhead(most);
goto nextstage;
@ -243,11 +233,11 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
/* find the destination with the weighted most load */
nextstage:
list_for_each_entry_continue(e, &set->list, list) {
dest = rcu_dereference_protected(e->dest, 1);
dest = e->dest;
doh = ip_vs_dest_conn_overhead(dest);
/* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
if ((moh * atomic_read(&dest->weight) <
doh * atomic_read(&most->weight))
if (((__s64)moh * atomic_read(&dest->weight) <
(__s64)doh * atomic_read(&most->weight))
&& (atomic_read(&dest->weight) > 0)) {
most = dest;
moh = doh;
@ -611,8 +601,8 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
continue;
doh = ip_vs_dest_conn_overhead(dest);
if (loh * atomic_read(&dest->weight) >
doh * atomic_read(&least->weight)) {
if ((__s64)loh * atomic_read(&dest->weight) >
(__s64)doh * atomic_read(&least->weight)) {
least = dest;
loh = doh;
}
@ -819,7 +809,7 @@ static void __exit ip_vs_lblcr_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
unregister_pernet_subsys(&ip_vs_lblcr_ops);
synchronize_rcu();
rcu_barrier();
}

View File

@ -40,7 +40,7 @@
#include <net/ip_vs.h>
static inline unsigned int
static inline int
ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)
{
/*
@ -59,7 +59,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
struct ip_vs_dest *dest, *least = NULL;
unsigned int loh = 0, doh;
int loh = 0, doh;
IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
@ -92,8 +92,8 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
}
if (!least ||
(loh * atomic_read(&dest->weight) >
doh * atomic_read(&least->weight))) {
((__s64)loh * atomic_read(&dest->weight) >
(__s64)doh * atomic_read(&least->weight))) {
least = dest;
loh = doh;
}

View File

@ -44,7 +44,7 @@
#include <net/ip_vs.h>
static inline unsigned int
static inline int
ip_vs_sed_dest_overhead(struct ip_vs_dest *dest)
{
/*
@ -63,7 +63,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
struct ip_vs_dest *dest, *least;
unsigned int loh, doh;
int loh, doh;
IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
@ -99,8 +99,8 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
doh = ip_vs_sed_dest_overhead(dest);
if (loh * atomic_read(&dest->weight) >
doh * atomic_read(&least->weight)) {
if ((__s64)loh * atomic_read(&dest->weight) >
(__s64)doh * atomic_read(&least->weight)) {
least = dest;
loh = doh;
}

View File

@ -35,7 +35,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
struct ip_vs_iphdr *iph)
{
struct ip_vs_dest *dest, *least;
unsigned int loh, doh;
int loh, doh;
IP_VS_DBG(6, "ip_vs_wlc_schedule(): Scheduling...\n");
@ -71,8 +71,8 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
doh = ip_vs_dest_conn_overhead(dest);
if (loh * atomic_read(&dest->weight) >
doh * atomic_read(&least->weight)) {
if ((__s64)loh * atomic_read(&dest->weight) >
(__s64)doh * atomic_read(&least->weight)) {
least = dest;
loh = doh;
}

View File

@ -24,7 +24,7 @@
int synproxy_net_id;
EXPORT_SYMBOL_GPL(synproxy_net_id);
void
bool
synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
const struct tcphdr *th, struct synproxy_options *opts)
{
@ -32,7 +32,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
u8 buf[40], *ptr;
ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
BUG_ON(ptr == NULL);
if (ptr == NULL)
return false;
opts->options = 0;
while (length > 0) {
@ -41,16 +42,16 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
switch (opcode) {
case TCPOPT_EOL:
return;
return true;
case TCPOPT_NOP:
length--;
continue;
default:
opsize = *ptr++;
if (opsize < 2)
return;
return true;
if (opsize > length)
return;
return true;
switch (opcode) {
case TCPOPT_MSS:
@ -84,6 +85,7 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
length -= opsize;
}
}
return true;
}
EXPORT_SYMBOL_GPL(synproxy_parse_options);
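For reference, here is a simplified, self-contained userspace rendering of
the parsing loop changed above (assumptions: it only extracts MSS, works
on a plain byte buffer rather than an skb, and its NULL check stands in
for skb_header_pointer() failing on a short packet; the kernel function
also parses window scale, SACK-permitted and timestamp options):

    /* tcpopt_demo.c: build with gcc -o tcpopt_demo tcpopt_demo.c */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TCPOPT_EOL 0
    #define TCPOPT_NOP 1
    #define TCPOPT_MSS 2

    struct opts {
        bool have_mss;
        uint16_t mss;
    };

    /* Returns false only when no option block could be read at all (the
     * kernel case of skb_header_pointer() failing); readable but malformed
     * options simply end parsing, as in the patched function above. */
    static bool parse_options(const uint8_t *ptr, int length, struct opts *o)
    {
        if (ptr == NULL)
            return false;

        o->have_mss = false;
        while (length > 0) {
            int opcode = *ptr++;
            int opsize;

            switch (opcode) {
            case TCPOPT_EOL:
                return true;
            case TCPOPT_NOP:
                length--;
                continue;
            default:
                if (length < 2)         /* no room left for a length byte */
                    return true;
                opsize = *ptr++;
                if (opsize < 2)         /* silly option length */
                    return true;
                if (opsize > length)    /* option would run past the block */
                    return true;
                if (opcode == TCPOPT_MSS && opsize == 4) {
                    o->mss = (uint16_t)((ptr[0] << 8) | ptr[1]);
                    o->have_mss = true;
                }
                ptr += opsize - 2;
                length -= opsize;
            }
        }
        return true;
    }

    int main(void)
    {
        const uint8_t block[] = { TCPOPT_MSS, 4, 0x05, 0xb4, TCPOPT_NOP, TCPOPT_EOL };
        struct opts o;

        if (!parse_options(block, sizeof(block), &o))  /* kernel callers return NF_DROP here */
            return 1;
        if (o.have_mss)
            printf("MSS option: %u\n", o.mss);         /* prints 1460 */
        return 0;
    }

The bool return is what lets the SYNPROXY targets and hooks shown earlier
drop packets whose TCP header cannot be read instead of hitting the old
BUG_ON().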