Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (45 commits)
  [IPV4]: Restore multipath routing after rt_next changes.
  [XFRM] IPV6: Fix outbound RO transformation which is broken by IPsec tunnel patch.
  [NET]: Reorder fields of struct dst_entry
  [DECNET]: Convert decnet route to use the new dst_entry 'next' pointer
  [IPV6]: Convert ipv6 route to use the new dst_entry 'next' pointer
  [IPV4]: Convert ipv4 route to use the new dst_entry 'next' pointer
  [NET]: Introduce union in struct dst_entry to hold 'next' pointer
  [DECNET]: fix misannotation of linkinfo_dn
  [DECNET]: FRA_{DST,SRC} are le16 for decnet
  [UDP]: UDP can use sk_hash to speedup lookups
  [NET]: Fix whitespace errors.
  [NET] XFRM: Fix whitespace errors.
  [NET] X25: Fix whitespace errors.
  [NET] WANROUTER: Fix whitespace errors.
  [NET] UNIX: Fix whitespace errors.
  [NET] TIPC: Fix whitespace errors.
  [NET] SUNRPC: Fix whitespace errors.
  [NET] SCTP: Fix whitespace errors.
  [NET] SCHED: Fix whitespace errors.
  [NET] RXRPC: Fix whitespace errors.
  ...
Author: Linus Torvalds
Date:   2007-02-11 11:38:13 -08:00

539 changed files with 9009 additions and 9004 deletions


@@ -68,9 +68,10 @@ extern void dn_rt_cache_flush(int delay);
 struct dn_route {
     union {
         struct dst_entry dst;
-        struct dn_route *rt_next;
     } u;
 
+    struct flowi fl;
+
     __le16 rt_saddr;
     __le16 rt_daddr;
     __le16 rt_gateway;
@@ -80,8 +81,6 @@ struct dn_route {
     unsigned rt_flags;
     unsigned rt_type;
 
-    struct flowi fl;
-
 };
 
 extern void dn_route_init(void);


@@ -37,9 +37,7 @@ struct sk_buff;
 struct dst_entry
 {
-    struct dst_entry *next;
-    atomic_t __refcnt;          /* client references */
-    int __use;
+    struct rcu_head rcu_head;
     struct dst_entry *child;
     struct net_device *dev;
     short error;
@@ -50,7 +48,6 @@ struct dst_entry
 #define DST_NOPOLICY 4
 #define DST_NOHASH 8
 #define DST_BALANCED 0x10
-    unsigned long lastuse;
     unsigned long expires;
 
     unsigned short header_len;  /* more space at head required */
@@ -75,8 +72,16 @@ struct dst_entry
 #endif
 
     struct dst_ops *ops;
-    struct rcu_head rcu_head;
 
+    unsigned long lastuse;
+    atomic_t __refcnt;          /* client references */
+    int __use;
+    union {
+        struct dst_entry *next;
+        struct rtable    *rt_next;
+        struct rt6_info  *rt6_next;
+        struct dn_route  *dn_next;
+    };
     char info[0];
 };
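The hunks above are the heart of the series: the chain pointer moves out of each protocol's own union and into an anonymous union at the tail of struct dst_entry, grouped with the other write-mostly fields. The stand-alone sketch below illustrates the resulting access pattern; the struct layouts and field names are heavily simplified stand-ins, not the kernel's actual definitions.

    /* sketch.c -- simplified illustration of the dst_entry 'next' union
     * (hypothetical miniature types, not the kernel's). */
    #include <stdio.h>

    struct rtable;                      /* IPv4 cache entry (stand-in)   */
    struct rt6_info;                    /* IPv6 cache entry (stand-in)   */
    struct dn_route;                    /* DECnet cache entry (stand-in) */

    struct dst_entry {
            unsigned long lastuse;      /* write-mostly fields at the tail */
            int __use;
            union {                     /* one chain pointer, typed per protocol */
                    struct dst_entry *next;
                    struct rtable    *rt_next;
                    struct rt6_info  *rt6_next;
                    struct dn_route  *dn_next;
            };
    };

    struct rtable {
            union {
                    struct dst_entry dst;   /* the old per-protocol 'rt_next' is gone */
            } u;
            unsigned int rt_dst;            /* stand-in for the cache lookup keys */
    };

    int main(void)
    {
            struct rtable a = { .rt_dst = 1 }, b = { .rt_dst = 2 };

            a.u.dst.rt_next = &b;           /* chain through the embedded dst_entry */
            b.u.dst.rt_next = NULL;

            for (struct rtable *rth = &a; rth; rth = rth->u.dst.rt_next)
                    printf("rt_dst=%u\n", rth->rt_dst);  /* same walk as the patched code */
            return 0;
    }

Because struct dst_entry is the first member of each protocol's union, &rt->u.dst and rt refer to the same address, so generic code keeps using struct dst_entry * while protocol code walks the same chain through a correctly typed pointer.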


@@ -83,7 +83,6 @@ struct rt6_info
 {
     union {
         struct dst_entry dst;
-        struct rt6_info *next;
     } u;
 
     struct inet6_dev *rt6i_idev;


@@ -53,9 +53,11 @@ struct rtable
     union
     {
         struct dst_entry dst;
-        struct rtable *rt_next;
     } u;
 
+    /* Cache lookup keys */
+    struct flowi fl;
+
     struct in_device *idev;
 
     unsigned rt_flags;
@@ -69,9 +71,6 @@ struct rtable
     /* Info on neighbour */
     __be32 rt_gateway;
 
-    /* Cache lookup keys */
-    struct flowi fl;
-
     /* Miscellaneous cached information */
     __be32 rt_spec_dst;         /* RFC1122 specific destination */
     struct inet_peer *peer;     /* long-living peer info */


@@ -167,11 +167,11 @@ static void dn_dst_check_expire(unsigned long dummy)
         while((rt=*rtp) != NULL) {
             if (atomic_read(&rt->u.dst.__refcnt) ||
                     (now - rt->u.dst.lastuse) < expire) {
-                rtp = &rt->u.rt_next;
+                rtp = &rt->u.dst.dn_next;
                 continue;
             }
-            *rtp = rt->u.rt_next;
-            rt->u.rt_next = NULL;
+            *rtp = rt->u.dst.dn_next;
+            rt->u.dst.dn_next = NULL;
             dnrt_free(rt);
         }
         spin_unlock(&dn_rt_hash_table[i].lock);
@@ -198,11 +198,11 @@ static int dn_dst_gc(void)
         while((rt=*rtp) != NULL) {
             if (atomic_read(&rt->u.dst.__refcnt) ||
                     (now - rt->u.dst.lastuse) < expire) {
-                rtp = &rt->u.rt_next;
+                rtp = &rt->u.dst.dn_next;
                 continue;
             }
-            *rtp = rt->u.rt_next;
-            rt->u.rt_next = NULL;
+            *rtp = rt->u.dst.dn_next;
+            rt->u.dst.dn_next = NULL;
             dnrt_drop(rt);
             break;
         }
@@ -286,8 +286,8 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
     while((rth = *rthp) != NULL) {
         if (compare_keys(&rth->fl, &rt->fl)) {
             /* Put it first */
-            *rthp = rth->u.rt_next;
-            rcu_assign_pointer(rth->u.rt_next,
+            *rthp = rth->u.dst.dn_next;
+            rcu_assign_pointer(rth->u.dst.dn_next,
                                dn_rt_hash_table[hash].chain);
             rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
@@ -300,10 +300,10 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
             *rp = rth;
             return 0;
         }
-        rthp = &rth->u.rt_next;
+        rthp = &rth->u.dst.dn_next;
     }
 
-    rcu_assign_pointer(rt->u.rt_next, dn_rt_hash_table[hash].chain);
+    rcu_assign_pointer(rt->u.dst.dn_next, dn_rt_hash_table[hash].chain);
     rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
 
     dst_hold(&rt->u.dst);
@@ -326,8 +326,8 @@ void dn_run_flush(unsigned long dummy)
         goto nothing_to_declare;
 
     for(; rt; rt=next) {
-        next = rt->u.rt_next;
-        rt->u.rt_next = NULL;
+        next = rt->u.dst.dn_next;
+        rt->u.dst.dn_next = NULL;
         dst_free((struct dst_entry *)rt);
     }
@@ -1169,7 +1169,7 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
     if (!(flags & MSG_TRYHARD)) {
         rcu_read_lock_bh();
         for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt;
-            rt = rcu_dereference(rt->u.rt_next)) {
+            rt = rcu_dereference(rt->u.dst.dn_next)) {
             if ((flp->fld_dst == rt->fl.fld_dst) &&
                 (flp->fld_src == rt->fl.fld_src) &&
                 (flp->mark == rt->fl.mark) &&
@@ -1443,7 +1443,7 @@ int dn_route_input(struct sk_buff *skb)
     rcu_read_lock();
     for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
-        rt = rcu_dereference(rt->u.rt_next)) {
+        rt = rcu_dereference(rt->u.dst.dn_next)) {
         if ((rt->fl.fld_src == cb->src) &&
             (rt->fl.fld_dst == cb->dst) &&
             (rt->fl.oif == 0) &&
@@ -1627,7 +1627,7 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
         rcu_read_lock_bh();
         for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0;
             rt;
-            rt = rcu_dereference(rt->u.rt_next), idx++) {
+            rt = rcu_dereference(rt->u.dst.dn_next), idx++) {
             if (idx < s_idx)
                 continue;
             skb->dst = dst_clone(&rt->u.dst);
@@ -1673,7 +1673,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
 {
     struct dn_rt_cache_iter_state *s = rcu_dereference(seq->private);
 
-    rt = rt->u.rt_next;
+    rt = rt->u.dst.dn_next;
     while(!rt) {
         rcu_read_unlock_bh();
         if (--s->bucket < 0)
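The DECnet hunks above (and the equivalent IPv4 ones further down) keep the existing lockless-reader discipline: chains are walked under rcu_read_lock() with rcu_dereference(), and a writer links a new entry at the head with rcu_assign_pointer() only after the entry's own next pointer has been set. The self-contained sketch below models that publish order with C11 atomics as a rough stand-in for the RCU primitives; the types and names are illustrative only, not the kernel API.

    /* publish.c -- rough model of the insert-at-head / lockless-lookup pattern. */
    #include <stdatomic.h>
    #include <stdio.h>

    struct rt {
            unsigned int key;
            struct rt *_Atomic next;
    };

    static struct rt *_Atomic chain;             /* one hash-bucket head */

    static void insert_front(struct rt *rt_new)  /* writer holds the bucket lock */
    {
            /* point the new entry at the current chain first... */
            atomic_store_explicit(&rt_new->next,
                                  atomic_load_explicit(&chain, memory_order_relaxed),
                                  memory_order_relaxed);
            /* ...then publish it (the rcu_assign_pointer() step), so a concurrent
             * reader never follows a pointer into a half-initialised entry. */
            atomic_store_explicit(&chain, rt_new, memory_order_release);
    }

    static struct rt *lookup(unsigned int key)   /* reader takes no lock */
    {
            for (struct rt *p = atomic_load_explicit(&chain, memory_order_acquire);
                 p != NULL;
                 p = atomic_load_explicit(&p->next, memory_order_acquire))
                    if (p->key == key)
                            return p;
            return NULL;
    }

    int main(void)
    {
            static struct rt a = { .key = 7 }, b = { .key = 9 };
            insert_front(&a);
            insert_front(&b);
            printf("key 7 %s\n", lookup(7) ? "found" : "missing");
            return 0;
    }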


@@ -143,7 +143,7 @@ static void drr_select_route(const struct flowi *flp,
     result = NULL;
     cur_min = NULL;
     for (nh = rcu_dereference(first); nh;
-         nh = rcu_dereference(nh->u.rt_next)) {
+         nh = rcu_dereference(nh->u.dst.rt_next)) {
         if ((nh->u.dst.flags & DST_BALANCED) != 0 &&
             multipath_comparekeys(&nh->fl, flp)) {
             int nh_ifidx = nh->u.dst.dev->ifindex;


@@ -74,7 +74,7 @@ static void random_select_route(const struct flowi *flp,
     /* count all candidate */
     for (rt = rcu_dereference(first); rt;
-         rt = rcu_dereference(rt->u.rt_next)) {
+         rt = rcu_dereference(rt->u.dst.rt_next)) {
         if ((rt->u.dst.flags & DST_BALANCED) != 0 &&
             multipath_comparekeys(&rt->fl, flp))
             ++candidate_count;
@@ -90,7 +90,7 @@ static void random_select_route(const struct flowi *flp,
     /* find chosen candidate and adjust GC data for all candidates
      * to ensure they stay in cache
      */
-    for (rt = first; rt; rt = rt->u.rt_next) {
+    for (rt = first; rt; rt = rt->u.dst.rt_next) {
         if ((rt->u.dst.flags & DST_BALANCED) != 0 &&
             multipath_comparekeys(&rt->fl, flp)) {
             rt->u.dst.lastuse = jiffies;


@@ -58,7 +58,7 @@ static void rr_select_route(const struct flowi *flp,
      */
     result = NULL;
     for (nh = rcu_dereference(first); nh;
-         nh = rcu_dereference(nh->u.rt_next)) {
+         nh = rcu_dereference(nh->u.dst.rt_next)) {
         if ((nh->u.dst.flags & DST_BALANCED) != 0 &&
             multipath_comparekeys(&nh->fl, flp)) {
             nh->u.dst.lastuse = jiffies;


@@ -167,7 +167,7 @@ static void wrandom_select_route(const struct flowi *flp,
     /* collect all candidates and identify their weights */
     for (rt = rcu_dereference(first); rt;
-         rt = rcu_dereference(rt->u.rt_next)) {
+         rt = rcu_dereference(rt->u.dst.rt_next)) {
         if ((rt->u.dst.flags & DST_BALANCED) != 0 &&
             multipath_comparekeys(&rt->fl, flp)) {
             struct multipath_candidate* mpc =


@@ -289,7 +289,7 @@ static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r)
 {
     struct rt_cache_iter_state *st = rcu_dereference(seq->private);
 
-    r = r->u.rt_next;
+    r = r->u.dst.rt_next;
     while (!r) {
         rcu_read_unlock_bh();
         if (--st->bucket < 0)
@@ -512,7 +512,7 @@ static __inline__ int rt_fast_clean(struct rtable *rth)
     /* Kill broadcast/multicast entries very aggresively, if they
        collide in hash table with more useful entries */
     return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
-        rth->fl.iif && rth->u.rt_next;
+        rth->fl.iif && rth->u.dst.rt_next;
 }
 
 static __inline__ int rt_valuable(struct rtable *rth)
@@ -595,10 +595,10 @@ static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
         if (((*rthp)->u.dst.flags & DST_BALANCED) != 0 &&
             compare_keys(&(*rthp)->fl, &expentry->fl)) {
             if (*rthp == expentry) {
-                *rthp = rth->u.rt_next;
+                *rthp = rth->u.dst.rt_next;
                 continue;
             } else {
-                *rthp = rth->u.rt_next;
+                *rthp = rth->u.dst.rt_next;
                 rt_free(rth);
                 if (removed_count)
                     ++(*removed_count);
@@ -606,9 +606,9 @@ static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
         } else {
             if (!((*rthp)->u.dst.flags & DST_BALANCED) &&
                 passedexpired && !nextstep)
-                nextstep = &rth->u.rt_next;
+                nextstep = &rth->u.dst.rt_next;
 
-            rthp = &rth->u.rt_next;
+            rthp = &rth->u.dst.rt_next;
         }
     }
@@ -649,12 +649,12 @@ static void rt_check_expire(unsigned long dummy)
                 /* Entry is expired even if it is in use */
                 if (time_before_eq(now, rth->u.dst.expires)) {
                     tmo >>= 1;
-                    rthp = &rth->u.rt_next;
+                    rthp = &rth->u.dst.rt_next;
                     continue;
                 }
             } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
                 tmo >>= 1;
-                rthp = &rth->u.rt_next;
+                rthp = &rth->u.dst.rt_next;
                 continue;
             }
@@ -668,11 +668,11 @@ static void rt_check_expire(unsigned long dummy)
                 if (!rthp)
                     break;
             } else {
-                *rthp = rth->u.rt_next;
+                *rthp = rth->u.dst.rt_next;
                 rt_free(rth);
             }
 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
-            *rthp = rth->u.rt_next;
+            *rthp = rth->u.dst.rt_next;
             rt_free(rth);
 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
         }
@@ -706,7 +706,7 @@ static void rt_run_flush(unsigned long dummy)
         spin_unlock_bh(rt_hash_lock_addr(i));
 
         for (; rth; rth = next) {
-            next = rth->u.rt_next;
+            next = rth->u.dst.rt_next;
             rt_free(rth);
         }
     }
@@ -840,7 +840,7 @@ static int rt_garbage_collect(void)
             while ((rth = *rthp) != NULL) {
                 if (!rt_may_expire(rth, tmo, expire)) {
                     tmo >>= 1;
-                    rthp = &rth->u.rt_next;
+                    rthp = &rth->u.dst.rt_next;
                     continue;
                 }
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
@@ -858,12 +858,12 @@ static int rt_garbage_collect(void)
                 if (!rthp)
                     break;
             } else {
-                *rthp = rth->u.rt_next;
+                *rthp = rth->u.dst.rt_next;
                 rt_free(rth);
                 goal--;
             }
 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
-            *rthp = rth->u.rt_next;
+            *rthp = rth->u.dst.rt_next;
             rt_free(rth);
             goal--;
 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
@@ -947,13 +947,13 @@ restart:
         if (compare_keys(&rth->fl, &rt->fl)) {
 #endif
             /* Put it first */
-            *rthp = rth->u.rt_next;
+            *rthp = rth->u.dst.rt_next;
             /*
              * Since lookup is lockfree, the deletion
              * must be visible to another weakly ordered CPU before
              * the insertion at the start of the hash chain.
              */
-            rcu_assign_pointer(rth->u.rt_next,
+            rcu_assign_pointer(rth->u.dst.rt_next,
                                rt_hash_table[hash].chain);
             /*
              * Since lookup is lockfree, the update writes
@@ -983,7 +983,7 @@ restart:
         chain_length++;
 
-        rthp = &rth->u.rt_next;
+        rthp = &rth->u.dst.rt_next;
     }
 
     if (cand) {
@@ -994,7 +994,7 @@ restart:
          * only 2 entries per bucket. We will see.
          */
         if (chain_length > ip_rt_gc_elasticity) {
-            *candp = cand->u.rt_next;
+            *candp = cand->u.dst.rt_next;
             rt_free(cand);
         }
     }
@@ -1034,13 +1034,13 @@ restart:
         }
     }
 
-    rt->u.rt_next = rt_hash_table[hash].chain;
+    rt->u.dst.rt_next = rt_hash_table[hash].chain;
 #if RT_CACHE_DEBUG >= 2
-    if (rt->u.rt_next) {
+    if (rt->u.dst.rt_next) {
         struct rtable *trt;
         printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
                NIPQUAD(rt->rt_dst));
-        for (trt = rt->u.rt_next; trt; trt = trt->u.rt_next)
+        for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
             printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
         printk("\n");
     }
@@ -1117,9 +1117,9 @@ static void rt_del(unsigned hash, struct rtable *rt)
     spin_lock_bh(rt_hash_lock_addr(hash));
     ip_rt_put(rt);
     for (rthp = &rt_hash_table[hash].chain; *rthp;
-         rthp = &(*rthp)->u.rt_next)
+         rthp = &(*rthp)->u.dst.rt_next)
         if (*rthp == rt) {
-            *rthp = rt->u.rt_next;
+            *rthp = rt->u.dst.rt_next;
             rt_free(rt);
             break;
         }
@@ -1167,7 +1167,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                     rth->fl.fl4_src != skeys[i] ||
                     rth->fl.oif != ikeys[k] ||
                     rth->fl.iif != 0) {
-                    rthp = &rth->u.rt_next;
+                    rthp = &rth->u.dst.rt_next;
                     continue;
                 }
@@ -1416,7 +1416,7 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
         rcu_read_lock();
         for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-             rth = rcu_dereference(rth->u.rt_next)) {
+             rth = rcu_dereference(rth->u.dst.rt_next)) {
             if (rth->fl.fl4_dst == daddr &&
                 rth->fl.fl4_src == skeys[i] &&
                 rth->rt_dst == daddr &&
@@ -2099,7 +2099,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
     rcu_read_lock();
     for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-         rth = rcu_dereference(rth->u.rt_next)) {
+         rth = rcu_dereference(rth->u.dst.rt_next)) {
         if (rth->fl.fl4_dst == daddr &&
             rth->fl.fl4_src == saddr &&
             rth->fl.iif == iif &&
@@ -2563,7 +2563,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
     rcu_read_lock_bh();
     for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-         rth = rcu_dereference(rth->u.rt_next)) {
+         rth = rcu_dereference(rth->u.dst.rt_next)) {
         if (rth->fl.fl4_dst == flp->fl4_dst &&
             rth->fl.fl4_src == flp->fl4_src &&
             rth->fl.iif == 0 &&
@@ -2825,7 +2825,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
             s_idx = 0;
         rcu_read_lock_bh();
         for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
-             rt = rcu_dereference(rt->u.rt_next), idx++) {
+             rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
             if (idx < s_idx)
                 continue;
             skb->dst = dst_clone(&rt->u.dst);


@@ -120,7 +120,7 @@ static inline int __udp_lib_lport_inuse(__u16 num, struct hlist_head udptable[])
     struct hlist_node *node;
 
     sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)])
-        if (inet_sk(sk)->num == num)
+        if (sk->sk_hash == num)
             return 1;
     return 0;
 }
@@ -191,7 +191,7 @@ gotit:
         head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
 
         sk_for_each(sk2, node, head)
-            if (inet_sk(sk2)->num == snum &&
+            if (sk2->sk_hash == snum &&
                 sk2 != sk &&
                 (!sk2->sk_reuse || !sk->sk_reuse) &&
                 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
@@ -200,6 +200,7 @@ gotit:
                 goto fail;
     }
     inet_sk(sk)->num = snum;
+    sk->sk_hash = snum;
     if (sk_unhashed(sk)) {
         head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
         sk_add_node(sk, head);
@@ -247,7 +248,7 @@ static struct sock *__udp4_lib_lookup(__be32 saddr, __be16 sport,
     sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
         struct inet_sock *inet = inet_sk(sk);
 
-        if (inet->num == hnum && !ipv6_only_sock(sk)) {
+        if (sk->sk_hash == hnum && !ipv6_only_sock(sk)) {
             int score = (sk->sk_family == PF_INET ? 1 : 0);
             if (inet->rcv_saddr) {
                 if (inet->rcv_saddr != daddr)
@@ -296,7 +297,7 @@ static inline struct sock *udp_v4_mcast_next(struct sock *sk,
     sk_for_each_from(s, node) {
         struct inet_sock *inet = inet_sk(s);
 
-        if (inet->num != hnum ||
+        if (s->sk_hash != hnum ||
             (inet->daddr && inet->daddr != rmt_addr) ||
             (inet->dport != rmt_port && inet->dport) ||
             (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
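These UDP hunks are the "UDP can use sk_hash to speedup lookups" change from the shortlog: the bound port is cached in the generic sock (sk->sk_hash) when the socket is hashed, so the hot lookup loops can reject chain entries without first dereferencing the larger inet_sock. The stand-alone sketch below shows the idea with hypothetical miniature types, not the kernel's:

    /* skhash.c -- cache the cheap compare key in the generic node. */
    #include <stdio.h>

    #define HTABLE_SIZE 128

    struct sock_min {
            unsigned short sk_hash;          /* cached key: the bound port        */
            struct sock_min *next;
            char inet_state[64];             /* stand-in for the per-proto struct */
    };

    static struct sock_min *udp_table[HTABLE_SIZE];

    static void udp_bind(struct sock_min *sk, unsigned short port)
    {
            unsigned int slot = port & (HTABLE_SIZE - 1);

            sk->sk_hash = port;              /* set alongside the protocol's own copy */
            sk->next = udp_table[slot];
            udp_table[slot] = sk;
    }

    static struct sock_min *udp_lookup(unsigned short port)
    {
            struct sock_min *sk;

            for (sk = udp_table[port & (HTABLE_SIZE - 1)]; sk; sk = sk->next)
                    if (sk->sk_hash == port) /* cheap compare, no inet_state access */
                            return sk;
            return NULL;
    }

    int main(void)
    {
            static struct sock_min s;

            udp_bind(&s, 53);
            printf("port 53 %s\n", udp_lookup(53) ? "bound" : "free");
            return 0;
    }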


@@ -297,7 +297,7 @@ static int fib6_dump_node(struct fib6_walker_t *w)
     int res;
     struct rt6_info *rt;
 
-    for (rt = w->leaf; rt; rt = rt->u.next) {
+    for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
         res = rt6_dump_route(rt, w->args);
         if (res < 0) {
             /* Frame is full, suspend walking */
@@ -623,11 +623,11 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
             fn->leaf == &ip6_null_entry &&
             !(rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ){
             fn->leaf = rt;
-            rt->u.next = NULL;
+            rt->u.dst.rt6_next = NULL;
             goto out;
         }
 
-    for (iter = fn->leaf; iter; iter=iter->u.next) {
+    for (iter = fn->leaf; iter; iter=iter->u.dst.rt6_next) {
         /*
          *  Search for duplicates
          */
@@ -655,7 +655,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
         if (iter->rt6i_metric > rt->rt6i_metric)
             break;
 
-        ins = &iter->u.next;
+        ins = &iter->u.dst.rt6_next;
     }
 
     /*
@@ -663,7 +663,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
      */
 
 out:
-    rt->u.next = iter;
+    rt->u.dst.rt6_next = iter;
     *ins = rt;
     rt->rt6i_node = fn;
     atomic_inc(&rt->rt6i_ref);
@@ -1104,7 +1104,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
     RT6_TRACE("fib6_del_route\n");
 
     /* Unlink it */
-    *rtp = rt->u.next;
+    *rtp = rt->u.dst.rt6_next;
     rt->rt6i_node = NULL;
     rt6_stats.fib_rt_entries--;
     rt6_stats.fib_discarded_routes++;
@@ -1114,14 +1114,14 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
     FOR_WALKERS(w) {
         if (w->state == FWS_C && w->leaf == rt) {
             RT6_TRACE("walker %p adjusted by delroute\n", w);
-            w->leaf = rt->u.next;
+            w->leaf = rt->u.dst.rt6_next;
             if (w->leaf == NULL)
                 w->state = FWS_U;
         }
     }
     read_unlock(&fib6_walker_lock);
 
-    rt->u.next = NULL;
+    rt->u.dst.rt6_next = NULL;
 
     if (fn->leaf == NULL && fn->fn_flags&RTN_TL_ROOT)
         fn->leaf = &ip6_null_entry;
@@ -1189,7 +1189,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
      *  Walk the leaf entries looking for ourself
      */
 
-    for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.next) {
+    for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.dst.rt6_next) {
         if (*rtp == rt) {
             fib6_del_route(fn, rtp, info);
             return 0;
@@ -1316,7 +1316,7 @@ static int fib6_clean_node(struct fib6_walker_t *w)
     struct rt6_info *rt;
    struct fib6_cleaner_t *c = (struct fib6_cleaner_t*)w;
 
-    for (rt = w->leaf; rt; rt = rt->u.next) {
+    for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
         res = c->func(rt, c->arg);
         if (res < 0) {
             w->leaf = rt;


@@ -243,7 +243,7 @@ static __inline__ struct rt6_info *rt6_device_match(struct rt6_info *rt,
     struct rt6_info *sprt;
 
     if (oif) {
-        for (sprt = rt; sprt; sprt = sprt->u.next) {
+        for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) {
             struct net_device *dev = sprt->rt6i_dev;
             if (dev->ifindex == oif)
                 return sprt;
@@ -376,7 +376,7 @@ static struct rt6_info *rt6_select(struct rt6_info **head, int oif,
     for (rt = rt0, metric = rt0->rt6i_metric;
          rt && rt->rt6i_metric == metric && (!last || rt != rt0);
-         rt = rt->u.next) {
+         rt = rt->u.dst.rt6_next) {
         int m;
 
         if (rt6_check_expired(rt))
@@ -404,9 +404,9 @@ static struct rt6_info *rt6_select(struct rt6_info **head, int oif,
         /* no entries matched; do round-robin */
         static DEFINE_SPINLOCK(lock);
         spin_lock(&lock);
-        *head = rt0->u.next;
-        rt0->u.next = last->u.next;
-        last->u.next = rt0;
+        *head = rt0->u.dst.rt6_next;
+        rt0->u.dst.rt6_next = last->u.dst.rt6_next;
+        last->u.dst.rt6_next = rt0;
         spin_unlock(&lock);
     }
@@ -1278,7 +1278,7 @@ static int ip6_route_del(struct fib6_config *cfg)
              &cfg->fc_src, cfg->fc_src_len);
 
     if (fn) {
-        for (rt = fn->leaf; rt; rt = rt->u.next) {
+        for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
             if (cfg->fc_ifindex &&
                 (rt->rt6i_dev == NULL ||
                  rt->rt6i_dev->ifindex != cfg->fc_ifindex))
@@ -1329,7 +1329,7 @@ static struct rt6_info *__ip6_route_redirect(struct fib6_table *table,
     read_lock_bh(&table->tb6_lock);
     fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
 restart:
-    for (rt = fn->leaf; rt; rt = rt->u.next) {
+    for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
         /*
          * Current route is on-link; redirect is always invalid.
          *
@@ -1590,7 +1590,7 @@ static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixle
     if (!fn)
         goto out;
 
-    for (rt = fn->leaf; rt; rt = rt->u.next) {
+    for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
         if (rt->rt6i_dev->ifindex != ifindex)
             continue;
         if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
@@ -1641,7 +1641,7 @@ struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *d
         return NULL;
 
     write_lock_bh(&table->tb6_lock);
-    for (rt = table->tb6_root.leaf; rt; rt=rt->u.next) {
+    for (rt = table->tb6_root.leaf; rt; rt=rt->u.dst.rt6_next) {
         if (dev == rt->rt6i_dev &&
             ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
             ipv6_addr_equal(&rt->rt6i_gateway, addr))
@@ -1684,7 +1684,7 @@ void rt6_purge_dflt_routers(void)
 restart:
     read_lock_bh(&table->tb6_lock);
-    for (rt = table->tb6_root.leaf; rt; rt = rt->u.next) {
+    for (rt = table->tb6_root.leaf; rt; rt = rt->u.dst.rt6_next) {
         if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
             dst_hold(&rt->u.dst);
             read_unlock_bh(&table->tb6_lock);
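The rt6_select() hunk above rotates a run of equal-metric routes by splicing the current head to just after the last candidate, so the next lookup starts round-robin from the following entry. A minimal sketch of that three-pointer splice on a plain singly linked list (simplified types, not the kernel's rt6_info):

    /* rr_rotate.c -- the round-robin splice from rt6_select(), in miniature. */
    #include <stdio.h>

    struct rt6_min {
            int id;
            struct rt6_min *rt6_next;
    };

    /* Precondition (as in the kernel code): rt0 == *head and 'last' is reachable
     * from rt0; the kernel performs this splice under a spinlock. */
    static void rotate(struct rt6_min **head, struct rt6_min *rt0, struct rt6_min *last)
    {
            *head = rt0->rt6_next;            /* new head: the entry after rt0     */
            rt0->rt6_next = last->rt6_next;   /* rt0 inherits last's old successor */
            last->rt6_next = rt0;             /* and is re-linked right after last */
    }

    int main(void)
    {
            struct rt6_min c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
            struct rt6_min *head = &a;

            rotate(&head, &a, &c);            /* a -> b -> c  becomes  b -> c -> a */
            for (struct rt6_min *p = head; p; p = p->rt6_next)
                    printf("%d ", p->id);
            printf("\n");                     /* prints: 2 3 1 */
            return 0;
    }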


@@ -71,7 +71,7 @@ static struct sock *__udp6_lib_lookup(struct in6_addr *saddr, __be16 sport,
     sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
         struct inet_sock *inet = inet_sk(sk);
 
-        if (inet->num == hnum && sk->sk_family == PF_INET6) {
+        if (sk->sk_hash == hnum && sk->sk_family == PF_INET6) {
             struct ipv6_pinfo *np = inet6_sk(sk);
             int score = 0;
             if (inet->dport) {
@@ -309,7 +309,7 @@ static struct sock *udp_v6_mcast_next(struct sock *sk,
     sk_for_each_from(s, node) {
         struct inet_sock *inet = inet_sk(s);
 
-        if (inet->num == num && s->sk_family == PF_INET6) {
+        if (s->sk_hash == num && s->sk_family == PF_INET6) {
             struct ipv6_pinfo *np = inet6_sk(s);
             if (inet->dport) {
                 if (inet->dport != rmt_port)


@@ -178,7 +178,8 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
         __xfrm6_bundle_len_inc(&header_len, &nfheader_len, xfrm[i]);
         trailer_len += xfrm[i]->props.trailer_len;
 
-        if (xfrm[i]->props.mode == XFRM_MODE_TUNNEL) {
+        if (xfrm[i]->props.mode == XFRM_MODE_TUNNEL ||
+            xfrm[i]->props.mode == XFRM_MODE_ROUTEOPTIMIZATION) {
             unsigned short encap_family = xfrm[i]->props.family;
             switch(encap_family) {
             case AF_INET:
@@ -186,8 +187,9 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
             fl_tunnel.fl4_src = xfrm[i]->props.saddr.a4;
             break;
         case AF_INET6:
-            ipv6_addr_copy(&fl_tunnel.fl6_dst, (struct in6_addr*)&xfrm[i]->id.daddr.a6);
-            ipv6_addr_copy(&fl_tunnel.fl6_src, (struct in6_addr*)&xfrm[i]->props.saddr.a6);
+            ipv6_addr_copy(&fl_tunnel.fl6_dst, __xfrm6_bundle_addr_remote(xfrm[i], &fl->fl6_dst));
+
+            ipv6_addr_copy(&fl_tunnel.fl6_src, __xfrm6_bundle_addr_remote(xfrm[i], &fl->fl6_src));
             break;
         default:
             BUG_ON(1);