/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "sysfs.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
#include "originator.h"
#include "translation-table.h"
#include "routing.h"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>

/* This is the offset of the options field in a dhcp packet starting at
 * the beginning of the dhcp header
 */
#define BATADV_DHCP_OPTIONS_OFFSET 240
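/* DHCP message type "DHCPREQUEST" (value of option 53, see RFC 2132) */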
#define BATADV_DHCP_REQUEST 3
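
/**
 * batadv_gw_node_free_ref - decrement the gw_node refcounter and possibly
 *  release it
 * @gw_node: gateway node to free
 */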
static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
{
	if (atomic_dec_and_test(&gw_node->refcount))
		kfree_rcu(gw_node, rcu);
}
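
/**
 * batadv_gw_get_selected_gw_node - get the currently selected gateway node
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Returns the selected gateway node (with its refcount increased) or NULL if
 * no gateway is selected. The caller has to release the reference again.
 */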
static struct batadv_gw_node *
batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
{
	struct batadv_gw_node *gw_node;

	rcu_read_lock();
	gw_node = rcu_dereference(bat_priv->gw.curr_gw);
	if (!gw_node)
		goto out;

	if (!atomic_inc_not_zero(&gw_node->refcount))
		gw_node = NULL;

out:
	rcu_read_unlock();
	return gw_node;
}
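
/**
 * batadv_gw_get_selected_orig - get the originator of the selected gateway
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Returns the orig_node of the currently selected gateway (with its refcount
 * increased) or NULL if no gateway is selected.
 */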
struct batadv_orig_node *
batadv_gw_get_selected_orig(struct batadv_priv *bat_priv)
{
	struct batadv_gw_node *gw_node;
	struct batadv_orig_node *orig_node = NULL;

	gw_node = batadv_gw_get_selected_gw_node(bat_priv);
	if (!gw_node)
		goto out;

	rcu_read_lock();
	orig_node = gw_node->orig_node;
	if (!orig_node)
		goto unlock;

	if (!atomic_inc_not_zero(&orig_node->refcount))
		orig_node = NULL;

unlock:
	rcu_read_unlock();
out:
	if (gw_node)
		batadv_gw_node_free_ref(gw_node);
	return orig_node;
}
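
/**
 * batadv_gw_select - replace the currently selected gateway node
 * @bat_priv: the bat priv with all the soft interface information
 * @new_gw_node: the new gateway node (may be NULL to select none)
 */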
static void batadv_gw_select(struct batadv_priv *bat_priv,
			     struct batadv_gw_node *new_gw_node)
{
	struct batadv_gw_node *curr_gw_node;

	spin_lock_bh(&bat_priv->gw.list_lock);

	if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
		new_gw_node = NULL;

	curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1);
	rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node);

	if (curr_gw_node)
		batadv_gw_node_free_ref(curr_gw_node);

	spin_unlock_bh(&bat_priv->gw.list_lock);
}
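
/**
 * batadv_gw_deselect - request a new gateway election
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Sets the reselect flag; the actual re-election is performed by
 * batadv_gw_election() the next time it runs.
 */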
void batadv_gw_deselect(struct batadv_priv *bat_priv)
{
	atomic_set(&bat_priv->gw.reselect, 1);
}
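
/**
 * batadv_gw_get_best_gw_node - pick the best gateway from the gateway list
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Selection depends on the configured gw_sel_class: class 1 ("fast
 * connection") ranks gateways by
 *   tq_avg * tq_avg * bandwidth_down * 100 * 100 /
 *     (BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE * 64)
 * i.e. link quality squared weighted by the advertised download bandwidth,
 * while all other classes simply pick the gateway with the best tq_avg.
 *
 * Returns the chosen gateway node (with its refcount increased) or NULL.
 */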
static struct batadv_gw_node *
batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
{
	struct batadv_neigh_node *router;
	struct batadv_gw_node *gw_node, *curr_gw = NULL;
	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
	uint32_t gw_divisor;
	uint8_t max_tq = 0;
	uint8_t tq_avg;
	struct batadv_orig_node *orig_node;

	gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
	gw_divisor *= 64;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
		if (gw_node->deleted)
			continue;

		orig_node = gw_node->orig_node;
		router = batadv_orig_node_get_router(orig_node);
		if (!router)
			continue;

		if (!atomic_inc_not_zero(&gw_node->refcount))
			goto next;

		tq_avg = router->bat_iv.tq_avg;

		switch (atomic_read(&bat_priv->gw_sel_class)) {
		case 1: /* fast connection */
			tmp_gw_factor = tq_avg * tq_avg;
			tmp_gw_factor *= gw_node->bandwidth_down;
			tmp_gw_factor *= 100 * 100;
			tmp_gw_factor /= gw_divisor;

			if ((tmp_gw_factor > max_gw_factor) ||
			    ((tmp_gw_factor == max_gw_factor) &&
			     (tq_avg > max_tq))) {
				if (curr_gw)
					batadv_gw_node_free_ref(curr_gw);
				curr_gw = gw_node;
				atomic_inc(&curr_gw->refcount);
			}
			break;

		default: /* 2:  stable connection (use best statistic)
			  * 3:  fast-switch (use best statistic but change as
			  *     soon as a better gateway appears)
			  * XX: late-switch (use best statistic but change as
			  *     soon as a better gateway appears which has
			  *     $routing_class more tq points)
			  */
			if (tq_avg > max_tq) {
				if (curr_gw)
					batadv_gw_node_free_ref(curr_gw);
				curr_gw = gw_node;
				atomic_inc(&curr_gw->refcount);
			}
			break;
		}

		if (tq_avg > max_tq)
			max_tq = tq_avg;

		if (tmp_gw_factor > max_gw_factor)
			max_gw_factor = tmp_gw_factor;

		batadv_gw_node_free_ref(gw_node);

next:
		batadv_neigh_node_free_ref(router);
	}
	rcu_read_unlock();

	return curr_gw;
}

/**
 * batadv_gw_check_client_stop - check if client mode has been switched off
 * @bat_priv: the bat priv with all the soft interface information
 *
 * This function assumes the caller has checked that the gw state *is actually
 * changing*. This function is not supposed to be called when there is no state
 * change.
 */
void batadv_gw_check_client_stop(struct batadv_priv *bat_priv)
{
	struct batadv_gw_node *curr_gw;

	if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
		return;

	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
	if (!curr_gw)
		return;

	/* if batman-adv is switching the gw client mode off and a gateway was
	 * already selected, send a DEL uevent
	 */
	batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL, NULL);

	batadv_gw_node_free_ref(curr_gw);
}
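
/**
 * batadv_gw_election - (re)select the best gateway if a re-election is due
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Runs only in GW client mode. If the reselect flag is set (or no gateway is
 * currently selected), picks the best gateway, emits the corresponding
 * ADD/CHANGE/DEL uevent and switches the selected gateway over to it.
 */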
void batadv_gw_election(struct batadv_priv *bat_priv)
{
	struct batadv_gw_node *curr_gw = NULL, *next_gw = NULL;
	struct batadv_neigh_node *router = NULL;
	char gw_addr[18] = { '\0' };

	if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
		goto out;

	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);

	if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw)
		goto out;

	next_gw = batadv_gw_get_best_gw_node(bat_priv);

	if (curr_gw == next_gw)
		goto out;

	if (next_gw) {
		sprintf(gw_addr, "%pM", next_gw->orig_node->orig);

		router = batadv_orig_node_get_router(next_gw->orig_node);
		if (!router) {
			batadv_gw_deselect(bat_priv);
			goto out;
		}
	}

	if ((curr_gw) && (!next_gw)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Removing selected gateway - no gateway in range\n");
		batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL,
				    NULL);
	} else if ((!curr_gw) && (next_gw)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Adding route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n",
			   next_gw->orig_node->orig,
			   next_gw->bandwidth_down / 10,
			   next_gw->bandwidth_down % 10,
			   next_gw->bandwidth_up / 10,
			   next_gw->bandwidth_up % 10, router->bat_iv.tq_avg);
		batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD,
				    gw_addr);
	} else {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Changing route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n",
			   next_gw->orig_node->orig,
			   next_gw->bandwidth_down / 10,
			   next_gw->bandwidth_down % 10,
			   next_gw->bandwidth_up / 10,
			   next_gw->bandwidth_up % 10, router->bat_iv.tq_avg);
		batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_CHANGE,
				    gw_addr);
	}

	batadv_gw_select(bat_priv, next_gw);

out:
	if (curr_gw)
		batadv_gw_node_free_ref(curr_gw);
	if (next_gw)
		batadv_gw_node_free_ref(next_gw);
	if (router)
		batadv_neigh_node_free_ref(router);
}
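
/**
 * batadv_gw_check_election - restart the election if a better gateway appears
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: originator to compare against the currently selected gateway
 *
 * Triggers batadv_gw_deselect() when @orig_node offers a better TQ than the
 * gateway in use (respecting the extra margin required by gw_sel_class > 3).
 */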
void batadv_gw_check_election(struct batadv_priv *bat_priv,
			      struct batadv_orig_node *orig_node)
{
	struct batadv_orig_node *curr_gw_orig;
	struct batadv_neigh_node *router_gw = NULL, *router_orig = NULL;
	uint8_t gw_tq_avg, orig_tq_avg;

	curr_gw_orig = batadv_gw_get_selected_orig(bat_priv);
	if (!curr_gw_orig)
		goto deselect;

	router_gw = batadv_orig_node_get_router(curr_gw_orig);
	if (!router_gw)
		goto deselect;

	/* this node already is the gateway */
	if (curr_gw_orig == orig_node)
		goto out;

	router_orig = batadv_orig_node_get_router(orig_node);
	if (!router_orig)
		goto out;

	gw_tq_avg = router_gw->bat_iv.tq_avg;
	orig_tq_avg = router_orig->bat_iv.tq_avg;

	/* the TQ value has to be better */
	if (orig_tq_avg < gw_tq_avg)
		goto out;

	/* if the routing class is greater than 3 the value tells us how much
	 * greater the TQ value of the new gateway must be
	 */
	if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
	    (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
		goto out;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
		   gw_tq_avg, orig_tq_avg);

deselect:
	batadv_gw_deselect(bat_priv);
out:
	if (curr_gw_orig)
		batadv_orig_node_free_ref(curr_gw_orig);
	if (router_gw)
		batadv_neigh_node_free_ref(router_gw);
	if (router_orig)
		batadv_neigh_node_free_ref(router_orig);

	return;
}

/**
 * batadv_gw_node_add - add gateway node to list of available gateways
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: originator announcing gateway capabilities
 * @gateway: announced bandwidth information
 */
static void batadv_gw_node_add(struct batadv_priv *bat_priv,
			       struct batadv_orig_node *orig_node,
			       struct batadv_tvlv_gateway_data *gateway)
{
	struct batadv_gw_node *gw_node;

	if (gateway->bandwidth_down == 0)
		return;

	gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
	if (!gw_node)
		return;

	INIT_HLIST_NODE(&gw_node->list);
	gw_node->orig_node = orig_node;
	atomic_set(&gw_node->refcount, 1);

	spin_lock_bh(&bat_priv->gw.list_lock);
	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
	spin_unlock_bh(&bat_priv->gw.list_lock);

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
		   orig_node->orig,
		   ntohl(gateway->bandwidth_down) / 10,
		   ntohl(gateway->bandwidth_down) % 10,
		   ntohl(gateway->bandwidth_up) / 10,
		   ntohl(gateway->bandwidth_up) % 10);
}

/**
 * batadv_gw_node_get - retrieve gateway node from list of available gateways
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: originator announcing gateway capabilities
 *
 * Returns gateway node if found or NULL otherwise.
 */
static struct batadv_gw_node *
batadv_gw_node_get(struct batadv_priv *bat_priv,
		   struct batadv_orig_node *orig_node)
{
	struct batadv_gw_node *gw_node_tmp, *gw_node = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node_tmp, &bat_priv->gw.list, list) {
		if (gw_node_tmp->orig_node != orig_node)
			continue;

		if (gw_node_tmp->deleted)
			continue;

		if (!atomic_inc_not_zero(&gw_node_tmp->refcount))
			continue;

		gw_node = gw_node_tmp;
		break;
	}
	rcu_read_unlock();

	return gw_node;
}

/**
 * batadv_gw_node_update - update list of available gateways with changed
 *  bandwidth information
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: originator announcing gateway capabilities
 * @gateway: announced bandwidth information
 */
void batadv_gw_node_update(struct batadv_priv *bat_priv,
			   struct batadv_orig_node *orig_node,
			   struct batadv_tvlv_gateway_data *gateway)
{
	struct batadv_gw_node *gw_node, *curr_gw = NULL;

	gw_node = batadv_gw_node_get(bat_priv, orig_node);
	if (!gw_node) {
		batadv_gw_node_add(bat_priv, orig_node, gateway);
		goto out;
	}

	if ((gw_node->bandwidth_down == ntohl(gateway->bandwidth_down)) &&
	    (gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)))
		goto out;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Gateway bandwidth of originator %pM changed from %u.%u/%u.%u MBit to %u.%u/%u.%u MBit\n",
		   orig_node->orig,
		   gw_node->bandwidth_down / 10,
		   gw_node->bandwidth_down % 10,
		   gw_node->bandwidth_up / 10,
		   gw_node->bandwidth_up % 10,
		   ntohl(gateway->bandwidth_down) / 10,
		   ntohl(gateway->bandwidth_down) % 10,
		   ntohl(gateway->bandwidth_up) / 10,
		   ntohl(gateway->bandwidth_up) % 10);

	gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
	gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);

	gw_node->deleted = 0;
	if (ntohl(gateway->bandwidth_down) == 0) {
		gw_node->deleted = jiffies;
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Gateway %pM removed from gateway list\n",
			   orig_node->orig);

		/* Note: We don't need a NULL check here, since curr_gw never
		 * gets dereferenced.
		 */
		curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
		if (gw_node == curr_gw)
			batadv_gw_deselect(bat_priv);
	}

out:
	if (curr_gw)
		batadv_gw_node_free_ref(curr_gw);
	if (gw_node)
		batadv_gw_node_free_ref(gw_node);
}
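
/**
 * batadv_gw_node_delete - remove a gateway from the list of known gateways
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: originator announcing gateway capabilities
 *
 * Implemented as an update announcing zero bandwidth.
 */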
void batadv_gw_node_delete(struct batadv_priv *bat_priv,
			   struct batadv_orig_node *orig_node)
{
	struct batadv_tvlv_gateway_data gateway;

	gateway.bandwidth_down = 0;
	gateway.bandwidth_up = 0;

	batadv_gw_node_update(bat_priv, orig_node, &gateway);
}
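
/**
 * batadv_gw_node_purge - remove deleted or timed out gateways from the list
 * @bat_priv: the bat priv with all the soft interface information
 */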
void batadv_gw_node_purge(struct batadv_priv *bat_priv)
{
	struct batadv_gw_node *gw_node, *curr_gw;
	struct hlist_node *node_tmp;
	unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT);
	int do_deselect = 0;

	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);

	spin_lock_bh(&bat_priv->gw.list_lock);

	hlist_for_each_entry_safe(gw_node, node_tmp,
				  &bat_priv->gw.list, list) {
		if (((!gw_node->deleted) ||
		     (time_before(jiffies, gw_node->deleted + timeout))) &&
		    atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
			continue;

		if (curr_gw == gw_node)
			do_deselect = 1;

		hlist_del_rcu(&gw_node->list);
		batadv_gw_node_free_ref(gw_node);
	}

	spin_unlock_bh(&bat_priv->gw.list_lock);

	/* gw_deselect() needs to acquire the gw_list_lock */
	if (do_deselect)
		batadv_gw_deselect(bat_priv);

	if (curr_gw)
		batadv_gw_node_free_ref(curr_gw);
}
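
/* Print a single gateway entry to the seq file. */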
/* fails if orig_node has no router */
static int batadv_write_buffer_text(struct batadv_priv *bat_priv,
				    struct seq_file *seq,
				    const struct batadv_gw_node *gw_node)
{
	struct batadv_gw_node *curr_gw;
	struct batadv_neigh_node *router;
	int ret = -1;

	router = batadv_orig_node_get_router(gw_node->orig_node);
	if (!router)
		goto out;

	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);

	ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n",
			 (curr_gw == gw_node ? "=>" : " "),
			 gw_node->orig_node->orig,
			 router->bat_iv.tq_avg, router->addr,
			 router->if_incoming->net_dev->name,
			 gw_node->bandwidth_down / 10,
			 gw_node->bandwidth_down % 10,
			 gw_node->bandwidth_up / 10,
			 gw_node->bandwidth_up % 10);

	batadv_neigh_node_free_ref(router);
	if (curr_gw)
		batadv_gw_node_free_ref(curr_gw);
out:
	return ret;
}
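
/**
 * batadv_gw_client_seq_print_text - print the gateway list to a seq file
 * @seq: seq file to print on
 * @offset: unused
 */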
int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;
	struct batadv_gw_node *gw_node;
	int gw_count = 0;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	seq_printf(seq,
		   " %-12s (%s/%i) %17s [%10s]: advertised uplink bandwidth ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
		   "Gateway", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name);

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
		if (gw_node->deleted)
			continue;

		/* fails if orig_node has no router */
		if (batadv_write_buffer_text(bat_priv, seq, gw_node) < 0)
			continue;

		gw_count++;
	}
	rcu_read_unlock();

	if (gw_count == 0)
		seq_puts(seq, "No gateways in range ...\n");

out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return 0;
}
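
/* Walk the DHCP options (RFC 2132 layout: type octet, length octet, data)
 * starting BATADV_DHCP_OPTIONS_OFFSET bytes into the DHCP header and return
 * true if the message type option (53) carries DHCPREQUEST.
 */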
/* this call might reallocate skb data */
static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
{
	int ret = false;
	unsigned char *p;
	int pkt_len;

	if (skb_linearize(skb) < 0)
		goto out;

	pkt_len = skb_headlen(skb);

	if (pkt_len < header_len + BATADV_DHCP_OPTIONS_OFFSET + 1)
		goto out;

	p = skb->data + header_len + BATADV_DHCP_OPTIONS_OFFSET;
	pkt_len -= header_len + BATADV_DHCP_OPTIONS_OFFSET + 1;

	/* Access the dhcp option lists. Each entry is made up by:
	 * - octet 1: option type
	 * - octet 2: option data len (only if type != 255 and 0)
	 * - octet 3: option data
	 */
	while (*p != 255 && !ret) {
		/* p now points to the first octet: option type */
		if (*p == 53) {
			/* type 53 is the message type option.
			 * Jump the len octet and go to the data octet
			 */
			if (pkt_len < 2)
				goto out;
			p += 2;

			/* check if the message type is what we need */
			if (*p == BATADV_DHCP_REQUEST)
				ret = true;
			break;
		} else if (*p == 0) {
			/* option type 0 (padding), just go forward */
			if (pkt_len < 1)
				goto out;
			pkt_len--;
			p++;
		} else {
			/* This is any other option. So we get the length... */
			if (pkt_len < 1)
				goto out;
			pkt_len--;
			p++;

			/* ...and then we jump over the data */
			if (pkt_len < 1 + (*p))
				goto out;
			pkt_len -= 1 + (*p);
			p += 1 + (*p);
		}
	}
out:
	return ret;
}
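
/* Parse the Ethernet, optional VLAN, IPv4/IPv6 and UDP headers, advance
 * *header_len past them and check whether the packet is addressed to a DHCP
 * server port (67 for IPv4, 547 for DHCPv6).
 */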
/* this call might reallocate skb data */
bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
{
	struct ethhdr *ethhdr;
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct udphdr *udphdr;
	struct vlan_ethhdr *vhdr;
	__be16 proto;

	/* check for ethernet header */
	if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
		return false;
	ethhdr = (struct ethhdr *)skb->data;
	proto = ethhdr->h_proto;
	*header_len += ETH_HLEN;

	/* check for initial vlan header */
	if (proto == htons(ETH_P_8021Q)) {
		if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
			return false;

		vhdr = (struct vlan_ethhdr *)skb->data;
		proto = vhdr->h_vlan_encapsulated_proto;
		*header_len += VLAN_HLEN;
	}

	/* check for ip header */
	switch (proto) {
	case htons(ETH_P_IP):
		if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
			return false;
		iphdr = (struct iphdr *)(skb->data + *header_len);
		*header_len += iphdr->ihl * 4;

		/* check for udp header */
		if (iphdr->protocol != IPPROTO_UDP)
			return false;

		break;
	case htons(ETH_P_IPV6):
		if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
			return false;
		ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
		*header_len += sizeof(*ipv6hdr);

		/* check for udp header */
		if (ipv6hdr->nexthdr != IPPROTO_UDP)
			return false;

		break;
	default:
		return false;
	}

	if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
		return false;

	/* skb->data might have been reallocated by pskb_may_pull() */
	ethhdr = (struct ethhdr *)skb->data;
	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
		ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);

	udphdr = (struct udphdr *)(skb->data + *header_len);
	*header_len += sizeof(*udphdr);

	/* check for bootp port */
	if ((proto == htons(ETH_P_IP)) &&
	    (udphdr->dest != htons(67)))
		return false;

	if ((proto == htons(ETH_P_IPV6)) &&
	    (udphdr->dest != htons(547)))
		return false;

	return true;
}

/**
 * batadv_gw_out_of_range - check if the dhcp request destination is the best gw
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the outgoing packet
 *
 * Check if the skb is a DHCP request and if it is sent to the current best GW
 * server. Due to topology changes it may be the case that the GW server
 * previously selected is not the best one anymore.
 *
 * Returns true if the packet destination is unicast and it is not the best gw,
 * false otherwise.
 *
 * This call might reallocate skb data.
 */
bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
			    struct sk_buff *skb)
{
	struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
	struct batadv_orig_node *orig_dst_node = NULL;
	struct batadv_gw_node *gw_node = NULL, *curr_gw = NULL;
	struct ethhdr *ethhdr;
	bool ret, out_of_range = false;
	unsigned int header_len = 0;
	uint8_t curr_tq_avg;
	unsigned short vid;

	vid = batadv_get_vid(skb, 0);

	ret = batadv_gw_is_dhcp_target(skb, &header_len);
	if (!ret)
		goto out;

	ethhdr = (struct ethhdr *)skb->data;
	orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
						 ethhdr->h_dest, vid);
	if (!orig_dst_node)
		goto out;

	gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
	if (!gw_node)
		goto out;

	ret = batadv_is_type_dhcprequest(skb, header_len);
	if (!ret)
		goto out;

	switch (atomic_read(&bat_priv->gw_mode)) {
	case BATADV_GW_MODE_SERVER:
		/* If we are a GW then we are our best GW. We can artificially
		 * set the tq towards ourself as the maximum value
		 */
		curr_tq_avg = BATADV_TQ_MAX_VALUE;
		break;
	case BATADV_GW_MODE_CLIENT:
		curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
		if (!curr_gw)
			goto out;

		/* packet is going to our gateway */
		if (curr_gw->orig_node == orig_dst_node)
			goto out;

		/* If the dhcp packet has been sent to a different gw,
		 * we have to evaluate whether the old gw is still
		 * reliable enough
		 */
		neigh_curr = batadv_find_router(bat_priv, curr_gw->orig_node,
						NULL);
		if (!neigh_curr)
			goto out;

		curr_tq_avg = neigh_curr->bat_iv.tq_avg;
		break;
	case BATADV_GW_MODE_OFF:
	default:
		goto out;
	}

	neigh_old = batadv_find_router(bat_priv, orig_dst_node, NULL);
	if (!neigh_old)
		goto out;

	if (curr_tq_avg - neigh_old->bat_iv.tq_avg > BATADV_GW_THRESHOLD)
		out_of_range = true;

out:
	if (orig_dst_node)
		batadv_orig_node_free_ref(orig_dst_node);
	if (curr_gw)
		batadv_gw_node_free_ref(curr_gw);
	if (gw_node)
		batadv_gw_node_free_ref(gw_node);
	if (neigh_old)
		batadv_neigh_node_free_ref(neigh_old);
	if (neigh_curr)
		batadv_neigh_node_free_ref(neigh_curr);
	return out_of_range;
}