commit b8cec4a415
Merge branch 'batman-adv/next' of git://git.open-mesh.org/ecsv/linux-merge
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -35,7 +35,7 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
int packet_len,
unsigned long send_time,
bool directlink,
-struct batman_if *if_incoming,
+struct hard_iface *if_incoming,
struct forw_packet *forw_packet)
{
struct batman_packet *batman_packet =
@@ -99,7 +99,7 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
/* create a new aggregated packet and add this packet to it */
static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
unsigned long send_time, bool direct_link,
-struct batman_if *if_incoming,
+struct hard_iface *if_incoming,
int own_packet)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
@@ -188,7 +188,7 @@ static void aggregate(struct forw_packet *forw_packet_aggr,

void add_bat_packet_to_list(struct bat_priv *bat_priv,
unsigned char *packet_buff, int packet_len,
-struct batman_if *if_incoming, char own_packet,
+struct hard_iface *if_incoming, char own_packet,
unsigned long send_time)
{
/**
@@ -247,7 +247,7 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,

/* unpack the aggregated packets and process them one by one */
void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
-int packet_len, struct batman_if *if_incoming)
+int packet_len, struct hard_iface *if_incoming)
{
struct batman_packet *batman_packet;
int buff_pos = 0;
--- a/net/batman-adv/aggregation.h
+++ b/net/batman-adv/aggregation.h
@@ -35,9 +35,9 @@ static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna)

void add_bat_packet_to_list(struct bat_priv *bat_priv,
unsigned char *packet_buff, int packet_len,
-struct batman_if *if_incoming, char own_packet,
+struct hard_iface *if_incoming, char own_packet,
unsigned long send_time);
void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
-int packet_len, struct batman_if *if_incoming);
+int packet_len, struct hard_iface *if_incoming);

#endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -441,16 +441,16 @@ static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
char *buff)
{
struct net_device *net_dev = kobj_to_netdev(kobj);
-struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
ssize_t length;

-if (!batman_if)
+if (!hard_iface)
return 0;

-length = sprintf(buff, "%s\n", batman_if->if_status == IF_NOT_IN_USE ?
-"none" : batman_if->soft_iface->name);
+length = sprintf(buff, "%s\n", hard_iface->if_status == IF_NOT_IN_USE ?
+"none" : hard_iface->soft_iface->name);

-kref_put(&batman_if->refcount, hardif_free_ref);
+hardif_free_ref(hard_iface);

return length;
}
@@ -459,11 +459,11 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
char *buff, size_t count)
{
struct net_device *net_dev = kobj_to_netdev(kobj);
-struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
int status_tmp = -1;
-int ret;
+int ret = count;

-if (!batman_if)
+if (!hard_iface)
return count;

if (buff[count - 1] == '\n')
@@ -472,7 +472,7 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
if (strlen(buff) >= IFNAMSIZ) {
pr_err("Invalid parameter for 'mesh_iface' setting received: "
"interface name too long '%s'\n", buff);
-kref_put(&batman_if->refcount, hardif_free_ref);
+hardif_free_ref(hard_iface);
return -EINVAL;
}

@@ -481,30 +481,31 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
else
status_tmp = IF_I_WANT_YOU;

-if ((batman_if->if_status == status_tmp) || ((batman_if->soft_iface) &&
-(strncmp(batman_if->soft_iface->name, buff, IFNAMSIZ) == 0))) {
-kref_put(&batman_if->refcount, hardif_free_ref);
-return count;
-}
+if (hard_iface->if_status == status_tmp)
+goto out;
+
+if ((hard_iface->soft_iface) &&
+(strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
+goto out;
+
if (status_tmp == IF_NOT_IN_USE) {
rtnl_lock();
-hardif_disable_interface(batman_if);
+hardif_disable_interface(hard_iface);
rtnl_unlock();
-kref_put(&batman_if->refcount, hardif_free_ref);
-return count;
+goto out;
}

/* if the interface already is in use */
-if (batman_if->if_status != IF_NOT_IN_USE) {
+if (hard_iface->if_status != IF_NOT_IN_USE) {
rtnl_lock();
-hardif_disable_interface(batman_if);
+hardif_disable_interface(hard_iface);
rtnl_unlock();
}

-ret = hardif_enable_interface(batman_if, buff);
-kref_put(&batman_if->refcount, hardif_free_ref);
+ret = hardif_enable_interface(hard_iface, buff);

+out:
+hardif_free_ref(hard_iface);
return ret;
}

@@ -512,13 +513,13 @@ static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
char *buff)
{
struct net_device *net_dev = kobj_to_netdev(kobj);
-struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
ssize_t length;

-if (!batman_if)
+if (!hard_iface)
return 0;

-switch (batman_if->if_status) {
+switch (hard_iface->if_status) {
case IF_TO_BE_REMOVED:
length = sprintf(buff, "disabling\n");
break;
@@ -537,7 +538,7 @@ static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
break;
}

-kref_put(&batman_if->refcount, hardif_free_ref);
+hardif_free_ref(hard_iface);

return length;
}
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -28,58 +28,75 @@
#include <linux/udp.h>
#include <linux/if_vlan.h>

-static void gw_node_free_ref(struct kref *refcount)
-{
-struct gw_node *gw_node;
-
-gw_node = container_of(refcount, struct gw_node, refcount);
-kfree(gw_node);
-}
-
static void gw_node_free_rcu(struct rcu_head *rcu)
{
struct gw_node *gw_node;

gw_node = container_of(rcu, struct gw_node, rcu);
-kref_put(&gw_node->refcount, gw_node_free_ref);
+kfree(gw_node);
+}
+
+static void gw_node_free_ref(struct gw_node *gw_node)
+{
+if (atomic_dec_and_test(&gw_node->refcount))
+call_rcu(&gw_node->rcu, gw_node_free_rcu);
}

void *gw_get_selected(struct bat_priv *bat_priv)
{
-struct gw_node *curr_gateway_tmp = bat_priv->curr_gw;
+struct gw_node *curr_gateway_tmp;
+struct orig_node *orig_node = NULL;

+rcu_read_lock();
+curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
if (!curr_gateway_tmp)
-return NULL;
+goto out;

-return curr_gateway_tmp->orig_node;
+orig_node = curr_gateway_tmp->orig_node;
+if (!orig_node)
+goto out;
+
+if (!atomic_inc_not_zero(&orig_node->refcount))
+orig_node = NULL;
+
+out:
+rcu_read_unlock();
+return orig_node;
}

void gw_deselect(struct bat_priv *bat_priv)
{
-struct gw_node *gw_node = bat_priv->curr_gw;
+struct gw_node *gw_node;

-bat_priv->curr_gw = NULL;
+spin_lock_bh(&bat_priv->gw_list_lock);
+gw_node = rcu_dereference(bat_priv->curr_gw);
+rcu_assign_pointer(bat_priv->curr_gw, NULL);
+spin_unlock_bh(&bat_priv->gw_list_lock);

if (gw_node)
-kref_put(&gw_node->refcount, gw_node_free_ref);
+gw_node_free_ref(gw_node);
}

-static struct gw_node *gw_select(struct bat_priv *bat_priv,
-struct gw_node *new_gw_node)
+static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
{
-struct gw_node *curr_gw_node = bat_priv->curr_gw;
+struct gw_node *curr_gw_node;

-if (new_gw_node)
-kref_get(&new_gw_node->refcount);
+if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
+new_gw_node = NULL;

-bat_priv->curr_gw = new_gw_node;
-return curr_gw_node;
+spin_lock_bh(&bat_priv->gw_list_lock);
+curr_gw_node = rcu_dereference(bat_priv->curr_gw);
+rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
+spin_unlock_bh(&bat_priv->gw_list_lock);
+
+if (curr_gw_node)
+gw_node_free_ref(curr_gw_node);
}

void gw_election(struct bat_priv *bat_priv)
{
struct hlist_node *node;
-struct gw_node *gw_node, *curr_gw_tmp = NULL, *old_gw_node = NULL;
+struct gw_node *gw_node, *curr_gw, *curr_gw_tmp = NULL;
uint8_t max_tq = 0;
uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
int down, up;
@@ -93,19 +110,23 @@ void gw_election(struct bat_priv *bat_priv)
if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
return;

-if (bat_priv->curr_gw)
-return;
-
rcu_read_lock();
-if (hlist_empty(&bat_priv->gw_list)) {
+curr_gw = rcu_dereference(bat_priv->curr_gw);
+if (curr_gw) {
rcu_read_unlock();
+return;
+}

-if (bat_priv->curr_gw) {
+if (hlist_empty(&bat_priv->gw_list)) {
+
+if (curr_gw) {
+rcu_read_unlock();
bat_dbg(DBG_BATMAN, bat_priv,
"Removing selected gateway - "
"no gateway in range\n");
gw_deselect(bat_priv);
-}
+} else
+rcu_read_unlock();

return;
}
@@ -154,12 +175,12 @@ void gw_election(struct bat_priv *bat_priv)
max_gw_factor = tmp_gw_factor;
}

-if (bat_priv->curr_gw != curr_gw_tmp) {
-if ((bat_priv->curr_gw) && (!curr_gw_tmp))
+if (curr_gw != curr_gw_tmp) {
+if ((curr_gw) && (!curr_gw_tmp))
bat_dbg(DBG_BATMAN, bat_priv,
"Removing selected gateway - "
"no gateway in range\n");
-else if ((!bat_priv->curr_gw) && (curr_gw_tmp))
+else if ((!curr_gw) && (curr_gw_tmp))
bat_dbg(DBG_BATMAN, bat_priv,
"Adding route to gateway %pM "
"(gw_flags: %i, tq: %i)\n",
@@ -174,43 +195,43 @@ void gw_election(struct bat_priv *bat_priv)
curr_gw_tmp->orig_node->gw_flags,
curr_gw_tmp->orig_node->router->tq_avg);

-old_gw_node = gw_select(bat_priv, curr_gw_tmp);
+gw_select(bat_priv, curr_gw_tmp);
}

rcu_read_unlock();
-
-/* the kfree() has to be outside of the rcu lock */
-if (old_gw_node)
-kref_put(&old_gw_node->refcount, gw_node_free_ref);
}

void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
-struct gw_node *curr_gateway_tmp = bat_priv->curr_gw;
+struct gw_node *curr_gateway_tmp;
uint8_t gw_tq_avg, orig_tq_avg;

+rcu_read_lock();
+curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
if (!curr_gateway_tmp)
-return;
+goto out_rcu;

if (!curr_gateway_tmp->orig_node)
-goto deselect;
+goto deselect_rcu;

if (!curr_gateway_tmp->orig_node->router)
-goto deselect;
+goto deselect_rcu;

/* this node already is the gateway */
if (curr_gateway_tmp->orig_node == orig_node)
-return;
+goto out_rcu;

if (!orig_node->router)
-return;
+goto out_rcu;

gw_tq_avg = curr_gateway_tmp->orig_node->router->tq_avg;
+rcu_read_unlock();

orig_tq_avg = orig_node->router->tq_avg;

/* the TQ value has to be better */
if (orig_tq_avg < gw_tq_avg)
-return;
+goto out;

/**
* if the routing class is greater than 3 the value tells us how much
@@ -218,15 +239,23 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
**/
if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
(orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
-return;
+goto out;

bat_dbg(DBG_BATMAN, bat_priv,
"Restarting gateway selection: better gateway found (tq curr: "
"%i, tq new: %i)\n",
gw_tq_avg, orig_tq_avg);
+goto deselect;

+out_rcu:
+rcu_read_unlock();
+goto out;
+deselect_rcu:
+rcu_read_unlock();
deselect:
gw_deselect(bat_priv);
+out:
+return;
}

static void gw_node_add(struct bat_priv *bat_priv,
@@ -242,7 +271,7 @@ static void gw_node_add(struct bat_priv *bat_priv,
memset(gw_node, 0, sizeof(struct gw_node));
INIT_HLIST_NODE(&gw_node->list);
gw_node->orig_node = orig_node;
-kref_init(&gw_node->refcount);
+atomic_set(&gw_node->refcount, 1);

spin_lock_bh(&bat_priv->gw_list_lock);
hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
@@ -283,7 +312,7 @@ void gw_node_update(struct bat_priv *bat_priv,
"Gateway %pM removed from gateway list\n",
orig_node->orig);

-if (gw_node == bat_priv->curr_gw) {
+if (gw_node == rcu_dereference(bat_priv->curr_gw)) {
rcu_read_unlock();
gw_deselect(bat_priv);
return;
@@ -321,11 +350,11 @@ void gw_node_purge(struct bat_priv *bat_priv)
atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
continue;

-if (bat_priv->curr_gw == gw_node)
+if (rcu_dereference(bat_priv->curr_gw) == gw_node)
gw_deselect(bat_priv);

hlist_del_rcu(&gw_node->list);
-call_rcu(&gw_node->rcu, gw_node_free_rcu);
+gw_node_free_ref(gw_node);
}

@@ -335,12 +364,16 @@ void gw_node_purge(struct bat_priv *bat_priv)
static int _write_buffer_text(struct bat_priv *bat_priv,
struct seq_file *seq, struct gw_node *gw_node)
{
-int down, up;
+struct gw_node *curr_gw;
+int down, up, ret;

gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);

-return seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
-(bat_priv->curr_gw == gw_node ? "=>" : " "),
+rcu_read_lock();
+curr_gw = rcu_dereference(bat_priv->curr_gw);
+
+ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
+(curr_gw == gw_node ? "=>" : " "),
gw_node->orig_node->orig,
gw_node->orig_node->router->tq_avg,
gw_node->orig_node->router->addr,
@@ -350,6 +383,9 @@ static int _write_buffer_text(struct bat_priv *bat_priv,
(down > 2048 ? "MBit" : "KBit"),
(up > 2048 ? up / 1024 : up),
(up > 2048 ? "MBit" : "KBit"));
+
+rcu_read_unlock();
+return ret;
}

int gw_client_seq_print_text(struct seq_file *seq, void *offset)
@@ -470,8 +506,12 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
return -1;

-if (!bat_priv->curr_gw)
+rcu_read_lock();
+if (!rcu_dereference(bat_priv->curr_gw)) {
+rcu_read_unlock();
return 0;
+}
+rcu_read_unlock();

return 1;
}
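A note on the pattern above (not part of the commit itself): the gateway_client.c hunks replace a bare curr_gw pointer with an RCU-protected one. The reader side, reassembled from the new gw_get_selected() shown in the diff with explanatory comments added, looks like this:

/* rcu_read_lock() only guarantees that the gw_node the pointer refers
 * to is not freed while we inspect it.  To hand orig_node to the
 * caller beyond the read-side critical section, a real reference is
 * needed, and atomic_inc_not_zero() refuses to take one once the
 * refcount has hit zero (the object is already queued for call_rcu()).
 */
void *gw_get_selected(struct bat_priv *bat_priv)
{
	struct gw_node *curr_gateway_tmp;
	struct orig_node *orig_node = NULL;

	rcu_read_lock();
	curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
	if (!curr_gateway_tmp)
		goto out;

	orig_node = curr_gateway_tmp->orig_node;
	if (!orig_node)
		goto out;

	/* object found but already dying: report "no gateway" */
	if (!atomic_inc_not_zero(&orig_node->refcount))
		orig_node = NULL;

out:
	rcu_read_unlock();
	return orig_node;
}

On the writer side (gw_select()/gw_deselect() above), the pointer is swapped with rcu_assign_pointer() under gw_list_lock, and the previous holder's reference is dropped only after the swap, so readers never observe a freed node.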
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -31,8 +31,8 @@
#include <linux/if_arp.h>

-/* protect update critical side of if_list - but not the content */
-static DEFINE_SPINLOCK(if_list_lock);
+/* protect update critical side of hardif_list - but not the content */
+static DEFINE_SPINLOCK(hardif_list_lock);


static int batman_skb_recv(struct sk_buff *skb,
@@ -40,33 +40,31 @@ static int batman_skb_recv(struct sk_buff *skb,
struct packet_type *ptype,
struct net_device *orig_dev);

-static void hardif_free_rcu(struct rcu_head *rcu)
+void hardif_free_rcu(struct rcu_head *rcu)
{
-struct batman_if *batman_if;
+struct hard_iface *hard_iface;

-batman_if = container_of(rcu, struct batman_if, rcu);
-dev_put(batman_if->net_dev);
-kref_put(&batman_if->refcount, hardif_free_ref);
+hard_iface = container_of(rcu, struct hard_iface, rcu);
+dev_put(hard_iface->net_dev);
+kfree(hard_iface);
}

-struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
+struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev)
{
-struct batman_if *batman_if;
+struct hard_iface *hard_iface;

rcu_read_lock();
-list_for_each_entry_rcu(batman_if, &if_list, list) {
-if (batman_if->net_dev == net_dev)
+list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+if (hard_iface->net_dev == net_dev &&
+atomic_inc_not_zero(&hard_iface->refcount))
goto out;
}

-batman_if = NULL;
+hard_iface = NULL;

out:
-if (batman_if)
-kref_get(&batman_if->refcount);
-
rcu_read_unlock();
-return batman_if;
+return hard_iface;
}

static int is_valid_iface(struct net_device *net_dev)
@@ -81,13 +79,8 @@ static int is_valid_iface(struct net_device *net_dev)
return 0;

/* no batman over batman */
-#ifdef HAVE_NET_DEVICE_OPS
-if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
+if (softif_is_valid(net_dev))
return 0;
-#else
-if (net_dev->hard_start_xmit == interface_tx)
-return 0;
-#endif

/* Device is being bridged */
/* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
@@ -96,27 +89,25 @@ static int is_valid_iface(struct net_device *net_dev)
return 1;
}

-static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
+static struct hard_iface *hardif_get_active(struct net_device *soft_iface)
{
-struct batman_if *batman_if;
+struct hard_iface *hard_iface;

rcu_read_lock();
-list_for_each_entry_rcu(batman_if, &if_list, list) {
-if (batman_if->soft_iface != soft_iface)
+list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+if (hard_iface->soft_iface != soft_iface)
continue;

-if (batman_if->if_status == IF_ACTIVE)
+if (hard_iface->if_status == IF_ACTIVE &&
+atomic_inc_not_zero(&hard_iface->refcount))
goto out;
}

-batman_if = NULL;
+hard_iface = NULL;

out:
-if (batman_if)
-kref_get(&batman_if->refcount);
-
rcu_read_unlock();
-return batman_if;
+return hard_iface;
}

static void update_primary_addr(struct bat_priv *bat_priv)
@@ -132,24 +123,24 @@ static void update_primary_addr(struct bat_priv *bat_priv)
}

static void set_primary_if(struct bat_priv *bat_priv,
-struct batman_if *batman_if)
+struct hard_iface *hard_iface)
{
struct batman_packet *batman_packet;
-struct batman_if *old_if;
+struct hard_iface *old_if;

-if (batman_if)
-kref_get(&batman_if->refcount);
+if (hard_iface && !atomic_inc_not_zero(&hard_iface->refcount))
+hard_iface = NULL;

old_if = bat_priv->primary_if;
-bat_priv->primary_if = batman_if;
+bat_priv->primary_if = hard_iface;

if (old_if)
-kref_put(&old_if->refcount, hardif_free_ref);
+hardif_free_ref(old_if);

if (!bat_priv->primary_if)
return;

-batman_packet = (struct batman_packet *)(batman_if->packet_buff);
+batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
batman_packet->flags = PRIMARIES_FIRST_HOP;
batman_packet->ttl = TTL;

@@ -162,42 +153,42 @@ static void set_primary_if(struct bat_priv *bat_priv,
atomic_set(&bat_priv->hna_local_changed, 1);
}

-static bool hardif_is_iface_up(struct batman_if *batman_if)
+static bool hardif_is_iface_up(struct hard_iface *hard_iface)
{
-if (batman_if->net_dev->flags & IFF_UP)
+if (hard_iface->net_dev->flags & IFF_UP)
return true;

return false;
}

-static void update_mac_addresses(struct batman_if *batman_if)
+static void update_mac_addresses(struct hard_iface *hard_iface)
{
-memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
-batman_if->net_dev->dev_addr, ETH_ALEN);
-memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender,
-batman_if->net_dev->dev_addr, ETH_ALEN);
+memcpy(((struct batman_packet *)(hard_iface->packet_buff))->orig,
+hard_iface->net_dev->dev_addr, ETH_ALEN);
+memcpy(((struct batman_packet *)(hard_iface->packet_buff))->prev_sender,
+hard_iface->net_dev->dev_addr, ETH_ALEN);
}

static void check_known_mac_addr(struct net_device *net_dev)
{
-struct batman_if *batman_if;
+struct hard_iface *hard_iface;

rcu_read_lock();
-list_for_each_entry_rcu(batman_if, &if_list, list) {
-if ((batman_if->if_status != IF_ACTIVE) &&
-(batman_if->if_status != IF_TO_BE_ACTIVATED))
+list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+if ((hard_iface->if_status != IF_ACTIVE) &&
+(hard_iface->if_status != IF_TO_BE_ACTIVATED))
continue;

-if (batman_if->net_dev == net_dev)
+if (hard_iface->net_dev == net_dev)
continue;

-if (!compare_orig(batman_if->net_dev->dev_addr,
-net_dev->dev_addr))
+if (!compare_eth(hard_iface->net_dev->dev_addr,
+net_dev->dev_addr))
continue;

pr_warning("The newly added mac address (%pM) already exists "
"on: %s\n", net_dev->dev_addr,
-batman_if->net_dev->name);
+hard_iface->net_dev->name);
pr_warning("It is strongly recommended to keep mac addresses "
"unique to avoid problems!\n");
}
@@ -207,7 +198,7 @@ static void check_known_mac_addr(struct net_device *net_dev)
int hardif_min_mtu(struct net_device *soft_iface)
{
struct bat_priv *bat_priv = netdev_priv(soft_iface);
-struct batman_if *batman_if;
+struct hard_iface *hard_iface;
/* allow big frames if all devices are capable to do so
* (have MTU > 1500 + BAT_HEADER_LEN) */
int min_mtu = ETH_DATA_LEN;
@@ -216,15 +207,15 @@ int hardif_min_mtu(struct net_device *soft_iface)
goto out;

rcu_read_lock();
-list_for_each_entry_rcu(batman_if, &if_list, list) {
-if ((batman_if->if_status != IF_ACTIVE) &&
-(batman_if->if_status != IF_TO_BE_ACTIVATED))
+list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+if ((hard_iface->if_status != IF_ACTIVE) &&
+(hard_iface->if_status != IF_TO_BE_ACTIVATED))
continue;

-if (batman_if->soft_iface != soft_iface)
+if (hard_iface->soft_iface != soft_iface)
continue;

-min_mtu = min_t(int, batman_if->net_dev->mtu - BAT_HEADER_LEN,
+min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN,
min_mtu);
}
rcu_read_unlock();
@@ -242,77 +233,95 @@ void update_min_mtu(struct net_device *soft_iface)
soft_iface->mtu = min_mtu;
}

-static void hardif_activate_interface(struct batman_if *batman_if)
+static void hardif_activate_interface(struct hard_iface *hard_iface)
{
struct bat_priv *bat_priv;

-if (batman_if->if_status != IF_INACTIVE)
+if (hard_iface->if_status != IF_INACTIVE)
return;

-bat_priv = netdev_priv(batman_if->soft_iface);
+bat_priv = netdev_priv(hard_iface->soft_iface);

-update_mac_addresses(batman_if);
-batman_if->if_status = IF_TO_BE_ACTIVATED;
+update_mac_addresses(hard_iface);
+hard_iface->if_status = IF_TO_BE_ACTIVATED;

/**
* the first active interface becomes our primary interface or
* the next active interface after the old primay interface was removed
*/
if (!bat_priv->primary_if)
-set_primary_if(bat_priv, batman_if);
+set_primary_if(bat_priv, hard_iface);

-bat_info(batman_if->soft_iface, "Interface activated: %s\n",
-batman_if->net_dev->name);
+bat_info(hard_iface->soft_iface, "Interface activated: %s\n",
+hard_iface->net_dev->name);

-update_min_mtu(batman_if->soft_iface);
+update_min_mtu(hard_iface->soft_iface);
return;
}

-static void hardif_deactivate_interface(struct batman_if *batman_if)
+static void hardif_deactivate_interface(struct hard_iface *hard_iface)
{
-if ((batman_if->if_status != IF_ACTIVE) &&
-(batman_if->if_status != IF_TO_BE_ACTIVATED))
+if ((hard_iface->if_status != IF_ACTIVE) &&
+(hard_iface->if_status != IF_TO_BE_ACTIVATED))
return;

-batman_if->if_status = IF_INACTIVE;
+hard_iface->if_status = IF_INACTIVE;

-bat_info(batman_if->soft_iface, "Interface deactivated: %s\n",
-batman_if->net_dev->name);
+bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
+hard_iface->net_dev->name);

-update_min_mtu(batman_if->soft_iface);
+update_min_mtu(hard_iface->soft_iface);
}

-int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
+int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name)
{
struct bat_priv *bat_priv;
struct batman_packet *batman_packet;
+struct net_device *soft_iface;
int ret;

-if (batman_if->if_status != IF_NOT_IN_USE)
+if (hard_iface->if_status != IF_NOT_IN_USE)
goto out;

-batman_if->soft_iface = dev_get_by_name(&init_net, iface_name);
+if (!atomic_inc_not_zero(&hard_iface->refcount))
+goto out;

-if (!batman_if->soft_iface) {
-batman_if->soft_iface = softif_create(iface_name);
+soft_iface = dev_get_by_name(&init_net, iface_name);

-if (!batman_if->soft_iface)
+if (!soft_iface) {
+soft_iface = softif_create(iface_name);
+
+if (!soft_iface) {
+ret = -ENOMEM;
goto err;
+}

/* dev_get_by_name() increases the reference counter for us */
-dev_hold(batman_if->soft_iface);
+dev_hold(soft_iface);
}

-bat_priv = netdev_priv(batman_if->soft_iface);
-batman_if->packet_len = BAT_PACKET_LEN;
-batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);
-
-if (!batman_if->packet_buff) {
-bat_err(batman_if->soft_iface, "Can't add interface packet "
-"(%s): out of memory\n", batman_if->net_dev->name);
+if (!softif_is_valid(soft_iface)) {
+pr_err("Can't create batman mesh interface %s: "
+"already exists as regular interface\n",
+soft_iface->name);
+dev_put(soft_iface);
+ret = -EINVAL;
goto err;
}

-batman_packet = (struct batman_packet *)(batman_if->packet_buff);
+hard_iface->soft_iface = soft_iface;
+bat_priv = netdev_priv(hard_iface->soft_iface);
+hard_iface->packet_len = BAT_PACKET_LEN;
+hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
+
+if (!hard_iface->packet_buff) {
+bat_err(hard_iface->soft_iface, "Can't add interface packet "
+"(%s): out of memory\n", hard_iface->net_dev->name);
+ret = -ENOMEM;
+goto err;
+}
+
+batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
batman_packet->packet_type = BAT_PACKET;
batman_packet->version = COMPAT_VERSION;
batman_packet->flags = 0;
@@ -320,107 +329,107 @@ int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
batman_packet->tq = TQ_MAX_VALUE;
batman_packet->num_hna = 0;

-batman_if->if_num = bat_priv->num_ifaces;
+hard_iface->if_num = bat_priv->num_ifaces;
bat_priv->num_ifaces++;
-batman_if->if_status = IF_INACTIVE;
-orig_hash_add_if(batman_if, bat_priv->num_ifaces);
+hard_iface->if_status = IF_INACTIVE;
+orig_hash_add_if(hard_iface, bat_priv->num_ifaces);

-batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
-batman_if->batman_adv_ptype.func = batman_skb_recv;
-batman_if->batman_adv_ptype.dev = batman_if->net_dev;
-kref_get(&batman_if->refcount);
-dev_add_pack(&batman_if->batman_adv_ptype);
+hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
+hard_iface->batman_adv_ptype.func = batman_skb_recv;
+hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
+dev_add_pack(&hard_iface->batman_adv_ptype);

-atomic_set(&batman_if->seqno, 1);
-atomic_set(&batman_if->frag_seqno, 1);
-bat_info(batman_if->soft_iface, "Adding interface: %s\n",
-batman_if->net_dev->name);
+atomic_set(&hard_iface->seqno, 1);
+atomic_set(&hard_iface->frag_seqno, 1);
+bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
+hard_iface->net_dev->name);

-if (atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
+if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
ETH_DATA_LEN + BAT_HEADER_LEN)
-bat_info(batman_if->soft_iface,
+bat_info(hard_iface->soft_iface,
"The MTU of interface %s is too small (%i) to handle "
"the transport of batman-adv packets. Packets going "
"over this interface will be fragmented on layer2 "
"which could impact the performance. Setting the MTU "
"to %zi would solve the problem.\n",
-batman_if->net_dev->name, batman_if->net_dev->mtu,
+hard_iface->net_dev->name, hard_iface->net_dev->mtu,
ETH_DATA_LEN + BAT_HEADER_LEN);

-if (!atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
+if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
ETH_DATA_LEN + BAT_HEADER_LEN)
-bat_info(batman_if->soft_iface,
+bat_info(hard_iface->soft_iface,
"The MTU of interface %s is too small (%i) to handle "
"the transport of batman-adv packets. If you experience"
" problems getting traffic through try increasing the "
"MTU to %zi.\n",
-batman_if->net_dev->name, batman_if->net_dev->mtu,
+hard_iface->net_dev->name, hard_iface->net_dev->mtu,
ETH_DATA_LEN + BAT_HEADER_LEN);

-if (hardif_is_iface_up(batman_if))
-hardif_activate_interface(batman_if);
+if (hardif_is_iface_up(hard_iface))
+hardif_activate_interface(hard_iface);
else
-bat_err(batman_if->soft_iface, "Not using interface %s "
+bat_err(hard_iface->soft_iface, "Not using interface %s "
"(retrying later): interface not active\n",
-batman_if->net_dev->name);
+hard_iface->net_dev->name);

/* begin scheduling originator messages on that interface */
-schedule_own_packet(batman_if);
+schedule_own_packet(hard_iface);

out:
return 0;

err:
-return -ENOMEM;
+hardif_free_ref(hard_iface);
+return ret;
}

-void hardif_disable_interface(struct batman_if *batman_if)
+void hardif_disable_interface(struct hard_iface *hard_iface)
{
-struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

-if (batman_if->if_status == IF_ACTIVE)
-hardif_deactivate_interface(batman_if);
+if (hard_iface->if_status == IF_ACTIVE)
+hardif_deactivate_interface(hard_iface);

-if (batman_if->if_status != IF_INACTIVE)
+if (hard_iface->if_status != IF_INACTIVE)
return;

-bat_info(batman_if->soft_iface, "Removing interface: %s\n",
-batman_if->net_dev->name);
-dev_remove_pack(&batman_if->batman_adv_ptype);
-kref_put(&batman_if->refcount, hardif_free_ref);
+bat_info(hard_iface->soft_iface, "Removing interface: %s\n",
+hard_iface->net_dev->name);
+dev_remove_pack(&hard_iface->batman_adv_ptype);

bat_priv->num_ifaces--;
-orig_hash_del_if(batman_if, bat_priv->num_ifaces);
+orig_hash_del_if(hard_iface, bat_priv->num_ifaces);

-if (batman_if == bat_priv->primary_if) {
-struct batman_if *new_if;
+if (hard_iface == bat_priv->primary_if) {
+struct hard_iface *new_if;

-new_if = get_active_batman_if(batman_if->soft_iface);
+new_if = hardif_get_active(hard_iface->soft_iface);
set_primary_if(bat_priv, new_if);

if (new_if)
-kref_put(&new_if->refcount, hardif_free_ref);
+hardif_free_ref(new_if);
}

-kfree(batman_if->packet_buff);
-batman_if->packet_buff = NULL;
-batman_if->if_status = IF_NOT_IN_USE;
+kfree(hard_iface->packet_buff);
+hard_iface->packet_buff = NULL;
+hard_iface->if_status = IF_NOT_IN_USE;

-/* delete all references to this batman_if */
+/* delete all references to this hard_iface */
purge_orig_ref(bat_priv);
-purge_outstanding_packets(bat_priv, batman_if);
-dev_put(batman_if->soft_iface);
+purge_outstanding_packets(bat_priv, hard_iface);
+dev_put(hard_iface->soft_iface);

/* nobody uses this interface anymore */
if (!bat_priv->num_ifaces)
-softif_destroy(batman_if->soft_iface);
+softif_destroy(hard_iface->soft_iface);

-batman_if->soft_iface = NULL;
+hard_iface->soft_iface = NULL;
+hardif_free_ref(hard_iface);
}

-static struct batman_if *hardif_add_interface(struct net_device *net_dev)
+static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
{
-struct batman_if *batman_if;
+struct hard_iface *hard_iface;
int ret;

ret = is_valid_iface(net_dev);
@@ -429,73 +438,73 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)

dev_hold(net_dev);

-batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
-if (!batman_if) {
+hard_iface = kmalloc(sizeof(struct hard_iface), GFP_ATOMIC);
+if (!hard_iface) {
pr_err("Can't add interface (%s): out of memory\n",
net_dev->name);
goto release_dev;
}

-ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev);
+ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
if (ret)
goto free_if;

-batman_if->if_num = -1;
-batman_if->net_dev = net_dev;
-batman_if->soft_iface = NULL;
-batman_if->if_status = IF_NOT_IN_USE;
-INIT_LIST_HEAD(&batman_if->list);
-kref_init(&batman_if->refcount);
-
-check_known_mac_addr(batman_if->net_dev);
-
-spin_lock(&if_list_lock);
-list_add_tail_rcu(&batman_if->list, &if_list);
-spin_unlock(&if_list_lock);
-
+hard_iface->if_num = -1;
+hard_iface->net_dev = net_dev;
+hard_iface->soft_iface = NULL;
+hard_iface->if_status = IF_NOT_IN_USE;
+INIT_LIST_HEAD(&hard_iface->list);
/* extra reference for return */
-kref_get(&batman_if->refcount);
-return batman_if;
+atomic_set(&hard_iface->refcount, 2);
+
+check_known_mac_addr(hard_iface->net_dev);
+
+spin_lock(&hardif_list_lock);
+list_add_tail_rcu(&hard_iface->list, &hardif_list);
+spin_unlock(&hardif_list_lock);
+
+return hard_iface;

free_if:
-kfree(batman_if);
+kfree(hard_iface);
release_dev:
dev_put(net_dev);
out:
return NULL;
}

-static void hardif_remove_interface(struct batman_if *batman_if)
+static void hardif_remove_interface(struct hard_iface *hard_iface)
{
/* first deactivate interface */
-if (batman_if->if_status != IF_NOT_IN_USE)
-hardif_disable_interface(batman_if);
+if (hard_iface->if_status != IF_NOT_IN_USE)
+hardif_disable_interface(hard_iface);

-if (batman_if->if_status != IF_NOT_IN_USE)
+if (hard_iface->if_status != IF_NOT_IN_USE)
return;

-batman_if->if_status = IF_TO_BE_REMOVED;
-sysfs_del_hardif(&batman_if->hardif_obj);
-call_rcu(&batman_if->rcu, hardif_free_rcu);
+hard_iface->if_status = IF_TO_BE_REMOVED;
+sysfs_del_hardif(&hard_iface->hardif_obj);
+hardif_free_ref(hard_iface);
}

void hardif_remove_interfaces(void)
{
-struct batman_if *batman_if, *batman_if_tmp;
+struct hard_iface *hard_iface, *hard_iface_tmp;
struct list_head if_queue;

INIT_LIST_HEAD(&if_queue);

-spin_lock(&if_list_lock);
-list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) {
-list_del_rcu(&batman_if->list);
-list_add_tail(&batman_if->list, &if_queue);
+spin_lock(&hardif_list_lock);
+list_for_each_entry_safe(hard_iface, hard_iface_tmp,
+&hardif_list, list) {
+list_del_rcu(&hard_iface->list);
+list_add_tail(&hard_iface->list, &if_queue);
}
-spin_unlock(&if_list_lock);
+spin_unlock(&hardif_list_lock);

rtnl_lock();
-list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) {
-hardif_remove_interface(batman_if);
+list_for_each_entry_safe(hard_iface, hard_iface_tmp, &if_queue, list) {
+hardif_remove_interface(hard_iface);
}
rtnl_unlock();
}
@@ -504,43 +513,43 @@ static int hard_if_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *net_dev = (struct net_device *)ptr;
-struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
struct bat_priv *bat_priv;

-if (!batman_if && event == NETDEV_REGISTER)
-batman_if = hardif_add_interface(net_dev);
+if (!hard_iface && event == NETDEV_REGISTER)
+hard_iface = hardif_add_interface(net_dev);

-if (!batman_if)
+if (!hard_iface)
goto out;

switch (event) {
case NETDEV_UP:
-hardif_activate_interface(batman_if);
+hardif_activate_interface(hard_iface);
break;
case NETDEV_GOING_DOWN:
case NETDEV_DOWN:
-hardif_deactivate_interface(batman_if);
+hardif_deactivate_interface(hard_iface);
break;
case NETDEV_UNREGISTER:
-spin_lock(&if_list_lock);
-list_del_rcu(&batman_if->list);
-spin_unlock(&if_list_lock);
+spin_lock(&hardif_list_lock);
+list_del_rcu(&hard_iface->list);
+spin_unlock(&hardif_list_lock);

-hardif_remove_interface(batman_if);
+hardif_remove_interface(hard_iface);
break;
case NETDEV_CHANGEMTU:
-if (batman_if->soft_iface)
-update_min_mtu(batman_if->soft_iface);
+if (hard_iface->soft_iface)
+update_min_mtu(hard_iface->soft_iface);
break;
case NETDEV_CHANGEADDR:
-if (batman_if->if_status == IF_NOT_IN_USE)
+if (hard_iface->if_status == IF_NOT_IN_USE)
goto hardif_put;

-check_known_mac_addr(batman_if->net_dev);
-update_mac_addresses(batman_if);
+check_known_mac_addr(hard_iface->net_dev);
+update_mac_addresses(hard_iface);

-bat_priv = netdev_priv(batman_if->soft_iface);
-if (batman_if == bat_priv->primary_if)
+bat_priv = netdev_priv(hard_iface->soft_iface);
+if (hard_iface == bat_priv->primary_if)
update_primary_addr(bat_priv);
break;
default:
@@ -548,7 +557,7 @@ static int hard_if_event(struct notifier_block *this,
};

hardif_put:
-kref_put(&batman_if->refcount, hardif_free_ref);
+hardif_free_ref(hard_iface);
out:
return NOTIFY_DONE;
}
@@ -561,10 +570,10 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
{
struct bat_priv *bat_priv;
struct batman_packet *batman_packet;
-struct batman_if *batman_if;
+struct hard_iface *hard_iface;
int ret;

-batman_if = container_of(ptype, struct batman_if, batman_adv_ptype);
+hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
skb = skb_share_check(skb, GFP_ATOMIC);

/* skb was released by skb_share_check() */
@@ -580,16 +589,16 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
|| !skb_mac_header(skb)))
goto err_free;

-if (!batman_if->soft_iface)
+if (!hard_iface->soft_iface)
goto err_free;

-bat_priv = netdev_priv(batman_if->soft_iface);
+bat_priv = netdev_priv(hard_iface->soft_iface);

if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
goto err_free;

/* discard frames on not active interfaces */
-if (batman_if->if_status != IF_ACTIVE)
+if (hard_iface->if_status != IF_ACTIVE)
goto err_free;

batman_packet = (struct batman_packet *)skb->data;
@@ -607,32 +616,32 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
switch (batman_packet->packet_type) {
/* batman originator packet */
case BAT_PACKET:
-ret = recv_bat_packet(skb, batman_if);
+ret = recv_bat_packet(skb, hard_iface);
break;

/* batman icmp packet */
case BAT_ICMP:
-ret = recv_icmp_packet(skb, batman_if);
+ret = recv_icmp_packet(skb, hard_iface);
break;

/* unicast packet */
case BAT_UNICAST:
-ret = recv_unicast_packet(skb, batman_if);
+ret = recv_unicast_packet(skb, hard_iface);
break;

/* fragmented unicast packet */
case BAT_UNICAST_FRAG:
-ret = recv_ucast_frag_packet(skb, batman_if);
+ret = recv_ucast_frag_packet(skb, hard_iface);
break;

/* broadcast packet */
case BAT_BCAST:
-ret = recv_bcast_packet(skb, batman_if);
+ret = recv_bcast_packet(skb, hard_iface);
break;

/* vis packet */
case BAT_VIS:
-ret = recv_vis_packet(skb, batman_if);
+ret = recv_vis_packet(skb, hard_iface);
break;
default:
ret = NET_RX_DROP;
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -31,19 +31,18 @@

extern struct notifier_block hard_if_notifier;

-struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev);
-int hardif_enable_interface(struct batman_if *batman_if, char *iface_name);
-void hardif_disable_interface(struct batman_if *batman_if);
+struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev);
+int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name);
+void hardif_disable_interface(struct hard_iface *hard_iface);
void hardif_remove_interfaces(void);
int hardif_min_mtu(struct net_device *soft_iface);
void update_min_mtu(struct net_device *soft_iface);
+void hardif_free_rcu(struct rcu_head *rcu);

-static inline void hardif_free_ref(struct kref *refcount)
+static inline void hardif_free_ref(struct hard_iface *hard_iface)
{
-struct batman_if *batman_if;
-
-batman_if = container_of(refcount, struct batman_if, refcount);
-kfree(batman_if);
+if (atomic_dec_and_test(&hard_iface->refcount))
+call_rcu(&hard_iface->rcu, hardif_free_rcu);
}

#endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
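The new hardif_free_ref() inline above is one instance of the refcounting idiom this series applies to hard_iface, gw_node and neigh_node alike. A minimal generic sketch — struct foo and the foo_* names are illustrative, not from the patch:

/* The object embeds an atomic_t refcount and a struct rcu_head.  The
 * last put does not free immediately; it defers the kfree() past an
 * RCU grace period so concurrent RCU readers stay safe.
 */
struct foo {
	atomic_t refcount;
	struct rcu_head rcu;
};

static void foo_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct foo, rcu));
}

static void foo_free_ref(struct foo *foo)
{
	/* last reference gone: free after the grace period */
	if (atomic_dec_and_test(&foo->refcount))
		call_rcu(&foo->rcu, foo_free_rcu);
}

static struct foo *foo_get(struct foo *foo)
{
	/* under rcu_read_lock(): take a reference only if still alive */
	return atomic_inc_not_zero(&foo->refcount) ? foo : NULL;
}

Compared with the old kref scheme, this drops the extra kref_put() bounce inside the RCU callback and lets lookups reject objects that are already being torn down.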
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -27,13 +27,16 @@ static void hash_init(struct hashtable_t *hash)
{
int i;

-for (i = 0 ; i < hash->size; i++)
+for (i = 0 ; i < hash->size; i++) {
INIT_HLIST_HEAD(&hash->table[i]);
+spin_lock_init(&hash->list_locks[i]);
+}
}

/* free only the hashtable and the hash itself. */
void hash_destroy(struct hashtable_t *hash)
{
+kfree(hash->list_locks);
kfree(hash->table);
kfree(hash);
}
@@ -43,20 +46,25 @@ struct hashtable_t *hash_new(int size)
{
struct hashtable_t *hash;

-hash = kmalloc(sizeof(struct hashtable_t) , GFP_ATOMIC);
-
+hash = kmalloc(sizeof(struct hashtable_t), GFP_ATOMIC);
if (!hash)
return NULL;

-hash->size = size;
hash->table = kmalloc(sizeof(struct element_t *) * size, GFP_ATOMIC);
+if (!hash->table)
+goto free_hash;

-if (!hash->table) {
-kfree(hash);
-return NULL;
-}
+hash->list_locks = kmalloc(sizeof(spinlock_t) * size, GFP_ATOMIC);
+if (!hash->list_locks)
+goto free_table;
+
+hash->size = size;
hash_init(hash);

return hash;
+
+free_table:
+kfree(hash->table);
+free_hash:
+kfree(hash);
+return NULL;
}
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -28,21 +28,17 @@
* compare 2 element datas for their keys,
* return 0 if same and not 0 if not
* same */
-typedef int (*hashdata_compare_cb)(void *, void *);
+typedef int (*hashdata_compare_cb)(struct hlist_node *, void *);

/* the hashfunction, should return an index
* based on the key in the data of the first
* argument and the size the second */
typedef int (*hashdata_choose_cb)(void *, int);
-typedef void (*hashdata_free_cb)(void *, void *);
-
-struct element_t {
-void *data; /* pointer to the data */
-struct hlist_node hlist; /* bucket list pointer */
-};
+typedef void (*hashdata_free_cb)(struct hlist_node *, void *);

struct hashtable_t {
-struct hlist_head *table; /* the hashtable itself, with the buckets */
+struct hlist_head *table; /* the hashtable itself with the buckets */
+spinlock_t *list_locks; /* spinlock for each hash list entry */
int size; /* size of hashtable */
};

@@ -59,21 +55,22 @@ static inline void hash_delete(struct hashtable_t *hash,
hashdata_free_cb free_cb, void *arg)
{
struct hlist_head *head;
-struct hlist_node *walk, *safe;
-struct element_t *bucket;
+struct hlist_node *node, *node_tmp;
+spinlock_t *list_lock; /* spinlock to protect write access */
int i;

for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
+list_lock = &hash->list_locks[i];

+spin_lock_bh(list_lock);
+hlist_for_each_safe(node, node_tmp, head) {
+hlist_del_rcu(node);
+
-hlist_for_each_safe(walk, safe, head) {
-bucket = hlist_entry(walk, struct element_t, hlist);
if (free_cb)
-free_cb(bucket->data, arg);
-
-hlist_del(walk);
-kfree(bucket);
+free_cb(node, arg);
}
+spin_unlock_bh(list_lock);
}

hash_destroy(hash);
@@ -82,35 +79,41 @@ static inline void hash_delete(struct hashtable_t *hash,
/* adds data to the hashtable. returns 0 on success, -1 on error */
static inline int hash_add(struct hashtable_t *hash,
hashdata_compare_cb compare,
-hashdata_choose_cb choose, void *data)
+hashdata_choose_cb choose,
+void *data, struct hlist_node *data_node)
{
int index;
struct hlist_head *head;
-struct hlist_node *walk, *safe;
-struct element_t *bucket;
+struct hlist_node *node;
+spinlock_t *list_lock; /* spinlock to protect write access */

if (!hash)
-return -1;
+goto err;

index = choose(data, hash->size);
head = &hash->table[index];
+list_lock = &hash->list_locks[index];

-hlist_for_each_safe(walk, safe, head) {
-bucket = hlist_entry(walk, struct element_t, hlist);
-if (compare(bucket->data, data))
-return -1;
+rcu_read_lock();
+__hlist_for_each_rcu(node, head) {
+if (!compare(node, data))
+continue;
+
+goto err_unlock;
}
+rcu_read_unlock();

/* no duplicate found in list, add new element */
-bucket = kmalloc(sizeof(struct element_t), GFP_ATOMIC);
-
-if (!bucket)
-return -1;
-
-bucket->data = data;
-hlist_add_head(&bucket->hlist, head);
+spin_lock_bh(list_lock);
+hlist_add_head_rcu(data_node, head);
+spin_unlock_bh(list_lock);

return 0;
+
+err_unlock:
+rcu_read_unlock();
+err:
+return -1;
}

/* removes data from hash, if found. returns pointer do data on success, so you
@@ -122,50 +125,25 @@ static inline void *hash_remove(struct hashtable_t *hash,
hashdata_choose_cb choose, void *data)
{
size_t index;
-struct hlist_node *walk;
-struct element_t *bucket;
+struct hlist_node *node;
struct hlist_head *head;
-void *data_save;
+void *data_save = NULL;

index = choose(data, hash->size);
head = &hash->table[index];

-hlist_for_each_entry(bucket, walk, head, hlist) {
-if (compare(bucket->data, data)) {
-data_save = bucket->data;
-hlist_del(walk);
-kfree(bucket);
-return data_save;
-}
+spin_lock_bh(&hash->list_locks[index]);
+hlist_for_each(node, head) {
+if (!compare(node, data))
+continue;
+
+data_save = node;
+hlist_del_rcu(node);
+break;
}
+spin_unlock_bh(&hash->list_locks[index]);

-return NULL;
-}
-
-/* finds data, based on the key in keydata. returns the found data on success,
-* or NULL on error */
-static inline void *hash_find(struct hashtable_t *hash,
-hashdata_compare_cb compare,
-hashdata_choose_cb choose, void *keydata)
-{
-int index;
-struct hlist_head *head;
-struct hlist_node *walk;
-struct element_t *bucket;
-
-if (!hash)
-return NULL;
-
-index = choose(keydata , hash->size);
-head = &hash->table[index];
-
-hlist_for_each(walk, head) {
-bucket = hlist_entry(walk, struct element_t, hlist);
-if (compare(bucket->data, keydata))
-return bucket->data;
-}
-
-return NULL;
+return data_save;
}

#endif /* _NET_BATMAN_ADV_HASH_H_ */
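With struct element_t gone, the hash no longer allocates a wrapper bucket per entry: callers embed a struct hlist_node in their own objects and the callbacks receive that node directly. A hypothetical caller might look like this — my_entry, my_compare(), my_choose() and my_insert() are illustrative names, not from the patch:

#include <linux/list.h>
#include <linux/if_ether.h>
#include <linux/string.h>
#include "hash.h"

/* the object itself carries its hash linkage */
struct my_entry {
	uint8_t key[ETH_ALEN];
	struct hlist_node hash_entry;
};

/* compare callback: recover the object from its embedded node */
static int my_compare(struct hlist_node *node, void *key)
{
	struct my_entry *entry;

	entry = container_of(node, struct my_entry, hash_entry);
	return (memcmp(entry->key, key, ETH_ALEN) == 0);
}

/* choose callback: map a key to a bucket index (toy hash function) */
static int my_choose(void *key, int size)
{
	uint8_t *addr = key;

	return addr[ETH_ALEN - 1] % size;
}

static int my_insert(struct hashtable_t *hash, struct my_entry *entry)
{
	/* hash_add() now takes the key data plus the embedded node */
	return hash_add(hash, my_compare, my_choose,
			entry->key, &entry->hash_entry);
}

The one-allocation layout is what makes the RCU conversion possible: the hlist_node lives and dies with the object, so hlist_del_rcu() plus the object's own deferred free covers both.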
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -156,10 +156,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
struct sk_buff *skb;
struct icmp_packet_rr *icmp_packet;

-struct orig_node *orig_node;
-struct batman_if *batman_if;
+struct orig_node *orig_node = NULL;
+struct neigh_node *neigh_node = NULL;
size_t packet_len = sizeof(struct icmp_packet);
-uint8_t dstaddr[ETH_ALEN];

if (len < sizeof(struct icmp_packet)) {
bat_dbg(DBG_BATMAN, bat_priv,
@@ -219,47 +218,52 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
goto dst_unreach;

-spin_lock_bh(&bat_priv->orig_hash_lock);
-orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-compare_orig, choose_orig,
-icmp_packet->dst));
+rcu_read_lock();
+orig_node = orig_hash_find(bat_priv, icmp_packet->dst);

if (!orig_node)
goto unlock;

-if (!orig_node->router)
+neigh_node = orig_node->router;
+
+if (!neigh_node)
goto unlock;

-batman_if = orig_node->router->if_incoming;
-memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+if (!atomic_inc_not_zero(&neigh_node->refcount)) {
+neigh_node = NULL;
+goto unlock;
+}

-spin_unlock_bh(&bat_priv->orig_hash_lock);
+rcu_read_unlock();

-if (!batman_if)
+if (!neigh_node->if_incoming)
goto dst_unreach;

-if (batman_if->if_status != IF_ACTIVE)
+if (neigh_node->if_incoming->if_status != IF_ACTIVE)
goto dst_unreach;

memcpy(icmp_packet->orig,
bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);

if (packet_len == sizeof(struct icmp_packet_rr))
-memcpy(icmp_packet->rr, batman_if->net_dev->dev_addr, ETH_ALEN);
-
-
-send_skb_packet(skb, batman_if, dstaddr);
+memcpy(icmp_packet->rr,
+neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN);

+send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
goto out;

unlock:
-spin_unlock_bh(&bat_priv->orig_hash_lock);
+rcu_read_unlock();
dst_unreach:
icmp_packet->msg_type = DESTINATION_UNREACHABLE;
bat_socket_add_packet(socket_client, icmp_packet, packet_len);
free_skb:
kfree_skb(skb);
out:
+if (neigh_node)
+neigh_node_free_ref(neigh_node);
+if (orig_node)
+orig_node_free_ref(orig_node);
return len;
}
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -33,7 +33,7 @@
#include "vis.h"
#include "hash.h"

-struct list_head if_list;
+struct list_head hardif_list;

unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

@@ -41,7 +41,7 @@ struct workqueue_struct *bat_event_workqueue;

static int __init batman_init(void)
{
-INIT_LIST_HEAD(&if_list);
+INIT_LIST_HEAD(&hardif_list);

/* the name should not be longer than 10 chars - see
* http://lwn.net/Articles/23634/ */
@@ -79,7 +79,6 @@ int mesh_init(struct net_device *soft_iface)
{
struct bat_priv *bat_priv = netdev_priv(soft_iface);

-spin_lock_init(&bat_priv->orig_hash_lock);
spin_lock_init(&bat_priv->forw_bat_list_lock);
spin_lock_init(&bat_priv->forw_bcast_list_lock);
spin_lock_init(&bat_priv->hna_lhash_lock);
@@ -154,14 +153,14 @@ void dec_module_count(void)

int is_my_mac(uint8_t *addr)
{
-struct batman_if *batman_if;
+struct hard_iface *hard_iface;

rcu_read_lock();
-list_for_each_entry_rcu(batman_if, &if_list, list) {
-if (batman_if->if_status != IF_ACTIVE)
+list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+if (hard_iface->if_status != IF_ACTIVE)
continue;

-if (compare_orig(batman_if->net_dev->dev_addr, addr)) {
+if (compare_eth(hard_iface->net_dev->dev_addr, addr)) {
rcu_read_unlock();
return 1;
}
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -122,7 +122,7 @@
#define REVISION_VERSION_STR " "REVISION_VERSION
#endif

-extern struct list_head if_list;
+extern struct list_head hardif_list;

extern unsigned char broadcast_addr[];
extern struct workqueue_struct *bat_event_workqueue;
@@ -165,4 +165,14 @@ static inline void bat_dbg(char type __always_unused,
pr_err("%s: " fmt, _netdev->name, ## arg); \
} while (0)

+/**
+* returns 1 if they are the same ethernet addr
+*
+* note: can't use compare_ether_addr() as it requires aligned memory
+*/
+static inline int compare_eth(void *data1, void *data2)
+{
+return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+}
+
#endif /* _NET_BATMAN_ADV_MAIN_H_ */
@ -44,24 +44,36 @@ int originator_init(struct bat_priv *bat_priv)
|
||||
if (bat_priv->orig_hash)
|
||||
return 1;
|
||||
|
||||
spin_lock_bh(&bat_priv->orig_hash_lock);
|
||||
bat_priv->orig_hash = hash_new(1024);
|
||||
|
||||
if (!bat_priv->orig_hash)
|
||||
goto err;
|
||||
|
||||
spin_unlock_bh(&bat_priv->orig_hash_lock);
|
||||
start_purge_timer(bat_priv);
|
||||
return 1;
|
||||
|
||||
err:
|
||||
spin_unlock_bh(&bat_priv->orig_hash_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct neigh_node *
|
||||
create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
|
||||
uint8_t *neigh, struct batman_if *if_incoming)
|
||||
static void neigh_node_free_rcu(struct rcu_head *rcu)
|
||||
{
|
||||
struct neigh_node *neigh_node;
|
||||
|
||||
neigh_node = container_of(rcu, struct neigh_node, rcu);
|
||||
kfree(neigh_node);
|
||||
}
|
||||
|
||||
void neigh_node_free_ref(struct neigh_node *neigh_node)
|
||||
{
|
||||
if (atomic_dec_and_test(&neigh_node->refcount))
|
||||
call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
|
||||
}
|
||||
|
||||
struct neigh_node *create_neighbor(struct orig_node *orig_node,
|
||||
struct orig_node *orig_neigh_node,
|
||||
uint8_t *neigh,
|
||||
struct hard_iface *if_incoming)
|
||||
{
|
||||
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
|
||||
struct neigh_node *neigh_node;
|
||||
@ -73,50 +85,94 @@ create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
|
||||
if (!neigh_node)
|
||||
return NULL;
|
||||
|
||||
INIT_LIST_HEAD(&neigh_node->list);
|
||||
INIT_HLIST_NODE(&neigh_node->list);
|
||||
INIT_LIST_HEAD(&neigh_node->bonding_list);
|
||||
|
||||
memcpy(neigh_node->addr, neigh, ETH_ALEN);
|
||||
neigh_node->orig_node = orig_neigh_node;
|
||||
neigh_node->if_incoming = if_incoming;
|
||||
|
||||
list_add_tail(&neigh_node->list, &orig_node->neigh_list);
|
||||
/* extra reference for return */
|
||||
atomic_set(&neigh_node->refcount, 2);
|
||||
|
||||
spin_lock_bh(&orig_node->neigh_list_lock);
|
||||
hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
|
||||
spin_unlock_bh(&orig_node->neigh_list_lock);
|
||||
return neigh_node;
|
||||
}
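
The conversion just above is the core pattern of this patch: neigh_node lifetime is now governed by an atomic refcount plus RCU. create_neighbor() starts the count at 2 (one reference owned by the neighbor list, one handed to the caller), and neigh_node_free_ref() only schedules the kfree() through call_rcu() once the last reference is gone, so concurrent RCU readers can never touch freed memory. A hedged sketch of the reader side (the loop body is illustrative, not taken from this commit):

struct neigh_node *neigh_node;
struct hlist_node *node;

rcu_read_lock();
hlist_for_each_entry_rcu(neigh_node, node, &orig_node->neigh_list, list) {
	/* entries already being torn down refuse new references */
	if (!atomic_inc_not_zero(&neigh_node->refcount))
		continue;

	/* neigh_node is now safe to use, even past rcu_read_unlock() */

	neigh_node_free_ref(neigh_node);	/* drop our reference */
}
rcu_read_unlock();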

static void free_orig_node(void *data, void *arg)
static void orig_node_free_rcu(struct rcu_head *rcu)
{
struct list_head *list_pos, *list_pos_tmp;
struct neigh_node *neigh_node;
struct orig_node *orig_node = (struct orig_node *)data;
struct bat_priv *bat_priv = (struct bat_priv *)arg;
struct hlist_node *node, *node_tmp;
struct neigh_node *neigh_node, *tmp_neigh_node;
struct orig_node *orig_node;

/* for all neighbors towards this originator ... */
list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
neigh_node = list_entry(list_pos, struct neigh_node, list);
orig_node = container_of(rcu, struct orig_node, rcu);

list_del(list_pos);
kfree(neigh_node);
spin_lock_bh(&orig_node->neigh_list_lock);

/* for all bonding members ... */
list_for_each_entry_safe(neigh_node, tmp_neigh_node,
&orig_node->bond_list, bonding_list) {
list_del_rcu(&neigh_node->bonding_list);
neigh_node_free_ref(neigh_node);
}

/* for all neighbors towards this originator ... */
hlist_for_each_entry_safe(neigh_node, node, node_tmp,
&orig_node->neigh_list, list) {
hlist_del_rcu(&neigh_node->list);
neigh_node_free_ref(neigh_node);
}

spin_unlock_bh(&orig_node->neigh_list_lock);

frag_list_free(&orig_node->frag_list);
hna_global_del_orig(bat_priv, orig_node, "originator timed out");
hna_global_del_orig(orig_node->bat_priv, orig_node,
"originator timed out");

kfree(orig_node->bcast_own);
kfree(orig_node->bcast_own_sum);
kfree(orig_node);
}

void orig_node_free_ref(struct orig_node *orig_node)
{
if (atomic_dec_and_test(&orig_node->refcount))
call_rcu(&orig_node->rcu, orig_node_free_rcu);
}

void originator_free(struct bat_priv *bat_priv)
{
if (!bat_priv->orig_hash)
struct hashtable_t *hash = bat_priv->orig_hash;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* spinlock to protect write access */
struct orig_node *orig_node;
int i;

if (!hash)
return;

cancel_delayed_work_sync(&bat_priv->orig_work);

spin_lock_bh(&bat_priv->orig_hash_lock);
hash_delete(bat_priv->orig_hash, free_orig_node, bat_priv);
bat_priv->orig_hash = NULL;
spin_unlock_bh(&bat_priv->orig_hash_lock);

for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];

spin_lock_bh(list_lock);
hlist_for_each_entry_safe(orig_node, node, node_tmp,
head, hash_entry) {

hlist_del_rcu(node);
orig_node_free_ref(orig_node);
}
spin_unlock_bh(list_lock);
}

hash_destroy(hash);
}

/* this function finds or creates an originator entry for the given
@ -127,10 +183,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
int size;
int hash_added;

orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
compare_orig, choose_orig,
addr));

orig_node = orig_hash_find(bat_priv, addr);
if (orig_node)
return orig_node;

@ -141,8 +194,16 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
if (!orig_node)
return NULL;

INIT_LIST_HEAD(&orig_node->neigh_list);
INIT_HLIST_HEAD(&orig_node->neigh_list);
INIT_LIST_HEAD(&orig_node->bond_list);
spin_lock_init(&orig_node->ogm_cnt_lock);
spin_lock_init(&orig_node->bcast_seqno_lock);
spin_lock_init(&orig_node->neigh_list_lock);

/* extra reference for return */
atomic_set(&orig_node->refcount, 2);

orig_node->bat_priv = bat_priv;
memcpy(orig_node->orig, addr, ETH_ALEN);
orig_node->router = NULL;
orig_node->hna_buff = NULL;
@ -151,6 +212,8 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
orig_node->batman_seqno_reset = jiffies - 1
- msecs_to_jiffies(RESET_PROTECTION_MS);

atomic_set(&orig_node->bond_candidates, 0);

size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;

orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
@ -166,8 +229,8 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
if (!orig_node->bcast_own_sum)
goto free_bcast_own;

hash_added = hash_add(bat_priv->orig_hash, compare_orig, choose_orig,
orig_node);
hash_added = hash_add(bat_priv->orig_hash, compare_orig,
choose_orig, orig_node, &orig_node->hash_entry);
if (hash_added < 0)
goto free_bcast_own_sum;
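
Note the new hash_add() contract in the last hunk: the table now links an hlist_node embedded in the object itself (&orig_node->hash_entry) instead of kmalloc'ing a struct element_t wrapper per entry, which saves an allocation on every insert and makes RCU traversal of the buckets possible. The compare callback receives the hlist_node back and recovers the object with container_of(); the memcmp() then works because the MAC key is assumed to sit at the start of the structure. A condensed sketch (struct heavily abbreviated):

struct orig_node {
	uint8_t orig[ETH_ALEN];		/* key; assumed to be the first member */
	struct hlist_node hash_entry;	/* intrusive hash linkage */
	/* ... */
};

static int compare_orig(struct hlist_node *node, void *data2)
{
	void *data1 = container_of(node, struct orig_node, hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}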

@ -185,23 +248,30 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
struct orig_node *orig_node,
struct neigh_node **best_neigh_node)
{
struct list_head *list_pos, *list_pos_tmp;
struct hlist_node *node, *node_tmp;
struct neigh_node *neigh_node;
bool neigh_purged = false;

*best_neigh_node = NULL;

spin_lock_bh(&orig_node->neigh_list_lock);

/* for all neighbors towards this originator ... */
list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
neigh_node = list_entry(list_pos, struct neigh_node, list);
hlist_for_each_entry_safe(neigh_node, node, node_tmp,
&orig_node->neigh_list, list) {

if ((time_after(jiffies,
neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
(neigh_node->if_incoming->if_status == IF_INACTIVE) ||
(neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
(neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {

if (neigh_node->if_incoming->if_status ==
IF_TO_BE_REMOVED)
if ((neigh_node->if_incoming->if_status ==
IF_INACTIVE) ||
(neigh_node->if_incoming->if_status ==
IF_NOT_IN_USE) ||
(neigh_node->if_incoming->if_status ==
IF_TO_BE_REMOVED))
bat_dbg(DBG_BATMAN, bat_priv,
"neighbor purge: originator %pM, "
"neighbor: %pM, iface: %s\n",
@ -215,14 +285,18 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
(neigh_node->last_valid / HZ));

neigh_purged = true;
list_del(list_pos);
kfree(neigh_node);

hlist_del_rcu(&neigh_node->list);
bonding_candidate_del(orig_node, neigh_node);
neigh_node_free_ref(neigh_node);
} else {
if ((!*best_neigh_node) ||
(neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
*best_neigh_node = neigh_node;
}
}

spin_unlock_bh(&orig_node->neigh_list_lock);
return neigh_purged;
}

@ -245,9 +319,6 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
best_neigh_node,
orig_node->hna_buff,
orig_node->hna_buff_len);
/* update bonding candidates, we could have lost
* some candidates. */
update_bonding_candidates(orig_node);
}
}

@ -257,40 +328,38 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
static void _purge_orig(struct bat_priv *bat_priv)
{
struct hashtable_t *hash = bat_priv->orig_hash;
struct hlist_node *walk, *safe;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
struct element_t *bucket;
spinlock_t *list_lock; /* spinlock to protect write access */
struct orig_node *orig_node;
int i;

if (!hash)
return;

spin_lock_bh(&bat_priv->orig_hash_lock);

/* for all origins... */
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];

hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
orig_node = bucket->data;

spin_lock_bh(list_lock);
hlist_for_each_entry_safe(orig_node, node, node_tmp,
head, hash_entry) {
if (purge_orig_node(bat_priv, orig_node)) {
if (orig_node->gw_flags)
gw_node_delete(bat_priv, orig_node);
hlist_del(walk);
kfree(bucket);
free_orig_node(orig_node, bat_priv);
hlist_del_rcu(node);
orig_node_free_ref(orig_node);
continue;
}

if (time_after(jiffies, orig_node->last_frag_packet +
msecs_to_jiffies(FRAG_TIMEOUT)))
frag_list_free(&orig_node->frag_list);
}
spin_unlock_bh(list_lock);
}

spin_unlock_bh(&bat_priv->orig_hash_lock);

gw_node_purge(bat_priv);
gw_election(bat_priv);

@ -318,9 +387,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
struct net_device *net_dev = (struct net_device *)seq->private;
struct bat_priv *bat_priv = netdev_priv(net_dev);
struct hashtable_t *hash = bat_priv->orig_hash;
struct hlist_node *walk;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
struct element_t *bucket;
struct orig_node *orig_node;
struct neigh_node *neigh_node;
int batman_count = 0;
@ -348,14 +416,11 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
"Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
"outgoingIF", "Potential nexthops");

spin_lock_bh(&bat_priv->orig_hash_lock);

for (i = 0; i < hash->size; i++) {
head = &hash->table[i];

hlist_for_each_entry(bucket, walk, head, hlist) {
orig_node = bucket->data;

rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
if (!orig_node->router)
continue;

@ -374,8 +439,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
neigh_node->addr,
neigh_node->if_incoming->net_dev->name);

list_for_each_entry(neigh_node, &orig_node->neigh_list,
list) {
hlist_for_each_entry_rcu(neigh_node, node_tmp,
&orig_node->neigh_list, list) {
seq_printf(seq, " %pM (%3i)", neigh_node->addr,
neigh_node->tq_avg);
}
@ -383,10 +448,9 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
seq_printf(seq, "\n");
batman_count++;
}
rcu_read_unlock();
}

spin_unlock_bh(&bat_priv->orig_hash_lock);

if ((batman_count == 0))
seq_printf(seq, "No batman nodes in range ...\n");

@ -423,36 +487,36 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
return 0;
}

int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
{
struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct hashtable_t *hash = bat_priv->orig_hash;
struct hlist_node *walk;
struct hlist_node *node;
struct hlist_head *head;
struct element_t *bucket;
struct orig_node *orig_node;
int i;
int i, ret;

/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
spin_lock_bh(&bat_priv->orig_hash_lock);

for (i = 0; i < hash->size; i++) {
head = &hash->table[i];

hlist_for_each_entry(bucket, walk, head, hlist) {
orig_node = bucket->data;
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
spin_lock_bh(&orig_node->ogm_cnt_lock);
ret = orig_node_add_if(orig_node, max_if_num);
spin_unlock_bh(&orig_node->ogm_cnt_lock);

if (orig_node_add_if(orig_node, max_if_num) == -1)
if (ret == -1)
goto err;
}
rcu_read_unlock();
}

spin_unlock_bh(&bat_priv->orig_hash_lock);
return 0;

err:
spin_unlock_bh(&bat_priv->orig_hash_lock);
rcu_read_unlock();
return -ENOMEM;
}

@ -508,57 +572,55 @@ free_own_sum:
return 0;
}

int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
{
struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct hashtable_t *hash = bat_priv->orig_hash;
struct hlist_node *walk;
struct hlist_node *node;
struct hlist_head *head;
struct element_t *bucket;
struct batman_if *batman_if_tmp;
struct hard_iface *hard_iface_tmp;
struct orig_node *orig_node;
int i, ret;

/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
spin_lock_bh(&bat_priv->orig_hash_lock);

for (i = 0; i < hash->size; i++) {
head = &hash->table[i];

hlist_for_each_entry(bucket, walk, head, hlist) {
orig_node = bucket->data;

rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
spin_lock_bh(&orig_node->ogm_cnt_lock);
ret = orig_node_del_if(orig_node, max_if_num,
batman_if->if_num);
hard_iface->if_num);
spin_unlock_bh(&orig_node->ogm_cnt_lock);

if (ret == -1)
goto err;
}
rcu_read_unlock();
}

/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
rcu_read_lock();
list_for_each_entry_rcu(batman_if_tmp, &if_list, list) {
if (batman_if_tmp->if_status == IF_NOT_IN_USE)
list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) {
if (hard_iface_tmp->if_status == IF_NOT_IN_USE)
continue;

if (batman_if == batman_if_tmp)
if (hard_iface == hard_iface_tmp)
continue;

if (batman_if->soft_iface != batman_if_tmp->soft_iface)
if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
continue;

if (batman_if_tmp->if_num > batman_if->if_num)
batman_if_tmp->if_num--;
if (hard_iface_tmp->if_num > hard_iface->if_num)
hard_iface_tmp->if_num--;
}
rcu_read_unlock();

batman_if->if_num = -1;
spin_unlock_bh(&bat_priv->orig_hash_lock);
hard_iface->if_num = -1;
return 0;

err:
spin_unlock_bh(&bat_priv->orig_hash_lock);
rcu_read_unlock();
return -ENOMEM;
}

@ -22,21 +22,28 @@
#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
#define _NET_BATMAN_ADV_ORIGINATOR_H_

#include "hash.h"

int originator_init(struct bat_priv *bat_priv);
void originator_free(struct bat_priv *bat_priv);
void purge_orig_ref(struct bat_priv *bat_priv);
void orig_node_free_ref(struct orig_node *orig_node);
struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr);
struct neigh_node *
create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
uint8_t *neigh, struct batman_if *if_incoming);
struct neigh_node *create_neighbor(struct orig_node *orig_node,
struct orig_node *orig_neigh_node,
uint8_t *neigh,
struct hard_iface *if_incoming);
void neigh_node_free_ref(struct neigh_node *neigh_node);
int orig_seq_print_text(struct seq_file *seq, void *offset);
int orig_hash_add_if(struct batman_if *batman_if, int max_if_num);
int orig_hash_del_if(struct batman_if *batman_if, int max_if_num);
int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num);
int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);


/* returns 1 if they are the same originator */
static inline int compare_orig(void *data1, void *data2)
static inline int compare_orig(struct hlist_node *node, void *data2)
{
void *data1 = container_of(node, struct orig_node, hash_entry);

return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

@ -61,4 +68,35 @@ static inline int choose_orig(void *data, int32_t size)
return hash % size;
}

static inline struct orig_node *orig_hash_find(struct bat_priv *bat_priv,
void *data)
{
struct hashtable_t *hash = bat_priv->orig_hash;
struct hlist_head *head;
struct hlist_node *node;
struct orig_node *orig_node, *orig_node_tmp = NULL;
int index;

if (!hash)
return NULL;

index = choose_orig(data, hash->size);
head = &hash->table[index];

rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
if (!compare_eth(orig_node, data))
continue;

if (!atomic_inc_not_zero(&orig_node->refcount))
continue;

orig_node_tmp = orig_node;
break;
}
rcu_read_unlock();

return orig_node_tmp;
}

#endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */
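
orig_hash_find() above couples the RCU lookup with atomic_inc_not_zero(): an entry whose refcount has already hit zero is skipped, so the function can never return a node that is mid-teardown. The flip side is that every successful call must be balanced by orig_node_free_ref(). A hedged caller sketch (the receive-path framing is illustrative, not from this commit):

struct orig_node *orig_node;

orig_node = orig_hash_find(bat_priv, ethhdr->h_source);
if (!orig_node)
	return NET_RX_DROP;	/* unknown originator */

/* orig_node is guaranteed alive here */

orig_node_free_ref(orig_node);	/* balance the reference from the find */
return NET_RX_SUCCESS;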

File diff suppressed because it is too large

@ -22,24 +22,25 @@
#ifndef _NET_BATMAN_ADV_ROUTING_H_
#define _NET_BATMAN_ADV_ROUTING_H_

void slide_own_bcast_window(struct batman_if *batman_if);
void slide_own_bcast_window(struct hard_iface *hard_iface);
void receive_bat_packet(struct ethhdr *ethhdr,
struct batman_packet *batman_packet,
unsigned char *hna_buff, int hna_buff_len,
struct batman_if *if_incoming);
struct hard_iface *if_incoming);
void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
struct neigh_node *neigh_node, unsigned char *hna_buff,
int hna_buff_len);
int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
int hdr_size);
int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if);
int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if);
int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if);
int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if);
int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if);
struct neigh_node *find_router(struct bat_priv *bat_priv,
struct orig_node *orig_node, struct batman_if *recv_if);
void update_bonding_candidates(struct orig_node *orig_node);
struct orig_node *orig_node,
struct hard_iface *recv_if);
void bonding_candidate_del(struct orig_node *orig_node,
struct neigh_node *neigh_node);

#endif /* _NET_BATMAN_ADV_ROUTING_H_ */

@ -56,20 +56,20 @@ static unsigned long forward_send_time(void)
/* send out an already prepared packet to the given address via the
* specified batman interface */
int send_skb_packet(struct sk_buff *skb,
struct batman_if *batman_if,
struct hard_iface *hard_iface,
uint8_t *dst_addr)
{
struct ethhdr *ethhdr;

if (batman_if->if_status != IF_ACTIVE)
if (hard_iface->if_status != IF_ACTIVE)
goto send_skb_err;

if (unlikely(!batman_if->net_dev))
if (unlikely(!hard_iface->net_dev))
goto send_skb_err;

if (!(batman_if->net_dev->flags & IFF_UP)) {
if (!(hard_iface->net_dev->flags & IFF_UP)) {
pr_warning("Interface %s is not up - can't send packet via "
"that interface!\n", batman_if->net_dev->name);
"that interface!\n", hard_iface->net_dev->name);
goto send_skb_err;
}

@ -80,7 +80,7 @@ int send_skb_packet(struct sk_buff *skb,
skb_reset_mac_header(skb);

ethhdr = (struct ethhdr *) skb_mac_header(skb);
memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

@ -88,7 +88,7 @@ int send_skb_packet(struct sk_buff *skb,
skb->priority = TC_PRIO_CONTROL;
skb->protocol = __constant_htons(ETH_P_BATMAN);

skb->dev = batman_if->net_dev;
skb->dev = hard_iface->net_dev;

/* dev_queue_xmit() returns a negative result on error. However on
* congestion and traffic shaping, it drops and returns NET_XMIT_DROP
@ -102,16 +102,16 @@ send_skb_err:

/* Send a packet to a given interface */
static void send_packet_to_if(struct forw_packet *forw_packet,
struct batman_if *batman_if)
struct hard_iface *hard_iface)
{
struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
char *fwd_str;
uint8_t packet_num;
int16_t buff_pos;
struct batman_packet *batman_packet;
struct sk_buff *skb;

if (batman_if->if_status != IF_ACTIVE)
if (hard_iface->if_status != IF_ACTIVE)
return;

packet_num = 0;
@ -126,7 +126,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
/* we might have aggregated direct link packets with an
* ordinary base packet */
if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
(forw_packet->if_incoming == batman_if))
(forw_packet->if_incoming == hard_iface))
batman_packet->flags |= DIRECTLINK;
else
batman_packet->flags &= ~DIRECTLINK;
@ -142,7 +142,8 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
batman_packet->tq, batman_packet->ttl,
(batman_packet->flags & DIRECTLINK ?
"on" : "off"),
batman_if->net_dev->name, batman_if->net_dev->dev_addr);
hard_iface->net_dev->name,
hard_iface->net_dev->dev_addr);

buff_pos += sizeof(struct batman_packet) +
(batman_packet->num_hna * ETH_ALEN);
@ -154,13 +155,13 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
/* create clone because function is called more than once */
skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
if (skb)
send_skb_packet(skb, batman_if, broadcast_addr);
send_skb_packet(skb, hard_iface, broadcast_addr);
}

/* send a batman packet */
static void send_packet(struct forw_packet *forw_packet)
{
struct batman_if *batman_if;
struct hard_iface *hard_iface;
struct net_device *soft_iface;
struct bat_priv *bat_priv;
struct batman_packet *batman_packet =
@ -204,17 +205,17 @@ static void send_packet(struct forw_packet *forw_packet)

/* broadcast on every interface */
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
if (batman_if->soft_iface != soft_iface)
list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
if (hard_iface->soft_iface != soft_iface)
continue;

send_packet_to_if(forw_packet, batman_if);
send_packet_to_if(forw_packet, hard_iface);
}
rcu_read_unlock();
}

static void rebuild_batman_packet(struct bat_priv *bat_priv,
struct batman_if *batman_if)
struct hard_iface *hard_iface)
{
int new_len;
unsigned char *new_buff;
@ -226,7 +227,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,

/* keep old buffer if kmalloc should fail */
if (new_buff) {
memcpy(new_buff, batman_if->packet_buff,
memcpy(new_buff, hard_iface->packet_buff,
sizeof(struct batman_packet));
batman_packet = (struct batman_packet *)new_buff;

@ -234,21 +235,21 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
new_buff + sizeof(struct batman_packet),
new_len - sizeof(struct batman_packet));

kfree(batman_if->packet_buff);
batman_if->packet_buff = new_buff;
batman_if->packet_len = new_len;
kfree(hard_iface->packet_buff);
hard_iface->packet_buff = new_buff;
hard_iface->packet_len = new_len;
}
}

void schedule_own_packet(struct batman_if *batman_if)
void schedule_own_packet(struct hard_iface *hard_iface)
{
struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
unsigned long send_time;
struct batman_packet *batman_packet;
int vis_server;

if ((batman_if->if_status == IF_NOT_IN_USE) ||
(batman_if->if_status == IF_TO_BE_REMOVED))
if ((hard_iface->if_status == IF_NOT_IN_USE) ||
(hard_iface->if_status == IF_TO_BE_REMOVED))
return;

vis_server = atomic_read(&bat_priv->vis_mode);
@ -260,51 +261,51 @@ void schedule_own_packet(struct batman_if *batman_if)
* outdated packets (especially uninitialized mac addresses) in the
* packet queue
*/
if (batman_if->if_status == IF_TO_BE_ACTIVATED)
batman_if->if_status = IF_ACTIVE;
if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
hard_iface->if_status = IF_ACTIVE;

/* if local hna has changed and interface is a primary interface */
if ((atomic_read(&bat_priv->hna_local_changed)) &&
(batman_if == bat_priv->primary_if))
rebuild_batman_packet(bat_priv, batman_if);
(hard_iface == bat_priv->primary_if))
rebuild_batman_packet(bat_priv, hard_iface);

/**
* NOTE: packet_buff might just have been re-allocated in
* rebuild_batman_packet()
*/
batman_packet = (struct batman_packet *)batman_if->packet_buff;
batman_packet = (struct batman_packet *)hard_iface->packet_buff;

/* change sequence number to network order */
batman_packet->seqno =
htonl((uint32_t)atomic_read(&batman_if->seqno));
htonl((uint32_t)atomic_read(&hard_iface->seqno));

if (vis_server == VIS_TYPE_SERVER_SYNC)
batman_packet->flags |= VIS_SERVER;
else
batman_packet->flags &= ~VIS_SERVER;

if ((batman_if == bat_priv->primary_if) &&
if ((hard_iface == bat_priv->primary_if) &&
(atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
batman_packet->gw_flags =
(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
else
batman_packet->gw_flags = 0;

atomic_inc(&batman_if->seqno);
atomic_inc(&hard_iface->seqno);

slide_own_bcast_window(batman_if);
slide_own_bcast_window(hard_iface);
send_time = own_send_time(bat_priv);
add_bat_packet_to_list(bat_priv,
batman_if->packet_buff,
batman_if->packet_len,
batman_if, 1, send_time);
hard_iface->packet_buff,
hard_iface->packet_len,
hard_iface, 1, send_time);
}

void schedule_forward_packet(struct orig_node *orig_node,
struct ethhdr *ethhdr,
struct batman_packet *batman_packet,
uint8_t directlink, int hna_buff_len,
struct batman_if *if_incoming)
struct hard_iface *if_incoming)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
unsigned char in_tq, in_ttl, tq_avg = 0;
@ -326,7 +327,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {

/* rebroadcast ogm of best ranking neighbor as is */
if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
if (!compare_eth(orig_node->router->addr, ethhdr->h_source)) {
batman_packet->tq = orig_node->router->tq_avg;

if (orig_node->router->last_ttl)
@ -443,7 +444,7 @@ out:

static void send_outstanding_bcast_packet(struct work_struct *work)
{
struct batman_if *batman_if;
struct hard_iface *hard_iface;
struct delayed_work *delayed_work =
container_of(work, struct delayed_work, work);
struct forw_packet *forw_packet =
@ -461,14 +462,14 @@ static void send_outstanding_bcast_packet(struct work_struct *work)

/* rebroadcast packet */
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
if (batman_if->soft_iface != soft_iface)
list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
if (hard_iface->soft_iface != soft_iface)
continue;

/* send a copy of the saved skb */
skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
if (skb1)
send_skb_packet(skb1, batman_if, broadcast_addr);
send_skb_packet(skb1, hard_iface, broadcast_addr);
}
rcu_read_unlock();

@ -521,15 +522,15 @@ out:
}

void purge_outstanding_packets(struct bat_priv *bat_priv,
struct batman_if *batman_if)
struct hard_iface *hard_iface)
{
struct forw_packet *forw_packet;
struct hlist_node *tmp_node, *safe_tmp_node;

if (batman_if)
if (hard_iface)
bat_dbg(DBG_BATMAN, bat_priv,
"purge_outstanding_packets(): %s\n",
batman_if->net_dev->name);
hard_iface->net_dev->name);
else
bat_dbg(DBG_BATMAN, bat_priv,
"purge_outstanding_packets()\n");
@ -543,8 +544,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
* if purge_outstanding_packets() was called with an argument
* we delete only packets belonging to the given interface
*/
if ((batman_if) &&
(forw_packet->if_incoming != batman_if))
if ((hard_iface) &&
(forw_packet->if_incoming != hard_iface))
continue;

spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
@ -567,8 +568,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
* if purge_outstanding_packets() was called with an argument
* we delete only packets belonging to the given interface
*/
if ((batman_if) &&
(forw_packet->if_incoming != batman_if))
if ((hard_iface) &&
(forw_packet->if_incoming != hard_iface))
continue;

spin_unlock_bh(&bat_priv->forw_bat_list_lock);

@ -23,17 +23,17 @@
#define _NET_BATMAN_ADV_SEND_H_

int send_skb_packet(struct sk_buff *skb,
struct batman_if *batman_if,
struct hard_iface *hard_iface,
uint8_t *dst_addr);
void schedule_own_packet(struct batman_if *batman_if);
void schedule_own_packet(struct hard_iface *hard_iface);
void schedule_forward_packet(struct orig_node *orig_node,
struct ethhdr *ethhdr,
struct batman_packet *batman_packet,
uint8_t directlink, int hna_buff_len,
struct batman_if *if_outgoing);
struct hard_iface *if_outgoing);
int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb);
void send_outstanding_bat_packet(struct work_struct *work);
void purge_outstanding_packets(struct bat_priv *bat_priv,
struct batman_if *batman_if);
struct hard_iface *hard_iface);

#endif /* _NET_BATMAN_ADV_SEND_H_ */
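
send_packet() and send_outstanding_bcast_packet() above share one broadcast idiom: iterate hardif_list under rcu_read_lock(), skip interfaces attached to a different soft_iface, and hand each interface its own skb_clone(), because a transmitted skb is consumed by the driver. Condensed sketch of that loop, using the names from this patch:

struct hard_iface *hard_iface;
struct sk_buff *skb1;

rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
	if (hard_iface->soft_iface != soft_iface)
		continue;

	/* one clone per interface; the original skb stays queued */
	skb1 = skb_clone(skb, GFP_ATOMIC);
	if (skb1)
		send_skb_packet(skb1, hard_iface, broadcast_addr);
}
rcu_read_unlock();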

@ -29,14 +29,12 @@
#include "hash.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "send.h"
#include "bat_sysfs.h"
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include "unicast.h"
#include "routing.h"


static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
@ -78,20 +76,18 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
return 0;
}

static void softif_neigh_free_ref(struct kref *refcount)
{
struct softif_neigh *softif_neigh;

softif_neigh = container_of(refcount, struct softif_neigh, refcount);
kfree(softif_neigh);
}

static void softif_neigh_free_rcu(struct rcu_head *rcu)
{
struct softif_neigh *softif_neigh;

softif_neigh = container_of(rcu, struct softif_neigh, rcu);
kref_put(&softif_neigh->refcount, softif_neigh_free_ref);
kfree(softif_neigh);
}

static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
{
if (atomic_dec_and_test(&softif_neigh->refcount))
call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
}
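
This hunk is the template for the kref-to-atomic_t conversions throughout the commit: the release path becomes an explicit atomic_dec_and_test() followed by call_rcu(), instead of a kref_put() whose release function then had to schedule the RCU callback itself. It also lets lookup paths use atomic_inc_not_zero() (as softif_neigh_get() does below), for which the kref API of this kernel generation offered no equivalent. The two halves of the pattern, following the patch's naming:

/* acquire side (inside an RCU list walk): only succeeds while alive */
if (!atomic_inc_not_zero(&softif_neigh->refcount))
	continue;	/* skip: entry is already being torn down */

/* release side: the last reference defers kfree() past the grace period */
if (atomic_dec_and_test(&softif_neigh->refcount))
	call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);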
|
||||
|
||||
void softif_neigh_purge(struct bat_priv *bat_priv)
|
||||
@ -118,11 +114,10 @@ void softif_neigh_purge(struct bat_priv *bat_priv)
|
||||
softif_neigh->addr, softif_neigh->vid);
|
||||
softif_neigh_tmp = bat_priv->softif_neigh;
|
||||
bat_priv->softif_neigh = NULL;
|
||||
kref_put(&softif_neigh_tmp->refcount,
|
||||
softif_neigh_free_ref);
|
||||
softif_neigh_free_ref(softif_neigh_tmp);
|
||||
}
|
||||
|
||||
call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
|
||||
softif_neigh_free_ref(softif_neigh);
|
||||
}
|
||||
|
||||
spin_unlock_bh(&bat_priv->softif_neigh_lock);
|
||||
@ -137,14 +132,17 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
|
||||
rcu_read_lock();
|
||||
hlist_for_each_entry_rcu(softif_neigh, node,
|
||||
&bat_priv->softif_neigh_list, list) {
|
||||
if (memcmp(softif_neigh->addr, addr, ETH_ALEN) != 0)
|
||||
if (!compare_eth(softif_neigh->addr, addr))
|
||||
continue;
|
||||
|
||||
if (softif_neigh->vid != vid)
|
||||
continue;
|
||||
|
||||
if (!atomic_inc_not_zero(&softif_neigh->refcount))
|
||||
continue;
|
||||
|
||||
softif_neigh->last_seen = jiffies;
|
||||
goto found;
|
||||
goto out;
|
||||
}
|
||||
|
||||
softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC);
|
||||
@ -154,15 +152,14 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
|
||||
memcpy(softif_neigh->addr, addr, ETH_ALEN);
|
||||
softif_neigh->vid = vid;
|
||||
softif_neigh->last_seen = jiffies;
|
||||
kref_init(&softif_neigh->refcount);
|
||||
/* initialize with 2 - caller decrements counter by one */
|
||||
atomic_set(&softif_neigh->refcount, 2);
|
||||
|
||||
INIT_HLIST_NODE(&softif_neigh->list);
|
||||
spin_lock_bh(&bat_priv->softif_neigh_lock);
|
||||
hlist_add_head_rcu(&softif_neigh->list, &bat_priv->softif_neigh_list);
|
||||
spin_unlock_bh(&bat_priv->softif_neigh_lock);
|
||||
|
||||
found:
|
||||
kref_get(&softif_neigh->refcount);
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
return softif_neigh;
|
||||
@ -174,8 +171,6 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
|
||||
struct bat_priv *bat_priv = netdev_priv(net_dev);
|
||||
struct softif_neigh *softif_neigh;
|
||||
struct hlist_node *node;
|
||||
size_t buf_size, pos;
|
||||
char *buff;
|
||||
|
||||
if (!bat_priv->primary_if) {
|
||||
return seq_printf(seq, "BATMAN mesh %s disabled - "
|
||||
@ -185,33 +180,15 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
|
||||
|
||||
seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name);
|
||||
|
||||
buf_size = 1;
|
||||
/* Estimate length for: " xx:xx:xx:xx:xx:xx\n" */
|
||||
rcu_read_lock();
|
||||
hlist_for_each_entry_rcu(softif_neigh, node,
|
||||
&bat_priv->softif_neigh_list, list)
|
||||
buf_size += 30;
|
||||
rcu_read_unlock();
|
||||
|
||||
buff = kmalloc(buf_size, GFP_ATOMIC);
|
||||
if (!buff)
|
||||
return -ENOMEM;
|
||||
|
||||
buff[0] = '\0';
|
||||
pos = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
hlist_for_each_entry_rcu(softif_neigh, node,
|
||||
&bat_priv->softif_neigh_list, list) {
|
||||
pos += snprintf(buff + pos, 31, "%s %pM (vid: %d)\n",
|
||||
seq_printf(seq, "%s %pM (vid: %d)\n",
|
||||
bat_priv->softif_neigh == softif_neigh
|
||||
? "=>" : " ", softif_neigh->addr,
|
||||
softif_neigh->vid);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
seq_printf(seq, "%s", buff);
|
||||
kfree(buff);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -266,7 +243,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
|
||||
softif_neigh->addr, softif_neigh->vid);
|
||||
softif_neigh_tmp = bat_priv->softif_neigh;
|
||||
bat_priv->softif_neigh = softif_neigh;
|
||||
kref_put(&softif_neigh_tmp->refcount, softif_neigh_free_ref);
|
||||
softif_neigh_free_ref(softif_neigh_tmp);
|
||||
/* we need to hold the additional reference */
|
||||
goto err;
|
||||
}
|
||||
@ -284,7 +261,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
|
||||
}
|
||||
|
||||
out:
|
||||
kref_put(&softif_neigh->refcount, softif_neigh_free_ref);
|
||||
softif_neigh_free_ref(softif_neigh);
|
||||
err:
|
||||
kfree_skb(skb);
|
||||
return;
|
||||
@ -437,7 +414,7 @@ end:
|
||||
}
|
||||
|
||||
void interface_rx(struct net_device *soft_iface,
|
||||
struct sk_buff *skb, struct batman_if *recv_if,
|
||||
struct sk_buff *skb, struct hard_iface *recv_if,
|
||||
int hdr_size)
|
||||
{
|
||||
struct bat_priv *bat_priv = netdev_priv(soft_iface);
|
||||
@ -485,7 +462,7 @@ void interface_rx(struct net_device *soft_iface,
|
||||
|
||||
memcpy(unicast_packet->dest,
|
||||
bat_priv->softif_neigh->addr, ETH_ALEN);
|
||||
ret = route_unicast_packet(skb, recv_if, hdr_size);
|
||||
ret = route_unicast_packet(skb, recv_if);
|
||||
if (ret == NET_RX_DROP)
|
||||
goto dropped;
|
||||
|
||||
@ -645,6 +622,19 @@ void softif_destroy(struct net_device *soft_iface)
|
||||
unregister_netdevice(soft_iface);
|
||||
}
|
||||
|
||||
int softif_is_valid(struct net_device *net_dev)
|
||||
{
|
||||
#ifdef HAVE_NET_DEVICE_OPS
|
||||
if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
|
||||
return 1;
|
||||
#else
|
||||
if (net_dev->hard_start_xmit == interface_tx)
|
||||
return 1;
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* ethtool */
|
||||
static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
||||
{
|
||||
|
@ -27,9 +27,10 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset);
|
||||
void softif_neigh_purge(struct bat_priv *bat_priv);
|
||||
int interface_tx(struct sk_buff *skb, struct net_device *soft_iface);
|
||||
void interface_rx(struct net_device *soft_iface,
|
||||
struct sk_buff *skb, struct batman_if *recv_if,
|
||||
struct sk_buff *skb, struct hard_iface *recv_if,
|
||||
int hdr_size);
|
||||
struct net_device *softif_create(char *name);
|
||||
void softif_destroy(struct net_device *soft_iface);
|
||||
int softif_is_valid(struct net_device *net_dev);
|
||||
|
||||
#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
|
||||
|
@ -30,12 +30,85 @@ static void _hna_global_del_orig(struct bat_priv *bat_priv,
|
||||
struct hna_global_entry *hna_global_entry,
|
||||
char *message);
|
||||
|
||||
/* returns 1 if they are the same mac addr */
|
||||
static int compare_lhna(struct hlist_node *node, void *data2)
|
||||
{
|
||||
void *data1 = container_of(node, struct hna_local_entry, hash_entry);
|
||||
|
||||
return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
|
||||
}
|
||||
|
||||
/* returns 1 if they are the same mac addr */
|
||||
static int compare_ghna(struct hlist_node *node, void *data2)
|
||||
{
|
||||
void *data1 = container_of(node, struct hna_global_entry, hash_entry);
|
||||
|
||||
return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
|
||||
}
|
||||
|
||||
static void hna_local_start_timer(struct bat_priv *bat_priv)
|
||||
{
|
||||
INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
|
||||
queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
|
||||
}
|
||||
|
||||
static struct hna_local_entry *hna_local_hash_find(struct bat_priv *bat_priv,
|
||||
void *data)
|
||||
{
|
||||
struct hashtable_t *hash = bat_priv->hna_local_hash;
|
||||
struct hlist_head *head;
|
||||
struct hlist_node *node;
|
||||
struct hna_local_entry *hna_local_entry, *hna_local_entry_tmp = NULL;
|
||||
int index;
|
||||
|
||||
if (!hash)
|
||||
return NULL;
|
||||
|
||||
index = choose_orig(data, hash->size);
|
||||
head = &hash->table[index];
|
||||
|
||||
rcu_read_lock();
|
||||
hlist_for_each_entry_rcu(hna_local_entry, node, head, hash_entry) {
|
||||
if (!compare_eth(hna_local_entry, data))
|
||||
continue;
|
||||
|
||||
hna_local_entry_tmp = hna_local_entry;
|
||||
break;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return hna_local_entry_tmp;
|
||||
}
|
||||
|
||||
static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv,
|
||||
void *data)
|
||||
{
|
||||
struct hashtable_t *hash = bat_priv->hna_global_hash;
|
||||
struct hlist_head *head;
|
||||
struct hlist_node *node;
|
||||
struct hna_global_entry *hna_global_entry;
|
||||
struct hna_global_entry *hna_global_entry_tmp = NULL;
|
||||
int index;
|
||||
|
||||
if (!hash)
|
||||
return NULL;
|
||||
|
||||
index = choose_orig(data, hash->size);
|
||||
head = &hash->table[index];
|
||||
|
||||
rcu_read_lock();
|
||||
hlist_for_each_entry_rcu(hna_global_entry, node, head, hash_entry) {
|
||||
if (!compare_eth(hna_global_entry, data))
|
||||
continue;
|
||||
|
||||
hna_global_entry_tmp = hna_global_entry;
|
||||
break;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return hna_global_entry_tmp;
|
||||
}
|
||||
|
||||
int hna_local_init(struct bat_priv *bat_priv)
|
||||
{
|
||||
if (bat_priv->hna_local_hash)
|
||||
@ -60,10 +133,7 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
|
||||
int required_bytes;
|
||||
|
||||
spin_lock_bh(&bat_priv->hna_lhash_lock);
|
||||
hna_local_entry =
|
||||
((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
|
||||
compare_orig, choose_orig,
|
||||
addr));
|
||||
hna_local_entry = hna_local_hash_find(bat_priv, addr);
|
||||
spin_unlock_bh(&bat_priv->hna_lhash_lock);
|
||||
|
||||
if (hna_local_entry) {
|
||||
@ -99,15 +169,15 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
|
||||
hna_local_entry->last_seen = jiffies;
|
||||
|
||||
/* the batman interface mac address should never be purged */
|
||||
if (compare_orig(addr, soft_iface->dev_addr))
|
||||
if (compare_eth(addr, soft_iface->dev_addr))
|
||||
hna_local_entry->never_purge = 1;
|
||||
else
|
||||
hna_local_entry->never_purge = 0;
|
||||
|
||||
spin_lock_bh(&bat_priv->hna_lhash_lock);
|
||||
|
||||
hash_add(bat_priv->hna_local_hash, compare_orig, choose_orig,
|
||||
hna_local_entry);
|
||||
hash_add(bat_priv->hna_local_hash, compare_lhna, choose_orig,
|
||||
hna_local_entry, &hna_local_entry->hash_entry);
|
||||
bat_priv->num_local_hna++;
|
||||
atomic_set(&bat_priv->hna_local_changed, 1);
|
||||
|
||||
@ -116,9 +186,7 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
|
||||
/* remove address from global hash if present */
|
||||
spin_lock_bh(&bat_priv->hna_ghash_lock);
|
||||
|
||||
hna_global_entry = ((struct hna_global_entry *)
|
||||
hash_find(bat_priv->hna_global_hash,
|
||||
compare_orig, choose_orig, addr));
|
||||
hna_global_entry = hna_global_hash_find(bat_priv, addr);
|
||||
|
||||
if (hna_global_entry)
|
||||
_hna_global_del_orig(bat_priv, hna_global_entry,
|
||||
@ -132,28 +200,27 @@ int hna_local_fill_buffer(struct bat_priv *bat_priv,
|
||||
{
|
||||
struct hashtable_t *hash = bat_priv->hna_local_hash;
|
||||
struct hna_local_entry *hna_local_entry;
|
||||
struct element_t *bucket;
|
||||
int i;
|
||||
struct hlist_node *walk;
|
||||
struct hlist_node *node;
|
||||
struct hlist_head *head;
|
||||
int count = 0;
|
||||
int i, count = 0;
|
||||
|
||||
spin_lock_bh(&bat_priv->hna_lhash_lock);
|
||||
|
||||
for (i = 0; i < hash->size; i++) {
|
||||
head = &hash->table[i];
|
||||
|
||||
hlist_for_each_entry(bucket, walk, head, hlist) {
|
||||
|
||||
rcu_read_lock();
|
||||
hlist_for_each_entry_rcu(hna_local_entry, node,
|
||||
head, hash_entry) {
|
||||
if (buff_len < (count + 1) * ETH_ALEN)
|
||||
break;
|
||||
|
||||
hna_local_entry = bucket->data;
|
||||
memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr,
|
||||
ETH_ALEN);
|
||||
|
||||
count++;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/* if we did not get all new local hnas see you next time ;-) */
|
||||
@ -170,12 +237,11 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
|
||||
struct bat_priv *bat_priv = netdev_priv(net_dev);
|
||||
struct hashtable_t *hash = bat_priv->hna_local_hash;
|
||||
struct hna_local_entry *hna_local_entry;
|
||||
int i;
|
||||
struct hlist_node *walk;
|
||||
struct hlist_node *node;
|
||||
struct hlist_head *head;
|
||||
struct element_t *bucket;
|
||||
size_t buf_size, pos;
|
||||
char *buff;
|
||||
int i;
|
||||
|
||||
if (!bat_priv->primary_if) {
|
||||
return seq_printf(seq, "BATMAN mesh %s disabled - "
|
||||
@ -194,8 +260,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
|
||||
for (i = 0; i < hash->size; i++) {
|
||||
head = &hash->table[i];
|
||||
|
||||
hlist_for_each(walk, head)
|
||||
rcu_read_lock();
|
||||
__hlist_for_each_rcu(node, head)
|
||||
buf_size += 21;
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
buff = kmalloc(buf_size, GFP_ATOMIC);
|
||||
@ -203,18 +271,20 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
|
||||
spin_unlock_bh(&bat_priv->hna_lhash_lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
buff[0] = '\0';
|
||||
pos = 0;
|
||||
|
||||
for (i = 0; i < hash->size; i++) {
|
||||
head = &hash->table[i];
|
||||
|
||||
hlist_for_each_entry(bucket, walk, head, hlist) {
|
||||
hna_local_entry = bucket->data;
|
||||
|
||||
rcu_read_lock();
|
||||
hlist_for_each_entry_rcu(hna_local_entry, node,
|
||||
head, hash_entry) {
|
||||
pos += snprintf(buff + pos, 22, " * %pM\n",
|
||||
hna_local_entry->addr);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
spin_unlock_bh(&bat_priv->hna_lhash_lock);
|
||||
@ -224,9 +294,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void _hna_local_del(void *data, void *arg)
|
||||
static void _hna_local_del(struct hlist_node *node, void *arg)
|
||||
{
|
||||
struct bat_priv *bat_priv = (struct bat_priv *)arg;
|
||||
void *data = container_of(node, struct hna_local_entry, hash_entry);
|
||||
|
||||
kfree(data);
|
||||
bat_priv->num_local_hna--;
|
||||
@ -240,9 +311,9 @@ static void hna_local_del(struct bat_priv *bat_priv,
|
||||
bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
|
||||
hna_local_entry->addr, message);
|
||||
|
||||
hash_remove(bat_priv->hna_local_hash, compare_orig, choose_orig,
|
||||
hash_remove(bat_priv->hna_local_hash, compare_lhna, choose_orig,
|
||||
hna_local_entry->addr);
|
||||
_hna_local_del(hna_local_entry, bat_priv);
|
||||
_hna_local_del(&hna_local_entry->hash_entry, bat_priv);
|
||||
}
|
||||
|
||||
void hna_local_remove(struct bat_priv *bat_priv,
|
||||
@ -252,9 +323,7 @@ void hna_local_remove(struct bat_priv *bat_priv,
|
||||
|
||||
spin_lock_bh(&bat_priv->hna_lhash_lock);
|
||||
|
||||
hna_local_entry = (struct hna_local_entry *)
|
||||
hash_find(bat_priv->hna_local_hash, compare_orig, choose_orig,
|
||||
addr);
|
||||
hna_local_entry = hna_local_hash_find(bat_priv, addr);
|
||||
|
||||
if (hna_local_entry)
|
||||
hna_local_del(bat_priv, hna_local_entry, message);
|
||||
@ -270,27 +339,29 @@ static void hna_local_purge(struct work_struct *work)
|
||||
container_of(delayed_work, struct bat_priv, hna_work);
|
||||
struct hashtable_t *hash = bat_priv->hna_local_hash;
|
||||
struct hna_local_entry *hna_local_entry;
|
||||
int i;
|
||||
struct hlist_node *walk, *safe;
|
||||
struct hlist_node *node, *node_tmp;
|
||||
struct hlist_head *head;
|
||||
struct element_t *bucket;
|
||||
unsigned long timeout;
|
||||
int i;
|
||||
|
||||
spin_lock_bh(&bat_priv->hna_lhash_lock);
|
||||
|
||||
for (i = 0; i < hash->size; i++) {
|
||||
head = &hash->table[i];
|
||||
|
||||
hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
|
||||
hna_local_entry = bucket->data;
|
||||
hlist_for_each_entry_safe(hna_local_entry, node, node_tmp,
|
||||
head, hash_entry) {
|
||||
if (hna_local_entry->never_purge)
|
||||
continue;
|
||||
|
||||
timeout = hna_local_entry->last_seen;
|
||||
timeout += LOCAL_HNA_TIMEOUT * HZ;
|
||||
|
||||
if ((!hna_local_entry->never_purge) &&
|
||||
time_after(jiffies, timeout))
|
||||
hna_local_del(bat_priv, hna_local_entry,
|
||||
"address timed out");
|
||||
if (time_before(jiffies, timeout))
|
||||
continue;
|
||||
|
||||
hna_local_del(bat_priv, hna_local_entry,
|
||||
"address timed out");
|
||||
}
|
||||
}
|
||||
|
||||
@ -334,9 +405,7 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
|
||||
spin_lock_bh(&bat_priv->hna_ghash_lock);
|
||||
|
||||
hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
|
||||
hna_global_entry = (struct hna_global_entry *)
|
||||
hash_find(bat_priv->hna_global_hash, compare_orig,
|
||||
choose_orig, hna_ptr);
|
||||
hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
|
||||
|
||||
if (!hna_global_entry) {
|
||||
spin_unlock_bh(&bat_priv->hna_ghash_lock);
|
||||
@ -356,8 +425,9 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
|
||||
hna_global_entry->addr, orig_node->orig);
|
||||
|
||||
spin_lock_bh(&bat_priv->hna_ghash_lock);
|
||||
hash_add(bat_priv->hna_global_hash, compare_orig,
|
||||
choose_orig, hna_global_entry);
|
||||
hash_add(bat_priv->hna_global_hash, compare_ghna,
|
||||
choose_orig, hna_global_entry,
|
||||
&hna_global_entry->hash_entry);
|
||||
|
||||
}
|
||||
|
||||
@ -368,9 +438,7 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
|
||||
spin_lock_bh(&bat_priv->hna_lhash_lock);
|
||||
|
||||
hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
|
||||
hna_local_entry = (struct hna_local_entry *)
|
||||
hash_find(bat_priv->hna_local_hash, compare_orig,
|
||||
choose_orig, hna_ptr);
|
||||
hna_local_entry = hna_local_hash_find(bat_priv, hna_ptr);
|
||||
|
||||
if (hna_local_entry)
|
||||
hna_local_del(bat_priv, hna_local_entry,
|
||||
@ -400,12 +468,11 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
|
||||
struct bat_priv *bat_priv = netdev_priv(net_dev);
|
||||
struct hashtable_t *hash = bat_priv->hna_global_hash;
|
||||
struct hna_global_entry *hna_global_entry;
|
||||
int i;
|
||||
struct hlist_node *walk;
|
||||
struct hlist_node *node;
|
||||
struct hlist_head *head;
|
||||
struct element_t *bucket;
|
||||
size_t buf_size, pos;
|
||||
char *buff;
|
||||
int i;
|
||||
|
||||
if (!bat_priv->primary_if) {
|
||||
return seq_printf(seq, "BATMAN mesh %s disabled - "
|
||||
@ -423,8 +490,10 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
|
||||
for (i = 0; i < hash->size; i++) {
|
||||
head = &hash->table[i];
|
||||
|
||||
hlist_for_each(walk, head)
|
||||
rcu_read_lock();
|
||||
__hlist_for_each_rcu(node, head)
|
||||
buf_size += 43;
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
buff = kmalloc(buf_size, GFP_ATOMIC);
|
||||
@ -438,14 +507,15 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
|
||||
for (i = 0; i < hash->size; i++) {
|
||||
head = &hash->table[i];
|
||||
|
||||
hlist_for_each_entry(bucket, walk, head, hlist) {
|
||||
hna_global_entry = bucket->data;
|
||||
|
||||
rcu_read_lock();
|
||||
hlist_for_each_entry_rcu(hna_global_entry, node,
|
||||
head, hash_entry) {
|
||||
pos += snprintf(buff + pos, 44,
|
||||
" * %pM via %pM\n",
|
||||
hna_global_entry->addr,
|
||||
hna_global_entry->orig_node->orig);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
spin_unlock_bh(&bat_priv->hna_ghash_lock);
|
||||
@ -464,7 +534,7 @@ static void _hna_global_del_orig(struct bat_priv *bat_priv,
|
||||
hna_global_entry->addr, hna_global_entry->orig_node->orig,
|
||||
message);
|
||||
|
||||
hash_remove(bat_priv->hna_global_hash, compare_orig, choose_orig,
|
||||
hash_remove(bat_priv->hna_global_hash, compare_ghna, choose_orig,
|
||||
hna_global_entry->addr);
|
||||
kfree(hna_global_entry);
|
||||
}
|
||||
@ -483,9 +553,7 @@ void hna_global_del_orig(struct bat_priv *bat_priv,
|
||||
|
||||
while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
|
||||
hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
|
||||
hna_global_entry = (struct hna_global_entry *)
|
||||
hash_find(bat_priv->hna_global_hash, compare_orig,
|
||||
choose_orig, hna_ptr);
|
||||
hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
|
||||
|
||||
if ((hna_global_entry) &&
|
||||
(hna_global_entry->orig_node == orig_node))
|
||||
@ -502,8 +570,10 @@ void hna_global_del_orig(struct bat_priv *bat_priv,
|
||||
orig_node->hna_buff = NULL;
|
||||
}
|
||||
|
||||
static void hna_global_del(void *data, void *arg)
|
||||
static void hna_global_del(struct hlist_node *node, void *arg)
|
||||
{
|
||||
void *data = container_of(node, struct hna_global_entry, hash_entry);
|
||||
|
||||
kfree(data);
|
||||
}

@ -519,15 +589,20 @@ void hna_global_free(struct bat_priv *bat_priv)
struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
{
struct hna_global_entry *hna_global_entry;
struct orig_node *orig_node = NULL;

spin_lock_bh(&bat_priv->hna_ghash_lock);
hna_global_entry = (struct hna_global_entry *)
hash_find(bat_priv->hna_global_hash,
compare_orig, choose_orig, addr);
spin_unlock_bh(&bat_priv->hna_ghash_lock);
hna_global_entry = hna_global_hash_find(bat_priv, addr);

if (!hna_global_entry)
return NULL;
goto out;

return hna_global_entry->orig_node;
if (!atomic_inc_not_zero(&hna_global_entry->orig_node->refcount))
goto out;

orig_node = hna_global_entry->orig_node;

out:
spin_unlock_bh(&bat_priv->hna_ghash_lock);
return orig_node;
}
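transtable_search() now returns its orig_node with a reference already held: atomic_inc_not_zero() refuses to resurrect an entry whose count has dropped to zero, which guards against handing out an object already on its way to being freed. The lookup-and-hold pattern, restated compactly (helper name hypothetical):

/* Sketch of the lookup-and-hold pattern introduced above. */
static struct orig_node *lookup_and_hold(struct bat_priv *bat_priv,
                                         uint8_t *addr)
{
        struct orig_node *orig_node = NULL;
        struct hna_global_entry *entry;

        spin_lock_bh(&bat_priv->hna_ghash_lock);
        entry = hna_global_hash_find(bat_priv, addr);
        if (entry && atomic_inc_not_zero(&entry->orig_node->refcount))
                orig_node = entry->orig_node;
        spin_unlock_bh(&bat_priv->hna_ghash_lock);

        return orig_node;       /* caller must orig_node_free_ref() */
}

Callers are expected to drop the reference with orig_node_free_ref() when done, as the rewritten unicast_send_skb() further down now does.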

@ -33,7 +33,7 @@
sizeof(struct bcast_packet))))


struct batman_if {
struct hard_iface {
struct list_head list;
int16_t if_num;
char if_status;
@ -43,7 +43,7 @@ struct batman_if {
unsigned char *packet_buff;
int packet_len;
struct kobject *hardif_obj;
struct kref refcount;
atomic_t refcount;
struct packet_type batman_adv_ptype;
struct net_device *soft_iface;
struct rcu_head rcu;
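Here struct kref gives way to a bare atomic_t plus a struct rcu_head, so the final put can defer the actual kfree() until all RCU readers are done with the interface. A plausible shape for the matching release helpers (the real hardif_free_ref() is not part of this excerpt, so treat the bodies as an assumption):

/* Sketch, assumed from the fields added in this hunk. */
static void hardif_free_rcu(struct rcu_head *rcu)
{
        struct hard_iface *hard_iface;

        hard_iface = container_of(rcu, struct hard_iface, rcu);
        kfree(hard_iface);
}

static void hardif_free_ref_sketch(struct hard_iface *hard_iface)
{
        /* last reference gone: free only after a grace period */
        if (atomic_dec_and_test(&hard_iface->refcount))
                call_rcu(&hard_iface->rcu, hardif_free_rcu);
}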
@ -70,8 +70,6 @@ struct orig_node {
struct neigh_node *router;
unsigned long *bcast_own;
uint8_t *bcast_own_sum;
uint8_t tq_own;
int tq_asym_penalty;
unsigned long last_valid;
unsigned long bcast_seqno_reset;
unsigned long batman_seqno_reset;
@ -83,20 +81,28 @@ struct orig_node {
uint8_t last_ttl;
unsigned long bcast_bits[NUM_WORDS];
uint32_t last_bcast_seqno;
struct list_head neigh_list;
struct hlist_head neigh_list;
struct list_head frag_list;
spinlock_t neigh_list_lock; /* protects neighbor list */
atomic_t refcount;
struct rcu_head rcu;
struct hlist_node hash_entry;
struct bat_priv *bat_priv;
unsigned long last_frag_packet;
struct {
uint8_t candidates;
struct neigh_node *selected;
} bond;
spinlock_t ogm_cnt_lock; /* protects: bcast_own, bcast_own_sum,
* neigh_node->real_bits,
* neigh_node->real_packet_count */
spinlock_t bcast_seqno_lock; /* protects bcast_bits,
* last_bcast_seqno */
atomic_t bond_candidates;
struct list_head bond_list;
};
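The key structural addition in struct orig_node is `struct hlist_node hash_entry`: the originator now links itself into the hash, so the per-item element_t wrapper and its separate allocation disappear. An entry is recovered from its node the usual way:

/* Sketch: with the node embedded, container_of() replaces the old
 * bucket->data indirection used by the element_t-based hash. */
static inline struct orig_node *node_to_orig(struct hlist_node *node)
{
        return container_of(node, struct orig_node, hash_entry);
}

The same embedding is applied to hna_local_entry, hna_global_entry and vis_info further down.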

struct gw_node {
struct hlist_node list;
struct orig_node *orig_node;
unsigned long deleted;
struct kref refcount;
atomic_t refcount;
struct rcu_head rcu;
};

@ -105,18 +111,20 @@ struct gw_node {
* @last_valid: when last packet via this neighbor was received
*/
struct neigh_node {
struct list_head list;
struct hlist_node list;
uint8_t addr[ETH_ALEN];
uint8_t real_packet_count;
uint8_t tq_recv[TQ_GLOBAL_WINDOW_SIZE];
uint8_t tq_index;
uint8_t tq_avg;
uint8_t last_ttl;
struct neigh_node *next_bond_candidate;
struct list_head bonding_list;
unsigned long last_valid;
unsigned long real_bits[NUM_WORDS];
atomic_t refcount;
struct rcu_head rcu;
struct orig_node *orig_node;
struct batman_if *if_incoming;
struct hard_iface *if_incoming;
};


@ -140,7 +148,7 @@ struct bat_priv {
struct hlist_head softif_neigh_list;
struct softif_neigh *softif_neigh;
struct debug_log *debug_log;
struct batman_if *primary_if;
struct hard_iface *primary_if;
struct kobject *mesh_obj;
struct dentry *debug_dir;
struct hlist_head forw_bat_list;
@ -151,12 +159,11 @@ struct bat_priv {
struct hashtable_t *hna_local_hash;
struct hashtable_t *hna_global_hash;
struct hashtable_t *vis_hash;
spinlock_t orig_hash_lock; /* protects orig_hash */
spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
spinlock_t forw_bcast_list_lock; /* protects */
spinlock_t hna_lhash_lock; /* protects hna_local_hash */
spinlock_t hna_ghash_lock; /* protects hna_global_hash */
spinlock_t gw_list_lock; /* protects gw_list */
spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
spinlock_t vis_hash_lock; /* protects vis_hash */
spinlock_t vis_list_lock; /* protects vis_info::recv_list */
spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
@ -165,7 +172,7 @@ struct bat_priv {
struct delayed_work hna_work;
struct delayed_work orig_work;
struct delayed_work vis_work;
struct gw_node *curr_gw;
struct gw_node __rcu *curr_gw; /* rcu protected pointer */
struct vis_info *my_vis_info;
};
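Annotating curr_gw with __rcu documents that the pointer is published with rcu_assign_pointer() and read through rcu_dereference(), with writers serialized by gw_list_lock (whose comment is updated accordingly). A minimal reader sketch, assuming the refcounted gw_node defined above (the real accessor is gw_get_selected(), whose body is outside this excerpt):

/* Sketch: pin the currently selected gateway under RCU. */
static struct gw_node *gw_get_selected_sketch(struct bat_priv *bat_priv)
{
        struct gw_node *curr_gw;

        rcu_read_lock();
        curr_gw = rcu_dereference(bat_priv->curr_gw);
        if (curr_gw && !atomic_inc_not_zero(&curr_gw->refcount))
                curr_gw = NULL; /* node is being torn down */
        rcu_read_unlock();

        return curr_gw;
}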

@ -188,11 +195,13 @@ struct hna_local_entry {
uint8_t addr[ETH_ALEN];
unsigned long last_seen;
char never_purge;
struct hlist_node hash_entry;
};

struct hna_global_entry {
uint8_t addr[ETH_ALEN];
struct orig_node *orig_node;
struct hlist_node hash_entry;
};

/**
@ -208,7 +217,7 @@ struct forw_packet {
uint32_t direct_link_flags;
uint8_t num_packets;
struct delayed_work delayed_work;
struct batman_if *if_incoming;
struct hard_iface *if_incoming;
};

/* While scanning for vis-entries of a particular vis-originator
@ -242,6 +251,7 @@ struct vis_info {
* from. we should not reply to them. */
struct list_head send_list;
struct kref refcount;
struct hlist_node hash_entry;
struct bat_priv *bat_priv;
/* this packet might be part of the vis send queue. */
struct sk_buff *skb_packet;
@ -264,7 +274,7 @@ struct softif_neigh {
uint8_t addr[ETH_ALEN];
unsigned long last_seen;
short vid;
struct kref refcount;
atomic_t refcount;
struct rcu_head rcu;
};

@ -183,15 +183,10 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
(struct unicast_frag_packet *)skb->data;

*new_skb = NULL;
spin_lock_bh(&bat_priv->orig_hash_lock);
orig_node = ((struct orig_node *)
hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
unicast_packet->orig));

if (!orig_node) {
pr_debug("couldn't find originator in orig_hash\n");
orig_node = orig_hash_find(bat_priv, unicast_packet->orig);
if (!orig_node)
goto out;
}

orig_node->last_frag_packet = jiffies;

@ -215,14 +210,15 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
/* if not, merge failed */
if (*new_skb)
ret = NET_RX_SUCCESS;
out:
spin_unlock_bh(&bat_priv->orig_hash_lock);

out:
if (orig_node)
orig_node_free_ref(orig_node);
return ret;
}
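orig_hash_find() replaces the open-coded hash_find() under orig_hash_lock, and the exit path now drops the reference the lookup took. The helper itself is not shown in this excerpt; by analogy with vis_hash_find() further down, it presumably walks one RCU-protected bucket and pins the match before returning (sketch only, signature and body assumed):

/* Sketch: RCU bucket walk plus refcount pin, assumed shape. */
static struct orig_node *orig_hash_find_sketch(struct bat_priv *bat_priv,
                                               void *data)
{
        struct hashtable_t *hash = bat_priv->orig_hash;
        struct orig_node *orig_node, *found = NULL;
        struct hlist_node *node;
        int index = choose_orig(data, hash->size);

        rcu_read_lock();
        hlist_for_each_entry_rcu(orig_node, node, &hash->table[index],
                                 hash_entry) {
                if (!compare_eth(orig_node->orig, data))
                        continue;
                if (atomic_inc_not_zero(&orig_node->refcount))
                        found = orig_node;
                break;
        }
        rcu_read_unlock();

        return found;
}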

int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
struct batman_if *batman_if, uint8_t dstaddr[])
struct hard_iface *hard_iface, uint8_t dstaddr[])
{
struct unicast_packet tmp_uc, *unicast_packet;
struct sk_buff *frag_skb;
@ -267,12 +263,12 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
frag1->flags = UNI_FRAG_HEAD | large_tail;
frag2->flags = large_tail;

seqno = atomic_add_return(2, &batman_if->frag_seqno);
seqno = atomic_add_return(2, &hard_iface->frag_seqno);
frag1->seqno = htons(seqno - 1);
frag2->seqno = htons(seqno);

send_skb_packet(skb, batman_if, dstaddr);
send_skb_packet(frag_skb, batman_if, dstaddr);
send_skb_packet(skb, hard_iface, dstaddr);
send_skb_packet(frag_skb, hard_iface, dstaddr);
return NET_RX_SUCCESS;

drop_frag:
@ -286,40 +282,37 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
{
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
struct unicast_packet *unicast_packet;
struct orig_node *orig_node = NULL;
struct batman_if *batman_if;
struct neigh_node *router;
struct orig_node *orig_node;
struct neigh_node *neigh_node;
int data_len = skb->len;
uint8_t dstaddr[6];

spin_lock_bh(&bat_priv->orig_hash_lock);
int ret = 1;

/* get routing information */
if (is_multicast_ether_addr(ethhdr->h_dest))
if (is_multicast_ether_addr(ethhdr->h_dest)) {
orig_node = (struct orig_node *)gw_get_selected(bat_priv);
if (orig_node)
goto find_router;
}

/* check for hna host */
if (!orig_node)
orig_node = transtable_search(bat_priv, ethhdr->h_dest);
/* check for hna host - increases orig_node refcount */
orig_node = transtable_search(bat_priv, ethhdr->h_dest);

router = find_router(bat_priv, orig_node, NULL);
find_router:
/**
* find_router():
* - if orig_node is NULL it returns NULL
* - increases neigh_nodes refcount if found.
*/
neigh_node = find_router(bat_priv, orig_node, NULL);

if (!router)
goto unlock;
if (!neigh_node)
goto out;

/* don't lock while sending the packets ... we therefore
* copy the required data before sending */

batman_if = router->if_incoming;
memcpy(dstaddr, router->addr, ETH_ALEN);

spin_unlock_bh(&bat_priv->orig_hash_lock);

if (batman_if->if_status != IF_ACTIVE)
goto dropped;
if (neigh_node->if_incoming->if_status != IF_ACTIVE)
goto out;

if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0)
goto dropped;
goto out;

unicast_packet = (struct unicast_packet *)skb->data;

@ -333,18 +326,24 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)

if (atomic_read(&bat_priv->fragmentation) &&
data_len + sizeof(struct unicast_packet) >
batman_if->net_dev->mtu) {
neigh_node->if_incoming->net_dev->mtu) {
/* send frag skb decreases ttl */
unicast_packet->ttl++;
return frag_send_skb(skb, bat_priv, batman_if,
dstaddr);
ret = frag_send_skb(skb, bat_priv,
neigh_node->if_incoming, neigh_node->addr);
goto out;
}
send_skb_packet(skb, batman_if, dstaddr);
return 0;

unlock:
spin_unlock_bh(&bat_priv->orig_hash_lock);
dropped:
kfree_skb(skb);
return 1;
send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = 0;
goto out;

out:
if (neigh_node)
neigh_node_free_ref(neigh_node);
if (orig_node)
orig_node_free_ref(orig_node);
if (ret == 1)
kfree_skb(skb);
return ret;
}
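The rewrite above is mostly a control-flow change: instead of paired lock/unlock exits (unlock:, dropped:), every path funnels through a single out: label that releases whatever references the lookups took and frees the skb only when it was not handed off. Reduced to its skeleton (a sketch, not the full function):

/* Sketch: single-exit cleanup once the lookups hand out references. */
static int send_sketch(struct sk_buff *skb, struct bat_priv *bat_priv,
                       uint8_t *h_dest)
{
        struct orig_node *orig_node = NULL;
        struct neigh_node *neigh_node = NULL;
        int ret = 1;    /* 1 means failure; skb is freed below */

        orig_node = transtable_search(bat_priv, h_dest);
        neigh_node = find_router(bat_priv, orig_node, NULL);
        if (!neigh_node)
                goto out;

        /* ... build and transmit the unicast packet ... */
        ret = 0;
out:
        if (neigh_node)
                neigh_node_free_ref(neigh_node);
        if (orig_node)
                orig_node_free_ref(orig_node);
        if (ret == 1)
                kfree_skb(skb);
        return ret;
}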

@ -32,7 +32,7 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
void frag_list_free(struct list_head *head);
int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
struct batman_if *batman_if, uint8_t dstaddr[]);
struct hard_iface *hard_iface, uint8_t dstaddr[]);

static inline int frag_can_reassemble(struct sk_buff *skb, int mtu)
{

@ -68,15 +68,16 @@ static void free_info(struct kref *ref)
}

/* Compare two vis packets, used by the hashing algorithm */
static int vis_info_cmp(void *data1, void *data2)
static int vis_info_cmp(struct hlist_node *node, void *data2)
{
struct vis_info *d1, *d2;
struct vis_packet *p1, *p2;
d1 = data1;

d1 = container_of(node, struct vis_info, hash_entry);
d2 = data2;
p1 = (struct vis_packet *)d1->skb_packet->data;
p2 = (struct vis_packet *)d2->skb_packet->data;
return compare_orig(p1->vis_orig, p2->vis_orig);
return compare_eth(p1->vis_orig, p2->vis_orig);
}
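compare_orig() was tied to the old hash callback convention; the compare_eth() that replaces it throughout this patch is, as far as the call sites show, a plain MAC-address equality test, roughly:

/* Assumed definition, inferred from the call sites in this diff. */
static inline int compare_eth(void *data1, void *data2)
{
        return (memcmp(data1, data2, ETH_ALEN) == 0);
}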

/* hash function to choose an entry in a hash table of given size */
@ -104,6 +105,34 @@ static int vis_info_choose(void *data, int size)
return hash % size;
}

static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
void *data)
{
struct hashtable_t *hash = bat_priv->vis_hash;
struct hlist_head *head;
struct hlist_node *node;
struct vis_info *vis_info, *vis_info_tmp = NULL;
int index;

if (!hash)
return NULL;

index = vis_info_choose(data, hash->size);
head = &hash->table[index];

rcu_read_lock();
hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) {
if (!vis_info_cmp(node, data))
continue;

vis_info_tmp = vis_info;
break;
}
rcu_read_unlock();

return vis_info_tmp;
}

/* insert interface to the list of interfaces of one originator, if it
* does not already exist in the list */
static void vis_data_insert_interface(const uint8_t *interface,
@ -114,7 +143,7 @@ static void vis_data_insert_interface(const uint8_t *interface,
struct hlist_node *pos;

hlist_for_each_entry(entry, pos, if_list, list) {
if (compare_orig(entry->addr, (void *)interface))
if (compare_eth(entry->addr, (void *)interface))
return;
}

@ -166,7 +195,7 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
/* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
if (primary && entry->quality == 0)
return sprintf(buff, "HNA %pM, ", entry->dest);
else if (compare_orig(entry->src, src))
else if (compare_eth(entry->src, src))
return sprintf(buff, "TQ %pM %d, ", entry->dest,
entry->quality);

@ -175,9 +204,8 @@

int vis_seq_print_text(struct seq_file *seq, void *offset)
{
struct hlist_node *walk;
struct hlist_node *node;
struct hlist_head *head;
struct element_t *bucket;
struct vis_info *info;
struct vis_packet *packet;
struct vis_info_entry *entries;
@ -203,8 +231,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];

hlist_for_each_entry(bucket, walk, head, hlist) {
info = bucket->data;
rcu_read_lock();
hlist_for_each_entry_rcu(info, node, head, hash_entry) {
packet = (struct vis_packet *)info->skb_packet->data;
entries = (struct vis_info_entry *)
((char *)packet + sizeof(struct vis_packet));
@ -213,7 +241,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
if (entries[j].quality == 0)
continue;
compare =
compare_orig(entries[j].src, packet->vis_orig);
compare_eth(entries[j].src, packet->vis_orig);
vis_data_insert_interface(entries[j].src,
&vis_if_list,
compare);
@ -223,7 +251,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
buf_size += 18 + 26 * packet->entries;

/* add primary/secondary records */
if (compare_orig(entry->addr, packet->vis_orig))
if (compare_eth(entry->addr, packet->vis_orig))
buf_size +=
vis_data_count_prim_sec(&vis_if_list);

@ -236,6 +264,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
kfree(entry);
}
}
rcu_read_unlock();
}

buff = kmalloc(buf_size, GFP_ATOMIC);
@ -249,8 +278,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];

hlist_for_each_entry(bucket, walk, head, hlist) {
info = bucket->data;
rcu_read_lock();
hlist_for_each_entry_rcu(info, node, head, hash_entry) {
packet = (struct vis_packet *)info->skb_packet->data;
entries = (struct vis_info_entry *)
((char *)packet + sizeof(struct vis_packet));
@ -259,7 +288,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
if (entries[j].quality == 0)
continue;
compare =
compare_orig(entries[j].src, packet->vis_orig);
compare_eth(entries[j].src, packet->vis_orig);
vis_data_insert_interface(entries[j].src,
&vis_if_list,
compare);
@ -277,7 +306,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
entry->primary);

/* add primary/secondary records */
if (compare_orig(entry->addr, packet->vis_orig))
if (compare_eth(entry->addr, packet->vis_orig))
buff_pos +=
vis_data_read_prim_sec(buff + buff_pos,
&vis_if_list);
@ -291,6 +320,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
kfree(entry);
}
}
rcu_read_unlock();
}

spin_unlock_bh(&bat_priv->vis_hash_lock);
@ -345,7 +375,7 @@ static int recv_list_is_in(struct bat_priv *bat_priv,

spin_lock_bh(&bat_priv->vis_list_lock);
list_for_each_entry(entry, recv_list, list) {
if (memcmp(entry->mac, mac, ETH_ALEN) == 0) {
if (compare_eth(entry->mac, mac)) {
spin_unlock_bh(&bat_priv->vis_list_lock);
return 1;
}
@ -381,8 +411,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
sizeof(struct vis_packet));

memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
old_info = hash_find(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
&search_elem);
old_info = vis_hash_find(bat_priv, &search_elem);
kfree_skb(search_elem.skb_packet);

if (old_info) {
@ -442,7 +471,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,

/* try to add it */
hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
info);
info, &info->hash_entry);
if (hash_added < 0) {
/* did not work (for some reason) */
kref_put(&info->refcount, free_info);
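hash_add() grows a fifth argument: the entry's embedded hlist_node, so the hash layer links the object in place instead of allocating a wrapper element. A condensed sketch of what the updated helper plausibly does (the real implementation lives in hash.h, outside this excerpt; bucket locking is elided):

/* Sketch: duplicate check under RCU, then link the caller's node.
 * The compare callback now takes the candidate's hlist_node, matching
 * the new vis_info_cmp() signature above. */
static inline int hash_add_sketch(struct hashtable_t *hash,
                                  hashdata_compare_cb compare,
                                  hashdata_choose_cb choose,
                                  void *data, struct hlist_node *data_node)
{
        int index = choose(data, hash->size);
        struct hlist_head *head = &hash->table[index];
        struct hlist_node *node;

        rcu_read_lock();
        __hlist_for_each_rcu(node, head) {
                if (compare(node, data)) {
                        rcu_read_unlock();
                        return -1;      /* already present */
                }
        }
        rcu_read_unlock();

        hlist_add_head_rcu(data_node, head);
        return 0;
}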
@ -529,9 +558,8 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
struct vis_info *info)
{
struct hashtable_t *hash = bat_priv->orig_hash;
struct hlist_node *walk;
struct hlist_node *node;
struct hlist_head *head;
struct element_t *bucket;
struct orig_node *orig_node;
struct vis_packet *packet;
int best_tq = -1, i;
@ -541,16 +569,17 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];

hlist_for_each_entry(bucket, walk, head, hlist) {
orig_node = bucket->data;
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
if ((orig_node) && (orig_node->router) &&
(orig_node->flags & VIS_SERVER) &&
(orig_node->router->tq_avg > best_tq)) {
(orig_node->flags & VIS_SERVER) &&
(orig_node->router->tq_avg > best_tq)) {
best_tq = orig_node->router->tq_avg;
memcpy(packet->target_orig, orig_node->orig,
ETH_ALEN);
}
}
rcu_read_unlock();
}

return best_tq;
@ -573,9 +602,8 @@ static bool vis_packet_full(struct vis_info *info)
static int generate_vis_packet(struct bat_priv *bat_priv)
{
struct hashtable_t *hash = bat_priv->orig_hash;
struct hlist_node *walk;
struct hlist_node *node;
struct hlist_head *head;
struct element_t *bucket;
struct orig_node *orig_node;
struct neigh_node *neigh_node;
struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
@ -587,7 +615,6 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
info->first_seen = jiffies;
packet->vis_type = atomic_read(&bat_priv->vis_mode);

spin_lock_bh(&bat_priv->orig_hash_lock);
memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
packet->ttl = TTL;
packet->seqno = htonl(ntohl(packet->seqno) + 1);
@ -597,23 +624,21 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
best_tq = find_best_vis_server(bat_priv, info);

if (best_tq < 0) {
spin_unlock_bh(&bat_priv->orig_hash_lock);
if (best_tq < 0)
return -1;
}
}

for (i = 0; i < hash->size; i++) {
head = &hash->table[i];

hlist_for_each_entry(bucket, walk, head, hlist) {
orig_node = bucket->data;
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
neigh_node = orig_node->router;

if (!neigh_node)
continue;

if (!compare_orig(neigh_node->addr, orig_node->orig))
if (!compare_eth(neigh_node->addr, orig_node->orig))
continue;

if (neigh_node->if_incoming->if_status != IF_ACTIVE)
@ -632,23 +657,19 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
entry->quality = neigh_node->tq_avg;
packet->entries++;

if (vis_packet_full(info)) {
spin_unlock_bh(&bat_priv->orig_hash_lock);
return 0;
}
if (vis_packet_full(info))
goto unlock;
}
rcu_read_unlock();
}

spin_unlock_bh(&bat_priv->orig_hash_lock);

hash = bat_priv->hna_local_hash;

spin_lock_bh(&bat_priv->hna_lhash_lock);
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];

hlist_for_each_entry(bucket, walk, head, hlist) {
hna_local_entry = bucket->data;
hlist_for_each_entry(hna_local_entry, node, head, hash_entry) {
entry = (struct vis_info_entry *)
skb_put(info->skb_packet,
sizeof(*entry));
@ -666,6 +687,10 @@ static int generate_vis_packet(struct bat_priv *bat_priv)

spin_unlock_bh(&bat_priv->hna_lhash_lock);
return 0;

unlock:
rcu_read_unlock();
return 0;
}
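One subtlety in the loop conversion above: the early exit on vis_packet_full() happens inside an RCU read-side critical section, so it must leave through a label that still pairs the rcu_read_lock(); returning directly would leak the read lock. The shape of it, as a sketch:

/* Sketch: early exit from an RCU-protected walk must still unlock. */
static int walk_until_full(struct hashtable_t *hash, struct vis_info *info)
{
        struct orig_node *orig_node;
        struct hlist_node *node;
        int i;

        for (i = 0; i < hash->size; i++) {
                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, node, &hash->table[i],
                                         hash_entry) {
                        /* ... append entry to the vis packet ... */
                        if (vis_packet_full(info))
                                goto unlock;
                }
                rcu_read_unlock();
        }
        return 0;

unlock:
        rcu_read_unlock();
        return 0;
}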

/* free old vis packets. Must be called with this vis_hash_lock
@ -674,25 +699,22 @@ static void purge_vis_packets(struct bat_priv *bat_priv)
{
int i;
struct hashtable_t *hash = bat_priv->vis_hash;
struct hlist_node *walk, *safe;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
struct element_t *bucket;
struct vis_info *info;

for (i = 0; i < hash->size; i++) {
head = &hash->table[i];

hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
info = bucket->data;

hlist_for_each_entry_safe(info, node, node_tmp,
head, hash_entry) {
/* never purge own data. */
if (info == bat_priv->my_vis_info)
continue;

if (time_after(jiffies,
info->first_seen + VIS_TIMEOUT * HZ)) {
hlist_del(walk);
kfree(bucket);
hlist_del(node);
send_list_del(info);
kref_put(&info->refcount, free_info);
}
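Purging keeps the _safe iterator so that hlist_del() does not invalidate the cursor, but the kfree(bucket) disappears: the node now lives inside vis_info, whose memory is released through the kref. Restated as a per-bucket sketch:

/* Sketch: unlink timed-out entries; no wrapper element left to free. */
static void purge_bucket_sketch(struct bat_priv *bat_priv,
                                struct hlist_head *head)
{
        struct vis_info *info;
        struct hlist_node *node, *node_tmp;

        hlist_for_each_entry_safe(info, node, node_tmp, head, hash_entry) {
                if (info == bat_priv->my_vis_info)      /* never purge own data */
                        continue;

                if (time_after(jiffies,
                               info->first_seen + VIS_TIMEOUT * HZ)) {
                        hlist_del(node);
                        send_list_del(info);
                        kref_put(&info->refcount, free_info);
                }
        }
}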
@ -704,27 +726,24 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
struct vis_info *info)
{
struct hashtable_t *hash = bat_priv->orig_hash;
struct hlist_node *walk;
struct hlist_node *node;
struct hlist_head *head;
struct element_t *bucket;
struct orig_node *orig_node;
struct vis_packet *packet;
struct sk_buff *skb;
struct batman_if *batman_if;
struct hard_iface *hard_iface;
uint8_t dstaddr[ETH_ALEN];
int i;


spin_lock_bh(&bat_priv->orig_hash_lock);
packet = (struct vis_packet *)info->skb_packet->data;

/* send to all routers in range. */
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];

hlist_for_each_entry(bucket, walk, head, hlist) {
orig_node = bucket->data;

rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
/* if it's a vis server and reachable, send it. */
if ((!orig_node) || (!orig_node->router))
continue;
@ -737,54 +756,61 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
continue;

memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
batman_if = orig_node->router->if_incoming;
hard_iface = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
spin_unlock_bh(&bat_priv->orig_hash_lock);

skb = skb_clone(info->skb_packet, GFP_ATOMIC);
if (skb)
send_skb_packet(skb, batman_if, dstaddr);
send_skb_packet(skb, hard_iface, dstaddr);

spin_lock_bh(&bat_priv->orig_hash_lock);
}

rcu_read_unlock();
}

spin_unlock_bh(&bat_priv->orig_hash_lock);
}

static void unicast_vis_packet(struct bat_priv *bat_priv,
struct vis_info *info)
{
struct orig_node *orig_node;
struct neigh_node *neigh_node = NULL;
struct sk_buff *skb;
struct vis_packet *packet;
struct batman_if *batman_if;
uint8_t dstaddr[ETH_ALEN];

spin_lock_bh(&bat_priv->orig_hash_lock);
packet = (struct vis_packet *)info->skb_packet->data;
orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
compare_orig, choose_orig,
packet->target_orig));

if ((!orig_node) || (!orig_node->router))
goto out;
rcu_read_lock();
orig_node = orig_hash_find(bat_priv, packet->target_orig);

/* don't lock while sending the packets ... we therefore
* copy the required data before sending */
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
spin_unlock_bh(&bat_priv->orig_hash_lock);
if (!orig_node)
goto unlock;

neigh_node = orig_node->router;

if (!neigh_node)
goto unlock;

if (!atomic_inc_not_zero(&neigh_node->refcount)) {
neigh_node = NULL;
goto unlock;
}

rcu_read_unlock();

skb = skb_clone(info->skb_packet, GFP_ATOMIC);
if (skb)
send_skb_packet(skb, batman_if, dstaddr);
send_skb_packet(skb, neigh_node->if_incoming,
neigh_node->addr);

return;
goto out;

unlock:
rcu_read_unlock();
out:
spin_unlock_bh(&bat_priv->orig_hash_lock);
if (neigh_node)
neigh_node_free_ref(neigh_node);
if (orig_node)
orig_node_free_ref(orig_node);
return;
}

/* only send one vis packet. called from send_vis_packets() */
@ -896,7 +922,8 @@ int vis_init(struct bat_priv *bat_priv)
INIT_LIST_HEAD(&bat_priv->vis_send_list);

hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
bat_priv->my_vis_info);
bat_priv->my_vis_info,
&bat_priv->my_vis_info->hash_entry);
if (hash_added < 0) {
pr_err("Can't add own vis packet into hash\n");
/* not in hash, need to remove it manually. */
@ -918,10 +945,11 @@ err:
}

/* Decrease the reference count on a hash item info */
static void free_info_ref(void *data, void *arg)
static void free_info_ref(struct hlist_node *node, void *arg)
{
struct vis_info *info = data;
struct vis_info *info;

info = container_of(node, struct vis_info, hash_entry);
send_list_del(info);
kref_put(&info->refcount, free_info);
}