Mirror of https://github.com/torvalds/linux.git (synced 2024-11-15 16:41:58 +00:00)
Included changes:
- avoid recursive invocations of call_rcu() which would fool users waiting on rcu_barrier()
- prevent immediate kfree of objects used in rcu protected contexts

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQIcBAABCAAGBQJWmwezAAoJENpFlCjNi1MRBcsP/3F4cOvbTw4inAVNkE3nSvrl
lPP/z87pcMFs1uLOInR9vmpXnlZ9JCU5pef6nXOSs4TBMKoB9yU4vl1RnW6/kbw8
M5MsgMF7RiAnPQTxm/GRHwuYD3o/+4GwKXz5cquhIt6282c9a1JsjF01OlbRVooB
FDbJIU5MNIXK/3RKznDQRPXiKYMXFaqBbRCDMOUf8SF4tzfybJJWScplGTdgH9Kh
OcqLdAdzPmd6b+Vb4BRRcVPx/oT1uXA+G1Ktwu/QIHb6+mpqDJykVv3Z/tMexYvh
jo2Xcs3qvWWdVRdxV4IQHgN4bfEhJwrpMGreX9QbV4aF9muGjnx9TU2/2/UFUT+2
T1gE5UFC88y2qDx1dF6/SaHA1fnZ5G97rAww+5OU0ntVRe8N+9lSfwejDOoYDgAH
dFvc1ln1r1phqg548xKrH4y7ymnTv0MJuZ4t7x+TAZYhiPP91JNPwtoK1Eceu4sc
mq86HqF+pmGMAoREdqQOdB7wfpZDEShcEoDj1lQekBiQp2uYtVOldseNFsYQ7aIU
59MdizN3hDDEJw4eOKEXvtZyEi78G0AqJgaww/Zd8GyJqcbhKuGwc3qCdQF/WzFX
tvnIM3ChDluXejBy55Gc0jAkFVjBKyWG4g7VYCial6CIRx3VGSqjqlRU0iUTslzl
I25ZqXkSSCsVQjw7bO5d
=pdnx
-----END PGP SIGNATURE-----

Merge tag 'batman-adv-fix-for-davem' of git://git.open-mesh.org/linux-merge

Antonio Quartulli says:

====================
pull request [net]: batman-adv 20160117

here you have a bunch of patches intended for net.

This patchset is provided by Sven Eckelmann and it is basically fixing two
major issues that exist in several parts of the code - that is why we have
8 patches.

The first bugfix (patches 1 and 2) prevents call_rcu from being invoked
recursively. This would deceive any user waiting on rcu_barrier(), because
the latter won't be able to wait for the nested invocation, thus triggering
all sorts of undefined behaviour.

The second bugfix (patches 3 to 8) prevents the code from freeing rcu
protected objects without waiting for the proper grace period. This issue
can potentially lead to wrong memory accesses and thus kernel crashes.
Unfortunately this bogus code pattern was copy/pasted all around the place
when developing new features, therefore Sven diligently created several
patches to address each component independently.

Given that such bugs were introduced quite some time ago, all the patches
except patch 5 should be considered for submission to stable.

Included changes:
- avoid recursive invocations of call_rcu() which would fool users waiting
  on rcu_barrier()
- prevent immediate kfree of objects used in rcu protected contexts
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 78c5b2c667
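The pattern behind both fixes can be sketched outside batman-adv as well. The following is a minimal, kernel-style illustration with hypothetical types (a struct foo holding a reference on a struct bar); it is not code from the patches themselves, only a sketch of why an rcu callback must not drop references that queue further call_rcu() invocations, and why objects must not be kfree()d immediately while rcu readers may still hold them:

/* Hypothetical example types and helpers; not taken from batman-adv. */
#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct bar {
        atomic_t refcount;
        struct rcu_head rcu;
};

struct foo {
        atomic_t refcount;
        struct rcu_head rcu;
        struct bar *bar;
};

/* Buggy pattern #1 (recursive call_rcu): if foo's rcu callback dropped its
 * bar reference through a helper that itself queues call_rcu(), the nested
 * callback is queued while an rcu_barrier() may already be running, so the
 * barrier can return before that callback has executed.
 *
 * Buggy pattern #2 (immediate free): a *_free_ref_now() helper that invokes
 * the rcu callback directly frees the object with no grace period at all,
 * while concurrent rcu readers may still hold a pointer to it.
 *
 * Fixed pattern: a plain release function runs in normal context, drops
 * nested references through the regular free_ref helpers, and defers only
 * the final free via kfree_rcu(). No rcu callback ever queues another
 * callback, and every free waits for a full grace period.
 */

static void bar_release(struct bar *bar)
{
        kfree_rcu(bar, rcu);
}

static void bar_free_ref(struct bar *bar)
{
        if (atomic_dec_and_test(&bar->refcount))
                bar_release(bar);
}

static void foo_release(struct foo *foo)
{
        bar_free_ref(foo->bar);         /* plain call, no nested call_rcu() */
        kfree_rcu(foo, rcu);            /* only the final free is deferred */
}

static void foo_free_ref(struct foo *foo)
{
        if (atomic_dec_and_test(&foo->refcount))
                foo_release(foo);
}

Shaped like this, the only rcu callbacks ever queued are the kfree_rcu() ones, so rcu_barrier() waits for every pending free and no object is returned to the allocator before a grace period has elapsed - which is what the patches below restore for the batman-adv reference-counted objects.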
@@ -127,21 +127,17 @@ batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
 }
 
 /* finally deinitialize the claim */
-static void batadv_claim_free_rcu(struct rcu_head *rcu)
+static void batadv_claim_release(struct batadv_bla_claim *claim)
 {
-        struct batadv_bla_claim *claim;
-
-        claim = container_of(rcu, struct batadv_bla_claim, rcu);
-
         batadv_backbone_gw_free_ref(claim->backbone_gw);
-        kfree(claim);
+        kfree_rcu(claim, rcu);
 }
 
 /* free a claim, call claim_free_rcu if its the last reference */
 static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
 {
         if (atomic_dec_and_test(&claim->refcount))
-                call_rcu(&claim->rcu, batadv_claim_free_rcu);
+                batadv_claim_release(claim);
 }
 
 /**
@@ -75,18 +75,6 @@ batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
                 call_rcu(&hard_iface->rcu, batadv_hardif_free_rcu);
 }
 
-/**
- * batadv_hardif_free_ref_now - decrement the hard interface refcounter and
- * possibly free it (without rcu callback)
- * @hard_iface: the hard interface to free
- */
-static inline void
-batadv_hardif_free_ref_now(struct batadv_hard_iface *hard_iface)
-{
-        if (atomic_dec_and_test(&hard_iface->refcount))
-                batadv_hardif_free_rcu(&hard_iface->rcu);
-}
-
 static inline struct batadv_hard_iface *
 batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
 {
@@ -203,28 +203,25 @@ void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_nc_node_free_rcu - rcu callback to free an nc node and remove
- * its refcount on the orig_node
- * @rcu: rcu pointer of the nc node
+ * batadv_nc_node_release - release nc_node from lists and queue for free after
+ * rcu grace period
+ * @nc_node: the nc node to free
  */
-static void batadv_nc_node_free_rcu(struct rcu_head *rcu)
+static void batadv_nc_node_release(struct batadv_nc_node *nc_node)
 {
-        struct batadv_nc_node *nc_node;
-
-        nc_node = container_of(rcu, struct batadv_nc_node, rcu);
         batadv_orig_node_free_ref(nc_node->orig_node);
-        kfree(nc_node);
+        kfree_rcu(nc_node, rcu);
 }
 
 /**
- * batadv_nc_node_free_ref - decrements the nc node refcounter and possibly
- * frees it
+ * batadv_nc_node_free_ref - decrement the nc node refcounter and possibly
+ * release it
  * @nc_node: the nc node to free
  */
 static void batadv_nc_node_free_ref(struct batadv_nc_node *nc_node)
 {
         if (atomic_dec_and_test(&nc_node->refcount))
-                call_rcu(&nc_node->rcu, batadv_nc_node_free_rcu);
+                batadv_nc_node_release(nc_node);
 }
 
 /**
@@ -163,148 +163,101 @@ err:
 }
 
 /**
- * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
- * @rcu: rcu pointer of the neigh_ifinfo object
- */
-static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
-{
-        struct batadv_neigh_ifinfo *neigh_ifinfo;
-
-        neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);
-
-        if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
-                batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);
-
-        kfree(neigh_ifinfo);
-}
-
-/**
- * batadv_neigh_ifinfo_free_now - decrement the refcounter and possibly free
- * the neigh_ifinfo (without rcu callback)
+ * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
+ * free after rcu grace period
  * @neigh_ifinfo: the neigh_ifinfo object to release
  */
 static void
-batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
+batadv_neigh_ifinfo_release(struct batadv_neigh_ifinfo *neigh_ifinfo)
 {
-        if (atomic_dec_and_test(&neigh_ifinfo->refcount))
-                batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
+        if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
+                batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);
+
+        kfree_rcu(neigh_ifinfo, rcu);
 }
 
 /**
- * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
+ * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
  * the neigh_ifinfo
  * @neigh_ifinfo: the neigh_ifinfo object to release
  */
 void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
 {
         if (atomic_dec_and_test(&neigh_ifinfo->refcount))
-                call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
+                batadv_neigh_ifinfo_release(neigh_ifinfo);
 }
 
 /**
- * batadv_hardif_neigh_free_rcu - free the hardif neigh_node
- * @rcu: rcu pointer of the neigh_node
- */
-static void batadv_hardif_neigh_free_rcu(struct rcu_head *rcu)
-{
-        struct batadv_hardif_neigh_node *hardif_neigh;
-
-        hardif_neigh = container_of(rcu, struct batadv_hardif_neigh_node, rcu);
-
-        batadv_hardif_free_ref_now(hardif_neigh->if_incoming);
-        kfree(hardif_neigh);
-}
-
-/**
- * batadv_hardif_neigh_free_now - decrement the hardif neighbors refcounter
- * and possibly free it (without rcu callback)
+ * batadv_hardif_neigh_release - release hardif neigh node from lists and
+ * queue for free after rcu grace period
  * @hardif_neigh: hardif neigh neighbor to free
  */
 static void
-batadv_hardif_neigh_free_now(struct batadv_hardif_neigh_node *hardif_neigh)
+batadv_hardif_neigh_release(struct batadv_hardif_neigh_node *hardif_neigh)
 {
-        if (atomic_dec_and_test(&hardif_neigh->refcount)) {
-                spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
-                hlist_del_init_rcu(&hardif_neigh->list);
-                spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
+        spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
+        hlist_del_init_rcu(&hardif_neigh->list);
+        spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
 
-                batadv_hardif_neigh_free_rcu(&hardif_neigh->rcu);
-        }
+        batadv_hardif_free_ref(hardif_neigh->if_incoming);
+        kfree_rcu(hardif_neigh, rcu);
 }
 
 /**
  * batadv_hardif_neigh_free_ref - decrement the hardif neighbors refcounter
- * and possibly free it
+ * and possibly release it
  * @hardif_neigh: hardif neigh neighbor to free
  */
 void batadv_hardif_neigh_free_ref(struct batadv_hardif_neigh_node *hardif_neigh)
 {
-        if (atomic_dec_and_test(&hardif_neigh->refcount)) {
-                spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
-                hlist_del_init_rcu(&hardif_neigh->list);
-                spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
-
-                call_rcu(&hardif_neigh->rcu, batadv_hardif_neigh_free_rcu);
-        }
+        if (atomic_dec_and_test(&hardif_neigh->refcount))
+                batadv_hardif_neigh_release(hardif_neigh);
 }
 
 /**
- * batadv_neigh_node_free_rcu - free the neigh_node
- * @rcu: rcu pointer of the neigh_node
+ * batadv_neigh_node_release - release neigh_node from lists and queue for
+ * free after rcu grace period
+ * @neigh_node: neigh neighbor to free
  */
-static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
+static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node)
 {
         struct hlist_node *node_tmp;
-        struct batadv_neigh_node *neigh_node;
         struct batadv_hardif_neigh_node *hardif_neigh;
         struct batadv_neigh_ifinfo *neigh_ifinfo;
         struct batadv_algo_ops *bao;
 
-        neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
         bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
 
         hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
                                   &neigh_node->ifinfo_list, list) {
-                batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
+                batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
         }
 
         hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
                                                neigh_node->addr);
         if (hardif_neigh) {
                 /* batadv_hardif_neigh_get() increases refcount too */
-                batadv_hardif_neigh_free_now(hardif_neigh);
-                batadv_hardif_neigh_free_now(hardif_neigh);
+                batadv_hardif_neigh_free_ref(hardif_neigh);
+                batadv_hardif_neigh_free_ref(hardif_neigh);
         }
 
         if (bao->bat_neigh_free)
                 bao->bat_neigh_free(neigh_node);
 
-        batadv_hardif_free_ref_now(neigh_node->if_incoming);
+        batadv_hardif_free_ref(neigh_node->if_incoming);
 
-        kfree(neigh_node);
-}
-
-/**
- * batadv_neigh_node_free_ref_now - decrement the neighbors refcounter
- * and possibly free it (without rcu callback)
- * @neigh_node: neigh neighbor to free
- */
-static void
-batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
-{
-        if (atomic_dec_and_test(&neigh_node->refcount))
-                batadv_neigh_node_free_rcu(&neigh_node->rcu);
+        kfree_rcu(neigh_node, rcu);
 }
 
 /**
  * batadv_neigh_node_free_ref - decrement the neighbors refcounter
- * and possibly free it
+ * and possibly release it
  * @neigh_node: neigh neighbor to free
  */
 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
 {
         if (atomic_dec_and_test(&neigh_node->refcount))
-                call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
+                batadv_neigh_node_release(neigh_node);
 }
 
 /**
@@ -733,79 +686,48 @@ int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
 }
 
 /**
- * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
- * @rcu: rcu pointer of the orig_ifinfo object
+ * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
+ * free after rcu grace period
+ * @orig_ifinfo: the orig_ifinfo object to release
  */
-static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
+static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
 {
-        struct batadv_orig_ifinfo *orig_ifinfo;
         struct batadv_neigh_node *router;
 
-        orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
-
         if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
-                batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
+                batadv_hardif_free_ref(orig_ifinfo->if_outgoing);
 
         /* this is the last reference to this object */
         router = rcu_dereference_protected(orig_ifinfo->router, true);
         if (router)
-                batadv_neigh_node_free_ref_now(router);
-        kfree(orig_ifinfo);
+                batadv_neigh_node_free_ref(router);
+
+        kfree_rcu(orig_ifinfo, rcu);
 }
 
 /**
- * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
- * the orig_ifinfo (without rcu callback)
- * @orig_ifinfo: the orig_ifinfo object to release
- */
-static void
-batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
-{
-        if (atomic_dec_and_test(&orig_ifinfo->refcount))
-                batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
-}
-
-/**
- * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
+ * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
  * the orig_ifinfo
  * @orig_ifinfo: the orig_ifinfo object to release
  */
 void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
 {
         if (atomic_dec_and_test(&orig_ifinfo->refcount))
-                call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
+                batadv_orig_ifinfo_release(orig_ifinfo);
 }
 
 /**
  * batadv_orig_node_free_rcu - free the orig_node
  * @rcu: rcu pointer of the orig_node
  */
 static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
 {
-        struct hlist_node *node_tmp;
-        struct batadv_neigh_node *neigh_node;
         struct batadv_orig_node *orig_node;
-        struct batadv_orig_ifinfo *orig_ifinfo;
 
         orig_node = container_of(rcu, struct batadv_orig_node, rcu);
 
-        spin_lock_bh(&orig_node->neigh_list_lock);
-
-        /* for all neighbors towards this originator ... */
-        hlist_for_each_entry_safe(neigh_node, node_tmp,
-                                  &orig_node->neigh_list, list) {
-                hlist_del_rcu(&neigh_node->list);
-                batadv_neigh_node_free_ref_now(neigh_node);
-        }
-
-        hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
-                                  &orig_node->ifinfo_list, list) {
-                hlist_del_rcu(&orig_ifinfo->list);
-                batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
-        }
-        spin_unlock_bh(&orig_node->neigh_list_lock);
-
         batadv_mcast_purge_orig(orig_node);
 
-        /* Free nc_nodes */
-        batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
-
         batadv_frag_purge_orig(orig_node, NULL);
 
         if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
@@ -815,26 +737,48 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
         kfree(orig_node);
 }
 
+/**
+ * batadv_orig_node_release - release orig_node from lists and queue for
+ * free after rcu grace period
+ * @orig_node: the orig node to free
+ */
+static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
+{
+        struct hlist_node *node_tmp;
+        struct batadv_neigh_node *neigh_node;
+        struct batadv_orig_ifinfo *orig_ifinfo;
+
+        spin_lock_bh(&orig_node->neigh_list_lock);
+
+        /* for all neighbors towards this originator ... */
+        hlist_for_each_entry_safe(neigh_node, node_tmp,
+                                  &orig_node->neigh_list, list) {
+                hlist_del_rcu(&neigh_node->list);
+                batadv_neigh_node_free_ref(neigh_node);
+        }
+
+        hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
+                                  &orig_node->ifinfo_list, list) {
+                hlist_del_rcu(&orig_ifinfo->list);
+                batadv_orig_ifinfo_free_ref(orig_ifinfo);
+        }
+        spin_unlock_bh(&orig_node->neigh_list_lock);
+
+        /* Free nc_nodes */
+        batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
+
+        call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
+}
+
 /**
  * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
- * schedule an rcu callback for freeing it
+ * release it
  * @orig_node: the orig node to free
  */
 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
 {
         if (atomic_dec_and_test(&orig_node->refcount))
-                call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
-}
-
-/**
- * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
- * possibly free it (without rcu callback)
- * @orig_node: the orig node to free
- */
-void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
-{
-        if (atomic_dec_and_test(&orig_node->refcount))
-                batadv_orig_node_free_rcu(&orig_node->rcu);
+                batadv_orig_node_release(orig_node);
 }
 
 void batadv_originator_free(struct batadv_priv *bat_priv)
@@ -38,7 +38,6 @@ int batadv_originator_init(struct batadv_priv *bat_priv);
 void batadv_originator_free(struct batadv_priv *bat_priv);
 void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
-void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
                                               const u8 *addr);
 struct batadv_hardif_neigh_node *
@@ -240,20 +240,6 @@ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
         return count;
 }
 
-static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
-{
-        struct batadv_tt_orig_list_entry *orig_entry;
-
-        orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
-
-        /* We are in an rcu callback here, therefore we cannot use
-         * batadv_orig_node_free_ref() and its call_rcu():
-         * An rcu_barrier() wouldn't wait for that to finish
-         */
-        batadv_orig_node_free_ref_now(orig_entry->orig_node);
-        kfree(orig_entry);
-}
-
 /**
  * batadv_tt_local_size_mod - change the size by v of the local table identified
  * by vid
@@ -349,13 +335,25 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
         batadv_tt_global_size_mod(orig_node, vid, -1);
 }
 
+/**
+ * batadv_tt_orig_list_entry_release - release tt orig entry from lists and
+ * queue for free after rcu grace period
+ * @orig_entry: tt orig entry to be free'd
+ */
+static void
+batadv_tt_orig_list_entry_release(struct batadv_tt_orig_list_entry *orig_entry)
+{
+        batadv_orig_node_free_ref(orig_entry->orig_node);
+        kfree_rcu(orig_entry, rcu);
+}
+
 static void
 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
 {
         if (!atomic_dec_and_test(&orig_entry->refcount))
                 return;
 
-        call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
+        batadv_tt_orig_list_entry_release(orig_entry);
 }
 
 /**