rhashtable: Avoid calculating hash again to unlock
Caching the lock pointer avoids having to hash on the object again
to unlock the bucket locks.

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 617011e7d5
parent 9f1ab18672
committed by David S. Miller
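For reference, a minimal userspace sketch of the same idea follows: compute the per-bucket lock pointer once at insertion time and reuse it for the unlock, instead of hashing the object a second time on the unlock path. This is not the kernel rhashtable code; bucket_table, bucket_lock(), hashfn() and insert() are hypothetical names for the illustration, and only the pthread spinlock calls are real APIs. Build with: cc -pthread example.c

/*
 * Sketch of the pattern this commit applies to __rhashtable_insert():
 * cache the bucket lock pointer so the unlock path needs no re-hash.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_BUCKETS 16

struct bucket_table {
	pthread_spinlock_t locks[NR_BUCKETS];
	uint32_t heads[NR_BUCKETS];	/* stand-in for the bucket chains */
};

static uint32_t hashfn(uint32_t key)
{
	return (key * 2654435761u) % NR_BUCKETS;	/* multiplicative hash */
}

static pthread_spinlock_t *bucket_lock(struct bucket_table *tbl, uint32_t hash)
{
	return &tbl->locks[hash];
}

static void insert(struct bucket_table *tbl, uint32_t key)
{
	uint32_t hash = hashfn(key);
	/* Cache the lock pointer so the unlock does not re-hash the key. */
	pthread_spinlock_t *lock = bucket_lock(tbl, hash);

	pthread_spin_lock(lock);
	tbl->heads[hash] = key;		/* placeholder for the list insertion */
	pthread_spin_unlock(lock);	/* reuses the cached pointer */
}

int main(void)
{
	struct bucket_table tbl;
	int i;

	for (i = 0; i < NR_BUCKETS; i++)
		pthread_spin_init(&tbl.locks[i], PTHREAD_PROCESS_PRIVATE);

	insert(&tbl, 42);
	printf("bucket %u holds %u\n", hashfn(42), tbl.heads[hashfn(42)]);

	for (i = 0; i < NR_BUCKETS; i++)
		pthread_spin_destroy(&tbl.locks[i]);
	return 0;
}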
@@ -384,14 +384,16 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	struct rhash_head *head;
 	bool no_resize_running;
 	unsigned hash;
+	spinlock_t *old_lock;
 	bool success = true;
 
 	rcu_read_lock();
 
 	old_tbl = rht_dereference_rcu(ht->tbl, ht);
 	hash = head_hashfn(ht, old_tbl, obj);
+	old_lock = bucket_lock(old_tbl, hash);
 
-	spin_lock_bh(bucket_lock(old_tbl, hash));
+	spin_lock_bh(old_lock);
 
 	/* Because we have already taken the bucket lock in old_tbl,
 	 * if we find that future_tbl is not yet visible then that
@@ -428,13 +430,10 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 		schedule_work(&ht->run_work);
 
 exit:
-	if (tbl != old_tbl) {
-		hash = head_hashfn(ht, tbl, obj);
+	if (tbl != old_tbl)
 		spin_unlock(bucket_lock(tbl, hash));
-	}
 
-	hash = head_hashfn(ht, old_tbl, obj);
-	spin_unlock_bh(bucket_lock(old_tbl, hash));
+	spin_unlock_bh(old_lock);
 
 	rcu_read_unlock();