net: Fix race condition in store_rps_map
There is a race condition in store_rps_map that allows the jump label count in rps_needed to go below zero. This can happen when concurrently attempting to set and clear a map.

Scenario:

1. rps_needed count is zero.
2. A new map is assigned by the setting thread, but the rps_needed count is _not_ yet incremented (rps_needed count still zero).
3. The map is cleared by a second thread; old_map is set to the map just assigned.
4. The second thread performs static_key_slow_dec, and the rps_needed count now goes negative.

The fix is to increment or decrement rps_needed under the spinlock.

Signed-off-by: Tom Herbert <tom@herbertland.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
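To make the interleaving concrete, here is a minimal userspace C sketch (not the kernel code) that replays the scenario's steps sequentially. The plain rps_needed_count variable and the "Thread A"/"Thread B" sections are illustrative stand-ins for the rps_needed jump-label count and the two concurrent writers; in the real kernel the decrement below zero is an invalid state for the jump-label machinery, not just a negative integer.

/* Hypothetical replay of the race; a plain int stands in for the
 * rps_needed jump-label count, a void pointer for queue->rps_map. */
#include <stdio.h>

static int rps_needed_count;	/* stand-in for the static key count */
static void *rps_map;		/* stand-in for queue->rps_map */

int main(void)
{
	int dummy;
	void *new_map = &dummy;	/* setting thread installs a non-NULL map */
	void *old_map;

	/* Thread A (set): installs the map and drops the lock, but has
	 * not yet run static_key_slow_inc(); the count is still zero. */
	rps_map = new_map;

	/* Thread B (clear): runs in that window, picks up the map that
	 * A just installed, and decrements. */
	old_map = rps_map;
	rps_map = NULL;
	if (old_map)
		rps_needed_count--;	/* static_key_slow_dec: 0 -> -1 */

	printf("count after clear: %d\n", rps_needed_count);	/* -1 */

	/* Thread A resumes and increments, but the count has already
	 * gone below zero in the meantime. */
	if (new_map)
		rps_needed_count++;

	printf("count after set completes: %d\n", rps_needed_count);	/* 0 */
	return 0;
}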
parent e05176a328
commit 10e4ea7514
@@ -726,14 +726,17 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 	old_map = rcu_dereference_protected(queue->rps_map,
 					    lockdep_is_held(&rps_map_lock));
 	rcu_assign_pointer(queue->rps_map, map);
-	spin_unlock(&rps_map_lock);
 
 	if (map)
 		static_key_slow_inc(&rps_needed);
-	if (old_map) {
-		kfree_rcu(old_map, rcu);
+	if (old_map)
 		static_key_slow_dec(&rps_needed);
-	}
+
+	spin_unlock(&rps_map_lock);
+
+	if (old_map)
+		kfree_rcu(old_map, rcu);
+
 	free_cpumask_var(mask);
 	return len;
 }
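With the increment and decrement performed while rps_map_lock is held, the count transitions are serialized with map assignment, so a clearing thread can no longer observe a newly installed map before the corresponding increment has happened. The kfree_rcu() call, which does not touch the count, moves after the unlock so the freeing work stays out of the critical section.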