net: rps: protect last_qtail with rps_input_queue_tail_save() helper

Remove one unnecessary reader protection and add another writer
protection to finish the lockless protection job.

Note: the removed READ_ONCE() is not needed because we only have to
protect the lockless reader running in a different context
(rps_may_expire_flow()).
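
For reference, the writer-side helper is a thin WRITE_ONCE() wrapper; a
minimal sketch of rps_input_queue_tail_save(), assuming the shape it has
in net/core/dev.h as of this series:

static inline void rps_input_queue_tail_save(u32 *dest, u32 tail)
{
#ifdef CONFIG_RPS
	/* Paired with the READ_ONCE() in rps_may_expire_flow(). */
	WRITE_ONCE(*dest, tail);
#endif
}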

Signed-off-by: Jason Xing <kernelxing@tencent.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 84b6823cd9
parent 00ac0dc347
Author: Jason Xing <kernelxing@tencent.com> 2024-04-18 15:36:01 +08:00
Committer: David S. Miller <davem@davemloft.net>

@@ -4507,7 +4507,7 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		struct netdev_rx_queue *rxqueue;
 		struct rps_dev_flow_table *flow_table;
 		struct rps_dev_flow *old_rflow;
-		u32 flow_id;
+		u32 flow_id, head;
 		u16 rxq_index;
 		int rc;
 
@@ -4535,8 +4535,8 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		old_rflow->filter = RPS_NO_FILTER;
 	out:
 #endif
-		rflow->last_qtail =
-			READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head);
+		head = READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head);
+		rps_input_queue_tail_save(&rflow->last_qtail, head);
 	}
 
 	rflow->cpu = next_cpu;
@@ -4619,7 +4619,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		if (unlikely(tcpu != next_cpu) &&
 		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
 		     ((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) -
-		      READ_ONCE(rflow->last_qtail))) >= 0)) {
+		      rflow->last_qtail)) >= 0)) {
 			tcpu = next_cpu;
 			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
 		}
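
For context, the lockless reader mentioned in the changelog is
rps_may_expire_flow(), which aRFS drivers call from outside the RX path
to decide whether a steering filter can be reclaimed. A simplified
sketch of its expiry check, assuming the mainline shape of that function
(RCU lookup and locals trimmed):

	/* Flow recently queued packets on @cpu? Then keep the filter. */
	cpu = READ_ONCE(rflow->cpu);
	if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
	    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
		   READ_ONCE(rflow->last_qtail)) <
	     (int)(10 * flow_table->mask)))
		expire = false;

This READ_ONCE() on last_qtail can race with set_rps_cpu() storing to it
on another CPU, which is why the store now goes through
rps_input_queue_tail_save(); the read in get_rps_cpu() above is the one
the changelog considers safe without the annotation.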