Mirror of https://github.com/torvalds/linux.git, synced 2024-11-10 06:01:57 +00:00
net: Remove the obsolete u64_stats_fetch_*_irq() users (net).
Now that the 32bit UP oddity is gone and 32bit always uses a sequence count, there is no need for the fetch_irq() variants anymore. Convert to the regular interface.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent 068c38ad88
commit d120d1a63b
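The change is mechanical on the reader side: every per-CPU stats fold keeps the same begin/read/retry loop and merely drops the _irq suffix from u64_stats_fetch_begin()/u64_stats_fetch_retry(). A minimal sketch of the converted pattern, modelled on the hunks below, looks like this (struct demo_pcpu_stats and demo_fold_stats() are illustrative names invented for this example, not code from the patch):

/* Sketch only: the u64_stats_* helpers are the real kernel interface;
 * the demo_* struct, fields and function are made up for illustration. */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
	u64_stats_t		rx_packets;
	u64_stats_t		rx_bytes;
	struct u64_stats_sync	syncp;
};

static void demo_fold_stats(struct demo_pcpu_stats __percpu *pcpu,
			    u64 *rx_packets, u64 *rx_bytes)
{
	int cpu;

	*rx_packets = 0;
	*rx_bytes = 0;

	for_each_possible_cpu(cpu) {
		const struct demo_pcpu_stats *stats = per_cpu_ptr(pcpu, cpu);
		unsigned int start;
		u64 packets, bytes;

		do {
			/* was: start = u64_stats_fetch_begin_irq(&stats->syncp); */
			start = u64_stats_fetch_begin(&stats->syncp);
			packets = u64_stats_read(&stats->rx_packets);
			bytes = u64_stats_read(&stats->rx_bytes);
			/* was: u64_stats_fetch_retry_irq(&stats->syncp, start) */
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		*rx_packets += packets;
		*rx_bytes += bytes;
	}
}

Note that the hunks below only touch this read-side fetch pattern; no update-side u64_stats code is part of this diff.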
@@ -712,13 +712,13 @@ static void vlan_dev_get_stats64(struct net_device *dev,

 		p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
 		do {
-			start = u64_stats_fetch_begin_irq(&p->syncp);
+			start = u64_stats_fetch_begin(&p->syncp);
 			rxpackets = u64_stats_read(&p->rx_packets);
 			rxbytes = u64_stats_read(&p->rx_bytes);
 			rxmulticast = u64_stats_read(&p->rx_multicast);
 			txpackets = u64_stats_read(&p->tx_packets);
 			txbytes = u64_stats_read(&p->tx_bytes);
-		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+		} while (u64_stats_fetch_retry(&p->syncp, start));

 		stats->rx_packets += rxpackets;
 		stats->rx_bytes += rxbytes;
@@ -4899,9 +4899,9 @@ void br_multicast_get_stats(const struct net_bridge *br,
 		unsigned int start;

 		do {
-			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+			start = u64_stats_fetch_begin(&cpu_stats->syncp);
 			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
-		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

 		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
 		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
@@ -1378,12 +1378,12 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v,

 		cpu_stats = per_cpu_ptr(v->stats, i);
 		do {
-			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+			start = u64_stats_fetch_begin(&cpu_stats->syncp);
 			rxpackets = u64_stats_read(&cpu_stats->rx_packets);
 			rxbytes = u64_stats_read(&cpu_stats->rx_bytes);
 			txbytes = u64_stats_read(&cpu_stats->tx_bytes);
 			txpackets = u64_stats_read(&cpu_stats->tx_packets);
-		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

 		u64_stats_add(&stats->rx_packets, rxpackets);
 		u64_stats_add(&stats->rx_bytes, rxbytes);
@@ -10477,12 +10477,12 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,

 		stats = per_cpu_ptr(netstats, cpu);
 		do {
-			start = u64_stats_fetch_begin_irq(&stats->syncp);
+			start = u64_stats_fetch_begin(&stats->syncp);
 			rx_packets = u64_stats_read(&stats->rx_packets);
 			rx_bytes = u64_stats_read(&stats->rx_bytes);
 			tx_packets = u64_stats_read(&stats->tx_packets);
 			tx_bytes = u64_stats_read(&stats->tx_bytes);
-		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+		} while (u64_stats_fetch_retry(&stats->syncp, start));

 		s->rx_packets += rx_packets;
 		s->rx_bytes += rx_bytes;
@@ -8304,10 +8304,10 @@ static void devlink_trap_stats_read(struct devlink_stats __percpu *trap_stats,

 		cpu_stats = per_cpu_ptr(trap_stats, i);
 		do {
-			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+			start = u64_stats_fetch_begin(&cpu_stats->syncp);
 			rx_packets = u64_stats_read(&cpu_stats->rx_packets);
 			rx_bytes = u64_stats_read(&cpu_stats->rx_bytes);
-		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

 		u64_stats_add(&stats->rx_packets, rx_packets);
 		u64_stats_add(&stats->rx_bytes, rx_bytes);
@@ -1432,9 +1432,9 @@ static void net_dm_stats_read(struct net_dm_stats *stats)
 		u64 dropped;

 		do {
-			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+			start = u64_stats_fetch_begin(&cpu_stats->syncp);
 			dropped = u64_stats_read(&cpu_stats->dropped);
-		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

 		u64_stats_add(&stats->dropped, dropped);
 	}
@@ -1476,9 +1476,9 @@ static void net_dm_hw_stats_read(struct net_dm_stats *stats)
 		u64 dropped;

 		do {
-			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+			start = u64_stats_fetch_begin(&cpu_stats->syncp);
 			dropped = u64_stats_read(&cpu_stats->dropped);
-		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

 		u64_stats_add(&stats->dropped, dropped);
 	}
@@ -135,10 +135,10 @@ static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_sync *bstats,
 		u64 bytes, packets;

 		do {
-			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
+			start = u64_stats_fetch_begin(&bcpu->syncp);
 			bytes = u64_stats_read(&bcpu->bytes);
 			packets = u64_stats_read(&bcpu->packets);
-		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
+		} while (u64_stats_fetch_retry(&bcpu->syncp, start));

 		t_bytes += bytes;
 		t_packets += packets;
@@ -162,10 +162,10 @@ void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
 	}
 	do {
 		if (running)
-			start = u64_stats_fetch_begin_irq(&b->syncp);
+			start = u64_stats_fetch_begin(&b->syncp);
 		bytes = u64_stats_read(&b->bytes);
 		packets = u64_stats_read(&b->packets);
-	} while (running && u64_stats_fetch_retry_irq(&b->syncp, start));
+	} while (running && u64_stats_fetch_retry(&b->syncp, start));

 	_bstats_update(bstats, bytes, packets);
 }
@@ -187,10 +187,10 @@ static void gnet_stats_read_basic(u64 *ret_bytes, u64 *ret_packets,
 		u64 bytes, packets;

 		do {
-			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
+			start = u64_stats_fetch_begin(&bcpu->syncp);
 			bytes = u64_stats_read(&bcpu->bytes);
 			packets = u64_stats_read(&bcpu->packets);
-		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
+		} while (u64_stats_fetch_retry(&bcpu->syncp, start));

 		t_bytes += bytes;
 		t_packets += packets;
@@ -201,10 +201,10 @@ static void gnet_stats_read_basic(u64 *ret_bytes, u64 *ret_packets,
 	}
 	do {
 		if (running)
-			start = u64_stats_fetch_begin_irq(&b->syncp);
+			start = u64_stats_fetch_begin(&b->syncp);
 		*ret_bytes = u64_stats_read(&b->bytes);
 		*ret_packets = u64_stats_read(&b->packets);
-	} while (running && u64_stats_fetch_retry_irq(&b->syncp, start));
+	} while (running && u64_stats_fetch_retry(&b->syncp, start));
 }

 static int
@@ -976,12 +976,12 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,

 		s = per_cpu_ptr(dev->tstats, i);
 		do {
-			start = u64_stats_fetch_begin_irq(&s->syncp);
+			start = u64_stats_fetch_begin(&s->syncp);
 			tx_packets = u64_stats_read(&s->tx_packets);
 			tx_bytes = u64_stats_read(&s->tx_bytes);
 			rx_packets = u64_stats_read(&s->rx_packets);
 			rx_bytes = u64_stats_read(&s->rx_bytes);
-		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
+		} while (u64_stats_fetch_retry(&s->syncp, start));
 		data[0] += tx_packets;
 		data[1] += tx_bytes;
 		data[2] += rx_packets;
@@ -1706,9 +1706,9 @@ u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
 	bhptr = per_cpu_ptr(mib, cpu);
 	syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
 	do {
-		start = u64_stats_fetch_begin_irq(syncp);
+		start = u64_stats_fetch_begin(syncp);
 		v = *(((u64 *)bhptr) + offt);
-	} while (u64_stats_fetch_retry_irq(syncp, start));
+	} while (u64_stats_fetch_retry(syncp, start));

 	return v;
 }
@@ -1644,13 +1644,13 @@ static int put_nla_counters(struct sk_buff *skb, struct seg6_local_lwt *slwt)

 		pcounters = per_cpu_ptr(slwt->pcpu_counters, i);
 		do {
-			start = u64_stats_fetch_begin_irq(&pcounters->syncp);
+			start = u64_stats_fetch_begin(&pcounters->syncp);

 			packets = u64_stats_read(&pcounters->packets);
 			bytes = u64_stats_read(&pcounters->bytes);
 			errors = u64_stats_read(&pcounters->errors);

-		} while (u64_stats_fetch_retry_irq(&pcounters->syncp, start));
+		} while (u64_stats_fetch_retry(&pcounters->syncp, start));

 		counters.packets += packets;
 		counters.bytes += bytes;
@@ -2427,9 +2427,9 @@ static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats,
 	u64 value;

 	do {
-		start = u64_stats_fetch_begin_irq(&rxstats->syncp);
+		start = u64_stats_fetch_begin(&rxstats->syncp);
 		value = rxstats->msdu[tid];
-	} while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
+	} while (u64_stats_fetch_retry(&rxstats->syncp, start));

 	return value;
 }
@@ -2495,9 +2495,9 @@ static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats)
 	u64 value;

 	do {
-		start = u64_stats_fetch_begin_irq(&rxstats->syncp);
+		start = u64_stats_fetch_begin(&rxstats->syncp);
 		value = rxstats->bytes;
-	} while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
+	} while (u64_stats_fetch_retry(&rxstats->syncp, start));

 	return value;
 }
@@ -1079,9 +1079,9 @@ static void mpls_get_stats(struct mpls_dev *mdev,

 		p = per_cpu_ptr(mdev->stats, i);
 		do {
-			start = u64_stats_fetch_begin_irq(&p->syncp);
+			start = u64_stats_fetch_begin(&p->syncp);
 			local = p->stats;
-		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+		} while (u64_stats_fetch_retry(&p->syncp, start));

 		stats->rx_packets += local.rx_packets;
 		stats->rx_bytes += local.rx_bytes;
@@ -2296,13 +2296,13 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
 		u64 conns, inpkts, outpkts, inbytes, outbytes;

 		do {
-			start = u64_stats_fetch_begin_irq(&u->syncp);
+			start = u64_stats_fetch_begin(&u->syncp);
 			conns = u->cnt.conns;
 			inpkts = u->cnt.inpkts;
 			outpkts = u->cnt.outpkts;
 			inbytes = u->cnt.inbytes;
 			outbytes = u->cnt.outbytes;
-		} while (u64_stats_fetch_retry_irq(&u->syncp, start));
+		} while (u64_stats_fetch_retry(&u->syncp, start));

 		seq_printf(seq, "%3X %8LX %8LX %8LX %16LX %16LX\n",
 			   i, (u64)conns, (u64)inpkts,
@@ -1534,10 +1534,10 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
 	for_each_possible_cpu(cpu) {
 		cpu_stats = per_cpu_ptr(stats, cpu);
 		do {
-			seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+			seq = u64_stats_fetch_begin(&cpu_stats->syncp);
 			pkts = cpu_stats->pkts;
 			bytes = cpu_stats->bytes;
-		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
+		} while (u64_stats_fetch_retry(&cpu_stats->syncp, seq));
 		total.pkts += pkts;
 		total.bytes += bytes;
 	}
@@ -716,9 +716,9 @@ static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
 		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

 		do {
-			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
+			start = u64_stats_fetch_begin(&percpu_stats->syncp);
 			local_stats = *percpu_stats;
-		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
+		} while (u64_stats_fetch_retry(&percpu_stats->syncp, start));

 		stats->n_hit += local_stats.n_hit;
 		stats->n_missed += local_stats.n_missed;
@@ -205,9 +205,9 @@ static void tbl_mask_array_reset_counters(struct mask_array *ma)

 			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
 			do {
-				start = u64_stats_fetch_begin_irq(&stats->syncp);
+				start = u64_stats_fetch_begin(&stats->syncp);
 				counter = stats->usage_cntrs[i];
-			} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+			} while (u64_stats_fetch_retry(&stats->syncp, start));

 			ma->masks_usage_zero_cntr[i] += counter;
 		}
@@ -1136,10 +1136,9 @@ void ovs_flow_masks_rebalance(struct flow_table *table)

 			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
 			do {
-				start = u64_stats_fetch_begin_irq(&stats->syncp);
+				start = u64_stats_fetch_begin(&stats->syncp);
 				counter = stats->usage_cntrs[i];
-			} while (u64_stats_fetch_retry_irq(&stats->syncp,
-							   start));
+			} while (u64_stats_fetch_retry(&stats->syncp, start));

 			masks_and_count[i].counter += counter;
 		}