net: Use u64_stats_fetch_begin_irq() for stats fetch.
On 32bit-UP, u64_stats_fetch_begin() disables only preemption. If the reader runs in preemptible context while the writer side (u64_stats_update_begin*()) runs in interrupt context (IRQ or softirq), the writer can update the stats during the read operation and the update remains undetected. Use u64_stats_fetch_begin_irq() so that the stats fetch on 32bit-UP is not interrupted by a writer. 32bit-SMP remains unaffected by this change.

Cc: "David S. Miller" <davem@davemloft.net>
Cc: Catherine Sullivan <csully@google.com>
Cc: David Awogbemila <awogbemila@google.com>
Cc: Dimitris Michailidis <dmichail@fungible.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Hans Ulli Kroll <ulli.kroll@googlemail.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Jeroen de Borst <jeroendb@google.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Linus Walleij <linus.walleij@linaro.org>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Simon Horman <simon.horman@corigine.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-wireless@vger.kernel.org
Cc: netdev@vger.kernel.org
Cc: oss-drivers@corigine.com
Cc: stable@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 278d3ba615
parent 3f8ae9fe04
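For context, the race the commit message describes and the reader pattern the patch converts to can be sketched as below. This is a minimal illustration, not code from the patch: the struct and function names (my_stats, my_stats_update, my_get_stats64) are made up, and only the u64_stats_* helpers are the real kernel API.

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

struct my_stats {
	u64 rx_packets;
	u64 rx_bytes;
	struct u64_stats_sync syncp;
};

/* Writer side, e.g. called from NAPI poll or a (soft)IRQ handler. */
static void my_stats_update(struct my_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->rx_packets++;
	s->rx_bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader side, e.g. an .ndo_get_stats64 handler in preemptible context. */
static void my_get_stats64(struct my_stats *s, struct rtnl_link_stats64 *out)
{
	unsigned int start;

	do {
		/*
		 * On 32bit-UP this disables interrupts, so a writer running
		 * from (soft)IRQ context cannot update the counters while
		 * they are being copied. Plain u64_stats_fetch_begin() only
		 * disables preemption there and would not detect such an
		 * update.
		 */
		start = u64_stats_fetch_begin_irq(&s->syncp);
		out->rx_packets = s->rx_packets;
		out->rx_bytes = s->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}

On 64bit and 32bit-SMP the _irq fetch helpers behave the same as the plain ones, so only 32bit-UP behaviour changes. The hunks below apply exactly this begin/retry conversion across the affected drivers and subsystems.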
@@ -1919,7 +1919,7 @@ static void gmac_get_stats64(struct net_device *netdev,
 
 	/* Racing with RX NAPI */
 	do {
-		start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+		start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
 
 		stats->rx_packets = port->stats.rx_packets;
 		stats->rx_bytes = port->stats.rx_bytes;
@@ -1931,11 +1931,11 @@ static void gmac_get_stats64(struct net_device *netdev,
 		stats->rx_crc_errors = port->stats.rx_crc_errors;
 		stats->rx_frame_errors = port->stats.rx_frame_errors;
 
-	} while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+	} while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
 
 	/* Racing with MIB and TX completion interrupts */
 	do {
-		start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+		start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
 
 		stats->tx_errors = port->stats.tx_errors;
 		stats->tx_packets = port->stats.tx_packets;
@@ -1945,15 +1945,15 @@ static void gmac_get_stats64(struct net_device *netdev,
 		stats->rx_missed_errors = port->stats.rx_missed_errors;
 		stats->rx_fifo_errors = port->stats.rx_fifo_errors;
 
-	} while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+	} while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
 
 	/* Racing with hard_start_xmit */
 	do {
-		start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+		start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
 
 		stats->tx_dropped = port->stats.tx_dropped;
 
-	} while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+	} while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
 
 	stats->rx_dropped += stats->rx_missed_errors;
 }
@@ -2031,18 +2031,18 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
 	/* Racing with MIB interrupt */
 	do {
 		p = values;
-		start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+		start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
 
 		for (i = 0; i < RX_STATS_NUM; i++)
 			*p++ = port->hw_stats[i];
 
-	} while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+	} while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
 	values = p;
 
 	/* Racing with RX NAPI */
 	do {
 		p = values;
-		start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+		start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
 
 		for (i = 0; i < RX_STATUS_NUM; i++)
 			*p++ = port->rx_stats[i];
@@ -2050,13 +2050,13 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
 			*p++ = port->rx_csum_stats[i];
 		*p++ = port->rx_napi_exits;
 
-	} while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+	} while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
 	values = p;
 
 	/* Racing with TX start_xmit */
 	do {
 		p = values;
-		start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+		start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
 
 		for (i = 0; i < TX_MAX_FRAGS; i++) {
 			*values++ = port->tx_frag_stats[i];
@@ -2065,7 +2065,7 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
 		*values++ = port->tx_frags_linearized;
 		*values++ = port->tx_hw_csummed;
 
-	} while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+	} while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
 }
 
 static int gmac_get_ksettings(struct net_device *netdev,
@@ -206,9 +206,9 @@ struct funeth_rxq {
 
 #define FUN_QSTAT_READ(q, seq, stats_copy) \
 	do { \
-		seq = u64_stats_fetch_begin(&(q)->syncp); \
+		seq = u64_stats_fetch_begin_irq(&(q)->syncp); \
 		stats_copy = (q)->stats; \
-	} while (u64_stats_fetch_retry(&(q)->syncp, (seq)))
+	} while (u64_stats_fetch_retry_irq(&(q)->syncp, (seq)))
 
 #define FUN_INT_NAME_LEN (IFNAMSIZ + 16)
 
@@ -177,14 +177,14 @@ gve_get_ethtool_stats(struct net_device *netdev,
 			struct gve_rx_ring *rx = &priv->rx[ring];
 
 			start =
-			  u64_stats_fetch_begin(&priv->rx[ring].statss);
+			  u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
 			tmp_rx_pkts = rx->rpackets;
 			tmp_rx_bytes = rx->rbytes;
 			tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
 			tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
 			tmp_rx_desc_err_dropped_pkt =
				rx->rx_desc_err_dropped_pkt;
-		} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+		} while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
					       start));
 		rx_pkts += tmp_rx_pkts;
 		rx_bytes += tmp_rx_bytes;
@@ -198,10 +198,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
 	if (priv->tx) {
 		do {
 			start =
-			  u64_stats_fetch_begin(&priv->tx[ring].statss);
+			  u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
 			tmp_tx_pkts = priv->tx[ring].pkt_done;
 			tmp_tx_bytes = priv->tx[ring].bytes_done;
-		} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+		} while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
					       start));
 		tx_pkts += tmp_tx_pkts;
 		tx_bytes += tmp_tx_bytes;
@@ -259,13 +259,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
 		data[i++] = rx->fill_cnt - rx->cnt;
 		do {
 			start =
-			  u64_stats_fetch_begin(&priv->rx[ring].statss);
+			  u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
 			tmp_rx_bytes = rx->rbytes;
 			tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
 			tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
 			tmp_rx_desc_err_dropped_pkt =
				rx->rx_desc_err_dropped_pkt;
-		} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+		} while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
					       start));
 		data[i++] = tmp_rx_bytes;
 		data[i++] = rx->rx_cont_packet_cnt;
@@ -331,9 +331,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
 		}
 		do {
 			start =
-			  u64_stats_fetch_begin(&priv->tx[ring].statss);
+			  u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
 			tmp_tx_bytes = tx->bytes_done;
-		} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+		} while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
					       start));
 		data[i++] = tmp_tx_bytes;
 		data[i++] = tx->wake_queue;
@@ -51,10 +51,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
 	for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
 		do {
 			start =
-			  u64_stats_fetch_begin(&priv->rx[ring].statss);
+			  u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
 			packets = priv->rx[ring].rpackets;
 			bytes = priv->rx[ring].rbytes;
-		} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+		} while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
					       start));
 		s->rx_packets += packets;
 		s->rx_bytes += bytes;
@@ -64,10 +64,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
 	for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
 		do {
 			start =
-			  u64_stats_fetch_begin(&priv->tx[ring].statss);
+			  u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
 			packets = priv->tx[ring].pkt_done;
 			bytes = priv->tx[ring].bytes_done;
-		} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+		} while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
					       start));
 		s->tx_packets += packets;
 		s->tx_bytes += bytes;
@@ -1274,9 +1274,9 @@ void gve_handle_report_stats(struct gve_priv *priv)
 	}
 
 	do {
-		start = u64_stats_fetch_begin(&priv->tx[idx].statss);
+		start = u64_stats_fetch_begin_irq(&priv->tx[idx].statss);
 		tx_bytes = priv->tx[idx].bytes_done;
-	} while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
+	} while (u64_stats_fetch_retry_irq(&priv->tx[idx].statss, start));
 	stats[stats_idx++] = (struct stats) {
 		.stat_name = cpu_to_be32(TX_WAKE_CNT),
 		.value = cpu_to_be64(priv->tx[idx].wake_queue),
@@ -74,14 +74,14 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
 	unsigned int start;
 
 	do {
-		start = u64_stats_fetch_begin(&rxq_stats->syncp);
+		start = u64_stats_fetch_begin_irq(&rxq_stats->syncp);
 		stats->pkts = rxq_stats->pkts;
 		stats->bytes = rxq_stats->bytes;
 		stats->errors = rxq_stats->csum_errors +
 				rxq_stats->other_errors;
 		stats->csum_errors = rxq_stats->csum_errors;
 		stats->other_errors = rxq_stats->other_errors;
-	} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+	} while (u64_stats_fetch_retry_irq(&rxq_stats->syncp, start));
 }
 
 /**
@@ -99,14 +99,14 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
 	unsigned int start;
 
 	do {
-		start = u64_stats_fetch_begin(&txq_stats->syncp);
+		start = u64_stats_fetch_begin_irq(&txq_stats->syncp);
 		stats->pkts = txq_stats->pkts;
 		stats->bytes = txq_stats->bytes;
 		stats->tx_busy = txq_stats->tx_busy;
 		stats->tx_wake = txq_stats->tx_wake;
 		stats->tx_dropped = txq_stats->tx_dropped;
 		stats->big_frags_pkts = txq_stats->big_frags_pkts;
-	} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+	} while (u64_stats_fetch_retry_irq(&txq_stats->syncp, start));
 }
 
 /**
@@ -1630,21 +1630,21 @@ static void nfp_net_stat64(struct net_device *netdev,
 		unsigned int start;
 
 		do {
-			start = u64_stats_fetch_begin(&r_vec->rx_sync);
+			start = u64_stats_fetch_begin_irq(&r_vec->rx_sync);
 			data[0] = r_vec->rx_pkts;
 			data[1] = r_vec->rx_bytes;
 			data[2] = r_vec->rx_drops;
-		} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+		} while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start));
 		stats->rx_packets += data[0];
 		stats->rx_bytes += data[1];
 		stats->rx_dropped += data[2];
 
 		do {
-			start = u64_stats_fetch_begin(&r_vec->tx_sync);
+			start = u64_stats_fetch_begin_irq(&r_vec->tx_sync);
 			data[0] = r_vec->tx_pkts;
 			data[1] = r_vec->tx_bytes;
 			data[2] = r_vec->tx_errors;
-		} while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+		} while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start));
 		stats->tx_packets += data[0];
 		stats->tx_bytes += data[1];
 		stats->tx_errors += data[2];
@@ -649,7 +649,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
 		unsigned int start;
 
 		do {
-			start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
+			start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].rx_sync);
 			data[0] = nn->r_vecs[i].rx_pkts;
 			tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
 			tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
@@ -657,10 +657,10 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
 			tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
 			tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
 			tmp[5] = nn->r_vecs[i].hw_tls_rx;
-		} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
+		} while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].rx_sync, start));
 
 		do {
-			start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
+			start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].tx_sync);
 			data[1] = nn->r_vecs[i].tx_pkts;
 			data[2] = nn->r_vecs[i].tx_busy;
 			tmp[6] = nn->r_vecs[i].hw_csum_tx;
@@ -670,7 +670,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
 			tmp[10] = nn->r_vecs[i].hw_tls_tx;
 			tmp[11] = nn->r_vecs[i].tls_tx_fallback;
 			tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
-		} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
+		} while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].tx_sync, start));
 
 		data += NN_RVEC_PER_Q_STATS;
 	}
@@ -67,10 +67,10 @@ nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	unsigned int start;
 
 	do {
-		start = u64_stats_fetch_begin(&ns->syncp);
+		start = u64_stats_fetch_begin_irq(&ns->syncp);
 		stats->tx_bytes = ns->tx_bytes;
 		stats->tx_packets = ns->tx_packets;
-	} while (u64_stats_fetch_retry(&ns->syncp, start));
+	} while (u64_stats_fetch_retry_irq(&ns->syncp, start));
 }
 
 static int
@@ -2316,9 +2316,9 @@ static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats,
 	u64 value;
 
 	do {
-		start = u64_stats_fetch_begin(&rxstats->syncp);
+		start = u64_stats_fetch_begin_irq(&rxstats->syncp);
 		value = rxstats->msdu[tid];
-	} while (u64_stats_fetch_retry(&rxstats->syncp, start));
+	} while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
 
 	return value;
 }
@@ -2384,9 +2384,9 @@ static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats)
 	u64 value;
 
 	do {
-		start = u64_stats_fetch_begin(&rxstats->syncp);
+		start = u64_stats_fetch_begin_irq(&rxstats->syncp);
 		value = rxstats->bytes;
-	} while (u64_stats_fetch_retry(&rxstats->syncp, start));
+	} while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
 
 	return value;
 }
@@ -1079,9 +1079,9 @@ static void mpls_get_stats(struct mpls_dev *mdev,
 
 		p = per_cpu_ptr(mdev->stats, i);
 		do {
-			start = u64_stats_fetch_begin(&p->syncp);
+			start = u64_stats_fetch_begin_irq(&p->syncp);
 			local = p->stats;
-		} while (u64_stats_fetch_retry(&p->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
 
 		stats->rx_packets += local.rx_packets;
 		stats->rx_bytes += local.rx_bytes;