Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next

Ben Hutchings says:

====================
1. Further cleanup and refactoring in preparation for EF10.
2. Remove ethtool stats that are always zero on Falcon boards.
3. Add an ethtool stat for merged TX completions.
4. Prepare to support merged RX completions.
5. Prepare to support more hwmon sensors.
6. Add support for new events that are generated by EF10 firmware.
7. Update MC reboot detection for EF10.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2013-08-29 01:56:01 -04:00 in commit 4c9d546f6c.
20 changed files with 1533 additions and 1323 deletions.


@ -80,8 +80,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
[RESET_TYPE_INT_ERROR] = "INT_ERROR",
[RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
[RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
[RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
[RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
};
@ -574,7 +573,7 @@ static void efx_start_datapath(struct efx_nic *efx)
* support the current MTU, including padding for header
* alignment and overruns.
*/
efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
efx->rx_dma_len = (efx->rx_prefix_size +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
rx_buf_len = (sizeof(struct efx_rx_page_state) +
@ -1920,34 +1919,9 @@ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_mac_stats *mac_stats = &efx->mac_stats;
spin_lock_bh(&efx->stats_lock);
efx->type->update_stats(efx);
stats->rx_packets = mac_stats->rx_packets;
stats->tx_packets = mac_stats->tx_packets;
stats->rx_bytes = mac_stats->rx_bytes;
stats->tx_bytes = mac_stats->tx_bytes;
stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
stats->multicast = mac_stats->rx_multicast;
stats->collisions = mac_stats->tx_collision;
stats->rx_length_errors = (mac_stats->rx_gtjumbo +
mac_stats->rx_length_error);
stats->rx_crc_errors = mac_stats->rx_bad;
stats->rx_frame_errors = mac_stats->rx_align_error;
stats->rx_fifo_errors = mac_stats->rx_overflow;
stats->rx_missed_errors = mac_stats->rx_missed;
stats->tx_window_errors = mac_stats->tx_late_collision;
stats->rx_errors = (stats->rx_length_errors +
stats->rx_crc_errors +
stats->rx_frame_errors +
mac_stats->rx_symbol_error);
stats->tx_errors = (stats->tx_window_errors +
mac_stats->tx_bad);
efx->type->update_stats(efx, NULL, stats);
spin_unlock_bh(&efx->stats_lock);
return stats;
@ -2228,8 +2202,6 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
"could not restore PHY settings\n");
}
efx->type->reconfigure_mac(efx);
efx_enable_interrupts(efx);
efx_restore_filters(efx);
efx_sriov_reset(efx);
@ -2484,6 +2456,9 @@ static int efx_init_struct(struct efx_nic *efx,
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
efx->rx_prefix_size = efx->type->rx_prefix_size;
efx->rx_packet_hash_offset =
efx->type->rx_hash_offset - efx->type->rx_prefix_size;
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->phy_op = &efx_dummy_phy_operations;
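
The two derived RX-prefix fields above fall straight out of the Falcon B0 prefix constants added to farch_regs.h later in this series; a worked example using those values:

/* Falcon B0 (farch_regs.h, below):
 *   FS_BZ_RX_PREFIX_SIZE      == 16   prefix precedes the packet data
 *   FS_BZ_RX_PREFIX_HASH_OFST == 12   RSS hash offset within the prefix
 *
 * rx_packet_hash_offset = 12 - 16 = -4, i.e. once the prefix has been
 * skipped the hash sits four bytes before the start of the packet data.
 */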


@ -204,7 +204,12 @@ extern void efx_port_dummy_op_void(struct efx_nic *efx);
/* MTD */
#ifdef CONFIG_SFC_MTD
extern int efx_mtd_probe(struct efx_nic *efx);
extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx)
{
return efx->type->mtd_probe(efx);
}
extern void efx_mtd_rename(struct efx_nic *efx);
extern void efx_mtd_remove(struct efx_nic *efx);
#else


@ -147,8 +147,7 @@ enum efx_loopback_mode {
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
* @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
* @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch
* @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch
* @RESET_TYPE_DMA_ERROR: DMA error
* @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
* @RESET_TYPE_MC_FAILURE: MC reboot/assertion
*/
@ -163,8 +162,7 @@ enum reset_type {
RESET_TYPE_TX_WATCHDOG,
RESET_TYPE_INT_ERROR,
RESET_TYPE_RX_RECOVERY,
RESET_TYPE_RX_DESC_FETCH,
RESET_TYPE_TX_DESC_FETCH,
RESET_TYPE_DMA_ERROR,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MC_FAILURE,
RESET_TYPE_MAX,


@ -19,14 +19,9 @@
#include "filter.h"
#include "nic.h"
struct ethtool_string {
char name[ETH_GSTRING_LEN];
};
struct efx_ethtool_stat {
struct efx_sw_stat_desc {
const char *name;
enum {
EFX_ETHTOOL_STAT_SOURCE_mac_stats,
EFX_ETHTOOL_STAT_SOURCE_nic,
EFX_ETHTOOL_STAT_SOURCE_channel,
EFX_ETHTOOL_STAT_SOURCE_tx_queue
@ -35,7 +30,7 @@ struct efx_ethtool_stat {
u64(*get_stat) (void *field); /* Reader function */
};
/* Initialiser for a struct #efx_ethtool_stat with type-checking */
/* Initialiser for a struct efx_sw_stat_desc with type-checking */
#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
get_stat_function) { \
.name = #stat_name, \
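
The hunk cuts the initialiser off after .name. For reference, a type-checking initialiser of this shape typically continues as below (a sketch, not necessarily the exact macro body): both arms of the conditional yield the same offsetof(), but the pointer comparison in the condition only compiles cleanly when the struct member really has type field_type, so a mismatched EFX_ETHTOOL_STAT() use fails at build time.

#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type,	\
			 get_stat_function) {				\
	.name = #stat_name,						\
	.source = EFX_ETHTOOL_STAT_SOURCE_##source_name,		\
	.offset = ((((field_type *) 0) ==				\
		    &((struct efx_##source_name *) 0)->field) ?		\
		   offsetof(struct efx_##source_name, field) :		\
		   offsetof(struct efx_##source_name, field)),		\
	.get_stat = get_stat_function,					\
}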
@ -52,24 +47,11 @@ static u64 efx_get_uint_stat(void *field)
return *(unsigned int *)field;
}
static u64 efx_get_u64_stat(void *field)
{
return *(u64 *) field;
}
static u64 efx_get_atomic_stat(void *field)
{
return atomic_read((atomic_t *) field);
}
#define EFX_ETHTOOL_U64_MAC_STAT(field) \
EFX_ETHTOOL_STAT(field, mac_stats, field, \
u64, efx_get_u64_stat)
#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
EFX_ETHTOOL_STAT(name, nic, n_##name, \
unsigned int, efx_get_uint_stat)
#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
EFX_ETHTOOL_STAT(field, nic, field, \
atomic_t, efx_get_atomic_stat)
@ -82,72 +64,12 @@ static u64 efx_get_atomic_stat(void *field)
EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
unsigned int, efx_get_uint_stat)
static const struct efx_ethtool_stat efx_ethtool_stats[] = {
EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
EFX_ETHTOOL_U64_MAC_STAT(tx_packets),
EFX_ETHTOOL_U64_MAC_STAT(tx_bad),
EFX_ETHTOOL_U64_MAC_STAT(tx_pause),
EFX_ETHTOOL_U64_MAC_STAT(tx_control),
EFX_ETHTOOL_U64_MAC_STAT(tx_unicast),
EFX_ETHTOOL_U64_MAC_STAT(tx_multicast),
EFX_ETHTOOL_U64_MAC_STAT(tx_broadcast),
EFX_ETHTOOL_U64_MAC_STAT(tx_lt64),
EFX_ETHTOOL_U64_MAC_STAT(tx_64),
EFX_ETHTOOL_U64_MAC_STAT(tx_65_to_127),
EFX_ETHTOOL_U64_MAC_STAT(tx_128_to_255),
EFX_ETHTOOL_U64_MAC_STAT(tx_256_to_511),
EFX_ETHTOOL_U64_MAC_STAT(tx_512_to_1023),
EFX_ETHTOOL_U64_MAC_STAT(tx_1024_to_15xx),
EFX_ETHTOOL_U64_MAC_STAT(tx_15xx_to_jumbo),
EFX_ETHTOOL_U64_MAC_STAT(tx_gtjumbo),
EFX_ETHTOOL_U64_MAC_STAT(tx_collision),
EFX_ETHTOOL_U64_MAC_STAT(tx_single_collision),
EFX_ETHTOOL_U64_MAC_STAT(tx_multiple_collision),
EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_collision),
EFX_ETHTOOL_U64_MAC_STAT(tx_deferred),
EFX_ETHTOOL_U64_MAC_STAT(tx_late_collision),
EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_deferred),
EFX_ETHTOOL_U64_MAC_STAT(tx_non_tcpudp),
EFX_ETHTOOL_U64_MAC_STAT(tx_mac_src_error),
EFX_ETHTOOL_U64_MAC_STAT(tx_ip_src_error),
static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
EFX_ETHTOOL_U64_MAC_STAT(rx_packets),
EFX_ETHTOOL_U64_MAC_STAT(rx_good),
EFX_ETHTOOL_U64_MAC_STAT(rx_bad),
EFX_ETHTOOL_U64_MAC_STAT(rx_pause),
EFX_ETHTOOL_U64_MAC_STAT(rx_control),
EFX_ETHTOOL_U64_MAC_STAT(rx_unicast),
EFX_ETHTOOL_U64_MAC_STAT(rx_multicast),
EFX_ETHTOOL_U64_MAC_STAT(rx_broadcast),
EFX_ETHTOOL_U64_MAC_STAT(rx_lt64),
EFX_ETHTOOL_U64_MAC_STAT(rx_64),
EFX_ETHTOOL_U64_MAC_STAT(rx_65_to_127),
EFX_ETHTOOL_U64_MAC_STAT(rx_128_to_255),
EFX_ETHTOOL_U64_MAC_STAT(rx_256_to_511),
EFX_ETHTOOL_U64_MAC_STAT(rx_512_to_1023),
EFX_ETHTOOL_U64_MAC_STAT(rx_1024_to_15xx),
EFX_ETHTOOL_U64_MAC_STAT(rx_15xx_to_jumbo),
EFX_ETHTOOL_U64_MAC_STAT(rx_gtjumbo),
EFX_ETHTOOL_U64_MAC_STAT(rx_bad_lt64),
EFX_ETHTOOL_U64_MAC_STAT(rx_bad_64_to_15xx),
EFX_ETHTOOL_U64_MAC_STAT(rx_bad_15xx_to_jumbo),
EFX_ETHTOOL_U64_MAC_STAT(rx_bad_gtjumbo),
EFX_ETHTOOL_U64_MAC_STAT(rx_overflow),
EFX_ETHTOOL_U64_MAC_STAT(rx_missed),
EFX_ETHTOOL_U64_MAC_STAT(rx_false_carrier),
EFX_ETHTOOL_U64_MAC_STAT(rx_symbol_error),
EFX_ETHTOOL_U64_MAC_STAT(rx_align_error),
EFX_ETHTOOL_U64_MAC_STAT(rx_length_error),
EFX_ETHTOOL_U64_MAC_STAT(rx_internal_error),
EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
@ -157,8 +79,7 @@ static const struct efx_ethtool_stat efx_ethtool_stats[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc),
};
/* Number of ethtool statistics */
#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
@ -205,8 +126,6 @@ static int efx_ethtool_get_settings(struct net_device *net_dev,
efx->phy_op->get_settings(efx, ecmd);
mutex_unlock(&efx->mac_lock);
/* GMAC does not support 1000Mbps HD */
ecmd->supported &= ~SUPPORTED_1000baseT_Half;
/* Both MACs support pause frames (bidirectional and respond-only) */
ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
@ -291,12 +210,11 @@ static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
*
* Fill in an individual self-test entry.
*/
static void efx_fill_test(unsigned int test_index,
struct ethtool_string *strings, u64 *data,
static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
int *test, const char *unit_format, int unit_id,
const char *test_format, const char *test_id)
{
struct ethtool_string unit_str, test_str;
char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
/* Fill data value, if applicable */
if (data)
@ -305,15 +223,14 @@ static void efx_fill_test(unsigned int test_index,
/* Fill string, if applicable */
if (strings) {
if (strchr(unit_format, '%'))
snprintf(unit_str.name, sizeof(unit_str.name),
snprintf(unit_str, sizeof(unit_str),
unit_format, unit_id);
else
strcpy(unit_str.name, unit_format);
snprintf(test_str.name, sizeof(test_str.name),
test_format, test_id);
snprintf(strings[test_index].name,
sizeof(strings[test_index].name),
"%-6s %-24s", unit_str.name, test_str.name);
strcpy(unit_str, unit_format);
snprintf(test_str, sizeof(test_str), test_format, test_id);
snprintf(strings + test_index * ETH_GSTRING_LEN,
ETH_GSTRING_LEN,
"%-6s %-24s", unit_str, test_str);
}
}
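
Dropping struct ethtool_string matches how the ethtool core hands the buffer over: a flat u8 array in which entry i occupies the ETH_GSTRING_LEN bytes starting at i * ETH_GSTRING_LEN, NUL-padded to the slot size. A minimal sketch of filling such a table (the helper name is hypothetical; the stat names come from the EFX_ETHTOOL_UINT_TXQ_STAT entries above):

static void efx_fill_example_names(u8 *strings)
{
	/* entry i is the ETH_GSTRING_LEN-byte slot at
	 * strings + i * ETH_GSTRING_LEN; unused tail bytes stay NUL */
	strlcpy(strings + 0 * ETH_GSTRING_LEN, "tx_merge_events",
		ETH_GSTRING_LEN);
	strlcpy(strings + 1 * ETH_GSTRING_LEN, "tx_tso_bursts",
		ETH_GSTRING_LEN);
}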
@ -336,7 +253,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
struct efx_loopback_self_tests *lb_tests,
enum efx_loopback_mode mode,
unsigned int test_index,
struct ethtool_string *strings, u64 *data)
u8 *strings, u64 *data)
{
struct efx_channel *channel =
efx_get_channel(efx, efx->tx_channel_offset);
@ -373,8 +290,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
*/
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
struct efx_self_tests *tests,
struct ethtool_string *strings,
u64 *data)
u8 *strings, u64 *data)
{
struct efx_channel *channel;
unsigned int n = 0, i;
@ -433,12 +349,14 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
static int efx_ethtool_get_sset_count(struct net_device *net_dev,
int string_set)
{
struct efx_nic *efx = netdev_priv(net_dev);
switch (string_set) {
case ETH_SS_STATS:
return EFX_ETHTOOL_NUM_STATS;
return efx->type->describe_stats(efx, NULL) +
EFX_ETHTOOL_SW_STAT_COUNT;
case ETH_SS_TEST:
return efx_ethtool_fill_self_tests(netdev_priv(net_dev),
NULL, NULL, NULL);
return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
default:
return -EINVAL;
}
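
This relies on a convention: describe_stats called with a NULL names buffer copies nothing and just returns the count, so one method backs both get_sset_count here and get_strings below. efx_nic_describe_stats itself is not part of this diff; a sketch of a describe helper honouring that convention:

size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc,
			      size_t count, const unsigned long *mask,
			      u8 *names)
{
	size_t visible = 0;
	size_t index;

	for_each_set_bit(index, mask, count) {
		if (desc[index].name) {
			if (names) {		/* NULL means "count only" */
				strlcpy(names, desc[index].name,
					ETH_GSTRING_LEN);
				names += ETH_GSTRING_LEN;
			}
			++visible;
		}
	}
	return visible;
}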
@ -448,20 +366,18 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
u32 string_set, u8 *strings)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct ethtool_string *ethtool_strings =
(struct ethtool_string *)strings;
int i;
switch (string_set) {
case ETH_SS_STATS:
for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
strlcpy(ethtool_strings[i].name,
efx_ethtool_stats[i].name,
sizeof(ethtool_strings[i].name));
strings += (efx->type->describe_stats(efx, strings) *
ETH_GSTRING_LEN);
for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
strlcpy(strings + i * ETH_GSTRING_LEN,
efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
break;
case ETH_SS_TEST:
efx_ethtool_fill_self_tests(efx, NULL,
ethtool_strings, NULL);
efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
break;
default:
/* No other string sets */
@ -474,27 +390,20 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
u64 *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_mac_stats *mac_stats = &efx->mac_stats;
const struct efx_ethtool_stat *stat;
const struct efx_sw_stat_desc *stat;
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
int i;
EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
spin_lock_bh(&efx->stats_lock);
/* Update MAC and NIC statistics */
efx->type->update_stats(efx);
/* Get NIC statistics */
data += efx->type->update_stats(efx, data, NULL);
/* Fill detailed statistics buffer */
for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
stat = &efx_ethtool_stats[i];
/* Get software statistics */
for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
stat = &efx_sw_stat_desc[i];
switch (stat->source) {
case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
data[i] = stat->get_stat((void *)mac_stats +
stat->offset);
break;
case EFX_ETHTOOL_STAT_SOURCE_nic:
data[i] = stat->get_stat((void *)efx + stat->offset);
break;


@ -19,7 +19,6 @@
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "spi.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
@ -32,7 +31,7 @@
/**************************************************************************
*
* MAC stats DMA format
* NIC stats
*
**************************************************************************
*/
@ -132,38 +131,116 @@
#define XgDmaDone_offset 0xD4
#define XgDmaDone_WIDTH 32
#define FALCON_STATS_NOT_DONE 0x00000000
#define FALCON_STATS_DONE 0xffffffff
#define FALCON_XMAC_STATS_DMA_FLAG(efx) \
(*(u32 *)((efx)->stats_buffer.addr + XgDmaDone_offset))
#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
#define FALCON_DMA_STAT(ext_name, hw_name) \
[FALCON_STAT_ ## ext_name] = \
{ #ext_name, \
/* 48-bit stats are zero-padded to 64 on DMA */ \
hw_name ## _ ## WIDTH == 48 ? 64 : hw_name ## _ ## WIDTH, \
hw_name ## _ ## offset }
#define FALCON_OTHER_STAT(ext_name) \
[FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 }
/* Retrieve statistic from statistics block */
#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \
if (FALCON_STAT_WIDTH(falcon_stat) == 16) \
(efx)->mac_stats.efx_stat += le16_to_cpu( \
*((__force __le16 *) \
(efx->stats_buffer.addr + \
FALCON_STAT_OFFSET(falcon_stat)))); \
else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \
(efx)->mac_stats.efx_stat += le32_to_cpu( \
*((__force __le32 *) \
(efx->stats_buffer.addr + \
FALCON_STAT_OFFSET(falcon_stat)))); \
else \
(efx)->mac_stats.efx_stat += le64_to_cpu( \
*((__force __le64 *) \
(efx->stats_buffer.addr + \
FALCON_STAT_OFFSET(falcon_stat)))); \
} while (0)
static const struct efx_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
FALCON_DMA_STAT(tx_bytes, XgTxOctets),
FALCON_DMA_STAT(tx_packets, XgTxPkts),
FALCON_DMA_STAT(tx_pause, XgTxPausePkts),
FALCON_DMA_STAT(tx_control, XgTxControlPkts),
FALCON_DMA_STAT(tx_unicast, XgTxUnicastPkts),
FALCON_DMA_STAT(tx_multicast, XgTxMulticastPkts),
FALCON_DMA_STAT(tx_broadcast, XgTxBroadcastPkts),
FALCON_DMA_STAT(tx_lt64, XgTxUndersizePkts),
FALCON_DMA_STAT(tx_64, XgTxPkts64Octets),
FALCON_DMA_STAT(tx_65_to_127, XgTxPkts65to127Octets),
FALCON_DMA_STAT(tx_128_to_255, XgTxPkts128to255Octets),
FALCON_DMA_STAT(tx_256_to_511, XgTxPkts256to511Octets),
FALCON_DMA_STAT(tx_512_to_1023, XgTxPkts512to1023Octets),
FALCON_DMA_STAT(tx_1024_to_15xx, XgTxPkts1024to15xxOctets),
FALCON_DMA_STAT(tx_15xx_to_jumbo, XgTxPkts1519toMaxOctets),
FALCON_DMA_STAT(tx_gtjumbo, XgTxOversizePkts),
FALCON_DMA_STAT(tx_non_tcpudp, XgTxNonTcpUdpPkt),
FALCON_DMA_STAT(tx_mac_src_error, XgTxMacSrcErrPkt),
FALCON_DMA_STAT(tx_ip_src_error, XgTxIpSrcErrPkt),
FALCON_DMA_STAT(rx_bytes, XgRxOctets),
FALCON_DMA_STAT(rx_good_bytes, XgRxOctetsOK),
FALCON_OTHER_STAT(rx_bad_bytes),
FALCON_DMA_STAT(rx_packets, XgRxPkts),
FALCON_DMA_STAT(rx_good, XgRxPktsOK),
FALCON_DMA_STAT(rx_bad, XgRxFCSerrorPkts),
FALCON_DMA_STAT(rx_pause, XgRxPausePkts),
FALCON_DMA_STAT(rx_control, XgRxControlPkts),
FALCON_DMA_STAT(rx_unicast, XgRxUnicastPkts),
FALCON_DMA_STAT(rx_multicast, XgRxMulticastPkts),
FALCON_DMA_STAT(rx_broadcast, XgRxBroadcastPkts),
FALCON_DMA_STAT(rx_lt64, XgRxUndersizePkts),
FALCON_DMA_STAT(rx_64, XgRxPkts64Octets),
FALCON_DMA_STAT(rx_65_to_127, XgRxPkts65to127Octets),
FALCON_DMA_STAT(rx_128_to_255, XgRxPkts128to255Octets),
FALCON_DMA_STAT(rx_256_to_511, XgRxPkts256to511Octets),
FALCON_DMA_STAT(rx_512_to_1023, XgRxPkts512to1023Octets),
FALCON_DMA_STAT(rx_1024_to_15xx, XgRxPkts1024to15xxOctets),
FALCON_DMA_STAT(rx_15xx_to_jumbo, XgRxPkts15xxtoMaxOctets),
FALCON_DMA_STAT(rx_gtjumbo, XgRxOversizePkts),
FALCON_DMA_STAT(rx_bad_lt64, XgRxUndersizeFCSerrorPkts),
FALCON_DMA_STAT(rx_bad_gtjumbo, XgRxJabberPkts),
FALCON_DMA_STAT(rx_overflow, XgRxDropEvents),
FALCON_DMA_STAT(rx_symbol_error, XgRxSymbolError),
FALCON_DMA_STAT(rx_align_error, XgRxAlignError),
FALCON_DMA_STAT(rx_length_error, XgRxLengthError),
FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError),
FALCON_OTHER_STAT(rx_nodesc_drop_cnt),
};
static const unsigned long falcon_stat_mask[] = {
[0 ... BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL,
};
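
This table is consumed by the generic efx_nic_update_stats(), used in falcon_stats_complete() below but not itself part of this diff. A sketch of what such a reader plausibly does: walk the mask, pull each little-endian counter from the DMA buffer at its recorded offset and width, and skip FALCON_OTHER_STAT entries, whose zero width marks them as derived in software.

void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
			  const unsigned long *mask, u64 *stats,
			  const void *dma_buf, bool accumulate)
{
	size_t index;

	for_each_set_bit(index, mask, count) {
		if (desc[index].dma_width) {
			const void *addr = dma_buf + desc[index].offset;
			u64 val;

			switch (desc[index].dma_width) {
			case 16:
				val = le16_to_cpup((__force const __le16 *)addr);
				break;
			case 32:
				val = le32_to_cpup((__force const __le32 *)addr);
				break;
			default:
				val = le64_to_cpup((__force const __le64 *)addr);
				break;
			}
			if (accumulate)
				stats[index] += val;
			else
				stats[index] = val;
		}
	}
}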
/**************************************************************************
*
* Non-volatile configuration
* Basic SPI command set and bit definitions
*
*************************************************************************/
#define SPI_WRSR 0x01 /* Write status register */
#define SPI_WRITE 0x02 /* Write data to memory array */
#define SPI_READ 0x03 /* Read data from memory array */
#define SPI_WRDI 0x04 /* Reset write enable latch */
#define SPI_RDSR 0x05 /* Read status register */
#define SPI_WREN 0x06 /* Set write enable latch */
#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */
#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
/**************************************************************************
*
* Non-volatile memory layout
*
**************************************************************************
*/
/* SFC4000 flash is partitioned into:
* 0-0x400 chip and board config (see struct falcon_nvconfig)
* 0x400-0x8000 unused (or may contain VPD if EEPROM not present)
* 0x8000-end boot code (mapped to PCI expansion ROM)
* SFC4000 small EEPROM (size < 0x400) is used for VPD only.
* SFC4000 large EEPROM (size >= 0x400) is partitioned into:
* 0-0x400 chip and board config
* configurable VPD
* 0x800-0x1800 boot config
* Aside from the chip and board config, all of these are optional and may
* be absent or truncated depending on the devices used.
*/
#define FALCON_NVCONFIG_END 0x400U
#define FALCON_FLASH_BOOTCODE_START 0x8000U
#define FALCON_EEPROM_BOOTCONFIG_START 0x800U
#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U
/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
struct falcon_nvconfig_board_v2 {
__le16 nports;
@ -434,9 +511,10 @@ static int falcon_spi_wait(struct efx_nic *efx)
}
}
int falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
unsigned int command, int address,
const void *in, void *out, size_t len)
static int
falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
unsigned int command, int address,
const void *in, void *out, size_t len)
{
bool addressed = (address >= 0);
bool reading = (out != NULL);
@ -490,13 +568,6 @@ int falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
return 0;
}
static size_t
falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
{
return min(FALCON_SPI_MAX_LEN,
(spi->block_size - (start & (spi->block_size - 1))));
}
static inline u8
falcon_spi_munge_command(const struct falcon_spi_device *spi,
const u8 command, const unsigned int address)
@ -504,34 +575,9 @@ falcon_spi_munge_command(const struct falcon_spi_device *spi,
return command | (((address >> 8) & spi->munge_address) << 3);
}
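
The munge exists for small EEPROMs that need nine address bits but transfer only one address byte: when spi->munge_address is 1, address bit 8 is folded into bit 3 of the opcode. A worked example (the concrete address is hypothetical):

/* SPI_READ (0x03) at address 0x1A5 with munge_address == 1:
 *
 *   (0x1A5 >> 8) & 1 = 1          address bit 8
 *   1 << 3           = 0x08
 *   0x03 | 0x08      = 0x0B       opcode actually sent
 *
 * With munge_address == 0 the opcode is sent unchanged.
 */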
/* Wait up to 10 ms for buffered write completion */
int
falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi)
{
unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
u8 status;
int rc;
for (;;) {
rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
&status, sizeof(status));
if (rc)
return rc;
if (!(status & SPI_STATUS_NRDY))
return 0;
if (time_after_eq(jiffies, timeout)) {
netif_err(efx, hw, efx->net_dev,
"SPI write timeout on device %d"
" last status=0x%02x\n",
spi->device_id, status);
return -ETIMEDOUT;
}
schedule_timeout_uninterruptible(1);
}
}
int falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi,
loff_t start, size_t len, size_t *retlen, u8 *buffer)
static int
falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi,
loff_t start, size_t len, size_t *retlen, u8 *buffer)
{
size_t block_len, pos = 0;
unsigned int command;
@ -560,7 +606,51 @@ int falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi,
return rc;
}
int
#ifdef CONFIG_SFC_MTD
struct falcon_mtd_partition {
struct efx_mtd_partition common;
const struct falcon_spi_device *spi;
size_t offset;
};
#define to_falcon_mtd_partition(mtd) \
container_of(mtd, struct falcon_mtd_partition, common.mtd)
static size_t
falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
{
return min(FALCON_SPI_MAX_LEN,
(spi->block_size - (start & (spi->block_size - 1))));
}
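
This caps each buffered write so it never straddles a device write-block boundary. A worked example (sizes hypothetical):

/* block_size == 256, start == 0x1F0:
 *
 *   start & (block_size - 1)     = 0xF0
 *   block_size - 0xF0            = 0x10   (16 bytes left in this block)
 *   min(FALCON_SPI_MAX_LEN, 16)  = 16
 *
 * so at most 16 bytes are written before the next block starts.
 */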
/* Wait up to 10 ms for buffered write completion */
static int
falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi)
{
unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
u8 status;
int rc;
for (;;) {
rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
&status, sizeof(status));
if (rc)
return rc;
if (!(status & SPI_STATUS_NRDY))
return 0;
if (time_after_eq(jiffies, timeout)) {
netif_err(efx, hw, efx->net_dev,
"SPI write timeout on device %d"
" last status=0x%02x\n",
spi->device_id, status);
return -ETIMEDOUT;
}
schedule_timeout_uninterruptible(1);
}
}
static int
falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi,
loff_t start, size_t len, size_t *retlen, const u8 *buffer)
{
@ -609,6 +699,238 @@ falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi,
return rc;
}
static int
falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible)
{
const struct falcon_spi_device *spi = part->spi;
struct efx_nic *efx = part->common.mtd.priv;
u8 status;
int rc, i;
/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
for (i = 0; i < 40; i++) {
__set_current_state(uninterruptible ?
TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
schedule_timeout(HZ / 10);
rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
&status, sizeof(status));
if (rc)
return rc;
if (!(status & SPI_STATUS_NRDY))
return 0;
if (signal_pending(current))
return -EINTR;
}
pr_err("%s: timed out waiting for %s\n",
part->common.name, part->common.dev_type_name);
return -ETIMEDOUT;
}
static int
falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi)
{
const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
SPI_STATUS_BP0);
u8 status;
int rc;
rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
&status, sizeof(status));
if (rc)
return rc;
if (!(status & unlock_mask))
return 0; /* already unlocked */
rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
if (rc)
return rc;
rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
if (rc)
return rc;
status &= ~unlock_mask;
rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
NULL, sizeof(status));
if (rc)
return rc;
rc = falcon_spi_wait_write(efx, spi);
if (rc)
return rc;
return 0;
}
#define FALCON_SPI_VERIFY_BUF_LEN 16
static int
falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
{
const struct falcon_spi_device *spi = part->spi;
struct efx_nic *efx = part->common.mtd.priv;
unsigned pos, block_len;
u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
int rc;
if (len != spi->erase_size)
return -EINVAL;
if (spi->erase_command == 0)
return -EOPNOTSUPP;
rc = falcon_spi_unlock(efx, spi);
if (rc)
return rc;
rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
if (rc)
return rc;
rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
NULL, 0);
if (rc)
return rc;
rc = falcon_spi_slow_wait(part, false);
/* Verify the entire region has been wiped */
memset(empty, 0xff, sizeof(empty));
for (pos = 0; pos < len; pos += block_len) {
block_len = min(len - pos, sizeof(buffer));
rc = falcon_spi_read(efx, spi, start + pos, block_len,
NULL, buffer);
if (rc)
return rc;
if (memcmp(empty, buffer, block_len))
return -EIO;
/* Avoid locking up the system */
cond_resched();
if (signal_pending(current))
return -EINTR;
}
return rc;
}
static void falcon_mtd_rename(struct efx_mtd_partition *part)
{
struct efx_nic *efx = part->mtd.priv;
snprintf(part->name, sizeof(part->name), "%s %s",
efx->name, part->type_name);
}
static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, u8 *buffer)
{
struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
struct efx_nic *efx = mtd->priv;
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
rc = mutex_lock_interruptible(&nic_data->spi_lock);
if (rc)
return rc;
rc = falcon_spi_read(efx, part->spi, part->offset + start,
len, retlen, buffer);
mutex_unlock(&nic_data->spi_lock);
return rc;
}
static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
struct efx_nic *efx = mtd->priv;
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
rc = mutex_lock_interruptible(&nic_data->spi_lock);
if (rc)
return rc;
rc = falcon_spi_erase(part, part->offset + start, len);
mutex_unlock(&nic_data->spi_lock);
return rc;
}
static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, const u8 *buffer)
{
struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
struct efx_nic *efx = mtd->priv;
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
rc = mutex_lock_interruptible(&nic_data->spi_lock);
if (rc)
return rc;
rc = falcon_spi_write(efx, part->spi, part->offset + start,
len, retlen, buffer);
mutex_unlock(&nic_data->spi_lock);
return rc;
}
static int falcon_mtd_sync(struct mtd_info *mtd)
{
struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
struct efx_nic *efx = mtd->priv;
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
mutex_lock(&nic_data->spi_lock);
rc = falcon_spi_slow_wait(part, true);
mutex_unlock(&nic_data->spi_lock);
return rc;
}
static int falcon_mtd_probe(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
struct falcon_mtd_partition *parts;
struct falcon_spi_device *spi;
size_t n_parts;
int rc = -ENODEV;
ASSERT_RTNL();
/* Allocate space for maximum number of partitions */
parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
if (!parts)
return -ENOMEM;
n_parts = 0;
spi = &nic_data->spi_flash;
if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
parts[n_parts].spi = spi;
parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START;
parts[n_parts].common.dev_type_name = "flash";
parts[n_parts].common.type_name = "sfc_flash_bootrom";
parts[n_parts].common.mtd.type = MTD_NORFLASH;
parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH;
parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
parts[n_parts].common.mtd.erasesize = spi->erase_size;
n_parts++;
}
spi = &nic_data->spi_eeprom;
if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
parts[n_parts].spi = spi;
parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START;
parts[n_parts].common.dev_type_name = "EEPROM";
parts[n_parts].common.type_name = "sfc_bootconfig";
parts[n_parts].common.mtd.type = MTD_RAM;
parts[n_parts].common.mtd.flags = MTD_CAP_RAM;
parts[n_parts].common.mtd.size =
min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
FALCON_EEPROM_BOOTCONFIG_START;
parts[n_parts].common.mtd.erasesize = spi->erase_size;
n_parts++;
}
rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
if (rc)
kfree(parts);
return rc;
}
#endif /* CONFIG_SFC_MTD */
/**************************************************************************
*
* XMAC operations
@ -877,66 +1199,6 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx)
return 0;
}
static void falcon_update_stats_xmac(struct efx_nic *efx)
{
struct efx_mac_stats *mac_stats = &efx->mac_stats;
/* Update MAC stats from DMAed values */
FALCON_STAT(efx, XgRxOctets, rx_bytes);
FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
FALCON_STAT(efx, XgRxPkts, rx_packets);
FALCON_STAT(efx, XgRxPktsOK, rx_good);
FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
FALCON_STAT(efx, XgRxAlignError, rx_align_error);
FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
FALCON_STAT(efx, XgRxControlPkts, rx_control);
FALCON_STAT(efx, XgRxPausePkts, rx_pause);
FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
FALCON_STAT(efx, XgRxLengthError, rx_length_error);
FALCON_STAT(efx, XgTxPkts, tx_packets);
FALCON_STAT(efx, XgTxOctets, tx_bytes);
FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
FALCON_STAT(efx, XgTxControlPkts, tx_control);
FALCON_STAT(efx, XgTxPausePkts, tx_pause);
FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
/* Update derived statistics */
efx_update_diff_stat(&mac_stats->tx_good_bytes,
mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
mac_stats->tx_control * 64);
efx_update_diff_stat(&mac_stats->rx_bad_bytes,
mac_stats->rx_bytes - mac_stats->rx_good_bytes -
mac_stats->rx_control * 64);
}
static void falcon_poll_xmac(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
@ -1116,10 +1378,7 @@ static void falcon_stats_request(struct efx_nic *efx)
WARN_ON(nic_data->stats_pending);
WARN_ON(nic_data->stats_disable_count);
if (nic_data->stats_dma_done == NULL)
return; /* no mac selected */
*nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
FALCON_XMAC_STATS_DMA_FLAG(efx) = 0;
nic_data->stats_pending = true;
wmb(); /* ensure done flag is clear */
@ -1141,9 +1400,11 @@ static void falcon_stats_complete(struct efx_nic *efx)
return;
nic_data->stats_pending = false;
if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
if (FALCON_XMAC_STATS_DMA_FLAG(efx)) {
rmb(); /* read the done flag before the stats */
falcon_update_stats_xmac(efx);
efx_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT,
falcon_stat_mask, nic_data->stats,
efx->stats_buffer.addr, true);
} else {
netif_err(efx, hw, efx->net_dev,
"timed out waiting for statistics\n");
@ -1424,7 +1685,6 @@ static int falcon_probe_port(struct efx_nic *efx)
(u64)efx->stats_buffer.dma_addr,
efx->stats_buffer.addr,
(u64)virt_to_phys(efx->stats_buffer.addr));
nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;
return 0;
}
@ -1633,8 +1893,7 @@ static enum reset_type falcon_map_reset_reason(enum reset_type reason)
{
switch (reason) {
case RESET_TYPE_RX_RECOVERY:
case RESET_TYPE_RX_DESC_FETCH:
case RESET_TYPE_TX_DESC_FETCH:
case RESET_TYPE_DMA_ERROR:
case RESET_TYPE_TX_SKIP:
/* These can occasionally occur due to hardware bugs.
* We try to reset without disrupting the link.
@ -2260,24 +2519,65 @@ static void falcon_remove_nic(struct efx_nic *efx)
efx->nic_data = NULL;
}
static void falcon_update_nic_stats(struct efx_nic *efx)
static size_t falcon_describe_nic_stats(struct efx_nic *efx, u8 *names)
{
return efx_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
falcon_stat_mask, names);
}
static size_t falcon_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats)
{
struct falcon_nic_data *nic_data = efx->nic_data;
u64 *stats = nic_data->stats;
efx_oword_t cnt;
if (nic_data->stats_disable_count)
return;
if (!nic_data->stats_disable_count) {
efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
stats[FALCON_STAT_rx_nodesc_drop_cnt] +=
EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
efx->n_rx_nodesc_drop_cnt +=
EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
if (nic_data->stats_pending &&
FALCON_XMAC_STATS_DMA_FLAG(efx)) {
nic_data->stats_pending = false;
rmb(); /* read the done flag before the stats */
efx_nic_update_stats(
falcon_stat_desc, FALCON_STAT_COUNT,
falcon_stat_mask,
stats, efx->stats_buffer.addr, true);
}
if (nic_data->stats_pending &&
*nic_data->stats_dma_done == FALCON_STATS_DONE) {
nic_data->stats_pending = false;
rmb(); /* read the done flag before the stats */
falcon_update_stats_xmac(efx);
/* Update derived statistic */
efx_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes],
stats[FALCON_STAT_rx_bytes] -
stats[FALCON_STAT_rx_good_bytes] -
stats[FALCON_STAT_rx_control] * 64);
}
if (full_stats)
memcpy(full_stats, stats, sizeof(u64) * FALCON_STAT_COUNT);
if (core_stats) {
core_stats->rx_packets = stats[FALCON_STAT_rx_packets];
core_stats->tx_packets = stats[FALCON_STAT_tx_packets];
core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes];
core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes];
core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt];
core_stats->multicast = stats[FALCON_STAT_rx_multicast];
core_stats->rx_length_errors =
stats[FALCON_STAT_rx_gtjumbo] +
stats[FALCON_STAT_rx_length_error];
core_stats->rx_crc_errors = stats[FALCON_STAT_rx_bad];
core_stats->rx_frame_errors = stats[FALCON_STAT_rx_align_error];
core_stats->rx_fifo_errors = stats[FALCON_STAT_rx_overflow];
core_stats->rx_errors = (core_stats->rx_length_errors +
core_stats->rx_crc_errors +
core_stats->rx_frame_errors +
stats[FALCON_STAT_rx_symbol_error]);
}
return FALCON_STAT_COUNT;
}
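
efx_update_diff_stat() is defined elsewhere in the driver; its role is to keep the derived rx_bad_bytes monotonic even though the three input counters are DMAed at slightly different instants (the * 64 term values each control frame at the minimum frame size). A sketch consistent with that use:

/* Publish the new difference only if it moved forward; a transient
 * inconsistency between input counters must not make the statistic
 * run backwards.
 */
void efx_update_diff_stat(u64 *stat, u64 diff)
{
	if ((s64)(diff - *stat) > 0)
		*stat = diff;
}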
void falcon_start_nic_stats(struct efx_nic *efx)
@ -2306,7 +2606,7 @@ void falcon_stop_nic_stats(struct efx_nic *efx)
/* Wait enough time for the most recent transfer to
* complete. */
for (i = 0; i < 4 && nic_data->stats_pending; i++) {
if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
if (FALCON_XMAC_STATS_DMA_FLAG(efx))
break;
msleep(1);
}
@ -2366,6 +2666,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
.stop_stats = falcon_stop_nic_stats,
@ -2417,6 +2718,15 @@ const struct efx_nic_type falcon_a1_nic_type = {
.filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
.filter_get_rx_ids = efx_farch_filter_get_rx_ids,
#ifdef CONFIG_SFC_MTD
.mtd_probe = falcon_mtd_probe,
.mtd_rename = falcon_mtd_rename,
.mtd_read = falcon_mtd_read,
.mtd_erase = falcon_mtd_erase,
.mtd_write = falcon_mtd_write,
.mtd_sync = falcon_mtd_sync,
#endif
.revision = EFX_REV_FALCON_A1,
.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
@ -2449,6 +2759,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
.stop_stats = falcon_stop_nic_stats,
@ -2500,6 +2811,14 @@ const struct efx_nic_type falcon_b0_nic_type = {
.filter_rfs_insert = efx_farch_filter_rfs_insert,
.filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
.mtd_probe = falcon_mtd_probe,
.mtd_rename = falcon_mtd_rename,
.mtd_read = falcon_mtd_read,
.mtd_erase = falcon_mtd_erase,
.mtd_write = falcon_mtd_write,
.mtd_sync = falcon_mtd_sync,
#endif
.revision = EFX_REV_FALCON_B0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
@ -2508,7 +2827,8 @@ const struct efx_nic_type falcon_b0_nic_type = {
.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
.rx_buffer_hash_size = 0x10,
.rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
.rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
.rx_buffer_padding = 0,
.can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,


@ -832,7 +832,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
efx_farch_notify_tx_desc(tx_queue);
netif_tx_unlock(efx->net_dev);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
} else {
netif_err(efx, tx_err, efx->net_dev,
"channel %d unexpected TX event "
@ -1217,7 +1217,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
"RX DMA Q %d reports descriptor fetch error."
" RX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
} else
efx_sriov_desc_fetch_err(efx, ev_sub_data);
break;
@ -1227,7 +1227,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
"TX DMA Q %d reports descriptor fetch error."
" TX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
} else
efx_sriov_desc_fetch_err(efx, ev_sub_data);
break;


@ -2925,4 +2925,8 @@
#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
/* RX packet prefix */
#define FS_BZ_RX_PREFIX_HASH_OFST 12
#define FS_BZ_RX_PREFIX_SIZE 16
#endif /* EFX_FARCH_REGS_H */


@ -204,12 +204,12 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}
/* Page-mapped register block size */
#define EFX_PAGE_BLOCK_SIZE 0x2000
/* Page size used as step between per-VI registers */
#define EFX_VI_PAGE_SIZE 0x2000
/* Calculate offset to page-mapped register block */
/* Calculate offset to page-mapped register */
#define EFX_PAGED_REG(page, reg) \
((page) * EFX_PAGE_BLOCK_SIZE + (reg))
((page) * EFX_VI_PAGE_SIZE + (reg))
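
The rename makes the arithmetic self-describing: each virtual interface owns one EFX_VI_PAGE_SIZE page of the BAR. For instance (the register offset is hypothetical):

/* EFX_PAGED_REG(3, 0x830) = 3 * 0x2000 + 0x830 = 0x6830,
 * i.e. register 0x830 within the page belonging to VI 3.
 */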
/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,


@ -26,9 +26,10 @@
/* A reboot/assertion causes the MCDI status word to be set after the
* command word is set or a REBOOT event is sent. If we notice a reboot
* via these mechanisms then wait 10ms for the status word to be set. */
* via these mechanisms then wait 20ms for the status word to be set.
*/
#define MCDI_STATUS_DELAY_US 100
#define MCDI_STATUS_DELAY_COUNT 100
#define MCDI_STATUS_DELAY_COUNT 200
#define MCDI_STATUS_SLEEP_MS \
(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
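
The arithmetic now matches the comment: MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT = 100 us * 200 = 20,000 us, so the driver polls for up to 20 ms and MCDI_STATUS_SLEEP_MS evaluates to 20.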
@ -56,6 +57,7 @@ int efx_mcdi_init(struct efx_nic *efx)
mcdi->mode = MCDI_MODE_POLL;
(void) efx_mcdi_poll_reboot(efx);
mcdi->new_epoch = true;
/* Recover from a failed assertion before probing */
return efx_mcdi_handle_assertion(efx);
@ -85,24 +87,26 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
if (efx->type->mcdi_max_ver == 1) {
/* MCDI v1 */
EFX_POPULATE_DWORD_6(hdr[0],
EFX_POPULATE_DWORD_7(hdr[0],
MCDI_HEADER_RESPONSE, 0,
MCDI_HEADER_RESYNC, 1,
MCDI_HEADER_CODE, cmd,
MCDI_HEADER_DATALEN, inlen,
MCDI_HEADER_SEQ, seqno,
MCDI_HEADER_XFLAGS, xflags);
MCDI_HEADER_XFLAGS, xflags,
MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
hdr_len = 4;
} else {
/* MCDI v2 */
BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
EFX_POPULATE_DWORD_6(hdr[0],
EFX_POPULATE_DWORD_7(hdr[0],
MCDI_HEADER_RESPONSE, 0,
MCDI_HEADER_RESYNC, 1,
MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
MCDI_HEADER_DATALEN, 0,
MCDI_HEADER_SEQ, seqno,
MCDI_HEADER_XFLAGS, xflags);
MCDI_HEADER_XFLAGS, xflags,
MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
EFX_POPULATE_DWORD_2(hdr[1],
MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
@ -236,21 +240,10 @@ static int efx_mcdi_poll(struct efx_nic *efx)
*/
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
int rc;
if (!efx->mcdi)
return 0;
rc = efx->type->mcdi_poll_reboot(efx);
if (!rc)
return 0;
/* MAC statistics have been cleared on the NIC; clear our copy
* so that efx_update_diff_stat() can continue to work.
*/
memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
return rc;
return efx->type->mcdi_poll_reboot(efx);
}
static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
@ -384,6 +377,7 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
spin_unlock_bh(&mcdi->iface_lock);
efx_mcdi_copyin(efx, cmd, inbuf, inlen);
mcdi->new_epoch = false;
return 0;
}
@ -446,6 +440,7 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
if (rc == -EIO || rc == -EINTR) {
msleep(MCDI_STATUS_SLEEP_MS);
efx_mcdi_poll_reboot(efx);
mcdi->new_epoch = true;
}
}
@ -541,6 +536,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
break;
udelay(MCDI_STATUS_DELAY_US);
}
mcdi->new_epoch = true;
}
spin_unlock(&mcdi->iface_lock);
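
Taken together, the new_epoch changes in this file implement a small handshake with the firmware:

/* new_epoch lifecycle as wired up in this patch:
 *
 *   efx_mcdi_init()                -> new_epoch = true
 *   efx_mcdi_rpc_start()           -> header NOT_EPOCH = !new_epoch,
 *                                     then new_epoch = false
 *   reboot seen (poll or MC event) -> new_epoch = true
 *
 * The first command after start of day or after an MC reboot is thus
 * marked as opening a new epoch, letting the MC discard stale state.
 */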
@ -598,6 +594,14 @@ void efx_mcdi_process_event(struct efx_channel *channel,
efx_ptp_event(efx, event);
break;
case MCDI_EVENT_CODE_TX_ERR:
case MCDI_EVENT_CODE_RX_ERR:
netif_err(efx, hw, efx->net_dev,
"%s DMA error (event: "EFX_QWORD_FMT")\n",
code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
EFX_QWORD_VAL(*event));
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
break;
default:
netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
code);
@ -811,125 +815,6 @@ fail:
return rc;
}
int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
NULL, 0, NULL);
if (rc)
goto fail;
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
loff_t offset, u8 *buffer, size_t length)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
MCDI_DECLARE_BUF(outbuf,
MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
loff_t offset, const u8 *buffer, size_t length)
{
MCDI_DECLARE_BUF(inbuf,
MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
NULL, 0, NULL);
if (rc)
goto fail;
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
loff_t offset, size_t length)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
NULL, 0, NULL);
if (rc)
goto fail;
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
NULL, 0, NULL);
if (rc)
goto fail;
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
@ -1272,3 +1157,236 @@ fail:
return rc;
}
#ifdef CONFIG_SFC_MTD
#define EFX_MCDI_NVRAM_LEN_MAX 128
static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
NULL, 0, NULL);
if (rc)
goto fail;
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
loff_t offset, u8 *buffer, size_t length)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
MCDI_DECLARE_BUF(outbuf,
MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
loff_t offset, const u8 *buffer, size_t length)
{
MCDI_DECLARE_BUF(inbuf,
MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
NULL, 0, NULL);
if (rc)
goto fail;
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
loff_t offset, size_t length)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
NULL, 0, NULL);
if (rc)
goto fail;
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
NULL, 0, NULL);
if (rc)
goto fail;
return 0;
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, u8 *buffer)
{
struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
struct efx_nic *efx = mtd->priv;
loff_t offset = start;
loff_t end = min_t(loff_t, start + len, mtd->size);
size_t chunk;
int rc = 0;
while (offset < end) {
chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
buffer, chunk);
if (rc)
goto out;
offset += chunk;
buffer += chunk;
}
out:
*retlen = offset - start;
return rc;
}
int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
struct efx_nic *efx = mtd->priv;
loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
loff_t end = min_t(loff_t, start + len, mtd->size);
size_t chunk = part->common.mtd.erasesize;
int rc = 0;
if (!part->updating) {
rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
if (rc)
goto out;
part->updating = true;
}
/* The MCDI interface can in fact do multiple erase blocks at once;
* but erasing may be slow, so we make multiple calls here to avoid
* tripping the MCDI RPC timeout. */
while (offset < end) {
rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
chunk);
if (rc)
goto out;
offset += chunk;
}
out:
return rc;
}
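
All three data-moving MTD hooks chunk the same way: reads and writes at EFX_MCDI_NVRAM_LEN_MAX (128 bytes), erases at the erase-block size. A worked example (sizes hypothetical):

/* Reading 300 bytes issues three MC_CMD_NVRAM_READ RPCs of 128, 128
 * and 44 bytes; erasing 8 KB with a 4 KB erasesize issues two
 * MC_CMD_NVRAM_ERASE RPCs.  Each call stays short enough not to trip
 * the MCDI RPC timeout.
 */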
int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, const u8 *buffer)
{
struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
struct efx_nic *efx = mtd->priv;
loff_t offset = start;
loff_t end = min_t(loff_t, start + len, mtd->size);
size_t chunk;
int rc = 0;
if (!part->updating) {
rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
if (rc)
goto out;
part->updating = true;
}
while (offset < end) {
chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
buffer, chunk);
if (rc)
goto out;
offset += chunk;
buffer += chunk;
}
out:
*retlen = offset - start;
return rc;
}
int efx_mcdi_mtd_sync(struct mtd_info *mtd)
{
struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
struct efx_nic *efx = mtd->priv;
int rc = 0;
if (part->updating) {
part->updating = false;
rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
}
return rc;
}
void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
{
struct efx_mcdi_mtd_partition *mcdi_part =
container_of(part, struct efx_mcdi_mtd_partition, common);
struct efx_nic *efx = part->mtd.priv;
snprintf(part->name, sizeof(part->name), "%s %s:%02x",
efx->name, part->type_name, mcdi_part->fw_subtype);
}
#endif /* CONFIG_SFC_MTD */


@ -36,6 +36,7 @@ enum efx_mcdi_mode {
* @state: Request handling state. Waited for by @wq.
* @mode: Poll for mcdi completion, or wait for an mcdi_event.
* @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING
* @new_epoch: Indicates start of day or start of MC reboot recovery
* @iface_lock: Serialises access to all the following fields
* @seqno: The next sequence number to use for mcdi requests.
* @credits: Number of spurious MCDI completion events allowed before we
@ -49,6 +50,7 @@ struct efx_mcdi_iface {
enum efx_mcdi_mode mode;
wait_queue_head_t wq;
spinlock_t iface_lock;
bool new_epoch;
unsigned int credits;
unsigned int seqno;
int resprc;
@ -65,6 +67,16 @@ struct efx_mcdi_mon {
unsigned int n_attrs;
};
struct efx_mcdi_mtd_partition {
struct efx_mtd_partition common;
bool updating;
u8 nvram_type;
u16 fw_subtype;
};
#define to_efx_mcdi_mtd_partition(mtd) \
container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd)
/**
* struct efx_mcdi_data - extra state for NICs that implement MCDI
* @iface: Interface/protocol state
@ -250,18 +262,6 @@ extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
bool *protected_out);
extern int efx_mcdi_nvram_update_start(struct efx_nic *efx,
unsigned int type);
extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
loff_t offset, u8 *buffer, size_t length);
extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
loff_t offset, const u8 *buffer,
size_t length);
#define EFX_MCDI_NVRAM_LEN_MAX 128
extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
loff_t offset, size_t length);
extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
unsigned int type);
extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
@ -291,4 +291,14 @@ static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
#endif
#ifdef CONFIG_SFC_MTD
extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, u8 *buffer);
extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, const u8 *buffer);
extern int efx_mcdi_mtd_sync(struct mtd_info *mtd);
extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
#endif
#endif /* EFX_MCDI_H */


@ -28,7 +28,7 @@ static const struct {
const char *label;
enum efx_hwmon_type hwmon_type;
int port;
} efx_mcdi_sensor_type[MC_CMD_SENSOR_ENTRY_MAXNUM] = {
} efx_mcdi_sensor_type[] = {
#define SENSOR(name, label, hwmon_type, port) \
[MC_CMD_SENSOR_##name] = { label, hwmon_type, port }
SENSOR(CONTROLLER_TEMP, "Controller temp.", EFX_HWMON_TEMP, -1),
@ -54,6 +54,7 @@ static const char *const sensor_status_names[] = {
[MC_CMD_SENSOR_STATE_WARNING] = "Warning",
[MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
[MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
[MC_CMD_SENSOR_STATE_NO_READING] = "No reading",
};
void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
@ -85,6 +86,7 @@ struct efx_mcdi_mon_attribute {
struct device_attribute dev_attr;
unsigned int index;
unsigned int type;
enum efx_hwmon_type hwmon_type;
unsigned int limit_value;
char name[12];
};
@ -92,11 +94,12 @@ struct efx_mcdi_mon_attribute {
static int efx_mcdi_mon_update(struct efx_nic *efx)
{
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_IN_LEN);
MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);
int rc;
MCDI_SET_QWORD(inbuf, READ_SENSORS_IN_DMA_ADDR,
MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR,
hwmon->dma_buf.dma_addr);
MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len);
rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
inbuf, sizeof(inbuf), NULL, 0, NULL);
@ -144,17 +147,21 @@ static ssize_t efx_mcdi_mon_show_value(struct device *dev,
struct efx_mcdi_mon_attribute *mon_attr =
container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
efx_dword_t entry;
unsigned int value;
unsigned int value, state;
int rc;
rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
if (rc)
return rc;
state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
if (state == MC_CMD_SENSOR_STATE_NO_READING)
return -EBUSY;
value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
/* Convert temperature from degrees to milli-degrees Celsius */
if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP)
if (mon_attr->hwmon_type == EFX_HWMON_TEMP)
value *= 1000;
return sprintf(buf, "%u\n", value);
@ -171,7 +178,7 @@ static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
value = mon_attr->limit_value;
/* Convert temperature from degrees to milli-degrees Celsius */
if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP)
if (mon_attr->hwmon_type == EFX_HWMON_TEMP)
value *= 1000;
return sprintf(buf, "%u\n", value);
@ -219,6 +226,10 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
strlcpy(attr->name, name, sizeof(attr->name));
attr->index = index;
attr->type = type;
if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
else
attr->hwmon_type = EFX_HWMON_UNKNOWN;
attr->limit_value = limit_value;
sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.name = attr->name;
@ -233,35 +244,42 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
int efx_mcdi_mon_probe(struct efx_nic *efx)
{
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
unsigned int n_attrs, n_temp = 0, n_cool = 0, n_in = 0;
unsigned int n_temp = 0, n_cool = 0, n_in = 0;
MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
unsigned int n_pages, n_sensors, n_attrs, page;
size_t outlen;
char name[12];
u32 mask;
int rc, i, type;
int rc, i, j, type;
BUILD_BUG_ON(MC_CMD_SENSOR_INFO_IN_LEN != 0);
/* Find out how many sensors are present */
n_sensors = 0;
page = 0;
do {
MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page);
rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
return -EIO;
rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
return -EIO;
/* Find out which sensors are present. Don't create a device
* if there are none.
*/
mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
if (mask == 0)
mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
++page;
} while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT));
n_pages = page;
/* Don't create a device if there are none */
if (n_sensors == 0)
return 0;
/* Check again for short response */
if (outlen < MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask)))
return -EIO;
rc = efx_nic_alloc_buffer(efx, &hwmon->dma_buf,
4 * MC_CMD_SENSOR_ENTRY_MAXNUM, GFP_KERNEL);
rc = efx_nic_alloc_buffer(
efx, &hwmon->dma_buf,
n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN,
GFP_KERNEL);
if (rc)
return rc;
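A minimal standalone sketch of the paged mask walk above, assuming the chain flag is bit 31 of each 32-bit page (PAGE_NEXT_BIT stands in for MC_CMD_SENSOR_PAGE0_NEXT, __builtin_popcount() for hweight32(), and count_sensors() is hypothetical):

#include <stdio.h>

#define PAGE_NEXT_BIT (1u << 31)	/* stand-in for MC_CMD_SENSOR_PAGE0_NEXT */

/* Count sensors across chained mask pages; masks[] stands in for
 * successive SENSOR_INFO responses. The chain flag itself is not a
 * sensor, so it is masked out before counting.
 */
static unsigned int count_sensors(const unsigned int *masks)
{
	unsigned int n_sensors = 0, page = 0, mask;

	do {
		mask = masks[page];
		n_sensors += __builtin_popcount(mask & ~PAGE_NEXT_BIT);
		++page;
	} while (mask & PAGE_NEXT_BIT);

	return n_sensors;
}

int main(void)
{
	/* Page 0 chains to page 1: 4 + 3 sensors in total. */
	unsigned int masks[] = { 0x8000000fu, 0x00000007u };

	printf("%u sensors\n", count_sensors(masks));	/* prints "7 sensors" */
	return 0;
}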
@ -272,7 +290,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
* attributes for this set of sensors: name of the driver plus
* value, min, max, crit, alarm and label for each sensor.
*/
n_attrs = 1 + 6 * hweight32(mask);
n_attrs = 1 + 6 * n_sensors;
hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
if (!hwmon->attrs) {
rc = -ENOMEM;
@ -289,26 +307,63 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
if (rc)
goto fail;
for (i = 0, type = -1; ; i++) {
for (i = 0, j = -1, type = -1; ; i++) {
enum efx_hwmon_type hwmon_type;
const char *hwmon_prefix;
unsigned hwmon_index;
u16 min1, max1, min2, max2;
/* Find next sensor type or exit if there is none */
type++;
while (!(mask & (1 << type))) {
do {
type++;
if (type == 32)
return 0;
if ((type % 32) == 0) {
page = type / 32;
j = -1;
if (page == n_pages)
return 0;
MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
page);
rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO,
inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf),
&outlen);
if (rc)
goto fail;
if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) {
rc = -EIO;
goto fail;
}
mask = (MCDI_DWORD(outbuf,
SENSOR_INFO_OUT_MASK) &
~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
/* Check again for short response */
if (outlen <
MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) {
rc = -EIO;
goto fail;
}
}
} while (!(mask & (1 << type % 32)));
j++;
if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
/* Skip sensors specific to a different port */
if (hwmon_type != EFX_HWMON_UNKNOWN &&
efx_mcdi_sensor_type[type].port >= 0 &&
efx_mcdi_sensor_type[type].port !=
efx_port_num(efx))
continue;
} else {
hwmon_type = EFX_HWMON_UNKNOWN;
}
/* Skip sensors specific to a different port */
if (efx_mcdi_sensor_type[type].hwmon_type != EFX_HWMON_UNKNOWN &&
efx_mcdi_sensor_type[type].port >= 0 &&
efx_mcdi_sensor_type[type].port != efx_port_num(efx))
continue;
switch (efx_mcdi_sensor_type[type].hwmon_type) {
switch (hwmon_type) {
case EFX_HWMON_TEMP:
hwmon_prefix = "temp";
hwmon_index = ++n_temp; /* 1-based */
@ -328,13 +383,13 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
}
min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
SENSOR_INFO_ENTRY, i, MIN1);
SENSOR_INFO_ENTRY, j, MIN1);
max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
SENSOR_INFO_ENTRY, i, MAX1);
SENSOR_INFO_ENTRY, j, MAX1);
min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
SENSOR_INFO_ENTRY, i, MIN2);
SENSOR_INFO_ENTRY, j, MIN2);
max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
SENSOR_INFO_ENTRY, i, MAX2);
SENSOR_INFO_ENTRY, j, MAX2);
if (min1 != max1) {
snprintf(name, sizeof(name), "%s%u_input",
@ -381,7 +436,8 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
if (rc)
goto fail;
if (efx_mcdi_sensor_type[type].label) {
if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
efx_mcdi_sensor_type[type].label) {
snprintf(name, sizeof(name), "%s%u_label",
hwmon_prefix, hwmon_index);
rc = efx_mcdi_mon_add_attr(

View File

@ -8,168 +8,17 @@
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include "net_driver.h"
#include "spi.h"
#include "efx.h"
#include "nic.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#define FALCON_SPI_VERIFY_BUF_LEN 16
struct efx_mtd_partition {
struct list_head node;
struct mtd_info mtd;
union {
struct {
bool updating;
u8 nvram_type;
u16 fw_subtype;
} mcdi;
struct {
const struct falcon_spi_device *spi;
size_t offset;
} falcon;
};
const char *dev_type_name;
const char *type_name;
char name[IFNAMSIZ + 20];
};
struct efx_mtd_ops {
int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
size_t *retlen, u8 *buffer);
int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
size_t *retlen, const u8 *buffer);
int (*sync)(struct mtd_info *mtd);
};
#define to_efx_mtd_partition(mtd) \
container_of(mtd, struct efx_mtd_partition, mtd)
static int falcon_mtd_probe(struct efx_nic *efx);
static int siena_mtd_probe(struct efx_nic *efx);
/* SPI utilities */
static int
falcon_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
{
const struct falcon_spi_device *spi = part->falcon.spi;
struct efx_nic *efx = part->mtd.priv;
u8 status;
int rc, i;
/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
for (i = 0; i < 40; i++) {
__set_current_state(uninterruptible ?
TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
schedule_timeout(HZ / 10);
rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
&status, sizeof(status));
if (rc)
return rc;
if (!(status & SPI_STATUS_NRDY))
return 0;
if (signal_pending(current))
return -EINTR;
}
pr_err("%s: timed out waiting for %s\n",
part->name, part->dev_type_name);
return -ETIMEDOUT;
}
static int
falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi)
{
const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
SPI_STATUS_BP0);
u8 status;
int rc;
rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
&status, sizeof(status));
if (rc)
return rc;
if (!(status & unlock_mask))
return 0; /* already unlocked */
rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
if (rc)
return rc;
rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
if (rc)
return rc;
status &= ~unlock_mask;
rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
NULL, sizeof(status));
if (rc)
return rc;
rc = falcon_spi_wait_write(efx, spi);
if (rc)
return rc;
return 0;
}
static int
falcon_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
{
const struct falcon_spi_device *spi = part->falcon.spi;
struct efx_nic *efx = part->mtd.priv;
unsigned pos, block_len;
u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
int rc;
if (len != spi->erase_size)
return -EINVAL;
if (spi->erase_command == 0)
return -EOPNOTSUPP;
rc = falcon_spi_unlock(efx, spi);
if (rc)
return rc;
rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
if (rc)
return rc;
rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
NULL, 0);
if (rc)
return rc;
rc = falcon_spi_slow_wait(part, false);
/* Verify the entire region has been wiped */
memset(empty, 0xff, sizeof(empty));
for (pos = 0; pos < len; pos += block_len) {
block_len = min(len - pos, sizeof(buffer));
rc = falcon_spi_read(efx, spi, start + pos, block_len,
NULL, buffer);
if (rc)
return rc;
if (memcmp(empty, buffer, block_len))
return -EIO;
/* Avoid locking up the system */
cond_resched();
if (signal_pending(current))
return -EINTR;
}
return rc;
}
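The read-back loop above is a self-contained pattern worth isolating; a sketch of just the verify step, treating the flash contents as a plain buffer (verify_erased() is hypothetical):

#include <stddef.h>
#include <string.h>

#define VERIFY_BUF_LEN 16	/* FALCON_SPI_VERIFY_BUF_LEN */

/* Compare the erased region against 0xFF in small blocks so no large
 * scratch buffer is needed. Returns 0 if fully erased, -1 otherwise
 * (the driver returns -EIO).
 */
int verify_erased(const unsigned char *flash, size_t len)
{
	unsigned char empty[VERIFY_BUF_LEN];
	size_t pos, block;

	memset(empty, 0xff, sizeof(empty));
	for (pos = 0; pos < len; pos += block) {
		block = len - pos < VERIFY_BUF_LEN ? len - pos
						   : VERIFY_BUF_LEN;
		if (memcmp(empty, flash + pos, block))
			return -1;
	}
	return 0;
}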
/* MTD interface */
static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
@ -177,7 +26,7 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
struct efx_nic *efx = mtd->priv;
int rc;
rc = efx->mtd_ops->erase(mtd, erase->addr, erase->len);
rc = efx->type->mtd_erase(mtd, erase->addr, erase->len);
if (rc == 0) {
erase->state = MTD_ERASE_DONE;
} else {
@ -194,7 +43,7 @@ static void efx_mtd_sync(struct mtd_info *mtd)
struct efx_nic *efx = mtd->priv;
int rc;
rc = efx->mtd_ops->sync(mtd);
rc = efx->type->mtd_sync(mtd);
if (rc)
pr_err("%s: %s sync failed (%d)\n",
part->name, part->dev_type_name, rc);
@ -214,26 +63,15 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
list_del(&part->node);
}
static void efx_mtd_rename_partition(struct efx_mtd_partition *part)
{
struct efx_nic *efx = part->mtd.priv;
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
snprintf(part->name, sizeof(part->name), "%s %s:%02x",
efx->name, part->type_name, part->mcdi.fw_subtype);
else
snprintf(part->name, sizeof(part->name), "%s %s",
efx->name, part->type_name);
}
static int efx_mtd_add(struct efx_nic *efx,
struct efx_mtd_partition *parts, size_t n_parts)
int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
size_t n_parts, size_t sizeof_part)
{
struct efx_mtd_partition *part;
size_t i;
for (i = 0; i < n_parts; i++) {
part = &parts[i];
part = (struct efx_mtd_partition *)((char *)parts +
i * sizeof_part);
part->mtd.writesize = 1;
@ -241,11 +79,11 @@ static int efx_mtd_add(struct efx_nic *efx,
part->mtd.priv = efx;
part->mtd.name = part->name;
part->mtd._erase = efx_mtd_erase;
part->mtd._read = efx->mtd_ops->read;
part->mtd._write = efx->mtd_ops->write;
part->mtd._read = efx->type->mtd_read;
part->mtd._write = efx->type->mtd_write;
part->mtd._sync = efx_mtd_sync;
efx_mtd_rename_partition(part);
efx->type->mtd_rename(part);
if (mtd_device_register(&part->mtd, NULL, 0))
goto fail;
@ -257,8 +95,11 @@ static int efx_mtd_add(struct efx_nic *efx,
return 0;
fail:
while (i--)
efx_mtd_remove_partition(&parts[i]);
while (i--) {
part = (struct efx_mtd_partition *)((char *)parts +
i * sizeof_part);
efx_mtd_remove_partition(part);
}
/* Failure is unlikely here, but probably means we're out of memory */
return -ENOMEM;
}
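efx_mtd_add() now steps by an explicit sizeof_part because callers pass arrays of their own, larger partition types that embed the common struct as their first member (the Siena code later in this series does so with struct efx_mcdi_mtd_partition). A toy sketch of the stride walk, with hypothetical struct and function names:

#include <stddef.h>
#include <stdio.h>

struct common { int id; };			  /* plays efx_mtd_partition */
struct leaf { struct common common; int extra; }; /* plays a NIC-specific type */

/* Generic code sees only struct common, so it must advance by the
 * leaf size for the embedded members to line up.
 */
static void walk(struct common *parts, size_t n_parts, size_t sizeof_part)
{
	size_t i;

	for (i = 0; i < n_parts; i++) {
		struct common *part =
			(struct common *)((char *)parts + i * sizeof_part);
		printf("part %d\n", part->id);
	}
}

int main(void)
{
	struct leaf parts[2] = { { { 0 }, 10 }, { { 1 }, 11 } };

	walk(&parts[0].common, 2, sizeof(parts[0]));	/* part 0, part 1 */
	return 0;
}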
@ -288,367 +129,5 @@ void efx_mtd_rename(struct efx_nic *efx)
ASSERT_RTNL();
list_for_each_entry(part, &efx->mtd_list, node)
efx_mtd_rename_partition(part);
efx->type->mtd_rename(part);
}
int efx_mtd_probe(struct efx_nic *efx)
{
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
return siena_mtd_probe(efx);
else
return falcon_mtd_probe(efx);
}
/* Implementation of MTD operations for Falcon */
static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, u8 *buffer)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_nic *efx = mtd->priv;
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
rc = mutex_lock_interruptible(&nic_data->spi_lock);
if (rc)
return rc;
rc = falcon_spi_read(efx, part->falcon.spi, part->falcon.offset + start,
len, retlen, buffer);
mutex_unlock(&nic_data->spi_lock);
return rc;
}
static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_nic *efx = mtd->priv;
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
rc = mutex_lock_interruptible(&nic_data->spi_lock);
if (rc)
return rc;
rc = falcon_spi_erase(part, part->falcon.offset + start, len);
mutex_unlock(&nic_data->spi_lock);
return rc;
}
static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, const u8 *buffer)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_nic *efx = mtd->priv;
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
rc = mutex_lock_interruptible(&nic_data->spi_lock);
if (rc)
return rc;
rc = falcon_spi_write(efx, part->falcon.spi,
part->falcon.offset + start, len, retlen, buffer);
mutex_unlock(&nic_data->spi_lock);
return rc;
}
static int falcon_mtd_sync(struct mtd_info *mtd)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_nic *efx = mtd->priv;
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
mutex_lock(&nic_data->spi_lock);
rc = falcon_spi_slow_wait(part, true);
mutex_unlock(&nic_data->spi_lock);
return rc;
}
static const struct efx_mtd_ops falcon_mtd_ops = {
.read = falcon_mtd_read,
.erase = falcon_mtd_erase,
.write = falcon_mtd_write,
.sync = falcon_mtd_sync,
};
static int falcon_mtd_probe(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
struct efx_mtd_partition *parts;
struct falcon_spi_device *spi;
size_t n_parts;
int rc = -ENODEV;
ASSERT_RTNL();
efx->mtd_ops = &falcon_mtd_ops;
/* Allocate space for maximum number of partitions */
parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
n_parts = 0;
spi = &nic_data->spi_flash;
if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
parts[n_parts].falcon.spi = spi;
parts[n_parts].falcon.offset = FALCON_FLASH_BOOTCODE_START;
parts[n_parts].dev_type_name = "flash";
parts[n_parts].type_name = "sfc_flash_bootrom";
parts[n_parts].mtd.type = MTD_NORFLASH;
parts[n_parts].mtd.flags = MTD_CAP_NORFLASH;
parts[n_parts].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
parts[n_parts].mtd.erasesize = spi->erase_size;
n_parts++;
}
spi = &nic_data->spi_eeprom;
if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
parts[n_parts].falcon.spi = spi;
parts[n_parts].falcon.offset = FALCON_EEPROM_BOOTCONFIG_START;
parts[n_parts].dev_type_name = "EEPROM";
parts[n_parts].type_name = "sfc_bootconfig";
parts[n_parts].mtd.type = MTD_RAM;
parts[n_parts].mtd.flags = MTD_CAP_RAM;
parts[n_parts].mtd.size =
min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
FALCON_EEPROM_BOOTCONFIG_START;
parts[n_parts].mtd.erasesize = spi->erase_size;
n_parts++;
}
rc = efx_mtd_add(efx, parts, n_parts);
if (rc)
kfree(parts);
return rc;
}
/* Implementation of MTD operations for Siena */
static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, u8 *buffer)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_nic *efx = mtd->priv;
loff_t offset = start;
loff_t end = min_t(loff_t, start + len, mtd->size);
size_t chunk;
int rc = 0;
while (offset < end) {
chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
buffer, chunk);
if (rc)
goto out;
offset += chunk;
buffer += chunk;
}
out:
*retlen = offset - start;
return rc;
}
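This chunking loop (since moved behind efx_mcdi_mtd_read()) is the standard way to respect the MCDI payload ceiling; a standalone sketch with backend_read() as a hypothetical stand-in for the NVRAM RPC:

#include <stddef.h>
#include <string.h>

#define LEN_MAX 128	/* stand-in for EFX_MCDI_NVRAM_LEN_MAX */

/* Hypothetical stand-in for efx_mcdi_nvram_read(). */
static int backend_read(long offset, unsigned char *buf, size_t len)
{
	memset(buf, 0xff, len);
	(void)offset;
	return 0;
}

/* Read [start, start + len), clipped to mtd_size, one bounded RPC at
 * a time; *retlen reports how much was actually transferred.
 */
int chunked_read(long start, long mtd_size, unsigned char *buf, size_t len,
		 size_t *retlen)
{
	long offset = start;
	long end = start + (long)len < mtd_size ? start + (long)len
						: mtd_size;
	int rc = 0;

	while (offset < end) {
		size_t chunk = (size_t)(end - offset);

		if (chunk > LEN_MAX)
			chunk = LEN_MAX;
		rc = backend_read(offset, buf, chunk);
		if (rc)
			break;
		offset += chunk;
		buf += chunk;
	}
	*retlen = (size_t)(offset - start);
	return rc;
}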
static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_nic *efx = mtd->priv;
loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
loff_t end = min_t(loff_t, start + len, mtd->size);
size_t chunk = part->mtd.erasesize;
int rc = 0;
if (!part->mcdi.updating) {
rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
if (rc)
goto out;
part->mcdi.updating = true;
}
/* The MCDI interface can in fact do multiple erase blocks at once;
* but erasing may be slow, so we make multiple calls here to avoid
* tripping the MCDI RPC timeout. */
while (offset < end) {
rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset,
chunk);
if (rc)
goto out;
offset += chunk;
}
out:
return rc;
}
static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, const u8 *buffer)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_nic *efx = mtd->priv;
loff_t offset = start;
loff_t end = min_t(loff_t, start + len, mtd->size);
size_t chunk;
int rc = 0;
if (!part->mcdi.updating) {
rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
if (rc)
goto out;
part->mcdi.updating = true;
}
while (offset < end) {
chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
buffer, chunk);
if (rc)
goto out;
offset += chunk;
buffer += chunk;
}
out:
*retlen = offset - start;
return rc;
}
static int siena_mtd_sync(struct mtd_info *mtd)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_nic *efx = mtd->priv;
int rc = 0;
if (part->mcdi.updating) {
part->mcdi.updating = false;
rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
}
return rc;
}
static const struct efx_mtd_ops siena_mtd_ops = {
.read = siena_mtd_read,
.erase = siena_mtd_erase,
.write = siena_mtd_write,
.sync = siena_mtd_sync,
};
struct siena_nvram_type_info {
int port;
const char *name;
};
static const struct siena_nvram_type_info siena_nvram_types[] = {
[MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
[MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
[MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
[MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
[MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
[MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
[MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
};
static int siena_mtd_probe_partition(struct efx_nic *efx,
struct efx_mtd_partition *part,
unsigned int type)
{
const struct siena_nvram_type_info *info;
size_t size, erase_size;
bool protected;
int rc;
if (type >= ARRAY_SIZE(siena_nvram_types) ||
siena_nvram_types[type].name == NULL)
return -ENODEV;
info = &siena_nvram_types[type];
if (info->port != efx_port_num(efx))
return -ENODEV;
rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
if (rc)
return rc;
if (protected)
return -ENODEV; /* hide it */
part->mcdi.nvram_type = type;
part->dev_type_name = "Siena NVRAM manager";
part->type_name = info->name;
part->mtd.type = MTD_NORFLASH;
part->mtd.flags = MTD_CAP_NORFLASH;
part->mtd.size = size;
part->mtd.erasesize = erase_size;
return 0;
}
static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
struct efx_mtd_partition *parts,
size_t n_parts)
{
uint16_t fw_subtype_list[
MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
size_t i;
int rc;
rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
if (rc)
return rc;
for (i = 0; i < n_parts; i++)
parts[i].mcdi.fw_subtype =
fw_subtype_list[parts[i].mcdi.nvram_type];
return 0;
}
static int siena_mtd_probe(struct efx_nic *efx)
{
struct efx_mtd_partition *parts;
u32 nvram_types;
unsigned int type;
size_t n_parts;
int rc;
ASSERT_RTNL();
efx->mtd_ops = &siena_mtd_ops;
rc = efx_mcdi_nvram_types(efx, &nvram_types);
if (rc)
return rc;
parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL);
if (!parts)
return -ENOMEM;
type = 0;
n_parts = 0;
while (nvram_types != 0) {
if (nvram_types & 1) {
rc = siena_mtd_probe_partition(efx, &parts[n_parts],
type);
if (rc == 0)
n_parts++;
else if (rc != -ENODEV)
goto fail;
}
type++;
nvram_types >>= 1;
}
rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts);
if (rc)
goto fail;
rc = efx_mtd_add(efx, parts, n_parts);
fail:
if (rc)
kfree(parts);
return rc;
}

View File

@ -27,6 +27,7 @@
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
#include "enum.h"
#include "bitfield.h"
@ -185,6 +186,7 @@ struct efx_tx_buffer {
* variable indicates that the queue is empty. This is to
* avoid cache-line ping-pong between the xmit path and the
* completion path.
* @merge_events: Number of TX merged completion events
* @insert_count: Current insert pointer
* This is the number of buffers that have been added to the
* software ring.
@ -221,6 +223,7 @@ struct efx_tx_queue {
/* Members used mainly on the completion path */
unsigned int read_count ____cacheline_aligned_in_smp;
unsigned int old_write_count;
unsigned int merge_events;
/* Members used only on the xmit path */
unsigned int insert_count ____cacheline_aligned_in_smp;
@ -260,6 +263,7 @@ struct efx_rx_buffer {
#define EFX_RX_PKT_CSUMMED 0x0002
#define EFX_RX_PKT_DISCARD 0x0004
#define EFX_RX_PKT_TCP 0x0040
#define EFX_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */
/**
* struct efx_rx_page_state - Page-based rx buffer state
@ -594,75 +598,17 @@ static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
return !!(mode & ~PHY_MODE_TX_DISABLED);
}
/*
* Efx extended statistics
*
* Not all statistics are provided by all supported MACs. The purpose
* of this structure is to contain the raw statistics provided by each
* MAC.
/**
* struct efx_hw_stat_desc - Description of a hardware statistic
* @name: Name of the statistic as visible through ethtool, or %NULL if
* it should not be exposed
* @dma_width: Width in bits (0 for non-DMA statistics)
* @offset: Offset within stats (ignored for non-DMA statistics)
*/
struct efx_mac_stats {
u64 tx_bytes;
u64 tx_good_bytes;
u64 tx_bad_bytes;
u64 tx_packets;
u64 tx_bad;
u64 tx_pause;
u64 tx_control;
u64 tx_unicast;
u64 tx_multicast;
u64 tx_broadcast;
u64 tx_lt64;
u64 tx_64;
u64 tx_65_to_127;
u64 tx_128_to_255;
u64 tx_256_to_511;
u64 tx_512_to_1023;
u64 tx_1024_to_15xx;
u64 tx_15xx_to_jumbo;
u64 tx_gtjumbo;
u64 tx_collision;
u64 tx_single_collision;
u64 tx_multiple_collision;
u64 tx_excessive_collision;
u64 tx_deferred;
u64 tx_late_collision;
u64 tx_excessive_deferred;
u64 tx_non_tcpudp;
u64 tx_mac_src_error;
u64 tx_ip_src_error;
u64 rx_bytes;
u64 rx_good_bytes;
u64 rx_bad_bytes;
u64 rx_packets;
u64 rx_good;
u64 rx_bad;
u64 rx_pause;
u64 rx_control;
u64 rx_unicast;
u64 rx_multicast;
u64 rx_broadcast;
u64 rx_lt64;
u64 rx_64;
u64 rx_65_to_127;
u64 rx_128_to_255;
u64 rx_256_to_511;
u64 rx_512_to_1023;
u64 rx_1024_to_15xx;
u64 rx_15xx_to_jumbo;
u64 rx_gtjumbo;
u64 rx_bad_lt64;
u64 rx_bad_64_to_15xx;
u64 rx_bad_15xx_to_jumbo;
u64 rx_bad_gtjumbo;
u64 rx_overflow;
u64 rx_missed;
u64 rx_false_carrier;
u64 rx_symbol_error;
u64 rx_align_error;
u64 rx_length_error;
u64 rx_internal_error;
u64 rx_good_lt64;
struct efx_hw_stat_desc {
const char *name;
u16 dma_width;
u16 offset;
};
/* Number of bits used in a multicast filter hash address */
@ -720,6 +666,11 @@ struct vfdi_status;
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @rx_buffer_truesize: Amortised allocation size of an RX buffer,
* for use in sk_buff::truesize
* @rx_prefix_size: Size of RX prefix before packet data
* @rx_packet_hash_offset: Offset of RX flow hash from start of packet data
* (valid only if @rx_prefix_size != 0; always negative)
* @rx_packet_len_offset: Offset of RX packet length from start of packet data
* (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
* @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
* @rx_scatter: Scatter mode enabled for receives
@ -794,12 +745,8 @@ struct vfdi_status;
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
* field is used by efx_test_interrupts() to verify that an
* interrupt has occurred.
* @n_rx_nodesc_drop_cnt: RX no descriptor drop count
* @mac_stats: MAC statistics. These include all statistics the MACs
* can provide. Generic code converts these into a standard
* &struct net_device_stats.
* @stats_lock: Statistics update lock. Serialises statistics fetches
* and access to @mac_stats.
* @stats_lock: Statistics update lock. Must be held when calling
* efx_nic_type::{update,start,stop}_stats.
*
* This is stored in the private area of the &struct net_device.
*/
@ -854,6 +801,9 @@ struct efx_nic {
unsigned int rx_page_buf_step;
unsigned int rx_bufs_per_page;
unsigned int rx_pages_per_batch;
unsigned int rx_prefix_size;
int rx_packet_hash_offset;
int rx_packet_len_offset;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
bool rx_scatter;
@ -868,7 +818,6 @@ struct efx_nic {
struct delayed_work selftest_work;
#ifdef CONFIG_SFC_MTD
const struct efx_mtd_ops *mtd_ops;
struct list_head mtd_list;
#endif
@ -939,8 +888,6 @@ struct efx_nic {
struct delayed_work monitor_work ____cacheline_aligned_in_smp;
spinlock_t biu_lock;
int last_irq_cpu;
unsigned n_rx_nodesc_drop_cnt;
struct efx_mac_stats mac_stats;
spinlock_t stats_lock;
};
@ -954,6 +901,14 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
return efx->port_num;
}
struct efx_mtd_partition {
struct list_head node;
struct mtd_info mtd;
const char *dev_type_name;
const char *type_name;
char name[IFNAMSIZ + 20];
};
/**
* struct efx_nic_type - Efx device type definition
* @mem_map_size: Get memory BAR mapped size
@ -976,7 +931,9 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* (for Falcon architecture)
* @finish_flush: Clean up after flushing the DMA queues (for Falcon
* architecture)
* @update_stats: Update statistics not provided by event handling
* @describe_stats: Describe statistics for ethtool
* @update_stats: Update statistics not provided by event handling.
* Either argument may be %NULL.
* @start_stats: Start the regular fetching of statistics
* @stop_stats: Stop the regular fetching of statistics
* @set_id_led: Set state of identifying LED or revert to automatic function
@ -1047,6 +1004,15 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
* This must check whether the specified table entry is used by RFS
* and that rps_may_expire_flow() returns true for it.
* @mtd_probe: Probe and add MTD partitions associated with this net device,
* using efx_mtd_add()
* @mtd_rename: Set an MTD partition name using the net device name
* @mtd_read: Read from an MTD partition
* @mtd_erase: Erase part of an MTD partition
* @mtd_write: Write to an MTD partition
* @mtd_sync: Wait for write-back to complete on MTD partition. This
* also notifies the driver that a writer has finished using this
* partition.
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@ -1054,7 +1020,8 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @evq_ptr_tbl_base: Event queue pointer table base address
* @evq_rptr_tbl_base: Event queue read-pointer table base address
* @max_dma_mask: Maximum possible DMA mask
* @rx_buffer_hash_size: Size of hash at start of RX packet
* @rx_prefix_size: Size of RX prefix before packet data
* @rx_hash_offset: Offset of RX flow hash within prefix
* @rx_buffer_padding: Size of padding at end of RX packet
* @can_rx_scatter: NIC is able to scatter packet to multiple buffers
* @max_interrupt_mode: Highest capability interrupt mode supported
@ -1081,7 +1048,9 @@ struct efx_nic_type {
int (*fini_dmaq)(struct efx_nic *efx);
void (*prepare_flush)(struct efx_nic *efx);
void (*finish_flush)(struct efx_nic *efx);
void (*update_stats)(struct efx_nic *efx);
size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
void (*start_stats)(struct efx_nic *efx);
void (*stop_stats)(struct efx_nic *efx);
void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
@ -1150,6 +1119,17 @@ struct efx_nic_type {
bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
unsigned int index);
#endif
#ifdef CONFIG_SFC_MTD
int (*mtd_probe)(struct efx_nic *efx);
void (*mtd_rename)(struct efx_mtd_partition *part);
int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len,
size_t *retlen, u8 *buffer);
int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len);
int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len,
size_t *retlen, const u8 *buffer);
int (*mtd_sync)(struct mtd_info *mtd);
#endif
void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
int revision;
unsigned int txd_ptr_tbl_base;
@ -1158,7 +1138,8 @@ struct efx_nic_type {
unsigned int evq_ptr_tbl_base;
unsigned int evq_rptr_tbl_base;
u64 max_dma_mask;
unsigned int rx_buffer_hash_size;
unsigned int rx_prefix_size;
unsigned int rx_hash_offset;
unsigned int rx_buffer_padding;
bool can_rx_scatter;
unsigned int max_interrupt_mode;

View File

@ -429,3 +429,86 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
}
}
}
/**
* efx_nic_describe_stats - Describe supported statistics for ethtool
* @desc: Array of &struct efx_hw_stat_desc describing the statistics
* @count: Length of the @desc array
* @mask: Bitmask of which elements of @desc are enabled
* @names: Buffer to copy names to, or %NULL. The names are copied
* starting at intervals of %ETH_GSTRING_LEN bytes.
*
* Returns the number of visible statistics, i.e. the number of set
* bits in the first @count bits of @mask for which a name is defined.
*/
size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u8 *names)
{
size_t visible = 0;
size_t index;
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
if (names) {
strlcpy(names, desc[index].name,
ETH_GSTRING_LEN);
names += ETH_GSTRING_LEN;
}
++visible;
}
}
return visible;
}
/**
* efx_nic_update_stats - Convert statistics DMA buffer to array of u64
* @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
* layout. DMA widths of 0, 16, 32 and 64 are supported; where
* the width is specified as 0 the corresponding element of
* @stats is not updated.
* @count: Length of the @desc array
* @mask: Bitmask of which elements of @desc are enabled
* @stats: Buffer to update with the converted statistics. The length
* of this array must be at least the number of set bits in the
* first @count bits of @mask.
* @dma_buf: DMA buffer containing hardware statistics
* @accumulate: If set, the converted values will be added rather than
* directly stored to the corresponding elements of @stats
*/
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask,
u64 *stats, const void *dma_buf, bool accumulate)
{
size_t index;
for_each_set_bit(index, mask, count) {
if (desc[index].dma_width) {
const void *addr = dma_buf + desc[index].offset;
u64 val;
switch (desc[index].dma_width) {
case 16:
val = le16_to_cpup((__le16 *)addr);
break;
case 32:
val = le32_to_cpup((__le32 *)addr);
break;
case 64:
val = le64_to_cpup((__le64 *)addr);
break;
default:
WARN_ON(1);
val = 0;
break;
}
if (accumulate)
*stats += val;
else
*stats = val;
}
++stats;
}
}
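A standalone sketch of the same descriptor-driven decode, assuming a little-endian DMA buffer; read_le() is a hypothetical portable stand-in for the le*_to_cpup() calls, and width 0 marks a software-maintained slot that is left untouched:

#include <stdint.h>
#include <stdio.h>

struct stat_desc { const char *name; uint16_t dma_width; uint16_t offset; };

/* Assemble a little-endian value of the given bit width. */
static uint64_t read_le(const uint8_t *p, unsigned int width)
{
	uint64_t v = 0;
	unsigned int i;

	for (i = 0; i < width / 8; i++)
		v |= (uint64_t)p[i] << (8 * i);
	return v;
}

int main(void)
{
	static const struct stat_desc desc[] = {
		{ "tx_bytes",      64, 0 },
		{ "tx_good_bytes",  0, 0 },	/* derived later, not DMAed */
		{ "rx_packets",    32, 8 },
	};
	uint8_t dma_buf[12] = { 0 };
	uint64_t stats[3] = { 0 };
	size_t i;

	dma_buf[0] = 0x40;	/* tx_bytes = 0x40 */
	dma_buf[8] = 0x05;	/* rx_packets = 5 */

	for (i = 0; i < 3; i++)
		if (desc[i].dma_width)
			stats[i] = read_le(dma_buf + desc[i].offset,
					   desc[i].dma_width);

	printf("%s=%llu %s=%llu\n",
	       desc[0].name, (unsigned long long)stats[0],
	       desc[2].name, (unsigned long long)stats[2]);
	return 0;
}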

View File

@ -16,7 +16,6 @@
#include "net_driver.h"
#include "efx.h"
#include "mcdi.h"
#include "spi.h"
/*
* Falcon hardware control
@ -118,9 +117,6 @@ enum {
(1 << LOOPBACK_XGXS) | \
(1 << LOOPBACK_XAUI))
#define FALCON_GMAC_LOOPBACKS \
(1 << LOOPBACK_GMAC)
/* Alignment of PCIe DMA boundaries (4KB) */
#define EFX_PAGE_SIZE 4096
/* Size and alignment of buffer table entries (same) */
@ -163,14 +159,97 @@ struct falcon_board {
struct i2c_client *hwmon_client, *ioexp_client;
};
/**
* struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
* @device_id: Controller's id for the device
* @size: Size (in bytes)
* @addr_len: Number of address bytes in read/write commands
* @munge_address: Flag whether addresses should be munged.
* Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
* use bit 3 of the command byte as address bit A8, rather
* than having a two-byte address. If this flag is set, then
* commands should be munged in this way.
* @erase_command: Erase command (or 0 if sector erase not needed).
* @erase_size: Erase sector size (in bytes)
* Erase commands affect sectors with this size and alignment.
* This must be a power of two.
* @block_size: Write block size (in bytes).
* Write commands are limited to blocks with this size and alignment.
*/
struct falcon_spi_device {
int device_id;
unsigned int size;
unsigned int addr_len;
unsigned int munge_address:1;
u8 erase_command;
unsigned int erase_size;
unsigned int block_size;
};
static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
{
return spi->size != 0;
}
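The @munge_address transformation described above is a single line; a hedged sketch following the AT25040A convention of carrying address bit A8 in bit 3 of the command byte (munge_command() is hypothetical):

#include <stdint.h>

/* Fold address bit 8 into bit 3 of the command byte for 9-bit parts. */
uint8_t munge_command(uint8_t cmd, unsigned int addr)
{
	return cmd | (uint8_t)(((addr >> 8) & 1) << 3);
}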
enum {
FALCON_STAT_tx_bytes,
FALCON_STAT_tx_packets,
FALCON_STAT_tx_pause,
FALCON_STAT_tx_control,
FALCON_STAT_tx_unicast,
FALCON_STAT_tx_multicast,
FALCON_STAT_tx_broadcast,
FALCON_STAT_tx_lt64,
FALCON_STAT_tx_64,
FALCON_STAT_tx_65_to_127,
FALCON_STAT_tx_128_to_255,
FALCON_STAT_tx_256_to_511,
FALCON_STAT_tx_512_to_1023,
FALCON_STAT_tx_1024_to_15xx,
FALCON_STAT_tx_15xx_to_jumbo,
FALCON_STAT_tx_gtjumbo,
FALCON_STAT_tx_non_tcpudp,
FALCON_STAT_tx_mac_src_error,
FALCON_STAT_tx_ip_src_error,
FALCON_STAT_rx_bytes,
FALCON_STAT_rx_good_bytes,
FALCON_STAT_rx_bad_bytes,
FALCON_STAT_rx_packets,
FALCON_STAT_rx_good,
FALCON_STAT_rx_bad,
FALCON_STAT_rx_pause,
FALCON_STAT_rx_control,
FALCON_STAT_rx_unicast,
FALCON_STAT_rx_multicast,
FALCON_STAT_rx_broadcast,
FALCON_STAT_rx_lt64,
FALCON_STAT_rx_64,
FALCON_STAT_rx_65_to_127,
FALCON_STAT_rx_128_to_255,
FALCON_STAT_rx_256_to_511,
FALCON_STAT_rx_512_to_1023,
FALCON_STAT_rx_1024_to_15xx,
FALCON_STAT_rx_15xx_to_jumbo,
FALCON_STAT_rx_gtjumbo,
FALCON_STAT_rx_bad_lt64,
FALCON_STAT_rx_bad_gtjumbo,
FALCON_STAT_rx_overflow,
FALCON_STAT_rx_symbol_error,
FALCON_STAT_rx_align_error,
FALCON_STAT_rx_length_error,
FALCON_STAT_rx_internal_error,
FALCON_STAT_rx_nodesc_drop_cnt,
FALCON_STAT_COUNT
};
/**
* struct falcon_nic_data - Falcon NIC state
* @pci_dev2: Secondary function of Falcon A
* @board: Board state and functions
* @stats: Hardware statistics
* @stats_disable_count: Nest count for disabling statistics fetches
* @stats_pending: Is there a pending DMA of MAC statistics.
* @stats_timer: A timer for regularly fetching MAC statistics.
* @stats_dma_done: Pointer to the flag which indicates DMA completion.
* @spi_flash: SPI flash device
* @spi_eeprom: SPI EEPROM device
* @spi_lock: SPI bus lock
@ -180,10 +259,10 @@ struct falcon_board {
struct falcon_nic_data {
struct pci_dev *pci_dev2;
struct falcon_board board;
u64 stats[FALCON_STAT_COUNT];
unsigned int stats_disable_count;
bool stats_pending;
struct timer_list stats_timer;
u32 *stats_dma_done;
struct falcon_spi_device spi_flash;
struct falcon_spi_device spi_eeprom;
struct mutex spi_lock;
@ -197,12 +276,75 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
return &data->board;
}
enum {
SIENA_STAT_tx_bytes,
SIENA_STAT_tx_good_bytes,
SIENA_STAT_tx_bad_bytes,
SIENA_STAT_tx_packets,
SIENA_STAT_tx_bad,
SIENA_STAT_tx_pause,
SIENA_STAT_tx_control,
SIENA_STAT_tx_unicast,
SIENA_STAT_tx_multicast,
SIENA_STAT_tx_broadcast,
SIENA_STAT_tx_lt64,
SIENA_STAT_tx_64,
SIENA_STAT_tx_65_to_127,
SIENA_STAT_tx_128_to_255,
SIENA_STAT_tx_256_to_511,
SIENA_STAT_tx_512_to_1023,
SIENA_STAT_tx_1024_to_15xx,
SIENA_STAT_tx_15xx_to_jumbo,
SIENA_STAT_tx_gtjumbo,
SIENA_STAT_tx_collision,
SIENA_STAT_tx_single_collision,
SIENA_STAT_tx_multiple_collision,
SIENA_STAT_tx_excessive_collision,
SIENA_STAT_tx_deferred,
SIENA_STAT_tx_late_collision,
SIENA_STAT_tx_excessive_deferred,
SIENA_STAT_tx_non_tcpudp,
SIENA_STAT_tx_mac_src_error,
SIENA_STAT_tx_ip_src_error,
SIENA_STAT_rx_bytes,
SIENA_STAT_rx_good_bytes,
SIENA_STAT_rx_bad_bytes,
SIENA_STAT_rx_packets,
SIENA_STAT_rx_good,
SIENA_STAT_rx_bad,
SIENA_STAT_rx_pause,
SIENA_STAT_rx_control,
SIENA_STAT_rx_unicast,
SIENA_STAT_rx_multicast,
SIENA_STAT_rx_broadcast,
SIENA_STAT_rx_lt64,
SIENA_STAT_rx_64,
SIENA_STAT_rx_65_to_127,
SIENA_STAT_rx_128_to_255,
SIENA_STAT_rx_256_to_511,
SIENA_STAT_rx_512_to_1023,
SIENA_STAT_rx_1024_to_15xx,
SIENA_STAT_rx_15xx_to_jumbo,
SIENA_STAT_rx_gtjumbo,
SIENA_STAT_rx_bad_gtjumbo,
SIENA_STAT_rx_overflow,
SIENA_STAT_rx_false_carrier,
SIENA_STAT_rx_symbol_error,
SIENA_STAT_rx_align_error,
SIENA_STAT_rx_length_error,
SIENA_STAT_rx_internal_error,
SIENA_STAT_rx_nodesc_drop_cnt,
SIENA_STAT_COUNT
};
/**
* struct siena_nic_data - Siena NIC state
* @wol_filter_id: Wake-on-LAN packet filter id
* @stats: Hardware statistics
*/
struct siena_nic_data {
int wol_filter_id;
u64 stats[SIENA_STAT_COUNT];
};
/*
@ -507,6 +649,14 @@ extern int efx_farch_test_registers(struct efx_nic *efx,
extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
extern size_t
efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u8 *names);
extern void
efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask,
u64 *stats, const void *dma_buf, bool accumulate);
#define EFX_MAX_FLUSH_TIME 5000
extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,

View File

@ -313,6 +313,7 @@ static int efx_ptp_enable(struct efx_nic *efx)
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
efx->ptp_data->channel->channel);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
@ -331,6 +332,7 @@ static int efx_ptp_disable(struct efx_nic *efx)
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}
@ -388,8 +390,7 @@ static void efx_ptp_send_times(struct efx_nic *efx,
host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
now.ts_real.tv_nsec);
/* Update host time in NIC memory */
_efx_writed(efx, cpu_to_le32(host_time),
FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
efx->type->ptp_write_host_time(efx, host_time);
}
*last_time = now;
}
@ -531,6 +532,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
int *start = ptp->start.addr;
MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
MCDI_SET_DWORD(synch_buf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
num_readings);
MCDI_SET_QWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR,
@ -574,6 +576,7 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
size_t len;
MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
if (skb_shinfo(skb)->nr_frags != 0) {
rc = skb_linearize(skb);
@ -1377,6 +1380,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
(PPB_EXTRA_BITS + MAX_PPB_BITS));
MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0);
MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns);
MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0);
MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0);
@ -1399,6 +1403,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN);
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
@ -1417,6 +1422,7 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), NULL);

View File

@ -61,13 +61,12 @@ static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
return page_address(buf->page) + buf->page_offset;
}
static inline u32 efx_rx_buf_hash(const u8 *eh)
static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
/* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
return __le32_to_cpup((const __le32 *)(eh - 4));
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
const u8 *data = eh - 4;
const u8 *data = eh + efx->rx_packet_hash_offset;
return (u32)data[0] |
(u32)data[1] << 8 |
(u32)data[2] << 16 |
@ -439,7 +438,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
}
if (efx->net_dev->features & NETIF_F_RXHASH)
skb->rxhash = efx_rx_buf_hash(eh);
skb->rxhash = efx_rx_buf_hash(efx, eh);
skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
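The negative @rx_packet_hash_offset used by efx_rx_buf_hash() falls out of the prefix arithmetic: once page_offset has been advanced past the prefix, the hash lies before the packet data. A worked check, assuming the Siena values implied by the old hard-coded "eh - 4" and the 16-byte prefix:

#include <assert.h>

int main(void)
{
	int rx_prefix_size = 16;	/* FS_BZ_RX_PREFIX_SIZE (assumed) */
	int rx_hash_offset = 12;	/* FS_BZ_RX_PREFIX_HASH_OFST (assumed) */

	/* rx_packet_hash_offset as computed at init time. */
	assert(rx_hash_offset - rx_prefix_size == -4);	/* the old "eh - 4" */
	return 0;
}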
@ -527,7 +526,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
/* Validate the number of fragments and completed length */
if (n_frags == 1) {
efx_rx_packet__check_len(rx_queue, rx_buf, len);
if (!(flags & EFX_RX_PKT_PREFIX_LEN))
efx_rx_packet__check_len(rx_queue, rx_buf, len);
} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
@ -555,7 +555,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
return;
}
if (n_frags == 1)
if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
rx_buf->len = len;
/* Release and/or sync the DMA mapping - assumes all RX buffers
@ -568,8 +568,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
*/
prefetch(efx_rx_buf_va(rx_buf));
rx_buf->page_offset += efx->type->rx_buffer_hash_size;
rx_buf->len -= efx->type->rx_buffer_hash_size;
rx_buf->page_offset += efx->rx_prefix_size;
rx_buf->len -= efx->rx_prefix_size;
if (n_frags > 1) {
/* Release/sync DMA mapping for additional fragments.
@ -634,6 +634,13 @@ void __efx_rx_packet(struct efx_channel *channel)
efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
u8 *eh = efx_rx_buf_va(rx_buf);
/* Read length from the prefix if necessary. This already
* excludes the length of the prefix itself.
*/
if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
rx_buf->len = le16_to_cpup((__le16 *)
(eh + efx->rx_packet_len_offset));
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/

View File

@ -18,7 +18,6 @@
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "spi.h"
#include "farch_regs.h"
#include "io.h"
#include "phy.h"
@ -381,117 +380,160 @@ static void siena_remove_nic(struct efx_nic *efx)
efx_mcdi_fini(efx);
}
#define SIENA_DMA_STAT(ext_name, mcdi_name) \
[SIENA_STAT_ ## ext_name] = \
{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define SIENA_OTHER_STAT(ext_name) \
[SIENA_STAT_ ## ext_name] = { #ext_name, 0, 0 }
static const struct efx_hw_stat_desc siena_stat_desc[SIENA_STAT_COUNT] = {
SIENA_DMA_STAT(tx_bytes, TX_BYTES),
SIENA_OTHER_STAT(tx_good_bytes),
SIENA_DMA_STAT(tx_bad_bytes, TX_BAD_BYTES),
SIENA_DMA_STAT(tx_packets, TX_PKTS),
SIENA_DMA_STAT(tx_bad, TX_BAD_FCS_PKTS),
SIENA_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
SIENA_DMA_STAT(tx_control, TX_CONTROL_PKTS),
SIENA_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
SIENA_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
SIENA_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
SIENA_DMA_STAT(tx_lt64, TX_LT64_PKTS),
SIENA_DMA_STAT(tx_64, TX_64_PKTS),
SIENA_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
SIENA_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
SIENA_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
SIENA_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
SIENA_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
SIENA_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
SIENA_DMA_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS),
SIENA_OTHER_STAT(tx_collision),
SIENA_DMA_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS),
SIENA_DMA_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS),
SIENA_DMA_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS),
SIENA_DMA_STAT(tx_deferred, TX_DEFERRED_PKTS),
SIENA_DMA_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS),
SIENA_DMA_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS),
SIENA_DMA_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS),
SIENA_DMA_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS),
SIENA_DMA_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS),
SIENA_DMA_STAT(rx_bytes, RX_BYTES),
SIENA_OTHER_STAT(rx_good_bytes),
SIENA_DMA_STAT(rx_bad_bytes, RX_BAD_BYTES),
SIENA_DMA_STAT(rx_packets, RX_PKTS),
SIENA_DMA_STAT(rx_good, RX_GOOD_PKTS),
SIENA_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
SIENA_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
SIENA_DMA_STAT(rx_control, RX_CONTROL_PKTS),
SIENA_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
SIENA_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
SIENA_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
SIENA_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
SIENA_DMA_STAT(rx_64, RX_64_PKTS),
SIENA_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
SIENA_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
SIENA_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
SIENA_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
SIENA_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
SIENA_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
SIENA_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
SIENA_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
SIENA_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
SIENA_DMA_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS),
SIENA_DMA_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS),
SIENA_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
SIENA_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
SIENA_DMA_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS),
SIENA_DMA_STAT(rx_nodesc_drop_cnt, RX_NODESC_DROPS),
};
static const unsigned long siena_stat_mask[] = {
[0 ... BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL,
};
static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names)
{
return efx_nic_describe_stats(siena_stat_desc, SIENA_STAT_COUNT,
siena_stat_mask, names);
}
static int siena_try_update_nic_stats(struct efx_nic *efx)
{
struct siena_nic_data *nic_data = efx->nic_data;
u64 *stats = nic_data->stats;
__le64 *dma_stats;
struct efx_mac_stats *mac_stats;
__le64 generation_start, generation_end;
mac_stats = &efx->mac_stats;
dma_stats = efx->stats_buffer.addr;
generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
return 0;
rmb();
#define MAC_STAT(M, D) \
mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])
MAC_STAT(tx_bytes, TX_BYTES);
MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
efx_update_diff_stat(&mac_stats->tx_good_bytes,
mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
MAC_STAT(tx_packets, TX_PKTS);
MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
MAC_STAT(tx_pause, TX_PAUSE_PKTS);
MAC_STAT(tx_control, TX_CONTROL_PKTS);
MAC_STAT(tx_unicast, TX_UNICAST_PKTS);
MAC_STAT(tx_multicast, TX_MULTICAST_PKTS);
MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS);
MAC_STAT(tx_lt64, TX_LT64_PKTS);
MAC_STAT(tx_64, TX_64_PKTS);
MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS);
MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS);
MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS);
MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS);
MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS);
MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS);
MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS);
mac_stats->tx_collision = 0;
MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS);
MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS);
MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS);
MAC_STAT(tx_deferred, TX_DEFERRED_PKTS);
MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS);
mac_stats->tx_collision = (mac_stats->tx_single_collision +
mac_stats->tx_multiple_collision +
mac_stats->tx_excessive_collision +
mac_stats->tx_late_collision);
MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS);
MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS);
MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS);
MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
MAC_STAT(rx_bytes, RX_BYTES);
MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
efx_update_diff_stat(&mac_stats->rx_good_bytes,
mac_stats->rx_bytes - mac_stats->rx_bad_bytes);
MAC_STAT(rx_packets, RX_PKTS);
MAC_STAT(rx_good, RX_GOOD_PKTS);
MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
MAC_STAT(rx_pause, RX_PAUSE_PKTS);
MAC_STAT(rx_control, RX_CONTROL_PKTS);
MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
MAC_STAT(rx_multicast, RX_MULTICAST_PKTS);
MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS);
MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS);
MAC_STAT(rx_64, RX_64_PKTS);
MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS);
MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS);
MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS);
MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS);
MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS);
MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS);
MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS);
mac_stats->rx_bad_lt64 = 0;
mac_stats->rx_bad_64_to_15xx = 0;
mac_stats->rx_bad_15xx_to_jumbo = 0;
MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS);
MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS);
mac_stats->rx_missed = 0;
MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS);
MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS);
MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS);
MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS);
MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
mac_stats->rx_good_lt64 = 0;
efx->n_rx_nodesc_drop_cnt =
le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);
#undef MAC_STAT
efx_nic_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask,
stats, efx->stats_buffer.addr, false);
rmb();
generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
if (generation_end != generation_start)
return -EAGAIN;
/* Update derived statistics */
efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
stats[SIENA_STAT_tx_bytes] -
stats[SIENA_STAT_tx_bad_bytes]);
stats[SIENA_STAT_tx_collision] =
stats[SIENA_STAT_tx_single_collision] +
stats[SIENA_STAT_tx_multiple_collision] +
stats[SIENA_STAT_tx_excessive_collision] +
stats[SIENA_STAT_tx_late_collision];
efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes],
stats[SIENA_STAT_rx_bytes] -
stats[SIENA_STAT_rx_bad_bytes]);
return 0;
}
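The GENERATION_START/GENERATION_END pair is a seqcount-style handshake: the MC appears to bump GENERATION_START before rewriting the buffer and to write GENERATION_END only afterwards, so a reader that samples END first and START last and sees them equal has a consistent snapshot. A standalone sketch under that assumption (the buffer layout here is illustrative only):

#include <stdint.h>

struct stats_buf {
	volatile uint64_t generation_end;
	uint64_t payload[4];
	volatile uint64_t generation_start;
};

/* Returns 0 on a consistent snapshot, -1 if a DMA raced us -- in which
 * case the caller retries, as siena_update_nic_stats() does with a
 * bounded udelay() loop.
 */
int try_snapshot(const struct stats_buf *b, uint64_t out[4])
{
	uint64_t gen_end = b->generation_end;
	int i;

	__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* the first rmb() */
	for (i = 0; i < 4; i++)
		out[i] = b->payload[i];
	__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* the second rmb() */
	return b->generation_start == gen_end ? 0 : -1;
}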
static void siena_update_nic_stats(struct efx_nic *efx)
static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats)
{
struct siena_nic_data *nic_data = efx->nic_data;
u64 *stats = nic_data->stats;
int retry;
/* If we're unlucky enough to read statistics during the DMA, wait
* up to 10ms for it to finish (typically takes <500us) */
for (retry = 0; retry < 100; ++retry) {
if (siena_try_update_nic_stats(efx) == 0)
return;
break;
udelay(100);
}
/* Use the old values instead */
if (full_stats)
memcpy(full_stats, stats, sizeof(u64) * SIENA_STAT_COUNT);
if (core_stats) {
core_stats->rx_packets = stats[SIENA_STAT_rx_packets];
core_stats->tx_packets = stats[SIENA_STAT_tx_packets];
core_stats->rx_bytes = stats[SIENA_STAT_rx_bytes];
core_stats->tx_bytes = stats[SIENA_STAT_tx_bytes];
core_stats->rx_dropped = stats[SIENA_STAT_rx_nodesc_drop_cnt];
core_stats->multicast = stats[SIENA_STAT_rx_multicast];
core_stats->collisions = stats[SIENA_STAT_tx_collision];
core_stats->rx_length_errors =
stats[SIENA_STAT_rx_gtjumbo] +
stats[SIENA_STAT_rx_length_error];
core_stats->rx_crc_errors = stats[SIENA_STAT_rx_bad];
core_stats->rx_frame_errors = stats[SIENA_STAT_rx_align_error];
core_stats->rx_fifo_errors = stats[SIENA_STAT_rx_overflow];
core_stats->tx_window_errors =
stats[SIENA_STAT_tx_late_collision];
core_stats->rx_errors = (core_stats->rx_length_errors +
core_stats->rx_crc_errors +
core_stats->rx_frame_errors +
stats[SIENA_STAT_rx_symbol_error]);
core_stats->tx_errors = (core_stats->tx_window_errors +
stats[SIENA_STAT_tx_bad]);
}
return SIENA_STAT_COUNT;
}
static int siena_mac_reconfigure(struct efx_nic *efx)
@ -653,6 +695,7 @@ static void siena_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
static int siena_mcdi_poll_reboot(struct efx_nic *efx)
{
struct siena_nic_data *nic_data = efx->nic_data;
unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
efx_dword_t reg;
u32 value;
@ -666,12 +709,163 @@ static int siena_mcdi_poll_reboot(struct efx_nic *efx)
EFX_ZERO_DWORD(reg);
efx_writed(efx, &reg, addr);
/* MAC statistics have been cleared on the NIC; clear the local
* copies that we update with efx_update_diff_stat().
*/
nic_data->stats[SIENA_STAT_tx_good_bytes] = 0;
nic_data->stats[SIENA_STAT_rx_good_bytes] = 0;
if (value == MC_STATUS_DWORD_ASSERT)
return -EINTR;
else
return -EIO;
}
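Zeroing tx_good_bytes and rx_good_bytes here matters because efx_update_diff_stat() only ever moves a statistic forward; after a reboot clears the hardware counters, stale local maxima would otherwise stick. A sketch of that helper's semantics (body assumed from its use above):

#include <stdint.h>

/* Ignore snapshots that appear to go backwards (e.g. after the NIC's
 * counters reset) -- hence the explicit zeroing on MC reboot.
 */
void update_diff_stat(uint64_t *stat, uint64_t diff)
{
	if ((int64_t)(diff - *stat) > 0)
		*stat = diff;
}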
/**************************************************************************
*
* MTD
*
**************************************************************************
*/
#ifdef CONFIG_SFC_MTD
struct siena_nvram_type_info {
int port;
const char *name;
};
static const struct siena_nvram_type_info siena_nvram_types[] = {
[MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
[MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
[MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
[MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
[MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
[MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
[MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
};
static int siena_mtd_probe_partition(struct efx_nic *efx,
struct efx_mcdi_mtd_partition *part,
unsigned int type)
{
const struct siena_nvram_type_info *info;
size_t size, erase_size;
bool protected;
int rc;
if (type >= ARRAY_SIZE(siena_nvram_types) ||
siena_nvram_types[type].name == NULL)
return -ENODEV;
info = &siena_nvram_types[type];
if (info->port != efx_port_num(efx))
return -ENODEV;
rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
if (rc)
return rc;
if (protected)
return -ENODEV; /* hide it */
part->nvram_type = type;
part->common.dev_type_name = "Siena NVRAM manager";
part->common.type_name = info->name;
part->common.mtd.type = MTD_NORFLASH;
part->common.mtd.flags = MTD_CAP_NORFLASH;
part->common.mtd.size = size;
part->common.mtd.erasesize = erase_size;
return 0;
}
static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
struct efx_mcdi_mtd_partition *parts,
size_t n_parts)
{
uint16_t fw_subtype_list[
MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
size_t i;
int rc;
rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
if (rc)
return rc;
for (i = 0; i < n_parts; i++)
parts[i].fw_subtype = fw_subtype_list[parts[i].nvram_type];
return 0;
}
static int siena_mtd_probe(struct efx_nic *efx)
{
struct efx_mcdi_mtd_partition *parts;
u32 nvram_types;
unsigned int type;
size_t n_parts;
int rc;
ASSERT_RTNL();
rc = efx_mcdi_nvram_types(efx, &nvram_types);
if (rc)
return rc;
parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL);
if (!parts)
return -ENOMEM;
type = 0;
n_parts = 0;
while (nvram_types != 0) {
if (nvram_types & 1) {
rc = siena_mtd_probe_partition(efx, &parts[n_parts],
type);
if (rc == 0)
n_parts++;
else if (rc != -ENODEV)
goto fail;
}
type++;
nvram_types >>= 1;
}
rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts);
if (rc)
goto fail;
rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
fail:
if (rc)
kfree(parts);
return rc;
}
#endif /* CONFIG_SFC_MTD */
/**************************************************************************
*
* PTP
*
**************************************************************************
*/
static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
{
_efx_writed(efx, cpu_to_le32(host_time),
FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
}
/**************************************************************************
*
* Revision-dependent attributes used by efx.c and nic.c
@ -699,6 +893,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = siena_prepare_flush,
.finish_flush = siena_finish_flush,
.describe_stats = siena_describe_nic_stats,
.update_stats = siena_update_nic_stats,
.start_stats = efx_mcdi_mac_start_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
@ -753,6 +948,15 @@ const struct efx_nic_type siena_a0_nic_type = {
.filter_rfs_insert = efx_farch_filter_rfs_insert,
.filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
.mtd_probe = siena_mtd_probe,
.mtd_rename = efx_mcdi_mtd_rename,
.mtd_read = efx_mcdi_mtd_read,
.mtd_erase = efx_mcdi_mtd_erase,
.mtd_write = efx_mcdi_mtd_write,
.mtd_sync = efx_mcdi_mtd_sync,
#endif
.ptp_write_host_time = siena_ptp_write_host_time,
.revision = EFX_REV_SIENA_A0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
@ -761,7 +965,8 @@ const struct efx_nic_type siena_a0_nic_type = {
.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
.rx_buffer_hash_size = 0x10,
.rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
.rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
.rx_buffer_padding = 0,
.can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,

View File

@ -1,99 +0,0 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005 Fen Systems Ltd.
* Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_SPI_H
#define EFX_SPI_H
#include "net_driver.h"
/**************************************************************************
*
* Basic SPI command set and bit definitions
*
*************************************************************************/
#define SPI_WRSR 0x01 /* Write status register */
#define SPI_WRITE 0x02 /* Write data to memory array */
#define SPI_READ 0x03 /* Read data from memory array */
#define SPI_WRDI 0x04 /* Reset write enable latch */
#define SPI_RDSR 0x05 /* Read status register */
#define SPI_WREN 0x06 /* Set write enable latch */
#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */
#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
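/* Illustrative sketch, not part of the original header: SPI_STATUS_NRDY
 * is the busy flag, so a caller waiting for a write to finish issues
 * SPI_RDSR and tests the returned status byte along these lines.
 */
static inline bool falcon_spi_status_busy(u8 rdsr_status)
{
	return (rdsr_status & SPI_STATUS_NRDY) != 0;
}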
/**
* struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
* @device_id: Controller's id for the device
* @size: Size (in bytes)
* @addr_len: Number of address bytes in read/write commands
* @munge_address: Flag indicating whether addresses should be munged.
* Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
* use bit 3 of the command byte as address bit A8, rather
* than having a two-byte address. If this flag is set, then
* commands should be munged in this way.
* @erase_command: Erase command (or 0 if sector erase not needed).
* @erase_size: Erase sector size (in bytes)
* Erase commands affect sectors with this size and alignment.
* This must be a power of two.
* @block_size: Write block size (in bytes).
* Write commands are limited to blocks with this size and alignment.
*/
struct falcon_spi_device {
int device_id;
unsigned int size;
unsigned int addr_len;
unsigned int munge_address:1;
u8 erase_command;
unsigned int erase_size;
unsigned int block_size;
};
static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
{
return spi->size != 0;
}
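/* Illustrative sketch, not part of the original header: the address
 * munging described under @munge_address folds address bit A8 into
 * bit 3 of the command byte, roughly as follows.
 */
static inline u8 falcon_spi_munge_command(const struct falcon_spi_device *spi,
					  u8 command, unsigned int address)
{
	if (spi->munge_address)
		command |= ((address >> 8) & 1) << 3;
	return command;
}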
int falcon_spi_cmd(struct efx_nic *efx,
const struct falcon_spi_device *spi, unsigned int command,
int address, const void *in, void *out, size_t len);
int falcon_spi_wait_write(struct efx_nic *efx,
const struct falcon_spi_device *spi);
int falcon_spi_read(struct efx_nic *efx,
const struct falcon_spi_device *spi, loff_t start,
size_t len, size_t *retlen, u8 *buffer);
int falcon_spi_write(struct efx_nic *efx,
const struct falcon_spi_device *spi, loff_t start,
size_t len, size_t *retlen, const u8 *buffer);
/*
* SFC4000 flash is partitioned into:
* 0-0x400 chip and board config (see falcon_hwdefs.h)
* 0x400-0x8000 unused (or may contain VPD if EEPROM not present)
* 0x8000-end boot code (mapped to PCI expansion ROM)
* SFC4000 small EEPROM (size < 0x400) is used for VPD only.
* SFC4000 large EEPROM (size >= 0x400) is partitioned into:
* 0-0x400 chip and board config
* VPD at a configurable offset
* 0x800-0x1800 boot config
* Aside from the chip and board config, all of these are optional and may
* be absent or truncated depending on the devices used.
*/
#define FALCON_NVCONFIG_END 0x400U
#define FALCON_FLASH_BOOTCODE_START 0x8000U
#define FALCON_EEPROM_BOOTCONFIG_START 0x800U
#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U
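/* Illustrative sketch, not part of the original header: a bounds check
 * for the large-EEPROM boot config region laid out above.
 */
static inline bool falcon_eeprom_is_bootconfig(loff_t offset)
{
	return offset >= FALCON_EEPROM_BOOTCONFIG_START &&
	       offset < FALCON_EEPROM_BOOTCONFIG_END;
}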
#endif /* EFX_SPI_H */

View File

@ -437,6 +437,9 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
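/* A completion event that acknowledged more than one packet was a
 * merged completion; count it for the merged-TX-completion statistic.
 */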
if (pkts_compl > 1)
++tx_queue->merge_events;
/* See if we need to restart the netif queue. This memory
* barrier ensures that we write read_count (inside
* efx_dequeue_buffers()) before reading the queue status.