2018-08-09 11:59:11 +03:00
|
|
|
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
|
|
|
|
|
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
|
2015-07-29 23:33:46 +02:00
|
|
|
|
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
|
#include <linux/device.h>
|
|
|
|
|
#include <linux/export.h>
|
|
|
|
|
#include <linux/err.h>
|
|
|
|
|
#include <linux/if_link.h>
|
|
|
|
|
#include <linux/netdevice.h>
|
2016-04-14 18:19:29 +02:00
|
|
|
#include <linux/completion.h>
|
2015-07-29 23:33:48 +02:00
|
|
|
#include <linux/skbuff.h>
|
|
|
|
|
#include <linux/etherdevice.h>
|
|
|
|
|
#include <linux/types.h>
|
|
|
|
|
#include <linux/string.h>
|
|
|
|
|
#include <linux/gfp.h>
|
|
|
|
|
#include <linux/random.h>
|
|
|
|
|
#include <linux/jiffies.h>
|
|
|
|
|
#include <linux/mutex.h>
|
|
|
|
|
#include <linux/rcupdate.h>
|
|
|
|
|
#include <linux/slab.h>
|
2016-04-14 18:19:28 +02:00
|
|
|
#include <linux/workqueue.h>
|
2020-09-15 11:40:52 +03:00
|
|
|
#include <linux/firmware.h>
|
2015-07-29 23:33:48 +02:00
|
|
|
#include <asm/byteorder.h>
|
2016-02-26 17:32:26 +01:00
|
|
|
#include <net/devlink.h>
|
2016-07-12 18:05:04 +02:00
|
|
|
#include <trace/events/devlink.h>
|
2015-07-29 23:33:46 +02:00
|
|
|
|
|
|
|
|
#include "core.h"
|
2020-09-27 10:50:10 +03:00
|
|
|
#include "core_env.h"
|
2015-07-29 23:33:46 +02:00
|
|
|
#include "item.h"
|
|
|
|
|
#include "cmd.h"
|
|
|
|
|
#include "port.h"
|
|
|
|
|
#include "trap.h"
|
2015-07-29 23:33:48 +02:00
|
|
|
#include "emad.h"
|
|
|
|
|
#include "reg.h"
|
2016-10-21 16:07:23 +02:00
|
|
|
#include "resources.h"
|
2020-09-15 11:40:52 +03:00
|
|
|
#include "../mlxfw/mlxfw.h"
|
2015-07-29 23:33:46 +02:00
|
|
|
|
|
|
|
|
/* List of registered switch drivers, protected by its spinlock. */
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

/* General-purpose workqueues used by the core; creation is outside this
 * chunk. NOTE(review): mlxsw_owq presumably an ordered workqueue — confirm
 * at the allocation site.
 */
static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;
|
2016-04-14 18:19:28 +02:00
|
|
|
|
2016-10-28 21:35:55 +02:00
|
|
|
/* Per-port context kept by the core. A slot counts as "in use" only while
 * port_driver_priv is non-NULL (see mlxsw_core_port_check()).
 */
struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;	/* driver-owned per-port state */
	u8 local_port;		/* 1-based device-local port number */
};
|
|
|
|
|
|
|
|
|
|
/* Return the driver-private state attached to a core port. */
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);
|
|
|
|
|
|
|
|
|
|
static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
|
|
|
|
|
{
|
|
|
|
|
return mlxsw_core_port->port_driver_priv != NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2015-07-29 23:33:46 +02:00
|
|
|
/* Main per-device context of the mlxsw core. Allocated together with the
 * trailing driver-private area (driver_priv[]), which must remain the
 * last member.
 */
struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	/* Dedicated workqueue for EMAD timeout work items. Kept separate
	 * from the general mlxsw workqueue so that waiting on an EMAD never
	 * flushes a work item queued on the same workqueue it runs on,
	 * which could deadlock with a single active worker or a rescuer.
	 */
	struct workqueue_struct *emad_wq;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;			/* next transaction ID */
		struct list_head trans_list;	/* in-flight transactions */
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
		bool enable_string_tlv;	/* request FW error strings */
	} emad;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_core_port *ports;	/* array of max_ports entries */
	unsigned int max_ports;
	atomic_t active_ports_count;	/* occupancy of "ports" resource */
	bool fw_flash_in_progress;	/* lengthens EMAD timeouts */
	struct {
		struct devlink_health_reporter *fw_fatal;
	} health;
	struct mlxsw_env *env;
	bool is_initialized; /* Denotes if core was already initialized. */
	unsigned long driver_priv[];
	/* driver_priv has to be always the last item */
};
|
|
|
|
|
|
2017-03-24 08:02:48 +01:00
|
|
|
#define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
|
|
|
|
|
|
2021-01-21 15:10:23 +02:00
|
|
|
static u64 mlxsw_ports_occ_get(void *priv)
|
2017-03-24 08:02:48 +01:00
|
|
|
{
|
2021-01-21 15:10:23 +02:00
|
|
|
struct mlxsw_core *mlxsw_core = priv;
|
|
|
|
|
|
|
|
|
|
return atomic_read(&mlxsw_core->active_ports_count);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int mlxsw_core_resources_ports_register(struct mlxsw_core *mlxsw_core)
|
|
|
|
|
{
|
|
|
|
|
struct devlink *devlink = priv_to_devlink(mlxsw_core);
|
|
|
|
|
struct devlink_resource_size_params ports_num_params;
|
|
|
|
|
u32 max_ports;
|
|
|
|
|
|
|
|
|
|
max_ports = mlxsw_core->max_ports - 1;
|
|
|
|
|
devlink_resource_size_params_init(&ports_num_params, max_ports,
|
|
|
|
|
max_ports, 1,
|
|
|
|
|
DEVLINK_RESOURCE_UNIT_ENTRY);
|
|
|
|
|
|
|
|
|
|
return devlink_resource_register(devlink,
|
|
|
|
|
DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
|
|
|
|
|
max_ports, MLXSW_CORE_RESOURCE_PORTS,
|
|
|
|
|
DEVLINK_RESOURCE_ID_PARENT_TOP,
|
|
|
|
|
&ports_num_params);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Allocate the per-port array (slot 0 unused; ports are numbered from 1)
 * and hook up the devlink "ports" resource occupancy getter. The resource
 * itself is only (re-)registered on a fresh init, not on reload.
 */
static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	unsigned int max_ports;
	int err;

	/* Switch ports are numbered from 1 to queried value */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
		max_ports = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SYSTEM_PORT);
	else
		max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT;
	mlxsw_core->max_ports = max_ports + 1;

	mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
				    sizeof(struct mlxsw_core_port), GFP_KERNEL);
	if (!mlxsw_core->ports)
		return -ENOMEM;

	if (!reload) {
		err = mlxsw_core_resources_ports_register(mlxsw_core);
		if (err)
			goto err_resources_ports_register;
	}
	atomic_set(&mlxsw_core->active_ports_count, 0);
	devlink_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
					  mlxsw_ports_occ_get, mlxsw_core);

	return 0;

err_resources_ports_register:
	kfree(mlxsw_core->ports);
	return err;
}
|
|
|
|
|
|
2021-01-21 15:10:23 +02:00
|
|
|
/* Tear down what mlxsw_ports_init() set up. The "ports" devlink resource
 * is only unregistered on a full removal, not on reload.
 */
static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	devlink_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
	/* Use the devlink local consistently instead of re-deriving it. */
	if (!reload)
		devlink_resources_unregister(devlink, NULL);

	kfree(mlxsw_core->ports);
}
|
|
|
|
|
|
|
|
|
|
/* Upper bound on local port numbers; valid ports are 1..max_ports-1
 * since port numbering is 1-based (see mlxsw_ports_init()).
 */
unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->max_ports;
}
EXPORT_SYMBOL(mlxsw_core_max_ports);
|
|
|
|
|
|
2016-04-08 19:11:23 +02:00
|
|
|
/* Return the driver-private area allocated at the tail of mlxsw_core. */
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);
|
|
|
|
|
|
2019-05-18 18:58:28 +03:00
|
|
|
/* Whether the registered driver opted in to firmware resource querying. */
bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver->res_query_enabled;
}
EXPORT_SYMBOL(mlxsw_core_res_query_enabled);
|
|
|
|
|
|
2020-09-27 10:50:11 +03:00
|
|
|
/* Whether the registered driver opted in to temperature warning support. */
bool mlxsw_core_temp_warn_enabled(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver->temp_warn_enabled;
}
|
|
|
|
|
|
2019-10-06 09:34:51 +03:00
|
|
|
bool
|
|
|
|
|
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
|
|
|
|
|
const struct mlxsw_fw_rev *req_rev)
|
|
|
|
|
{
|
|
|
|
|
return rev->minor > req_rev->minor ||
|
|
|
|
|
(rev->minor == req_rev->minor &&
|
|
|
|
|
rev->subminor >= req_rev->subminor);
|
|
|
|
|
}
|
|
|
|
|
EXPORT_SYMBOL(mlxsw_core_fw_rev_minor_subminor_validate);
|
|
|
|
|
|
2015-07-29 23:33:46 +02:00
|
|
|
/* A registered Rx (trap) listener together with its private context. */
struct mlxsw_rx_listener_item {
	struct list_head list;		/* node in core->rx_listener_list */
	struct mlxsw_rx_listener rxl;
	void *priv;
	bool enabled;	/* NOTE(review): gating happens outside this chunk */
};
|
|
|
|
|
|
2015-07-29 23:33:48 +02:00
|
|
|
/* A registered event listener together with its private context. */
struct mlxsw_event_listener_item {
	struct list_head list;		/* node in core->event_listener_list */
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_event_listener el;
	void *priv;
};
|
|
|
|
|
|
|
|
|
|
/******************
 * EMAD processing
 ******************/

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
|
|
|
|
|
|
2019-11-12 08:48:26 +02:00
|
|
|
/* emad_string_tlv_type
 * Type of the TLV.
 * Must be set to 0x2 (string TLV).
 */
MLXSW_ITEM32(emad, string_tlv, type, 0x00, 27, 5);

/* emad_string_tlv_len
 * Length of the string TLV in u32.
 */
MLXSW_ITEM32(emad, string_tlv, len, 0x00, 16, 11);

#define MLXSW_EMAD_STRING_TLV_STRING_LEN 128

/* emad_string_tlv_string
 * String provided by the device's firmware in case of erroneous register access
 */
MLXSW_ITEM_BUF(emad, string_tlv, string, 0x04,
	       MLXSW_EMAD_STRING_TLV_STRING_LEN);
|
|
|
|
|
|
2015-07-29 23:33:48 +02:00
|
|
|
/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32, including the one-u32 TLV header.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
|
|
|
|
|
|
|
|
|
|
/* Direction of an EMAD register access. */
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};
|
|
|
|
|
|
|
|
|
|
/* Human-readable name of a register access type, for diagnostics.
 * Deliberately no default case: the compiler then warns when a new enum
 * value is added but not handled. Falling off the switch is a bug.
 */
static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}
|
|
|
|
|
|
|
|
|
|
/* Fill in the fixed end TLV that terminates every EMAD. */
static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}
|
|
|
|
|
|
|
|
|
|
/* Pack a register TLV: a one-u32 header followed by the register payload.
 * The TLV length is in u32 units and includes the header, hence the +1.
 */
static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}
|
|
|
|
|
|
2019-11-12 08:48:29 +02:00
|
|
|
/* Fill in an empty string TLV; the firmware populates the string itself
 * in the response on error.
 */
static void mlxsw_emad_pack_string_tlv(char *string_tlv)
{
	mlxsw_emad_string_tlv_type_set(string_tlv, MLXSW_EMAD_TLV_TYPE_STRING);
	mlxsw_emad_string_tlv_len_set(string_tlv, MLXSW_EMAD_STRING_TLV_LEN);
}
|
|
|
|
|
|
2015-07-29 23:33:48 +02:00
|
|
|
static void mlxsw_emad_pack_op_tlv(char *op_tlv,
|
|
|
|
|
const struct mlxsw_reg_info *reg,
|
|
|
|
|
enum mlxsw_core_reg_access_type type,
|
2016-04-14 18:19:29 +02:00
|
|
|
u64 tid)
|
2015-07-29 23:33:48 +02:00
|
|
|
{
|
|
|
|
|
mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
|
|
|
|
|
mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
|
|
|
|
|
mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
|
|
|
|
|
mlxsw_emad_op_tlv_status_set(op_tlv, 0);
|
|
|
|
|
mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
|
|
|
|
|
mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
|
2015-10-28 10:17:03 +01:00
|
|
|
if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
|
2015-07-29 23:33:48 +02:00
|
|
|
mlxsw_emad_op_tlv_method_set(op_tlv,
|
|
|
|
|
MLXSW_EMAD_OP_TLV_METHOD_QUERY);
|
|
|
|
|
else
|
|
|
|
|
mlxsw_emad_op_tlv_method_set(op_tlv,
|
|
|
|
|
MLXSW_EMAD_OP_TLV_METHOD_WRITE);
|
|
|
|
|
mlxsw_emad_op_tlv_class_set(op_tlv,
|
|
|
|
|
MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
|
2016-04-14 18:19:29 +02:00
|
|
|
mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
|
2015-07-29 23:33:48 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Prepend the EMAD Ethernet header (fixed DMAC/SMAC, EMAD ethertype,
 * Mellanox protocol and version) to @skb. Always returns 0.
 */
static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}
|
|
|
|
|
|
|
|
|
|
/* Build a complete EMAD request in @skb. TLVs are pushed in reverse wire
 * order (end, reg, optional string, op) because each skb_push() prepends;
 * the Ethernet header is constructed in front last.
 */
static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid, bool enable_string_tlv)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	if (enable_string_tlv) {
		buf = skb_push(skb, MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32));
		mlxsw_emad_pack_string_tlv(buf);
	}

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}
|
|
|
|
|
|
2019-11-12 08:48:24 +02:00
|
|
|
/* Byte offsets of the TLVs within a received EMAD, cached in skb->cb by
 * mlxsw_emad_tlv_parse(). string_tlv is 0 when no string TLV is present.
 */
struct mlxsw_emad_tlv_offsets {
	u16 op_tlv;
	u16 string_tlv;
	u16 reg_tlv;
};
|
|
|
|
|
|
2019-11-12 08:48:27 +02:00
|
|
|
static bool mlxsw_emad_tlv_is_string_tlv(const char *tlv)
|
|
|
|
|
{
|
|
|
|
|
u8 tlv_type = mlxsw_emad_string_tlv_type_get(tlv);
|
|
|
|
|
|
|
|
|
|
return tlv_type == MLXSW_EMAD_TLV_TYPE_STRING;
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-12 08:48:24 +02:00
|
|
|
/* Record the offsets of the operation, optional string and register TLVs
 * of a received EMAD in skb->cb for later direct access.
 */
static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
{
	struct mlxsw_emad_tlv_offsets *offsets =
		(struct mlxsw_emad_tlv_offsets *) skb->cb;

	offsets->op_tlv = MLXSW_EMAD_ETH_HDR_LEN;
	offsets->string_tlv = 0;
	offsets->reg_tlv = MLXSW_EMAD_ETH_HDR_LEN +
			   MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);

	/* If string TLV is present, it must come after the operation TLV. */
	if (mlxsw_emad_tlv_is_string_tlv(skb->data + offsets->reg_tlv)) {
		offsets->string_tlv = offsets->reg_tlv;
		offsets->reg_tlv += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
	}
}
|
|
|
|
|
|
2015-07-29 23:33:48 +02:00
|
|
|
static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
|
|
|
|
|
{
|
2019-11-12 08:48:24 +02:00
|
|
|
struct mlxsw_emad_tlv_offsets *offsets =
|
|
|
|
|
(struct mlxsw_emad_tlv_offsets *) skb->cb;
|
|
|
|
|
|
|
|
|
|
return ((char *) (skb->data + offsets->op_tlv));
|
2015-07-29 23:33:48 +02:00
|
|
|
}
|
|
|
|
|
|
2019-11-12 08:48:29 +02:00
|
|
|
static char *mlxsw_emad_string_tlv(const struct sk_buff *skb)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_emad_tlv_offsets *offsets =
|
|
|
|
|
(struct mlxsw_emad_tlv_offsets *) skb->cb;
|
|
|
|
|
|
|
|
|
|
if (!offsets->string_tlv)
|
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
return ((char *) (skb->data + offsets->string_tlv));
|
|
|
|
|
}
|
|
|
|
|
|
2015-07-29 23:33:48 +02:00
|
|
|
static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
|
|
|
|
|
{
|
2019-11-12 08:48:24 +02:00
|
|
|
struct mlxsw_emad_tlv_offsets *offsets =
|
|
|
|
|
(struct mlxsw_emad_tlv_offsets *) skb->cb;
|
|
|
|
|
|
|
|
|
|
return ((char *) (skb->data + offsets->reg_tlv));
|
2015-07-29 23:33:48 +02:00
|
|
|
}
|
|
|
|
|
|
2019-11-12 08:48:24 +02:00
|
|
|
static char *mlxsw_emad_reg_payload(const char *reg_tlv)
|
2015-07-29 23:33:48 +02:00
|
|
|
{
|
2019-11-12 08:48:24 +02:00
|
|
|
return ((char *) (reg_tlv + sizeof(u32)));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static char *mlxsw_emad_reg_payload_cmd(const char *mbox)
|
|
|
|
|
{
|
|
|
|
|
return ((char *) (mbox + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
|
2015-07-29 23:33:48 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Extract the transaction ID from an EMAD's operation TLV. */
static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	return mlxsw_emad_op_tlv_tid_get(mlxsw_emad_op_tlv(skb));
}
|
|
|
|
|
|
|
|
|
|
static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
|
|
|
|
|
{
|
|
|
|
|
char *op_tlv;
|
|
|
|
|
|
|
|
|
|
op_tlv = mlxsw_emad_op_tlv(skb);
|
2015-10-28 10:17:03 +01:00
|
|
|
return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
|
2015-07-29 23:33:48 +02:00
|
|
|
}
|
|
|
|
|
|
2016-04-14 18:19:29 +02:00
|
|
|
/* Map the status field of a response's operation TLV to an errno and
 * report the raw status via @p_status. -EAGAIN statuses make the caller
 * retransmit; any other non-zero status is a fatal -EIO. The fatal cases
 * are listed explicitly for documentation even though they fall through
 * to default.
 */
static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}
|
|
|
|
|
|
2016-04-14 18:19:29 +02:00
|
|
|
/* Convenience wrapper: process the status of the operation TLV inside a
 * received response skb.
 */
static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}
|
|
|
|
|
|
|
|
|
|
/* State of one in-flight EMAD register transaction. */
struct mlxsw_reg_trans {
	struct list_head list;		/* node in core->emad.trans_list */
	struct list_head bulk_list;	/* node in a caller's bulk wait list */
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;		/* original EMAD; copied per transmit */
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;	/* retransmit timer on emad_wq */
	unsigned int retries;		/* retransmissions performed so far */
	u64 tid;			/* ID used to match the response */
	struct completion completion;	/* signalled when transaction ends */
	atomic_t active;		/* race arbiter: response vs timeout */
	mlxsw_reg_trans_cb_t *cb;	/* optional payload callback */
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;			/* final result handed to the waiter */
	char *emad_err_string;		/* FW-provided error text, if any */
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};
|
|
|
|
|
|
2019-11-12 08:48:29 +02:00
|
|
|
static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb,
|
|
|
|
|
struct mlxsw_reg_trans *trans)
|
|
|
|
|
{
|
|
|
|
|
char *string_tlv;
|
|
|
|
|
char *string;
|
|
|
|
|
|
|
|
|
|
string_tlv = mlxsw_emad_string_tlv(skb);
|
|
|
|
|
if (!string_tlv)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
trans->emad_err_string = kzalloc(MLXSW_EMAD_STRING_TLV_STRING_LEN,
|
|
|
|
|
GFP_ATOMIC);
|
|
|
|
|
if (!trans->emad_err_string)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
string = mlxsw_emad_string_tlv_string_data(string_tlv);
|
|
|
|
|
strlcpy(trans->emad_err_string, string,
|
|
|
|
|
MLXSW_EMAD_STRING_TLV_STRING_LEN);
|
|
|
|
|
}
|
|
|
|
|
|
2018-12-18 15:59:20 +00:00
|
|
|
#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000
|
|
|
|
|
#define MLXSW_EMAD_TIMEOUT_MS 200
|
2016-04-14 18:19:29 +02:00
|
|
|
|
|
|
|
|
static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
|
|
|
|
|
{
|
|
|
|
|
unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
|
|
|
|
|
|
2018-12-18 15:59:20 +00:00
|
|
|
if (trans->core->fw_flash_in_progress)
|
|
|
|
|
timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
|
|
|
|
|
|
2020-11-17 19:33:52 +02:00
|
|
|
queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw,
|
|
|
|
|
timeout << trans->retries);
|
2015-07-29 23:33:48 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Transmit a copy of the transaction's EMAD and arm its timeout. The
 * original tx_skb is kept so it can be copied again on retransmission.
 */
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	/* 'active' is decremented by both the timeout work and the response
	 * handler; whoever brings it to zero decides the transaction's fate.
	 */
	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}
|
2015-07-29 23:33:48 +02:00
|
|
|
|
2016-04-14 18:19:29 +02:00
|
|
|
/* Complete a transaction: free its template skb, unlink it from the EMAD
 * transaction list and wake the waiter with @err as the result.
 */
static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}
|
|
|
|
|
|
|
|
|
|
/* Retry a transaction that timed out or got a retryable status. Gives up
 * with -EIO after MLXSW_EMAD_MAX_RETRY retransmissions.
 */
static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;

		/* Transmit failed after 'active' was re-armed; finish the
		 * transaction only if no concurrent response or timeout
		 * already claimed it.
		 */
		if (!atomic_dec_and_test(&trans->active))
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}
|
2015-07-29 23:33:48 +02:00
|
|
|
|
2016-04-14 18:19:29 +02:00
|
|
|
static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_reg_trans *trans = container_of(work,
|
|
|
|
|
struct mlxsw_reg_trans,
|
|
|
|
|
timeout_dw.work);
|
|
|
|
|
|
|
|
|
|
if (!atomic_dec_and_test(&trans->active))
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
mlxsw_emad_transmit_retry(trans->core, trans);
|
2015-07-29 23:33:48 +02:00
|
|
|
}
|
|
|
|
|
|
2016-04-14 18:19:29 +02:00
|
|
|
/* Handle a response EMAD matched to @trans. Retryable statuses trigger a
 * retransmission; otherwise the caller's callback gets the register
 * payload on success, the firmware error string is captured on failure,
 * and the transaction is completed.
 */
static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	/* Lost the race against the timeout work, which will retry. */
	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *reg_tlv = mlxsw_emad_reg_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(reg_tlv),
					  trans->reg->len, trans->cb_priv);
		} else {
			mlxsw_emad_process_string_tlv(skb, trans);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}
|
|
|
|
|
|
|
|
|
|
/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	/* Mirror the raw incoming EMAD to devlink tracing. */
	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	mlxsw_emad_tlv_parse(skb);

	/* Requests echoed back (or anything that is not a response) are
	 * simply dropped.
	 */
	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	/* Match the response to a pending transaction by transaction ID.
	 * The list is walked under the RCU read lock held by the caller.
	 */
	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}
|
|
|
|
|
|
2016-11-25 10:33:40 +01:00
|
|
|
/* RX listener delivering ETHEMAD trap packets to
 * mlxsw_emad_rx_listener_func().
 */
static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);
|
2015-07-29 23:33:48 +02:00
|
|
|
|
|
|
|
|
/* Initialize the EMAD (Ethernet Management Datagram) register-access
 * machinery: a dedicated workqueue for transaction timeouts, a random
 * transaction-ID prefix, the pending-transaction list, and the RX trap
 * that delivers responses. A dedicated workqueue is used for timeouts so
 * that waiting on an EMAD never flushes work on the same workqueue that
 * queued it (deadlock avoidance).
 *
 * Returns 0 on success or a negative errno; on failure everything set up
 * so far is unwound in reverse order.
 */
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	struct workqueue_struct *emad_wq;
	u64 tid;
	int err;

	/* Buses without TX/RX capability (e.g. command-interface only)
	 * cannot use EMADs at all.
	 */
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
	if (!emad_wq)
		return -ENOMEM;
	mlxsw_core->emad_wq = emad_wq;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
				       mlxsw_core);
	if (err)
		goto err_trap_register;

	err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;
	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
err_trap_register:
	destroy_workqueue(mlxsw_core->emad_wq);
	return err;
}
|
|
|
|
|
|
|
|
|
|
/* Tear down the EMAD machinery set up by mlxsw_emad_init().
 *
 * use_emad is cleared first so subsequent register accesses fall back to
 * the command interface, then the RX trap is unregistered and the
 * timeout workqueue destroyed.
 */
static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	/* Nothing was initialized for buses without TX/RX support. */
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core->emad.use_emad = false;
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
}
|
|
|
|
|
|
|
|
|
|
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
|
2019-11-12 08:48:29 +02:00
|
|
|
u16 reg_len, bool enable_string_tlv)
|
2015-07-29 23:33:48 +02:00
|
|
|
{
|
|
|
|
|
struct sk_buff *skb;
|
|
|
|
|
u16 emad_len;
|
|
|
|
|
|
|
|
|
|
emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
|
|
|
|
|
(MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
|
|
|
|
|
sizeof(u32) + mlxsw_core->driver->txhdr_len);
|
2019-11-12 08:48:29 +02:00
|
|
|
if (enable_string_tlv)
|
|
|
|
|
emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
|
2015-07-29 23:33:48 +02:00
|
|
|
if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
|
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
skb = netdev_alloc_skb(NULL, emad_len);
|
|
|
|
|
if (!skb)
|
|
|
|
|
return NULL;
|
|
|
|
|
memset(skb->data, 0, emad_len);
|
|
|
|
|
skb_reserve(skb, emad_len);
|
|
|
|
|
|
|
|
|
|
return skb;
|
|
|
|
|
}
|
|
|
|
|
|
2016-04-14 18:19:29 +02:00
|
|
|
/* Build and transmit one asynchronous EMAD register transaction.
 *
 * The transaction is linked on @bulk_list for the caller to wait on,
 * fully initialized (skb, timeout work, completion, callback), added to
 * the core's pending-transaction list under the list spinlock, and then
 * transmitted. On transmit failure all of that is rolled back and the
 * error returned. @cb, if non-NULL, is invoked from the response path
 * with the returned register payload.
 */
static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	bool enable_string_tlv;
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	/* Since this can be changed during emad_reg_access, read it once and
	 * use the value all the way.
	 */
	enable_string_tlv = mlxsw_core->emad.enable_string_tlv;

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len, enable_string_tlv);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid,
			     enable_string_tlv);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	/* Publish the transaction before transmitting so the RX path can
	 * match the response by TID.
	 */
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	/* Undo list insertions and free the skb we allocated. */
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}
|
|
|
|
|
|
2015-07-29 23:33:46 +02:00
|
|
|
/*****************
|
|
|
|
|
* Core functions
|
|
|
|
|
*****************/
|
|
|
|
|
|
|
|
|
|
/* Add @mlxsw_driver to the global driver list (protected by
 * mlxsw_core_driver_list_lock) so it can later be looked up by its
 * kind string. Always returns 0.
 */
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);
|
|
|
|
|
|
|
|
|
|
/* Remove @mlxsw_driver from the global driver list under the list lock.
 * Counterpart of mlxsw_core_driver_register().
 */
void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);
|
|
|
|
|
|
|
|
|
|
static struct mlxsw_driver *__driver_find(const char *kind)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver;
|
|
|
|
|
|
|
|
|
|
list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
|
|
|
|
|
if (strcmp(mlxsw_driver->kind, kind) == 0)
|
|
|
|
|
return mlxsw_driver;
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver;
|
|
|
|
|
|
|
|
|
|
spin_lock(&mlxsw_core_driver_list_lock);
|
|
|
|
|
mlxsw_driver = __driver_find(kind);
|
|
|
|
|
spin_unlock(&mlxsw_core_driver_list_lock);
|
|
|
|
|
return mlxsw_driver;
|
|
|
|
|
}
|
|
|
|
|
|
2020-09-15 11:40:52 +03:00
|
|
|
/* Context handed to the mlxfw flashing callbacks: the generic mlxfw
 * device embedded together with the owning mlxsw core instance, which
 * the callbacks recover via container_of().
 */
struct mlxsw_core_fw_info {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_core *mlxsw_core;
};
|
|
|
|
|
|
|
|
|
|
/* mlxfw callback: query the size/alignment limits of firmware component
 * @component_index through the MCQI register.
 */
static int mlxsw_core_fw_component_query(struct mlxfw_dev *mlxfw_dev,
					 u16 component_index, u32 *p_max_size,
					 u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, p_max_write_size);

	/* Enforce an alignment of at least 4 bytes (2 bits) and cap each
	 * write to what a single MCDA register access can carry.
	 */
	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size, MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}
|
|
|
|
|
|
|
|
|
|
/* mlxfw callback: acquire the firmware-flashing FSM.
 *
 * First queries MCC for the current handle and control state; unless
 * the FSM is idle, -EBUSY is returned. Otherwise the
 * LOCK_UPDATE_HANDLE instruction is issued with the handle returned in
 * *fwhandle.
 */
static int mlxsw_core_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}
|
|
|
|
|
|
|
|
|
|
static int mlxsw_core_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
|
|
|
|
|
u16 component_index, u32 component_size)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core_fw_info *mlxsw_core_fw_info =
|
|
|
|
|
container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
|
|
|
|
|
struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
|
|
|
|
|
char mcc_pl[MLXSW_REG_MCC_LEN];
|
|
|
|
|
|
|
|
|
|
mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
|
|
|
|
|
component_index, fwhandle, component_size);
|
|
|
|
|
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int mlxsw_core_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
|
|
|
|
|
u8 *data, u16 size, u32 offset)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core_fw_info *mlxsw_core_fw_info =
|
|
|
|
|
container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
|
|
|
|
|
struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
|
|
|
|
|
char mcda_pl[MLXSW_REG_MCDA_LEN];
|
|
|
|
|
|
|
|
|
|
mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
|
|
|
|
|
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcda), mcda_pl);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int mlxsw_core_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
|
|
|
|
|
u16 component_index)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core_fw_info *mlxsw_core_fw_info =
|
|
|
|
|
container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
|
|
|
|
|
struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
|
|
|
|
|
char mcc_pl[MLXSW_REG_MCC_LEN];
|
|
|
|
|
|
|
|
|
|
mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
|
|
|
|
|
component_index, fwhandle, 0);
|
|
|
|
|
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* mlxfw callback: activate the flashed firmware image via the MCC
 * ACTIVATE instruction.
 */
static int mlxsw_core_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, fwhandle, 0);
	return mlxsw_reg_write(fw_info->mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}
|
|
|
|
|
|
|
|
|
|
/* mlxfw callback: read the current FSM state and error code via MCC.
 * The device error code is clamped to MLXFW_FSM_STATE_ERR_MAX so that
 * unknown codes still map to a valid enum value.
 */
static int mlxsw_core_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
					 enum mlxfw_fsm_state *fsm_state,
					 enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}
|
|
|
|
|
|
|
|
|
|
/* mlxfw callback: cancel an in-progress firmware update via the MCC
 * CANCEL instruction. Best-effort cleanup path; the write's return
 * value is intentionally ignored.
 */
static void mlxsw_core_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
	mlxsw_reg_write(fw_info->mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}
|
|
|
|
|
|
|
|
|
|
/* mlxfw callback: release the firmware-flashing FSM handle via the MCC
 * RELEASE_UPDATE_HANDLE instruction. Counterpart of
 * mlxsw_core_fw_fsm_lock(); the write's return value is ignored.
 */
static void mlxsw_core_fw_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_core_fw_info *fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, fwhandle, 0);
	mlxsw_reg_write(fw_info->mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}
|
|
|
|
|
|
|
|
|
|
/* Glue between the generic mlxfw flashing flow and the MCQI/MCC/MCDA
 * register implementations above.
 */
static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = {
	.component_query = mlxsw_core_fw_component_query,
	.fsm_lock = mlxsw_core_fw_fsm_lock,
	.fsm_component_update = mlxsw_core_fw_fsm_component_update,
	.fsm_block_download = mlxsw_core_fw_fsm_block_download,
	.fsm_component_verify = mlxsw_core_fw_fsm_component_verify,
	.fsm_activate = mlxsw_core_fw_fsm_activate,
	.fsm_query_state = mlxsw_core_fw_fsm_query_state,
	.fsm_cancel = mlxsw_core_fw_fsm_cancel,
	.fsm_release = mlxsw_core_fw_fsm_release,
};
|
|
|
|
|
|
|
|
|
|
/* Flash @firmware onto the device using the common mlxfw flow, with a
 * stack-local context tying the mlxfw callbacks to this core instance.
 * fw_flash_in_progress is set for the duration of the operation.
 * NOTE(review): its consumers are not visible in this chunk —
 * presumably used elsewhere to adjust timeouts; confirm before relying
 * on it. Returns 0 or a negative errno from mlxfw_firmware_flash().
 */
static int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, const struct firmware *firmware,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_core_fw_info mlxsw_core_fw_info = {
		.mlxfw_dev = {
			.ops = &mlxsw_core_fw_mlxsw_dev_ops,
			.psid = mlxsw_core->bus_info->psid,
			.psid_size = strlen(mlxsw_core->bus_info->psid),
			.devlink = priv_to_devlink(mlxsw_core),
		},
		.mlxsw_core = mlxsw_core
	};
	int err;

	mlxsw_core->fw_flash_in_progress = true;
	err = mlxfw_firmware_flash(&mlxsw_core_fw_info.mlxfw_dev, firmware, extack);
	mlxsw_core->fw_flash_in_progress = false;

	return err;
}
|
|
|
|
|
|
|
|
|
|
/* Validate that the running firmware revision is compatible with the
 * driver and, when it is too old, flash the image named by @filename.
 *
 * Return values:
 *   0        - nothing to do (no requirement, policy is 'flash', FW is
 *              new enough, or FW was flashed but cannot be reset here)
 *   -EAGAIN  - firmware was flashed successfully and the caller should
 *              reset the device to activate it
 *   <0       - error
 */
static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_bus_info *mlxsw_bus_info,
				      const struct mlxsw_fw_rev *req_rev,
				      const char *filename)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_bus_info->fw_rev;
	union devlink_param_value value;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !filename)
		return 0;

	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
	err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_core),
						 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
						 &value);
	if (err)
		return err;
	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
		return 0;

	/* Validate driver & FW are compatible.
	 * A major-version mismatch cannot be fixed by flashing; it means
	 * the driver/firmware pairing itself is wrong.
	 */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
		return 0;

	dev_err(mlxsw_bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
		rev->major, rev->minor, rev->subminor, req_rev->major,
		req_rev->minor, req_rev->subminor);
	dev_info(mlxsw_bus_info->dev, "Flashing firmware using file %s\n", filename);

	err = request_firmware_direct(&firmware, filename, mlxsw_bus_info->dev);
	if (err) {
		dev_err(mlxsw_bus_info->dev, "Could not request firmware file %s\n", filename);
		return err;
	}

	err = mlxsw_core_fw_flash(mlxsw_core, firmware, NULL);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ? err : -EAGAIN;
	else
		return 0;
}
|
|
|
|
|
|
|
|
|
|
/* devlink flash_update worker: params->fw holds the firmware image
 * already supplied by devlink core, so just flash it.
 */
static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
				      struct devlink_flash_update_params *params,
				      struct netlink_ext_ack *extack)
{
	return mlxsw_core_fw_flash(mlxsw_core, params->fw, extack);
}
|
|
|
|
|
|
2020-09-15 11:40:54 +03:00
|
|
|
/* devlink param validator: only the 'driver' and 'flash' firmware load
 * policies are supported by mlxsw.
 */
static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
							    union devlink_param_value val,
							    struct netlink_ext_ack *extack)
{
	switch (val.vu8) {
	case DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER:
	case DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH:
		return 0;
	default:
		NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
		return -EINVAL;
	}
}
|
|
|
|
|
|
|
|
|
|
/* Generic devlink parameters owned by mlxsw core (currently only the
 * driverinit-time firmware load policy).
 */
static const struct devlink_param mlxsw_core_fw_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
			      mlxsw_core_devlink_param_fw_load_policy_validate),
};
|
|
|
|
|
|
|
|
|
|
/* Register core's devlink parameters and seed the fw_load_policy
 * driverinit default to 'driver'.
 *
 * Returns 0 on success or a negative errno; nothing is left registered
 * on failure.
 */
static int mlxsw_core_fw_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_core_fw_devlink_params,
				      ARRAY_SIZE(mlxsw_core_fw_devlink_params));
	if (err)
		return err;

	/* Default policy: flash only when the driver decides it is needed. */
	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, value);
	return 0;
}
|
|
|
|
|
|
|
|
|
|
static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
|
|
|
|
|
{
|
|
|
|
|
devlink_params_unregister(priv_to_devlink(mlxsw_core), mlxsw_core_fw_devlink_params,
|
|
|
|
|
ARRAY_SIZE(mlxsw_core_fw_devlink_params));
|
|
|
|
|
}
|
|
|
|
|
|
2016-02-26 17:32:27 +01:00
|
|
|
static int mlxsw_devlink_port_split(struct devlink *devlink,
|
|
|
|
|
unsigned int port_index,
|
2018-06-05 08:14:09 -07:00
|
|
|
unsigned int count,
|
|
|
|
|
struct netlink_ext_ack *extack)
|
2016-02-26 17:32:27 +01:00
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
|
|
|
|
|
|
2018-06-05 08:14:11 -07:00
|
|
|
if (port_index >= mlxsw_core->max_ports) {
|
|
|
|
|
NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
|
2016-02-26 17:32:27 +01:00
|
|
|
return -EINVAL;
|
2018-06-05 08:14:11 -07:00
|
|
|
}
|
2016-02-26 17:32:27 +01:00
|
|
|
if (!mlxsw_core->driver->port_split)
|
|
|
|
|
return -EOPNOTSUPP;
|
2018-06-05 08:14:11 -07:00
|
|
|
return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
|
|
|
|
|
extack);
|
2016-02-26 17:32:27 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
|
2018-06-05 08:14:09 -07:00
|
|
|
unsigned int port_index,
|
|
|
|
|
struct netlink_ext_ack *extack)
|
2016-02-26 17:32:27 +01:00
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
|
|
|
|
|
|
2018-06-05 08:14:11 -07:00
|
|
|
if (port_index >= mlxsw_core->max_ports) {
|
|
|
|
|
NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
|
2016-02-26 17:32:27 +01:00
|
|
|
return -EINVAL;
|
2018-06-05 08:14:11 -07:00
|
|
|
}
|
2016-02-26 17:32:27 +01:00
|
|
|
if (!mlxsw_core->driver->port_unsplit)
|
|
|
|
|
return -EOPNOTSUPP;
|
2018-06-05 08:14:11 -07:00
|
|
|
return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
|
|
|
|
|
extack);
|
2016-02-26 17:32:27 +01:00
|
|
|
}
|
|
|
|
|
|
2016-04-14 18:19:15 +02:00
|
|
|
static int
|
|
|
|
|
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
|
|
|
|
|
unsigned int sb_index, u16 pool_index,
|
|
|
|
|
struct devlink_sb_pool_info *pool_info)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
|
|
|
|
|
if (!mlxsw_driver->sb_pool_get)
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
|
|
|
|
|
pool_index, pool_info);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
|
|
|
|
|
unsigned int sb_index, u16 pool_index, u32 size,
|
2019-04-22 12:08:39 +00:00
|
|
|
enum devlink_sb_threshold_type threshold_type,
|
|
|
|
|
struct netlink_ext_ack *extack)
|
2016-04-14 18:19:15 +02:00
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
|
|
|
|
|
if (!mlxsw_driver->sb_pool_set)
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
|
2019-04-22 12:08:41 +00:00
|
|
|
pool_index, size, threshold_type,
|
|
|
|
|
extack);
|
2016-04-14 18:19:15 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Map a devlink_port back to the mlxsw_core_port that embeds it. */
static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}
|
|
|
|
|
|
2016-10-28 21:35:58 +02:00
|
|
|
static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
|
|
|
|
|
enum devlink_port_type port_type)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
|
|
|
|
|
|
|
|
|
|
if (!mlxsw_driver->port_type_set)
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
|
|
return mlxsw_driver->port_type_set(mlxsw_core,
|
|
|
|
|
mlxsw_core_port->local_port,
|
|
|
|
|
port_type);
|
|
|
|
|
}
|
|
|
|
|
|
2016-04-14 18:19:15 +02:00
|
|
|
static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
|
|
|
|
|
unsigned int sb_index, u16 pool_index,
|
|
|
|
|
u32 *p_threshold)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
|
|
|
|
|
|
2016-10-28 21:35:55 +02:00
|
|
|
if (!mlxsw_driver->sb_port_pool_get ||
|
|
|
|
|
!mlxsw_core_port_check(mlxsw_core_port))
|
2016-04-14 18:19:15 +02:00
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
|
|
|
|
|
pool_index, p_threshold);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
|
|
|
|
|
unsigned int sb_index, u16 pool_index,
|
2019-04-22 12:08:39 +00:00
|
|
|
u32 threshold,
|
|
|
|
|
struct netlink_ext_ack *extack)
|
2016-04-14 18:19:15 +02:00
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
|
|
|
|
|
|
2016-10-28 21:35:55 +02:00
|
|
|
if (!mlxsw_driver->sb_port_pool_set ||
|
|
|
|
|
!mlxsw_core_port_check(mlxsw_core_port))
|
2016-04-14 18:19:15 +02:00
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
|
2019-04-22 12:08:41 +00:00
|
|
|
pool_index, threshold, extack);
|
2016-04-14 18:19:15 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
|
|
|
|
|
unsigned int sb_index, u16 tc_index,
|
|
|
|
|
enum devlink_sb_pool_type pool_type,
|
|
|
|
|
u16 *p_pool_index, u32 *p_threshold)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
|
|
|
|
|
|
2016-10-28 21:35:55 +02:00
|
|
|
if (!mlxsw_driver->sb_tc_pool_bind_get ||
|
|
|
|
|
!mlxsw_core_port_check(mlxsw_core_port))
|
2016-04-14 18:19:15 +02:00
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
|
|
|
|
|
tc_index, pool_type,
|
|
|
|
|
p_pool_index, p_threshold);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
|
|
|
|
|
unsigned int sb_index, u16 tc_index,
|
|
|
|
|
enum devlink_sb_pool_type pool_type,
|
2019-04-22 12:08:39 +00:00
|
|
|
u16 pool_index, u32 threshold,
|
|
|
|
|
struct netlink_ext_ack *extack)
|
2016-04-14 18:19:15 +02:00
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
|
|
|
|
|
|
2016-10-28 21:35:55 +02:00
|
|
|
if (!mlxsw_driver->sb_tc_pool_bind_set ||
|
|
|
|
|
!mlxsw_core_port_check(mlxsw_core_port))
|
2016-04-14 18:19:15 +02:00
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
|
|
|
|
|
tc_index, pool_type,
|
2019-04-22 12:08:41 +00:00
|
|
|
pool_index, threshold, extack);
|
2016-04-14 18:19:15 +02:00
|
|
|
}
|
|
|
|
|
|
2016-04-14 18:19:25 +02:00
|
|
|
static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
|
|
|
|
|
unsigned int sb_index)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
|
|
|
|
|
if (!mlxsw_driver->sb_occ_snapshot)
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
|
|
|
|
|
unsigned int sb_index)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
|
|
|
|
|
if (!mlxsw_driver->sb_occ_max_clear)
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
|
|
|
|
|
unsigned int sb_index, u16 pool_index,
|
|
|
|
|
u32 *p_cur, u32 *p_max)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
|
|
|
|
|
|
2016-10-28 21:35:55 +02:00
|
|
|
if (!mlxsw_driver->sb_occ_port_pool_get ||
|
|
|
|
|
!mlxsw_core_port_check(mlxsw_core_port))
|
2016-04-14 18:19:25 +02:00
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
|
|
|
|
|
pool_index, p_cur, p_max);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
|
|
|
|
|
unsigned int sb_index, u16 tc_index,
|
|
|
|
|
enum devlink_sb_pool_type pool_type,
|
|
|
|
|
u32 *p_cur, u32 *p_max)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
|
|
|
|
|
|
2016-10-28 21:35:55 +02:00
|
|
|
if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
|
|
|
|
|
!mlxsw_core_port_check(mlxsw_core_port))
|
2016-04-14 18:19:25 +02:00
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
|
|
|
|
|
sb_index, tc_index,
|
|
|
|
|
pool_type, p_cur, p_max);
|
|
|
|
|
}
|
|
|
|
|
|
2019-04-08 06:59:35 +00:00
|
|
|
static int
|
|
|
|
|
mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
|
|
|
|
|
struct netlink_ext_ack *extack)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
|
|
|
|
|
char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
|
|
|
|
|
u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
|
|
|
|
|
char mgir_pl[MLXSW_REG_MGIR_LEN];
|
|
|
|
|
char buf[32];
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
err = devlink_info_driver_name_put(req,
|
|
|
|
|
mlxsw_core->bus_info->device_kind);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
mlxsw_reg_mgir_pack(mgir_pl);
|
|
|
|
|
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, fw_info_psid, &fw_major,
|
|
|
|
|
&fw_minor, &fw_sub_minor);
|
|
|
|
|
|
|
|
|
|
sprintf(buf, "%X", hw_rev);
|
|
|
|
|
err = devlink_info_version_fixed_put(req, "hw.revision", buf);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
err = devlink_info_version_fixed_put(req, "fw.psid", fw_info_psid);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
sprintf(buf, "%d.%d.%d", fw_major, fw_minor, fw_sub_minor);
|
|
|
|
|
err = devlink_info_version_running_put(req, "fw.version", buf);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2019-09-12 10:49:45 +02:00
|
|
|
/* devlink reload_down op: tear the device down in preparation for a
 * reload. Only supported when the underlying bus can reset the device;
 * the "reload" flavor of unregister keeps devlink-related state alive
 * for the subsequent reload_up.
 */
static int
mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
					  bool netns_change, enum devlink_reload_action action,
					  enum devlink_reload_limit limit,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
		return -EOPNOTSUPP;

	mlxsw_core_bus_device_unregister(mlxsw_core, true);
	return 0;
}
|
|
|
|
|
|
|
|
|
|
/* devlink reload_up op: bring the device back after reload_down.
 * Re-registering after a device reset both re-initializes the driver
 * and activates pending firmware, hence both actions are reported as
 * performed regardless of which one was requested.
 */
static int
mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_reload_action action,
					enum devlink_reload_limit limit, u32 *actions_performed,
					struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
			     BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
	return mlxsw_core_bus_device_register(mlxsw_core->bus_info,
					      mlxsw_core->bus,
					      mlxsw_core->bus_priv, true,
					      devlink, extack);
}
|
|
|
|
|
|
2019-06-04 15:40:37 +02:00
|
|
|
/* devlink flash_update op: unwrap the devlink instance and hand off to
 * the core flashing helper.
 */
static int mlxsw_devlink_flash_update(struct devlink *devlink,
				      struct devlink_flash_update_params *params,
				      struct netlink_ext_ack *extack)
{
	return mlxsw_core_fw_flash_update(devlink_priv(devlink), params,
					  extack);
}
|
|
|
|
|
|
2019-08-21 10:19:35 +03:00
|
|
|
static int mlxsw_devlink_trap_init(struct devlink *devlink,
|
|
|
|
|
const struct devlink_trap *trap,
|
|
|
|
|
void *trap_ctx)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
|
|
|
|
|
if (!mlxsw_driver->trap_init)
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
return mlxsw_driver->trap_init(mlxsw_core, trap, trap_ctx);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void mlxsw_devlink_trap_fini(struct devlink *devlink,
|
|
|
|
|
const struct devlink_trap *trap,
|
|
|
|
|
void *trap_ctx)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
|
|
|
|
|
if (!mlxsw_driver->trap_fini)
|
|
|
|
|
return;
|
|
|
|
|
mlxsw_driver->trap_fini(mlxsw_core, trap, trap_ctx);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int mlxsw_devlink_trap_action_set(struct devlink *devlink,
|
|
|
|
|
const struct devlink_trap *trap,
|
2020-08-03 19:11:34 +03:00
|
|
|
enum devlink_trap_action action,
|
|
|
|
|
struct netlink_ext_ack *extack)
|
2019-08-21 10:19:35 +03:00
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
|
|
|
|
|
if (!mlxsw_driver->trap_action_set)
|
|
|
|
|
return -EOPNOTSUPP;
|
2020-08-03 19:11:34 +03:00
|
|
|
return mlxsw_driver->trap_action_set(mlxsw_core, trap, action, extack);
|
2019-08-21 10:19:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
mlxsw_devlink_trap_group_init(struct devlink *devlink,
|
|
|
|
|
const struct devlink_trap_group *group)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
|
|
|
|
|
if (!mlxsw_driver->trap_group_init)
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
return mlxsw_driver->trap_group_init(mlxsw_core, group);
|
|
|
|
|
}
|
|
|
|
|
|
2020-03-30 22:38:31 +03:00
|
|
|
static int
|
|
|
|
|
mlxsw_devlink_trap_group_set(struct devlink *devlink,
|
|
|
|
|
const struct devlink_trap_group *group,
|
2020-08-03 19:11:34 +03:00
|
|
|
const struct devlink_trap_policer *policer,
|
|
|
|
|
struct netlink_ext_ack *extack)
|
2020-03-30 22:38:31 +03:00
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
|
|
|
|
|
if (!mlxsw_driver->trap_group_set)
|
|
|
|
|
return -EOPNOTSUPP;
|
2020-08-03 19:11:34 +03:00
|
|
|
return mlxsw_driver->trap_group_set(mlxsw_core, group, policer, extack);
|
2020-03-30 22:38:31 +03:00
|
|
|
}
|
|
|
|
|
|
2020-03-30 22:38:28 +03:00
|
|
|
static int
|
|
|
|
|
mlxsw_devlink_trap_policer_init(struct devlink *devlink,
|
|
|
|
|
const struct devlink_trap_policer *policer)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
|
|
|
|
|
if (!mlxsw_driver->trap_policer_init)
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
return mlxsw_driver->trap_policer_init(mlxsw_core, policer);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
mlxsw_devlink_trap_policer_fini(struct devlink *devlink,
|
|
|
|
|
const struct devlink_trap_policer *policer)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
|
|
|
|
|
if (!mlxsw_driver->trap_policer_fini)
|
|
|
|
|
return;
|
|
|
|
|
mlxsw_driver->trap_policer_fini(mlxsw_core, policer);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
mlxsw_devlink_trap_policer_set(struct devlink *devlink,
|
|
|
|
|
const struct devlink_trap_policer *policer,
|
|
|
|
|
u64 rate, u64 burst,
|
|
|
|
|
struct netlink_ext_ack *extack)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
|
|
|
|
|
if (!mlxsw_driver->trap_policer_set)
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
return mlxsw_driver->trap_policer_set(mlxsw_core, policer, rate, burst,
|
|
|
|
|
extack);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
mlxsw_devlink_trap_policer_counter_get(struct devlink *devlink,
|
|
|
|
|
const struct devlink_trap_policer *policer,
|
|
|
|
|
u64 *p_drops)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
|
|
|
|
|
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
|
|
|
|
|
|
|
|
|
|
if (!mlxsw_driver->trap_policer_counter_get)
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
return mlxsw_driver->trap_policer_counter_get(mlxsw_core, policer,
|
|
|
|
|
p_drops);
|
|
|
|
|
}
|
|
|
|
|
|
2016-02-26 17:32:27 +01:00
|
|
|
/* devlink operations table for mlxsw devices; each callback is a thin
 * dispatcher into the bound mlxsw_driver's optional ops.
 */
static const struct devlink_ops mlxsw_devlink_ops = {
	.reload_actions		= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
				  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_down		= mlxsw_devlink_core_bus_device_reload_down,
	.reload_up		= mlxsw_devlink_core_bus_device_reload_up,
	.port_type_set			= mlxsw_devlink_port_type_set,
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
	.info_get			= mlxsw_devlink_info_get,
	.flash_update			= mlxsw_devlink_flash_update,
	.trap_init			= mlxsw_devlink_trap_init,
	.trap_fini			= mlxsw_devlink_trap_fini,
	.trap_action_set		= mlxsw_devlink_trap_action_set,
	.trap_group_init		= mlxsw_devlink_trap_group_init,
	.trap_group_set			= mlxsw_devlink_trap_group_set,
	.trap_policer_init		= mlxsw_devlink_trap_policer_init,
	.trap_policer_fini		= mlxsw_devlink_trap_policer_fini,
	.trap_policer_set		= mlxsw_devlink_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_devlink_trap_policer_counter_get,
};
|
|
|
|
|
|
2020-09-15 11:40:53 +03:00
|
|
|
/* Register core-owned devlink params followed by the driver's own
 * (optional) params. On driver failure the core params are unwound so
 * the caller sees all-or-nothing registration.
 */
static int mlxsw_core_params_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_core_fw_params_register(mlxsw_core);
	if (err)
		return err;

	if (mlxsw_core->driver->params_register) {
		err = mlxsw_core->driver->params_register(mlxsw_core);
		if (err)
			goto err_params_register;
	}
	return 0;

err_params_register:
	mlxsw_core_fw_params_unregister(mlxsw_core);
	return err;
}
|
|
|
|
|
|
|
|
|
|
static void mlxsw_core_params_unregister(struct mlxsw_core *mlxsw_core)
|
|
|
|
|
{
|
2020-09-15 11:40:54 +03:00
|
|
|
mlxsw_core_fw_params_unregister(mlxsw_core);
|
2020-09-15 11:40:53 +03:00
|
|
|
if (mlxsw_core->driver->params_register)
|
|
|
|
|
mlxsw_core->driver->params_unregister(mlxsw_core);
|
|
|
|
|
}
|
|
|
|
|
|
2020-09-15 11:40:58 +03:00
|
|
|
/* Deferred-work context for one firmware fatal (MFDE) event. The MFDE
 * register payload is copied in from the trap handler so the health
 * report can be generated later in process context.
 */
struct mlxsw_core_health_event {
	struct mlxsw_core *mlxsw_core;
	char mfde_pl[MLXSW_REG_MFDE_LEN];	/* copy of the MFDE payload */
	struct work_struct work;
};
|
|
|
|
|
|
|
|
|
|
static void mlxsw_core_health_event_work(struct work_struct *work)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_core_health_event *event;
|
|
|
|
|
struct mlxsw_core *mlxsw_core;
|
|
|
|
|
|
|
|
|
|
event = container_of(work, struct mlxsw_core_health_event, work);
|
|
|
|
|
mlxsw_core = event->mlxsw_core;
|
|
|
|
|
devlink_health_report(mlxsw_core->health.fw_fatal, "FW fatal event occurred",
|
|
|
|
|
event->mfde_pl);
|
|
|
|
|
kfree(event);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* MFDE event listener. Runs in a context where sleeping allocation is
 * not allowed (hence GFP_ATOMIC); the payload is copied and the actual
 * health report is deferred to a work item. A failed allocation
 * silently drops the event.
 */
static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
					    char *mfde_pl, void *priv)
{
	struct mlxsw_core_health_event *event;
	struct mlxsw_core *mlxsw_core = priv;

	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->mlxsw_core = mlxsw_core;
	memcpy(event->mfde_pl, mfde_pl, sizeof(event->mfde_pl));
	INIT_WORK(&event->work, mlxsw_core_health_event_work);
	mlxsw_core_schedule_work(&event->work);
}
|
|
|
|
|
|
|
|
|
|
/* Event listener registration for MFDE firmware fatal event traps. */
static const struct mlxsw_listener mlxsw_core_health_listener =
	MLXSW_EVENTL(mlxsw_core_health_listener_func, MFDE, MFDE);
|
|
|
|
|
/* Health reporter dump callback: decode the MFDE payload captured at
 * event time into a devlink fmsg. Only event-triggered dumps carry a
 * payload; user-triggered dumps have no context and are rejected.
 */
static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter,
					   struct devlink_fmsg *fmsg, void *priv_ctx,
					   struct netlink_ext_ack *extack)
{
	char *mfde_pl = priv_ctx;
	char *val_str;
	u8 event_id;
	u32 val;
	int err;

	if (!priv_ctx)
		/* User-triggered dumps are not possible */
		return -EOPNOTSUPP;

	val = mlxsw_reg_mfde_irisc_id_get(mfde_pl);
	err = devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val);
	if (err)
		return err;
	err = devlink_fmsg_arr_pair_nest_start(fmsg, "event");
	if (err)
		return err;

	event_id = mlxsw_reg_mfde_event_id_get(mfde_pl);
	err = devlink_fmsg_u32_pair_put(fmsg, "id", event_id);
	if (err)
		return err;
	/* Add a human-readable description for known event IDs;
	 * unknown IDs are still reported numerically above.
	 */
	switch (event_id) {
	case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
		val_str = "CR space timeout";
		break;
	case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
		val_str = "KVD insertion machine stopped";
		break;
	default:
		val_str = NULL;
	}
	if (val_str) {
		err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
		if (err)
			return err;
	}
	err = devlink_fmsg_arr_pair_nest_end(fmsg);
	if (err)
		return err;

	val = mlxsw_reg_mfde_method_get(mfde_pl);
	switch (val) {
	case MLXSW_REG_MFDE_METHOD_QUERY:
		val_str = "query";
		break;
	case MLXSW_REG_MFDE_METHOD_WRITE:
		val_str = "write";
		break;
	default:
		val_str = NULL;
	}
	if (val_str) {
		err = devlink_fmsg_string_pair_put(fmsg, "method", val_str);
		if (err)
			return err;
	}

	val = mlxsw_reg_mfde_long_process_get(mfde_pl);
	err = devlink_fmsg_bool_pair_put(fmsg, "long_process", val);
	if (err)
		return err;

	val = mlxsw_reg_mfde_command_type_get(mfde_pl);
	switch (val) {
	case MLXSW_REG_MFDE_COMMAND_TYPE_MAD:
		val_str = "mad";
		break;
	case MLXSW_REG_MFDE_COMMAND_TYPE_EMAD:
		val_str = "emad";
		break;
	case MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF:
		val_str = "cmdif";
		break;
	default:
		val_str = NULL;
	}
	if (val_str) {
		err = devlink_fmsg_string_pair_put(fmsg, "command_type", val_str);
		if (err)
			return err;
	}

	val = mlxsw_reg_mfde_reg_attr_id_get(mfde_pl);
	err = devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val);
	if (err)
		return err;

	/* Event-specific fields. */
	if (event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO) {
		val = mlxsw_reg_mfde_log_address_get(mfde_pl);
		err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
		if (err)
			return err;
		val = mlxsw_reg_mfde_log_id_get(mfde_pl);
		err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
		if (err)
			return err;
		val = mlxsw_reg_mfde_log_ip_get(mfde_pl);
		err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
		if (err)
			return err;
	} else if (event_id == MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP) {
		val = mlxsw_reg_mfde_pipes_mask_get(mfde_pl);
		err = devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
		if (err)
			return err;
	}

	return 0;
}
|
|
|
|
|
|
|
|
|
|
/* Health reporter self-test callback: ask the firmware to trigger a
 * test fatal event by setting the trigger bit in MFGD.
 */
static int
mlxsw_core_health_fw_fatal_test(struct devlink_health_reporter *reporter,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_health_reporter_priv(reporter);
	char mfgd_pl[MLXSW_REG_MFGD_LEN];
	int err;

	/* Read the register first to make sure no other bits are changed. */
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
	if (err)
		return err;
	mlxsw_reg_mfgd_trigger_test_set(mfgd_pl, true);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
}
|
|
|
|
|
|
|
|
|
|
/* Operations of the "fw_fatal" devlink health reporter. No .recover
 * callback: recovery is a full devlink reload, driven by user space.
 */
static const struct devlink_health_reporter_ops
mlxsw_core_health_fw_fatal_ops = {
	.name = "fw_fatal",
	.dump = mlxsw_core_health_fw_fatal_dump,
	.test = mlxsw_core_health_fw_fatal_test,
};
|
|
|
|
|
|
|
|
|
|
/* Enable or disable firmware fatal event reporting via the MFGD
 * register (read-modify-write to preserve unrelated bits).
 */
static int mlxsw_core_health_fw_fatal_config(struct mlxsw_core *mlxsw_core,
					     bool enable)
{
	char mfgd_pl[MLXSW_REG_MFGD_LEN];
	int err;

	/* Read the register first to make sure no other bits are changed. */
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
	if (err)
		return err;
	mlxsw_reg_mfgd_fatal_event_mode_set(mfgd_pl, enable);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
}
|
|
|
|
|
|
|
|
|
|
static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
|
|
|
|
|
{
|
|
|
|
|
struct devlink *devlink = priv_to_devlink(mlxsw_core);
|
|
|
|
|
struct devlink_health_reporter *fw_fatal;
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
if (!mlxsw_core->driver->fw_fatal_enabled)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
fw_fatal = devlink_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
|
|
|
|
|
0, mlxsw_core);
|
|
|
|
|
if (IS_ERR(fw_fatal)) {
|
|
|
|
|
dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter");
|
|
|
|
|
return PTR_ERR(fw_fatal);
|
|
|
|
|
}
|
|
|
|
|
mlxsw_core->health.fw_fatal = fw_fatal;
|
|
|
|
|
|
|
|
|
|
err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
|
|
|
|
|
if (err)
|
|
|
|
|
goto err_trap_register;
|
|
|
|
|
|
|
|
|
|
err = mlxsw_core_health_fw_fatal_config(mlxsw_core, true);
|
|
|
|
|
if (err)
|
|
|
|
|
goto err_fw_fatal_config;
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
err_fw_fatal_config:
|
|
|
|
|
mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
|
|
|
|
|
err_trap_register:
|
|
|
|
|
devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Tear down firmware-fatal health reporting; exact reverse of
 * mlxsw_core_health_init(), plus a flush of the ordered workqueue so no
 * event work can touch the reporter after it is destroyed.
 */
static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
{
	if (!mlxsw_core->driver->fw_fatal_enabled)
		return;

	mlxsw_core_health_fw_fatal_config(mlxsw_core, false);
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
	/* Make sure there is no more event work scheduled */
	mlxsw_core_flush_owq();
	devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
}
|
|
|
|
|
|
2018-12-03 07:59:01 +00:00
|
|
|
/* Core device bring-up. Allocates the devlink instance (unless this is
 * a devlink reload, in which case the caller passes the existing one),
 * initializes the bus, queries resources, and brings up every core
 * subsystem in dependency order: ports, LAG mapping, EMAD, devlink
 * registration/params, FW validation, health, driver, hwmon, thermal,
 * env. The error ladder unwinds in exact reverse order; steps guarded
 * by "!reload" on the way up are equally guarded on the way down.
 * Returns 0 or a negative errno (-EAGAIN after a FW flash, handled by
 * the caller).
 */
static int
__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				 const struct mlxsw_bus *mlxsw_bus,
				 void *bus_priv, bool reload,
				 struct devlink *devlink,
				 struct netlink_ext_ack *extack)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct mlxsw_res *res;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;

	if (!reload) {
		/* Driver private area lives right behind the core struct. */
		alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
		devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
		if (!devlink) {
			err = -ENOMEM;
			goto err_devlink_alloc;
		}
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	/* Only query device resources if the driver asked for them. */
	res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
	if (err)
		goto err_bus_init;

	if (mlxsw_driver->resources_register && !reload) {
		err = mlxsw_driver->resources_register(mlxsw_core);
		if (err)
			goto err_register_resources;
	}

	err = mlxsw_ports_init(mlxsw_core, reload);
	if (err)
		goto err_ports_init;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		/* One byte per (LAG, member) slot. */
		alloc_size = sizeof(u8) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	if (!reload) {
		err = devlink_register(devlink, mlxsw_bus_info->dev);
		if (err)
			goto err_devlink_register;
	}

	if (!reload) {
		err = mlxsw_core_params_register(mlxsw_core);
		if (err)
			goto err_register_params;
	}

	err = mlxsw_core_fw_rev_validate(mlxsw_core, mlxsw_bus_info, mlxsw_driver->fw_req_rev,
					 mlxsw_driver->fw_filename);
	if (err)
		goto err_fw_rev_validate;

	err = mlxsw_core_health_init(mlxsw_core);
	if (err)
		goto err_health_init;

	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
		if (err)
			goto err_driver_init;
	}

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	err = mlxsw_env_init(mlxsw_core, &mlxsw_core->env);
	if (err)
		goto err_env_init;

	mlxsw_core->is_initialized = true;
	devlink_params_publish(devlink);

	if (!reload)
		devlink_reload_enable(devlink);

	return 0;

err_env_init:
	mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
	mlxsw_core_health_fini(mlxsw_core);
err_health_init:
err_fw_rev_validate:
	if (!reload)
		mlxsw_core_params_unregister(mlxsw_core);
err_register_params:
	if (!reload)
		devlink_unregister(devlink);
err_devlink_register:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_ports_fini(mlxsw_core, reload);
err_ports_init:
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
err_register_resources:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	if (!reload)
		devlink_free(devlink);
err_devlink_alloc:
	return err;
}
|
2018-12-03 07:59:01 +00:00
|
|
|
|
|
|
|
|
/* Public entry point for bus device registration. Wraps
 * __mlxsw_core_bus_device_register() with a single retry: a FW flash
 * during registration surfaces as -EAGAIN and requires a reset, after
 * which registration is attempted once more.
 */
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv, bool reload,
				   struct devlink *devlink,
				   struct netlink_ext_ack *extack)
{
	bool called_again = false;
	int err;

again:
	err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus,
					       bus_priv, reload,
					       devlink, extack);
	/* -EAGAIN is returned in case the FW was updated. FW needs
	 * a reset, so lets try to call __mlxsw_core_bus_device_register()
	 * again.
	 */
	if (err == -EAGAIN && !called_again) {
		called_again = true;
		goto again;
	}

	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
|
|
|
|
|
|
2018-01-15 08:59:11 +01:00
|
|
|
/* Core device teardown; exact reverse of the bring-up in
 * __mlxsw_core_bus_device_register(). If a previous devlink reload
 * failed, most subsystems are already down, so only the remaining
 * devlink-level pieces are released (reload_fail_deinit path).
 */
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
				      bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	if (!reload)
		devlink_reload_disable(devlink);
	if (devlink_is_reload_failed(devlink)) {
		if (!reload)
			/* Only the parts that were not de-initialized in the
			 * failed reload attempt need to be de-initialized.
			 */
			goto reload_fail_deinit;
		else
			return;
	}

	devlink_params_unpublish(devlink);
	mlxsw_core->is_initialized = false;
	mlxsw_env_fini(mlxsw_core->env);
	mlxsw_thermal_fini(mlxsw_core->thermal);
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
	mlxsw_core_health_fini(mlxsw_core);
	if (!reload)
		mlxsw_core_params_unregister(mlxsw_core);
	if (!reload)
		devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	kfree(mlxsw_core->lag.mapping);
	mlxsw_ports_fini(mlxsw_core, reload);
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	if (!reload)
		devlink_free(devlink);

	return;

reload_fail_deinit:
	mlxsw_core_params_unregister(mlxsw_core);
	devlink_unregister(devlink);
	devlink_resources_unregister(devlink, NULL);
	devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
|
|
|
|
|
|
2016-04-08 19:11:22 +02:00
|
|
|
bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
|
2015-08-06 16:41:56 +02:00
|
|
|
const struct mlxsw_tx_info *tx_info)
|
|
|
|
|
{
|
|
|
|
|
return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
|
|
|
|
|
tx_info);
|
|
|
|
|
}
|
|
|
|
|
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
|
|
|
|
|
|
2016-04-08 19:11:22 +02:00
|
|
|
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
|
2015-07-29 23:33:46 +02:00
|
|
|
const struct mlxsw_tx_info *tx_info)
|
|
|
|
|
{
|
|
|
|
|
return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
|
|
|
|
|
tx_info);
|
|
|
|
|
}
|
|
|
|
|
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
|
|
|
|
|
|
mlxsw: pci: PTP: Hook into packet transmit path
On Spectrum-1, timestamps are delivered separately from the packets, and
need to paired up. Therefore, at some point after mlxsw_sp_port_xmit()
is invoked, it is necessary to involve the chip-specific driver code to
allow it to do the necessary bookkeeping and matching.
On Spectrum-2, timestamps are delivered in CQE. For that reason,
position the point of driver involvement into mlxsw_pci_cqe_sdq_handle()
to make it hopefully easier to extend for Spectrum-2 in the future.
To tell the driver what port the packet was sent on, keep tx_info
in SKB control buffer.
Introduce a new driver core interface mlxsw_core_ptp_transmitted(), a
driver callback ptp_transmitted, and a PTP op transmitted. The callee is
responsible for taking care of releasing the SKB passed to the new
interfaces, and correspondingly have the new stub callbacks just call
dev_kfree_skb_any().
Follow-up patches will introduce the actual content into
mlxsw_sp1_ptp_transmitted() in particular.
Signed-off-by: Petr Machata <petrm@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-06-30 09:04:53 +03:00
|
|
|
/* Notify the driver that a packet was transmitted, for PTP timestamp
 * bookkeeping. The callee owns and must release the skb.
 */
void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				struct sk_buff *skb, u8 local_port)
{
	const struct mlxsw_driver *driver = mlxsw_core->driver;

	if (!driver->ptp_transmitted)
		return;
	driver->ptp_transmitted(mlxsw_core, skb, local_port);
}
EXPORT_SYMBOL(mlxsw_core_ptp_transmitted);
|
|
|
|
|
|
2015-07-29 23:33:46 +02:00
|
|
|
static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
|
|
|
|
|
const struct mlxsw_rx_listener *rxl_b)
|
|
|
|
|
{
|
|
|
|
|
return (rxl_a->func == rxl_b->func &&
|
|
|
|
|
rxl_a->local_port == rxl_b->local_port &&
|
2020-07-14 17:21:06 +03:00
|
|
|
rxl_a->trap_id == rxl_b->trap_id &&
|
|
|
|
|
rxl_a->mirror_reason == rxl_b->mirror_reason);
|
2015-07-29 23:33:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct mlxsw_rx_listener_item *
|
|
|
|
|
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
|
2020-02-23 08:31:43 +01:00
|
|
|
const struct mlxsw_rx_listener *rxl)
|
2015-07-29 23:33:46 +02:00
|
|
|
{
|
|
|
|
|
struct mlxsw_rx_listener_item *rxl_item;
|
|
|
|
|
|
|
|
|
|
list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
|
2020-02-23 08:31:43 +01:00
|
|
|
if (__is_rx_listener_equal(&rxl_item->rxl, rxl))
|
2015-07-29 23:33:46 +02:00
|
|
|
return rxl_item;
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Register an RX listener. Duplicates are rejected with -EEXIST; the
 * new item is published to readers with list_add_rcu(). Returns 0,
 * -EEXIST or -ENOMEM.
 */
int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv, bool enabled)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;
	rxl_item->enabled = enabled;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
|
|
|
|
|
|
|
|
|
|
/* Unregister an RX listener. The item is removed with list_del_rcu()
 * and freed only after synchronize_rcu(), so concurrent RCU readers
 * traversing the list cannot touch freed memory.
 */
void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
|
|
|
|
|
|
2020-02-24 08:35:52 +01:00
|
|
|
/* Flip the enabled flag of an already-registered RX listener. WARNs if
 * the listener is not found, since callers only toggle listeners they
 * registered themselves.
 */
static void
mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_rx_listener *rxl,
				 bool enabled)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
	if (WARN_ON(!rxl_item))
		return;
	rxl_item->enabled = enabled;
}
|
|
|
|
|
|
2015-07-29 23:33:48 +02:00
|
|
|
/* RX-listener shim for event listeners: parse the EMAD TLVs out of the
 * received skb, reconstruct the register id/length and payload, and
 * invoke the user's event callback. Consumes the skb.
 */
static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_reg_info reg;
	char *payload;
	char *reg_tlv;
	char *op_tlv;

	mlxsw_core = event_listener_item->mlxsw_core;
	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	/* TLVs must be parsed before the op/reg accessors are valid. */
	mlxsw_emad_tlv_parse(skb);
	op_tlv = mlxsw_emad_op_tlv(skb);
	reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	/* TLV length is in dwords and includes the TLV header itself. */
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(reg_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}
|
|
|
|
|
|
|
|
|
|
static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
|
|
|
|
|
const struct mlxsw_event_listener *el_b)
|
|
|
|
|
{
|
|
|
|
|
return (el_a->func == el_b->func &&
|
|
|
|
|
el_a->trap_id == el_b->trap_id);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct mlxsw_event_listener_item *
|
|
|
|
|
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
|
2020-02-23 08:31:43 +01:00
|
|
|
const struct mlxsw_event_listener *el)
|
2015-07-29 23:33:48 +02:00
|
|
|
{
|
|
|
|
|
struct mlxsw_event_listener_item *el_item;
|
|
|
|
|
|
|
|
|
|
list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
|
2020-02-23 08:31:43 +01:00
|
|
|
if (__is_event_listener_equal(&el_item->el, el))
|
2015-07-29 23:33:48 +02:00
|
|
|
return el_item;
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Register an event listener by wrapping it in an always-enabled RX
 * listener keyed on the event's trap id. Returns 0, -EEXIST for a
 * duplicate, or -ENOMEM / the RX registration error.
 */
int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->mlxsw_core = mlxsw_core;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item, true);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);
|
|
|
|
|
|
|
|
|
|
/* Unregister an event listener: tear down the backing RX listener
 * (which waits for RCU readers) and then free the item. Silently
 * ignores listeners that were never registered.
 */
void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
|
|
|
|
|
|
2016-11-25 10:33:30 +01:00
|
|
|
/* Dispatch registration to the RX or event listener path according to
 * the listener type. Event listeners are always registered enabled.
 */
static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
					const struct mlxsw_listener *listener,
					void *priv, bool enabled)
{
	if (!listener->is_event)
		return mlxsw_core_rx_listener_register(mlxsw_core,
						       &listener->rx_listener,
						       priv, enabled);

	WARN_ON(!enabled);
	return mlxsw_core_event_listener_register(mlxsw_core,
						  &listener->event_listener,
						  priv);
}
|
|
|
|
|
|
|
|
|
|
static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
|
|
|
|
|
const struct mlxsw_listener *listener,
|
|
|
|
|
void *priv)
|
|
|
|
|
{
|
|
|
|
|
if (listener->is_event)
|
|
|
|
|
mlxsw_core_event_listener_unregister(mlxsw_core,
|
2020-02-23 08:31:43 +01:00
|
|
|
&listener->event_listener);
|
2016-11-25 10:33:30 +01:00
|
|
|
else
|
|
|
|
|
mlxsw_core_rx_listener_unregister(mlxsw_core,
|
2020-02-23 08:31:43 +01:00
|
|
|
&listener->rx_listener);
|
2016-11-25 10:33:30 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Register a trap: install the listener, then program the device's
 * packet-trap policy (HPKT) with the action/group matching the
 * listener's initial enabled state. On HPKT failure the listener is
 * rolled back. Returns 0 or a negative errno.
 */
int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_listener *listener, void *priv)
{
	enum mlxsw_reg_htgt_trap_group trap_group;
	enum mlxsw_reg_hpkt_action action;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	err = mlxsw_core_listener_register(mlxsw_core, listener, priv,
					   listener->enabled_on_register);
	if (err)
		return err;

	/* Pick action/group for the state the trap starts in. */
	action = listener->enabled_on_register ? listener->en_action :
						 listener->dis_action;
	trap_group = listener->enabled_on_register ? listener->en_trap_group :
						     listener->dis_trap_group;
	mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
			    trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_trap_set;

	return 0;

err_trap_set:
	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_trap_register);
|
|
|
|
|
|
|
|
|
|
/* Unregister a trap: for non-event traps first reprogram HPKT to the
 * disabled action/group (best-effort; write result intentionally
 * ignored on teardown), then remove the listener.
 */
void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
				const struct mlxsw_listener *listener,
				void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	if (!listener->is_event) {
		mlxsw_reg_hpkt_pack(hpkt_pl, listener->dis_action,
				    listener->trap_id, listener->dis_trap_group,
				    listener->is_ctrl);
		mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	}

	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);
|
|
|
|
|
|
2020-02-24 08:35:46 +01:00
|
|
|
/* Enable or disable an already-registered (non-event) trap: reprogram
 * HPKT with the matching action/group, then mirror the state onto the
 * RX listener so software-side filtering agrees with the device.
 * Returns 0, -EINVAL for event listeners, or the HPKT write error.
 */
int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
			      const struct mlxsw_listener *listener,
			      bool enabled)
{
	enum mlxsw_reg_htgt_trap_group trap_group;
	enum mlxsw_reg_hpkt_action action;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	/* Not supported for event listener */
	if (WARN_ON(listener->is_event))
		return -EINVAL;

	action = enabled ? listener->en_action : listener->dis_action;
	trap_group = enabled ? listener->en_trap_group :
			       listener->dis_trap_group;
	mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
			    trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		return err;

	mlxsw_core_rx_listener_state_set(mlxsw_core, &listener->rx_listener,
					 enabled);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_trap_state_set);
|
2019-08-21 10:19:31 +03:00
|
|
|
|
2016-04-14 18:19:29 +02:00
|
|
|
/* Allocate the next EMAD transaction id (atomic post-increment, so ids
 * are unique across concurrent callers).
 */
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}
|
|
|
|
|
|
2015-07-29 23:33:48 +02:00
|
|
|
/* Start an asynchronous EMAD register transaction. Allocates the
 * transaction object and hands it to the EMAD layer; on submit failure
 * the object is released via kfree_rcu() since the EMAD RX path may
 * already hold an RCU reference to it. Completion is awaited later via
 * the bulk_list (mlxsw_reg_trans_bulk_wait()).
 */
static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		kfree_rcu(trans, rcu);
		return err;
	}
	return 0;
}
|
2015-07-29 23:33:48 +02:00
|
|
|
|
2016-04-14 18:19:29 +02:00
|
|
|
/* Queue an asynchronous register query; completion is collected via
 * bulk_list with mlxsw_reg_trans_bulk_wait().
 */
int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);
|
2015-07-29 23:33:48 +02:00
|
|
|
|
2016-04-14 18:19:29 +02:00
|
|
|
/* Queue an asynchronous register write; completion is collected via
 * bulk_list with mlxsw_reg_trans_bulk_wait().
 */
int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);
|
2015-07-29 23:33:48 +02:00
|
|
|
|
2019-11-12 08:48:28 +02:00
|
|
|
#define MLXSW_REG_TRANS_ERR_STRING_SIZE 256

/* Wait for one EMAD transaction to finish, report retries/errors (log,
 * devlink hwerr trace), and release the transaction. The transaction is
 * freed via kfree_rcu() because the EMAD RX path looks it up under RCU.
 * Returns the transaction's error code.
 */
static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	char err_string[MLXSW_REG_TRANS_ERR_STRING_SIZE];
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	/* Completion fired; make sure the timeout work cannot run late. */
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err) {
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

		snprintf(err_string, MLXSW_REG_TRANS_ERR_STRING_SIZE,
			 "(tid=%llx,reg_id=%x(%s)) %s (%s)\n", trans->tid,
			 trans->reg->id, mlxsw_reg_id_str(trans->reg->id),
			 mlxsw_emad_op_tlv_status_str(trans->emad_status),
			 trans->emad_err_string ? trans->emad_err_string : "");

		trace_devlink_hwerr(priv_to_devlink(mlxsw_core),
				    trans->emad_status, err_string);

		kfree(trans->emad_err_string);
	}

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}
|
|
|
|
|
|
2016-04-14 18:19:29 +02:00
|
|
|
int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_reg_trans *trans;
|
|
|
|
|
struct mlxsw_reg_trans *tmp;
|
|
|
|
|
int sum_err = 0;
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
|
|
|
|
|
err = mlxsw_reg_trans_wait(trans);
|
|
|
|
|
if (err && sum_err == 0)
|
|
|
|
|
sum_err = err; /* first error to be returned */
|
|
|
|
|
}
|
|
|
|
|
return sum_err;
|
|
|
|
|
}
|
|
|
|
|
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
|
|
|
|
|
|
2015-07-29 23:33:48 +02:00
|
|
|
/* Access register @reg via the command interface (EMAD-over-mailbox).
 * Used before the EMAD trap path is operational. @payload is both the
 * input register content and, on successful query, the output buffer.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	bool reset_ok;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	/* Build an EMAD frame inside the input mailbox: operation TLV
	 * first, then the register TLV carrying @payload.
	 */
	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	/* There is a special treatment needed for MRSR (reset) register.
	 * The command interface will return error after the command
	 * is executed, so tell the lower layer to expect it
	 * and cope accordingly.
	 */
	reset_ok = reg->id == MLXSW_REG_MRSR_ID;

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			/* -EAGAIN means the device asked us to retry;
			 * bounded by MLXSW_EMAD_MAX_RETRY.
			 */
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	/* On success, copy the register payload back to the caller. */
	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload_cmd(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}
|
|
|
|
|
|
2016-04-14 18:19:29 +02:00
|
|
|
/* Completion callback for synchronous register access via EMAD: copy the
 * response payload back into the caller's buffer, which was smuggled
 * through @cb_priv as a pointer.
 */
static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	char *orig_payload = (char *) cb_priv;

	memcpy(orig_payload, payload, payload_len);
}
|
|
|
|
|
|
2015-07-29 23:33:48 +02:00
|
|
|
/* Synchronously access register @reg, choosing between the command
 * interface (early init) and the EMAD interface (normal operation).
 * Blocks until the transaction completes; returns 0 or a negative errno.
 */
static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	/* Queue a single transaction and wait for it; the callback copies
	 * the response into @payload.
	 */
	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}
|
|
|
|
|
|
|
|
|
|
/* Synchronously read register @reg into @payload. */
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);
|
|
|
|
|
|
|
|
|
|
/* Synchronously write @payload to register @reg. */
int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
|
|
|
|
|
|
2015-07-29 23:33:46 +02:00
|
|
|
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
|
|
|
|
|
struct mlxsw_rx_info *rx_info)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_rx_listener_item *rxl_item;
|
|
|
|
|
const struct mlxsw_rx_listener *rxl;
|
2015-12-03 12:12:23 +01:00
|
|
|
u8 local_port;
|
2015-07-29 23:33:46 +02:00
|
|
|
bool found = false;
|
|
|
|
|
|
2015-12-03 12:12:23 +01:00
|
|
|
if (rx_info->is_lag) {
|
|
|
|
|
dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
|
|
|
|
|
__func__, rx_info->u.lag_id,
|
|
|
|
|
rx_info->trap_id);
|
|
|
|
|
/* Upper layer does not care if the skb came from LAG or not,
|
|
|
|
|
* so just get the local_port for the lag port and push it up.
|
|
|
|
|
*/
|
|
|
|
|
local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
|
|
|
|
|
rx_info->u.lag_id,
|
|
|
|
|
rx_info->lag_port_index);
|
|
|
|
|
} else {
|
|
|
|
|
local_port = rx_info->u.sys_port;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
|
|
|
|
|
__func__, local_port, rx_info->trap_id);
|
2015-07-29 23:33:46 +02:00
|
|
|
|
|
|
|
|
if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
|
2017-03-24 08:02:48 +01:00
|
|
|
(local_port >= mlxsw_core->max_ports))
|
2015-07-29 23:33:46 +02:00
|
|
|
goto drop;
|
|
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
|
list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
|
|
|
|
|
rxl = &rxl_item->rxl;
|
|
|
|
|
if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
|
|
|
|
|
rxl->local_port == local_port) &&
|
2020-07-14 17:21:06 +03:00
|
|
|
rxl->trap_id == rx_info->trap_id &&
|
|
|
|
|
rxl->mirror_reason == rx_info->mirror_reason) {
|
2020-02-24 08:35:52 +01:00
|
|
|
if (rxl_item->enabled)
|
|
|
|
|
found = true;
|
2015-07-29 23:33:46 +02:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
2020-07-29 12:26:45 +03:00
|
|
|
if (!found) {
|
|
|
|
|
rcu_read_unlock();
|
2015-07-29 23:33:46 +02:00
|
|
|
goto drop;
|
2020-07-29 12:26:45 +03:00
|
|
|
}
|
2015-07-29 23:33:46 +02:00
|
|
|
|
|
|
|
|
rxl->func(skb, local_port, rxl_item->priv);
|
2020-07-29 12:26:45 +03:00
|
|
|
rcu_read_unlock();
|
2015-07-29 23:33:46 +02:00
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
drop:
|
|
|
|
|
dev_kfree_skb(skb);
|
|
|
|
|
}
|
|
|
|
|
EXPORT_SYMBOL(mlxsw_core_skb_receive);
|
|
|
|
|
|
2015-12-03 12:12:23 +01:00
|
|
|
/* Flatten (lag_id, port_index) into an index in the lag.mapping array,
 * which is laid out as MAX_LAG_MEMBERS slots per LAG.
 */
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
	       port_index;
}
|
|
|
|
|
|
|
|
|
|
/* Record that member slot @port_index of LAG @lag_id maps to @local_port. */
void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
|
|
|
|
|
|
|
|
|
|
/* Return the local port mapped at member slot @port_index of LAG @lag_id
 * (0 if the slot is unused).
 */
u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
|
|
|
|
|
|
|
|
|
|
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
|
|
|
|
|
u16 lag_id, u8 local_port)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
2016-10-21 16:07:23 +02:00
|
|
|
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
|
2015-12-03 12:12:23 +01:00
|
|
|
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
|
|
|
|
|
lag_id, i);
|
|
|
|
|
|
|
|
|
|
if (mlxsw_core->lag.mapping[index] == local_port)
|
|
|
|
|
mlxsw_core->lag.mapping[index] = 0;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
|
|
|
|
|
|
2016-10-21 16:07:23 +02:00
|
|
|
/* Return true if resource @res_id was reported by the device. */
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
			  enum mlxsw_res_id res_id)
{
	return mlxsw_res_valid(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_valid);
|
|
|
|
|
|
|
|
|
|
/* Return the device-reported value of resource @res_id. */
u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
		       enum mlxsw_res_id res_id)
{
	return mlxsw_res_get(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_get);
|
2016-07-21 12:03:09 +02:00
|
|
|
|
2019-09-16 18:04:21 +03:00
|
|
|
/* Register the devlink port for @local_port with the given attributes.
 * On registration failure the per-port core state is wiped so the slot
 * reads as unused. Returns 0 or a negative errno.
 */
static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
				  enum devlink_port_flavour flavour,
				  u32 port_number, bool split,
				  u32 split_port_subnumber,
				  bool splittable, u32 lanes,
				  const unsigned char *switch_id,
				  unsigned char switch_id_len)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct mlxsw_core_port *mlxsw_core_port =
		&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
	struct devlink_port_attrs attrs = {};
	int err;

	/* Attributes must be fully populated before registration. */
	attrs.split = split;
	attrs.lanes = lanes;
	attrs.splittable = splittable;
	attrs.flavour = flavour;
	attrs.phys.port_number = port_number;
	attrs.phys.split_subport_number = split_port_subnumber;
	memcpy(attrs.switch_id.id, switch_id, switch_id_len);
	attrs.switch_id.id_len = switch_id_len;
	mlxsw_core_port->local_port = local_port;
	devlink_port_attrs_set(devlink_port, &attrs);
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err)
		memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
	return err;
}
|
|
|
|
|
|
2019-09-16 18:04:21 +03:00
|
|
|
/* Unregister the devlink port for @local_port and clear its core state. */
static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
		&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devlink_port_unregister(devlink_port);
	memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
|
2019-09-16 18:04:21 +03:00
|
|
|
|
|
|
|
|
/* Register a physical-flavour devlink port for @local_port and account it
 * in the active-ports counter. Returns 0 or a negative errno.
 */
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
			 u32 port_number, bool split,
			 u32 split_port_subnumber,
			 bool splittable, u32 lanes,
			 const unsigned char *switch_id,
			 unsigned char switch_id_len)
{
	int err;

	err = __mlxsw_core_port_init(mlxsw_core, local_port,
				     DEVLINK_PORT_FLAVOUR_PHYSICAL,
				     port_number, split, split_port_subnumber,
				     splittable, lanes,
				     switch_id, switch_id_len);
	if (err)
		return err;

	/* Only physical ports count towards the active-ports limit. */
	atomic_inc(&mlxsw_core->active_ports_count);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_port_init);
|
|
|
|
|
|
|
|
|
|
/* Unregister a physical port previously set up by mlxsw_core_port_init()
 * and release its slot in the active-ports counter.
 */
void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	atomic_dec(&mlxsw_core->active_ports_count);

	__mlxsw_core_port_fini(mlxsw_core, local_port);
}
EXPORT_SYMBOL(mlxsw_core_port_fini);
|
|
|
|
|
|
2019-09-16 18:04:21 +03:00
|
|
|
/* Register the CPU-flavour devlink port at the fixed MLXSW_PORT_CPU_PORT
 * slot and stash the driver's private data on it. Returns 0 or -errno.
 */
int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
			     void *port_driver_priv,
			     const unsigned char *switch_id,
			     unsigned char switch_id_len)
{
	struct mlxsw_core_port *mlxsw_core_port =
		&mlxsw_core->ports[MLXSW_PORT_CPU_PORT];
	int err;

	/* The CPU port has no physical attributes (number/split/lanes). */
	err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT,
				     DEVLINK_PORT_FLAVOUR_CPU,
				     0, false, 0, false, 0,
				     switch_id, switch_id_len);
	if (err)
		return err;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_cpu_port_init);
|
|
|
|
|
|
|
|
|
|
/* Tear down the CPU devlink port registered by mlxsw_core_cpu_port_init(). */
void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core)
{
	__mlxsw_core_port_fini(mlxsw_core, MLXSW_PORT_CPU_PORT);
}
EXPORT_SYMBOL(mlxsw_core_cpu_port_fini);
|
|
|
|
|
|
2016-10-28 21:35:57 +02:00
|
|
|
/* Mark @local_port as an Ethernet port backed by netdevice @dev and store
 * the driver's per-port private data.
 */
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			     void *port_driver_priv, struct net_device *dev)
{
	struct mlxsw_core_port *mlxsw_core_port =
		&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);
|
2016-10-28 21:35:55 +02:00
|
|
|
|
2016-10-28 21:35:58 +02:00
|
|
|
/* Mark @local_port as an InfiniBand port (no associated ibdev is passed)
 * and store the driver's per-port private data.
 */
void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			    void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
		&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_ib_set(devlink_port, NULL);
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);
|
|
|
|
|
|
2016-10-28 21:35:55 +02:00
|
|
|
/* Clear the devlink port type of @local_port (back to "unset") and update
 * the driver's per-port private data.
 */
void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
			   void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
		&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_clear(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_clear);
|
|
|
|
|
|
2016-10-28 21:35:58 +02:00
|
|
|
/* Return the current devlink type (eth/ib/unset) of @local_port. */
enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
						u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
		&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port->type;
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);
|
|
|
|
|
|
2018-05-18 09:29:04 +02:00
|
|
|
|
2019-03-28 13:56:38 +01:00
|
|
|
/* Return the embedded devlink_port structure for @local_port. */
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
				 u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
		&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port;
}
EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
|
|
|
|
|
|
2020-12-14 13:30:31 +02:00
|
|
|
/* Return true if @local_port appears in the bus-provided list of XM
 * (external-manager) local ports.
 */
bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port)
{
	const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
	int idx;

	for (idx = 0; idx < bus_info->xm_local_ports_count; idx++) {
		if (bus_info->xm_local_ports[idx] == local_port)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(mlxsw_core_port_is_xm);
|
|
|
|
|
|
2020-09-27 10:50:10 +03:00
|
|
|
/* Accessor for the core's environment (transceiver/thermal) state. */
struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->env;
}
|
|
|
|
|
|
|
|
|
|
/* Return whether core initialization has fully completed. */
bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->is_initialized;
}
|
|
|
|
|
|
2019-10-31 11:42:08 +02:00
|
|
|
/* Query the PMTM register for @module's type and translate it to the
 * maximum port width (number of lanes): 8, 4, 2 or 1. Returns the width,
 * -EINVAL for an unknown module type, or a negative errno on query failure.
 */
int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module)
{
	enum mlxsw_reg_pmtm_module_type module_type;
	char pmtm_pl[MLXSW_REG_PMTM_LEN];
	int err;

	mlxsw_reg_pmtm_pack(pmtm_pl, module);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
	if (err)
		return err;
	mlxsw_reg_pmtm_unpack(pmtm_pl, &module_type);

	/* Here we need to get the module width according to the module type. */

	switch (module_type) {
	case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X:
	case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD:
	case MLXSW_REG_PMTM_MODULE_TYPE_OSFP:
		return 8;
	case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X:
	case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X:
	case MLXSW_REG_PMTM_MODULE_TYPE_QSFP:
		return 4;
	case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X:
	case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X:
	case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD:
	case MLXSW_REG_PMTM_MODULE_TYPE_DSFP:
		return 2;
	case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X:
	case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X:
	case MLXSW_REG_PMTM_MODULE_TYPE_SFP:
		return 1;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(mlxsw_core_module_max_width);
|
|
|
|
|
|
2016-04-14 18:19:29 +02:00
|
|
|
/* Dump @buf to the debug log as rows of four big-endian 32-bit words,
 * trimming trailing all-zero words; at least one row is always printed.
 */
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *words = (__be32 *) buf;
	int nonzero_end = size / sizeof(__be32);
	int row;

	/* Find the index one past the last non-zero word. */
	while (nonzero_end > 0 && !words[nonzero_end - 1])
		nonzero_end--;
	if (!nonzero_end)
		nonzero_end = 1;

	for (row = 0; row < nonzero_end; row += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			row * 4, be32_to_cpu(words[row]),
			be32_to_cpu(words[row + 1]),
			be32_to_cpu(words[row + 2]),
			be32_to_cpu(words[row + 3]));
}
|
|
|
|
|
|
2015-07-29 23:33:46 +02:00
|
|
|
/* Execute a firmware command through the bus layer, with debug dumps of
 * both mailboxes. @reset_ok suppresses the expected -EIO when the command
 * triggers a device reset (MRSR). Mailbox sizes must be 32-bit aligned.
 * Returns 0 on success or a negative errno.
 */
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct, bool reset_ok,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}

	/* A reset command legitimately fails with RUNNING_RESET status;
	 * translate that to success when the caller said it is expected.
	 */
	if (reset_ok && err == -EIO &&
	    status == MLXSW_CMD_STATUS_RUNNING_RESET) {
		err = 0;
	} else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);
|
|
|
|
|
|
2016-04-14 18:19:28 +02:00
|
|
|
/* Queue delayed work on the driver-wide mlxsw workqueue. */
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);
|
|
|
|
|
|
2017-02-06 16:20:10 +01:00
|
|
|
/* Queue work on the driver-wide ordered mlxsw workqueue. */
bool mlxsw_core_schedule_work(struct work_struct *work)
{
	return queue_work(mlxsw_owq, work);
}
EXPORT_SYMBOL(mlxsw_core_schedule_work);
|
2016-12-03 16:45:00 +01:00
|
|
|
|
|
|
|
|
/* Wait for all work queued on the ordered mlxsw workqueue to finish. */
void mlxsw_core_flush_owq(void)
{
	flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);
|
|
|
|
|
|
2018-01-15 08:59:10 +01:00
|
|
|
/* Delegate the KVD (key-value database) partition size computation to the
 * per-chip driver; -EINVAL if the driver does not provide the callback.
 */
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_config_profile *profile,
			     u64 *p_single_size, u64 *p_double_size,
			     u64 *p_linear_size)
{
	struct mlxsw_driver *driver = mlxsw_core->driver;

	if (!driver->kvd_sizes_get)
		return -EINVAL;

	return driver->kvd_sizes_get(mlxsw_core, profile,
				     p_single_size, p_double_size,
				     p_linear_size);
}
EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
|
|
|
|
|
|
2019-03-03 09:12:11 +00:00
|
|
|
/* Query all device resources via repeated QUERY_RESOURCES commands and
 * parse each (id, data) pair into @res, stopping at the table-end marker.
 * A NULL @res is a no-op. Returns 0, a command error, or -EIO if the
 * firmware never terminates the table.
 */
int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
			       struct mlxsw_res *res)
{
	int index, i;
	u64 data;
	u16 id;
	int err;

	if (!res)
		return 0;

	mlxsw_cmd_mbox_zero(mbox);

	for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
	     index++) {
		err = mlxsw_cmd_query_resources(mlxsw_core, mbox, index);
		if (err)
			return err;

		for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
			id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
			data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);

			/* End-of-table marker terminates the whole query. */
			if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
				return 0;

			mlxsw_res_parse(res, id, data);
		}
	}

	/* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get
	 * MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW.
	 */
	return -EIO;
}
EXPORT_SYMBOL(mlxsw_core_resources_query);
|
|
|
|
|
|
2019-06-11 18:45:05 +03:00
|
|
|
/* Read the high 32 bits of the free-running clock via the bus layer. */
u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->bus->read_frc_h(mlxsw_core->bus_priv);
}
EXPORT_SYMBOL(mlxsw_core_read_frc_h);
|
|
|
|
|
|
|
|
|
|
/* Read the low 32 bits of the free-running clock via the bus layer. */
u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->bus->read_frc_l(mlxsw_core->bus_priv);
}
EXPORT_SYMBOL(mlxsw_core_read_frc_l);
|
|
|
|
|
|
2019-11-12 08:48:29 +02:00
|
|
|
/* Enable inclusion of the string TLV (human-readable FW error strings)
 * in subsequently built EMAD frames.
 */
void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core)
{
	mlxsw_core->emad.enable_string_tlv = true;
}
EXPORT_SYMBOL(mlxsw_core_emad_string_tlv_enable);
|
|
|
|
|
|
2015-07-29 23:33:46 +02:00
|
|
|
/* Module init: create the shared unordered and ordered workqueues used by
 * all mlxsw drivers. Returns 0 or -ENOMEM, unwinding on partial failure.
 */
static int __init mlxsw_core_module_init(void)
{
	int err;

	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
	if (!mlxsw_wq)
		return -ENOMEM;
	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
					    mlxsw_core_driver_name);
	if (!mlxsw_owq) {
		err = -ENOMEM;
		goto err_alloc_ordered_workqueue;
	}
	return 0;

err_alloc_ordered_workqueue:
	destroy_workqueue(mlxsw_wq);
	return err;
}
|
|
|
|
|
|
|
|
|
|
/* Module exit: destroy the workqueues created at init (reverse order). */
static void __exit mlxsw_core_module_exit(void)
{
	destroy_workqueue(mlxsw_owq);
	destroy_workqueue(mlxsw_wq);
}
|
|
|
|
|
|
|
|
|
|
/* Module entry/exit registration and metadata. */
module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");
|