// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */

#include <linux/etherdevice.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdev_features.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/if_vlan.h>

#include "prestera.h"
#include "prestera_hw.h"
#include "prestera_acl.h"
#include "prestera_flow.h"
#include "prestera_span.h"
#include "prestera_rxtx.h"
#include "prestera_devlink.h"
#include "prestera_ethtool.h"
#include "prestera_counter.h"
#include "prestera_switchdev.h"

#define PRESTERA_MTU_DEFAULT 1536

#define PRESTERA_STATS_DELAY_MS 1000

#define PRESTERA_MAC_ADDR_NUM_MAX 255

static struct workqueue_struct *prestera_wq;
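
/* Set the port's default VLAN (PVID). A non-zero vid is programmed as the
 * PVID and all frame types are accepted; a zero vid restricts the port to
 * tagged frames only. If updating the accept-frame type fails, the previous
 * PVID is restored in hardware.
 */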
int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
{
        enum prestera_accept_frm_type frm_type;
        int err;

        frm_type = PRESTERA_ACCEPT_FRAME_TYPE_TAGGED;

        if (vid) {
                err = prestera_hw_vlan_port_vid_set(port, vid);
                if (err)
                        return err;

                frm_type = PRESTERA_ACCEPT_FRAME_TYPE_ALL;
        }

        err = prestera_hw_port_accept_frm_type(port, frm_type);
        if (err && frm_type == PRESTERA_ACCEPT_FRAME_TYPE_ALL)
                prestera_hw_vlan_port_vid_set(port, port->pvid);

        port->pvid = vid;
        return 0;
}

struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
                                                 u32 dev_id, u32 hw_id)
{
        struct prestera_port *port = NULL, *tmp;

        read_lock(&sw->port_list_lock);
        list_for_each_entry(tmp, &sw->port_list, list) {
                if (tmp->dev_id == dev_id && tmp->hw_id == hw_id) {
                        port = tmp;
                        break;
                }
        }
        read_unlock(&sw->port_list_lock);

        return port;
}

struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
{
        struct prestera_port *port = NULL, *tmp;

        read_lock(&sw->port_list_lock);
        list_for_each_entry(tmp, &sw->port_list, list) {
                if (tmp->id == id) {
                        port = tmp;
                        break;
                }
        }
        read_unlock(&sw->port_list_lock);

        return port;
}

int prestera_port_cfg_mac_read(struct prestera_port *port,
                               struct prestera_port_mac_config *cfg)
{
        *cfg = port->cfg_mac;
        return 0;
}
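
/* Push the given MAC-level configuration to the firmware and cache it in
 * port->cfg_mac on success.
 */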
int prestera_port_cfg_mac_write(struct prestera_port *port,
                                struct prestera_port_mac_config *cfg)
{
        int err;

        err = prestera_hw_port_mac_mode_set(port, cfg->admin,
                                            cfg->mode, cfg->inband, cfg->speed,
                                            cfg->duplex, cfg->fec);
        if (err)
                return err;

        port->cfg_mac = *cfg;
        return 0;
}

static int prestera_port_open(struct net_device *dev)
{
        struct prestera_port *port = netdev_priv(dev);
        struct prestera_port_mac_config cfg_mac;
        int err = 0;

        if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
                err = prestera_port_cfg_mac_read(port, &cfg_mac);
                if (!err) {
                        cfg_mac.admin = true;
                        err = prestera_port_cfg_mac_write(port, &cfg_mac);
                }
        } else {
                port->cfg_phy.admin = true;
                err = prestera_hw_port_phy_mode_set(port, true, port->autoneg,
                                                    port->cfg_phy.mode,
                                                    port->adver_link_modes,
                                                    port->cfg_phy.mdix);
        }

        netif_start_queue(dev);

        return err;
}

static int prestera_port_close(struct net_device *dev)
{
        struct prestera_port *port = netdev_priv(dev);
        struct prestera_port_mac_config cfg_mac;
        int err = 0;

        netif_stop_queue(dev);

        if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
                err = prestera_port_cfg_mac_read(port, &cfg_mac);
                if (!err) {
                        cfg_mac.admin = false;
                        prestera_port_cfg_mac_write(port, &cfg_mac);
                }
        } else {
                port->cfg_phy.admin = false;
                err = prestera_hw_port_phy_mode_set(port, false, port->autoneg,
                                                    port->cfg_phy.mode,
                                                    port->adver_link_modes,
                                                    port->cfg_phy.mdix);
        }

        return err;
}

static netdev_tx_t prestera_port_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        return prestera_rxtx_xmit(netdev_priv(dev), skb);
}

int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr)
{
        if (!is_valid_ether_addr(addr))
                return -EADDRNOTAVAIL;

        /* firmware requires that the port's MAC address matches the first
         * 5 bytes of the base MAC address
         */
        if (memcmp(port->sw->base_mac, addr, ETH_ALEN - 1))
                return -EINVAL;

        return 0;
}

static int prestera_port_set_mac_address(struct net_device *dev, void *p)
{
        struct prestera_port *port = netdev_priv(dev);
        struct sockaddr *addr = p;
        int err;

        err = prestera_is_valid_mac_addr(port, addr->sa_data);
        if (err)
                return err;

        err = prestera_hw_port_mac_set(port, addr->sa_data);
        if (err)
                return err;

        eth_hw_addr_set(dev, addr->sa_data);

        return 0;
}

static int prestera_port_change_mtu(struct net_device *dev, int mtu)
{
        struct prestera_port *port = netdev_priv(dev);
        int err;

        err = prestera_hw_port_mtu_set(port, mtu);
        if (err)
                return err;

        dev->mtu = mtu;

        return 0;
}

static void prestera_port_get_stats64(struct net_device *dev,
                                      struct rtnl_link_stats64 *stats)
{
        struct prestera_port *port = netdev_priv(dev);
        struct prestera_port_stats *port_stats = &port->cached_hw_stats.stats;

        stats->rx_packets = port_stats->broadcast_frames_received +
                            port_stats->multicast_frames_received +
                            port_stats->unicast_frames_received;

        stats->tx_packets = port_stats->broadcast_frames_sent +
                            port_stats->multicast_frames_sent +
                            port_stats->unicast_frames_sent;

        stats->rx_bytes = port_stats->good_octets_received;

        stats->tx_bytes = port_stats->good_octets_sent;

        stats->rx_errors = port_stats->rx_error_frame_received;
        stats->tx_errors = port_stats->mac_trans_error;

        stats->rx_dropped = port_stats->buffer_overrun;
        stats->tx_dropped = 0;

        stats->multicast = port_stats->multicast_frames_received;
        stats->collisions = port_stats->excessive_collision;

        stats->rx_crc_errors = port_stats->bad_crc;
}

static void prestera_port_get_hw_stats(struct prestera_port *port)
{
        prestera_hw_port_stats_get(port, &port->cached_hw_stats.stats);
}
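
/* Delayed work: refresh the cached hardware counters and re-arm itself every
 * PRESTERA_STATS_DELAY_MS milliseconds. It is started on link up and cancelled
 * on link down by the port event handler.
 */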
static void prestera_port_stats_update(struct work_struct *work)
{
        struct prestera_port *port =
                container_of(work, struct prestera_port,
                             cached_hw_stats.caching_dw.work);

        prestera_port_get_hw_stats(port);

        queue_delayed_work(prestera_wq, &port->cached_hw_stats.caching_dw,
                           msecs_to_jiffies(PRESTERA_STATS_DELAY_MS));
}

static int prestera_port_setup_tc(struct net_device *dev,
                                  enum tc_setup_type type,
                                  void *type_data)
{
        struct prestera_port *port = netdev_priv(dev);

        switch (type) {
        case TC_SETUP_BLOCK:
                return prestera_flow_block_setup(port, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

static const struct net_device_ops prestera_netdev_ops = {
        .ndo_open = prestera_port_open,
        .ndo_stop = prestera_port_close,
        .ndo_start_xmit = prestera_port_xmit,
        .ndo_setup_tc = prestera_port_setup_tc,
        .ndo_change_mtu = prestera_port_change_mtu,
        .ndo_get_stats64 = prestera_port_get_stats64,
        .ndo_set_mac_address = prestera_port_set_mac_address,
        .ndo_get_devlink_port = prestera_devlink_get_port,
};

int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes)
{
        int err;

        if (port->autoneg && port->adver_link_modes == link_modes)
                return 0;

        err = prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin,
                                            true, 0, link_modes,
                                            port->cfg_phy.mdix);
        if (err)
                return err;

        port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF);
        port->adver_link_modes = link_modes;
        port->cfg_phy.mode = 0;
        port->autoneg = true;

        return 0;
}

static void prestera_port_list_add(struct prestera_port *port)
{
        write_lock(&port->sw->port_list_lock);
        list_add(&port->list, &port->sw->port_list);
        write_unlock(&port->sw->port_list_lock);
}

static void prestera_port_list_del(struct prestera_port *port)
{
        write_lock(&port->sw->port_list_lock);
        list_del(&port->list);
        write_unlock(&port->sw->port_list_lock);
}
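
/* Allocate and register a net_device for front-panel port @id: query port info
 * and capabilities from the firmware, derive the port MAC address from the
 * switch base MAC, apply the default MAC/PHY configuration, initialize rx/tx,
 * then register the netdev and its devlink port.
 */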
static int prestera_port_create(struct prestera_switch *sw, u32 id)
{
        struct prestera_port_mac_config cfg_mac;
        struct prestera_port *port;
        struct net_device *dev;
        int err;

        dev = alloc_etherdev(sizeof(*port));
        if (!dev)
                return -ENOMEM;

        port = netdev_priv(dev);

        INIT_LIST_HEAD(&port->vlans_list);
        port->pvid = PRESTERA_DEFAULT_VID;
        port->lag = NULL;
        port->dev = dev;
        port->id = id;
        port->sw = sw;

        err = prestera_hw_port_info_get(port, &port->dev_id, &port->hw_id,
                                        &port->fp_id);
        if (err) {
                dev_err(prestera_dev(sw), "Failed to get port(%u) info\n", id);
                goto err_port_info_get;
        }

        err = prestera_devlink_port_register(port);
        if (err)
                goto err_dl_port_register;

        dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
        dev->netdev_ops = &prestera_netdev_ops;
        dev->ethtool_ops = &prestera_ethtool_ops;

        netif_carrier_off(dev);

        dev->mtu = min_t(unsigned int, sw->mtu_max, PRESTERA_MTU_DEFAULT);
        dev->min_mtu = sw->mtu_min;
        dev->max_mtu = sw->mtu_max;

        err = prestera_hw_port_mtu_set(port, dev->mtu);
        if (err) {
                dev_err(prestera_dev(sw), "Failed to set port(%u) mtu(%d)\n",
                        id, dev->mtu);
                goto err_port_init;
        }

        if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) {
                err = -EINVAL;
                goto err_port_init;
        }

        eth_hw_addr_gen(dev, sw->base_mac, port->fp_id);
        /* firmware requires that the port's MAC address starts with the first
         * 5 bytes of the base MAC address
         */
        if (memcmp(dev->dev_addr, sw->base_mac, ETH_ALEN - 1)) {
                dev_warn(prestera_dev(sw), "Port MAC address wraps for port(%u)\n", id);
                dev_addr_mod(dev, 0, sw->base_mac, ETH_ALEN - 1);
        }

        err = prestera_hw_port_mac_set(port, dev->dev_addr);
        if (err) {
                dev_err(prestera_dev(sw), "Failed to set port(%u) mac addr\n", id);
                goto err_port_init;
        }

        err = prestera_hw_port_cap_get(port, &port->caps);
        if (err) {
                dev_err(prestera_dev(sw), "Failed to get port(%u) caps\n", id);
                goto err_port_init;
        }

        port->adver_link_modes = port->caps.supp_link_modes;
        port->adver_fec = 0;
        port->autoneg = true;

        /* initialize config mac */
        if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP) {
                cfg_mac.admin = true;
                cfg_mac.mode = PRESTERA_MAC_MODE_INTERNAL;
        } else {
                cfg_mac.admin = false;
                cfg_mac.mode = PRESTERA_MAC_MODE_MAX;
        }
        cfg_mac.inband = false;
        cfg_mac.speed = 0;
        cfg_mac.duplex = DUPLEX_UNKNOWN;
        cfg_mac.fec = PRESTERA_PORT_FEC_OFF;

        err = prestera_port_cfg_mac_write(port, &cfg_mac);
        if (err) {
                dev_err(prestera_dev(sw),
                        "Failed to set port(%u) mac mode\n", id);
                goto err_port_init;
        }

        /* initialize config phy (if the PHY is integral) */
        if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP) {
                port->cfg_phy.mdix = ETH_TP_MDI_AUTO;
                port->cfg_phy.admin = false;
                err = prestera_hw_port_phy_mode_set(port,
                                                    port->cfg_phy.admin,
                                                    false, 0, 0,
                                                    port->cfg_phy.mdix);
                if (err) {
                        dev_err(prestera_dev(sw),
                                "Failed to set port(%u) phy mode\n", id);
                        goto err_port_init;
                }
        }

        err = prestera_rxtx_port_init(port);
        if (err)
                goto err_port_init;

        INIT_DELAYED_WORK(&port->cached_hw_stats.caching_dw,
                          &prestera_port_stats_update);

        prestera_port_list_add(port);

        err = register_netdev(dev);
        if (err)
                goto err_register_netdev;

        prestera_devlink_port_set(port);

        return 0;

err_register_netdev:
        prestera_port_list_del(port);
err_port_init:
        prestera_devlink_port_unregister(port);
err_dl_port_register:
err_port_info_get:
        free_netdev(dev);
        return err;
}

static void prestera_port_destroy(struct prestera_port *port)
{
        struct net_device *dev = port->dev;

        cancel_delayed_work_sync(&port->cached_hw_stats.caching_dw);
        prestera_devlink_port_clear(port);
        unregister_netdev(dev);
        prestera_port_list_del(port);
        prestera_devlink_port_unregister(port);
        free_netdev(dev);
}

static void prestera_destroy_ports(struct prestera_switch *sw)
{
        struct prestera_port *port, *tmp;

        list_for_each_entry_safe(port, tmp, &sw->port_list, list)
                prestera_port_destroy(port);
}

static int prestera_create_ports(struct prestera_switch *sw)
{
        struct prestera_port *port, *tmp;
        u32 port_idx;
        int err;

        for (port_idx = 0; port_idx < sw->port_count; port_idx++) {
                err = prestera_port_create(sw, port_idx);
                if (err)
                        goto err_port_create;
        }

        return 0;

err_port_create:
        list_for_each_entry_safe(port, tmp, &sw->port_list, list)
                prestera_port_destroy(port);

        return err;
}
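
/* Handle PRESTERA_EVENT_TYPE_PORT events from the firmware: on a MAC state
 * change, update the netdev carrier state and start or stop the cached
 * statistics polling accordingly.
 */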
static void prestera_port_handle_event(struct prestera_switch *sw,
                                       struct prestera_event *evt, void *arg)
{
        struct delayed_work *caching_dw;
        struct prestera_port *port;

        port = prestera_find_port(sw, evt->port_evt.port_id);
        if (!port || !port->dev)
                return;

        caching_dw = &port->cached_hw_stats.caching_dw;

        prestera_ethtool_port_state_changed(port, &evt->port_evt);

        if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) {
                if (port->state_mac.oper) {
                        netif_carrier_on(port->dev);
                        if (!delayed_work_pending(caching_dw))
                                queue_delayed_work(prestera_wq, caching_dw, 0);
                } else if (netif_running(port->dev) &&
                           netif_carrier_ok(port->dev)) {
                        netif_carrier_off(port->dev);
                        if (delayed_work_pending(caching_dw))
                                cancel_delayed_work(caching_dw);
                }
        }
}

static int prestera_event_handlers_register(struct prestera_switch *sw)
{
        return prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_PORT,
                                                  prestera_port_handle_event,
                                                  NULL);
}

static void prestera_event_handlers_unregister(struct prestera_switch *sw)
{
        prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_PORT,
                                             prestera_port_handle_event);
}
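
/* Read the switch base MAC address from the device-tree "base-mac-provider"
 * phandle; fall back to a random address if none is available, then program
 * the result into the hardware.
 */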
static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
{
        struct device_node *base_mac_np;
        struct device_node *np;
        int ret;

        np = of_find_compatible_node(NULL, NULL, "marvell,prestera");
        base_mac_np = of_parse_phandle(np, "base-mac-provider", 0);

        ret = of_get_mac_address(base_mac_np, sw->base_mac);
        if (ret) {
                eth_random_addr(sw->base_mac);
                dev_info(prestera_dev(sw), "using random base mac address\n");
        }
        of_node_put(base_mac_np);

        return prestera_hw_switch_mac_set(sw, sw->base_mac);
}

struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id)
{
        return id < sw->lag_max ? &sw->lags[id] : NULL;
}

static struct prestera_lag *prestera_lag_by_dev(struct prestera_switch *sw,
                                                struct net_device *dev)
{
        struct prestera_lag *lag;
        u16 id;

        for (id = 0; id < sw->lag_max; id++) {
                lag = &sw->lags[id];
                if (lag->dev == dev)
                        return lag;
        }

        return NULL;
}

static struct prestera_lag *prestera_lag_create(struct prestera_switch *sw,
                                                struct net_device *lag_dev)
{
        struct prestera_lag *lag = NULL;
        u16 id;

        for (id = 0; id < sw->lag_max; id++) {
                lag = &sw->lags[id];
                if (!lag->dev)
                        break;
        }
        if (lag) {
                INIT_LIST_HEAD(&lag->members);
                lag->dev = lag_dev;
        }

        return lag;
}

static void prestera_lag_destroy(struct prestera_switch *sw,
                                 struct prestera_lag *lag)
{
        WARN_ON(!list_empty(&lag->members));
        lag->member_count = 0;
        lag->dev = NULL;
}
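
/* Attach a port to the LAG represented by lag_dev, allocating a LAG slot on
 * first use, and mirror the membership to hardware.
 */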
static int prestera_lag_port_add(struct prestera_port *port,
                                 struct net_device *lag_dev)
{
        struct prestera_switch *sw = port->sw;
        struct prestera_lag *lag;
        int err;

        lag = prestera_lag_by_dev(sw, lag_dev);
        if (!lag) {
                lag = prestera_lag_create(sw, lag_dev);
                if (!lag)
                        return -ENOSPC;
        }

        if (lag->member_count >= sw->lag_member_max)
                return -ENOSPC;

        err = prestera_hw_lag_member_add(port, lag->lag_id);
        if (err) {
                if (!lag->member_count)
                        prestera_lag_destroy(sw, lag);
                return err;
        }

        list_add(&port->lag_member, &lag->members);
        lag->member_count++;
        port->lag = lag;

        return 0;
}

static int prestera_lag_port_del(struct prestera_port *port)
{
        struct prestera_switch *sw = port->sw;
        struct prestera_lag *lag = port->lag;
        int err;

        if (!lag || !lag->member_count)
                return -EINVAL;

        err = prestera_hw_lag_member_del(port, lag->lag_id);
        if (err)
                return err;

        list_del(&port->lag_member);
        lag->member_count--;
        port->lag = NULL;

        if (netif_is_bridge_port(lag->dev)) {
                struct net_device *br_dev;

                br_dev = netdev_master_upper_dev_get(lag->dev);

                prestera_bridge_port_leave(br_dev, port);
        }

        if (!lag->member_count)
                prestera_lag_destroy(sw, lag);

        return 0;
}

bool prestera_port_is_lag_member(const struct prestera_port *port)
{
        return !!port->lag;
}

u16 prestera_port_lag_id(const struct prestera_port *port)
{
        return port->lag->lag_id;
}

static int prestera_lag_init(struct prestera_switch *sw)
{
        u16 id;

        sw->lags = kcalloc(sw->lag_max, sizeof(*sw->lags), GFP_KERNEL);
        if (!sw->lags)
                return -ENOMEM;

        for (id = 0; id < sw->lag_max; id++)
                sw->lags[id].lag_id = id;

        return 0;
}

static void prestera_lag_fini(struct prestera_switch *sw)
{
        u8 idx;

        for (idx = 0; idx < sw->lag_max; idx++)
                WARN_ON(sw->lags[idx].member_count);

        kfree(sw->lags);
}

bool prestera_netdev_check(const struct net_device *dev)
{
        return dev->netdev_ops == &prestera_netdev_ops;
}

static int prestera_lower_dev_walk(struct net_device *dev,
                                   struct netdev_nested_priv *priv)
{
        struct prestera_port **pport = (struct prestera_port **)priv->data;

        if (prestera_netdev_check(dev)) {
                *pport = netdev_priv(dev);
                return 1;
        }

        return 0;
}

struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev)
{
        struct prestera_port *port = NULL;
        struct netdev_nested_priv priv = {
                .data = (void *)&port,
        };

        if (prestera_netdev_check(dev))
                return netdev_priv(dev);

        netdev_walk_all_lower_dev(dev, prestera_lower_dev_walk, &priv);

        return port;
}

static int prestera_netdev_port_lower_event(struct net_device *dev,
                                            unsigned long event, void *ptr)
{
        struct netdev_notifier_changelowerstate_info *info = ptr;
        struct netdev_lag_lower_state_info *lower_state_info;
        struct prestera_port *port = netdev_priv(dev);
        bool enabled;

        if (!netif_is_lag_port(dev))
                return 0;
        if (!prestera_port_is_lag_member(port))
                return 0;

        lower_state_info = info->lower_state_info;
        enabled = lower_state_info->link_up && lower_state_info->tx_enabled;

        return prestera_hw_lag_member_enable(port, port->lag->lag_id, enabled);
}

static bool prestera_lag_master_check(struct net_device *lag_dev,
                                      struct netdev_lag_upper_info *info,
                                      struct netlink_ext_ack *ext_ack)
{
        if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
                NL_SET_ERR_MSG_MOD(ext_ack, "Unsupported LAG Tx type");
                return false;
        }

        return true;
}
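
/* Validate and apply upper-device changes (bridge or LAG enslavement) for a
 * prestera port: unsupported topologies are rejected at PRECHANGEUPPER time,
 * and the join/leave is carried out at CHANGEUPPER time.
 */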
static int prestera_netdev_port_event(struct net_device *lower,
                                      struct net_device *dev,
                                      unsigned long event, void *ptr)
{
        struct netdev_notifier_info *info = ptr;
        struct netdev_notifier_changeupper_info *cu_info;
        struct prestera_port *port = netdev_priv(dev);
        struct netlink_ext_ack *extack;
        struct net_device *upper;

        extack = netdev_notifier_info_to_extack(info);
        cu_info = container_of(info,
                               struct netdev_notifier_changeupper_info,
                               info);

        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                upper = cu_info->upper_dev;
                if (!netif_is_bridge_master(upper) &&
                    !netif_is_lag_master(upper)) {
                        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
                        return -EINVAL;
                }

                if (!cu_info->linking)
                        break;

                if (netdev_has_any_upper_dev(upper)) {
                        NL_SET_ERR_MSG_MOD(extack, "Upper device is already enslaved");
                        return -EINVAL;
                }

                if (netif_is_lag_master(upper) &&
                    !prestera_lag_master_check(upper, cu_info->upper_info, extack))
                        return -EOPNOTSUPP;
                if (netif_is_lag_master(upper) && vlan_uses_dev(dev)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Master device is a LAG master and port has a VLAN");
                        return -EINVAL;
                }
                if (netif_is_lag_port(dev) && is_vlan_dev(upper) &&
                    !netif_is_lag_master(vlan_dev_real_dev(upper))) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Can not put a VLAN on a LAG port");
                        return -EINVAL;
                }
                break;

        case NETDEV_CHANGEUPPER:
                upper = cu_info->upper_dev;
                if (netif_is_bridge_master(upper)) {
                        if (cu_info->linking)
                                return prestera_bridge_port_join(upper, port,
                                                                 extack);
                        else
                                prestera_bridge_port_leave(upper, port);
                } else if (netif_is_lag_master(upper)) {
                        if (cu_info->linking)
                                return prestera_lag_port_add(port, upper);
                        else
                                prestera_lag_port_del(port);
                }
                break;

        case NETDEV_CHANGELOWERSTATE:
                return prestera_netdev_port_lower_event(dev, event, ptr);
        }

        return 0;
}

static int prestera_netdevice_lag_event(struct net_device *lag_dev,
                                        unsigned long event, void *ptr)
{
        struct net_device *dev;
        struct list_head *iter;
        int err;

        netdev_for_each_lower_dev(lag_dev, dev, iter) {
                if (prestera_netdev_check(dev)) {
                        err = prestera_netdev_port_event(lag_dev, dev, event,
                                                         ptr);
                        if (err)
                                return err;
                }
        }

        return 0;
}

static int prestera_netdev_event_handler(struct notifier_block *nb,
                                         unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        int err = 0;

        if (prestera_netdev_check(dev))
                err = prestera_netdev_port_event(dev, dev, event, ptr);
        else if (netif_is_lag_master(dev))
                err = prestera_netdevice_lag_event(dev, event, ptr);

        return notifier_from_errno(err);
}

static int prestera_netdev_event_handler_register(struct prestera_switch *sw)
{
        sw->netdev_nb.notifier_call = prestera_netdev_event_handler;

        return register_netdevice_notifier(&sw->netdev_nb);
}

static void prestera_netdev_event_handler_unregister(struct prestera_switch *sw)
{
        unregister_netdevice_notifier(&sw->netdev_nb);
}
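
/* Bring up the switch: initialize the hardware, register the netdevice
 * notifier and the sub-blocks (router, switchdev, rx/tx, event handlers,
 * counters, ACL, SPAN, devlink traps, LAG), then create the front-panel
 * ports and register the switch with devlink.
 */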
static int prestera_switch_init(struct prestera_switch *sw)
{
        int err;

        err = prestera_hw_switch_init(sw);
        if (err) {
                dev_err(prestera_dev(sw), "Failed to init Switch device\n");
                return err;
        }

        rwlock_init(&sw->port_list_lock);
        INIT_LIST_HEAD(&sw->port_list);

        err = prestera_switch_set_base_mac_addr(sw);
        if (err)
                return err;

        err = prestera_netdev_event_handler_register(sw);
        if (err)
                return err;

        err = prestera_router_init(sw);
        if (err)
                goto err_router_init;

        err = prestera_switchdev_init(sw);
        if (err)
                goto err_swdev_register;

        err = prestera_rxtx_switch_init(sw);
        if (err)
                goto err_rxtx_register;

        err = prestera_event_handlers_register(sw);
        if (err)
                goto err_handlers_register;

        err = prestera_counter_init(sw);
        if (err)
                goto err_counter_init;

        err = prestera_acl_init(sw);
        if (err)
                goto err_acl_init;

        err = prestera_span_init(sw);
        if (err)
                goto err_span_init;

        err = prestera_devlink_traps_register(sw);
        if (err)
                goto err_dl_register;

        err = prestera_lag_init(sw);
        if (err)
                goto err_lag_init;

        err = prestera_create_ports(sw);
        if (err)
                goto err_ports_create;

        prestera_devlink_register(sw);
        return 0;

err_ports_create:
        prestera_lag_fini(sw);
err_lag_init:
        prestera_devlink_traps_unregister(sw);
err_dl_register:
        prestera_span_fini(sw);
err_span_init:
        prestera_acl_fini(sw);
err_acl_init:
        prestera_counter_fini(sw);
err_counter_init:
        prestera_event_handlers_unregister(sw);
err_handlers_register:
        prestera_rxtx_switch_fini(sw);
err_rxtx_register:
        prestera_switchdev_fini(sw);
err_swdev_register:
        prestera_router_fini(sw);
err_router_init:
        prestera_netdev_event_handler_unregister(sw);
        prestera_hw_switch_fini(sw);

        return err;
}

static void prestera_switch_fini(struct prestera_switch *sw)
{
        prestera_devlink_unregister(sw);
        prestera_destroy_ports(sw);
        prestera_lag_fini(sw);
        prestera_devlink_traps_unregister(sw);
        prestera_span_fini(sw);
        prestera_acl_fini(sw);
        prestera_counter_fini(sw);
        prestera_event_handlers_unregister(sw);
        prestera_rxtx_switch_fini(sw);
        prestera_switchdev_fini(sw);
        prestera_router_fini(sw);
        prestera_netdev_event_handler_unregister(sw);
        prestera_hw_switch_fini(sw);
}

int prestera_device_register(struct prestera_device *dev)
{
        struct prestera_switch *sw;
        int err;

        sw = prestera_devlink_alloc(dev);
        if (!sw)
                return -ENOMEM;

        dev->priv = sw;
        sw->dev = dev;

        err = prestera_switch_init(sw);
        if (err) {
                prestera_devlink_free(sw);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL(prestera_device_register);

void prestera_device_unregister(struct prestera_device *dev)
{
        struct prestera_switch *sw = dev->priv;

        prestera_switch_fini(sw);
        prestera_devlink_free(sw);
}
EXPORT_SYMBOL(prestera_device_unregister);

static int __init prestera_module_init(void)
{
        prestera_wq = alloc_workqueue("prestera", 0, 0);
        if (!prestera_wq)
                return -ENOMEM;

        return 0;
}

static void __exit prestera_module_exit(void)
{
        destroy_workqueue(prestera_wq);
}

module_init(prestera_module_init);
module_exit(prestera_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Marvell Prestera switch driver");