Merge branch 'dsa-changes-for-multiple-cpu-ports-part-1'

Vladimir Oltean says:

====================
DSA changes for multiple CPU ports (part 1)

I am trying to enable the second internal port pair from the NXP LS1028A
Felix switch for DSA-tagged traffic via "ocelot-8021q". This series
represents part 1 (of an unknown number) of that effort.

It does some preparation work, like managing host flooding in DSA via a
dedicated method, and removing the CPU port as argument from the tagging
protocol change procedure.

In terms of driver-specific changes, it reworks the two tagging protocol
implementations in the Felix driver around a structured set of operations
(struct felix_tag_proto_ops).
It enables host flooding towards all tag_8021q CPU ports. It dynamically
updates the tag_8021q CPU port used for traps. It also fixes a bug
introduced by a previous refactoring/oversimplification commit in
net-next.
====================

Link: https://lore.kernel.org/r/20220511095020.562461-1-vladimir.oltean@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2022-05-12 16:38:56 -07:00
commit 879c610c92
11 changed files with 342 additions and 285 deletions
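
For reference, the core API change threaded through the drivers below is the
removal of the CPU port argument from the tagging protocol change operation
in struct dsa_switch_ops (see the include/net/dsa.h hunk):

	/* before: called once per CPU port */
	int	(*change_tag_protocol)(struct dsa_switch *ds, int port,
				       enum dsa_tag_protocol proto);

	/* after: called once per switch; drivers that care iterate over
	 * their CPU ports themselves
	 */
	int	(*change_tag_protocol)(struct dsa_switch *ds,
				       enum dsa_tag_protocol proto);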

drivers/net/dsa/mv88e6xxx/chip.c

@@ -6329,11 +6329,12 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
return chip->tag_protocol;
}
static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, int port,
static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
struct mv88e6xxx_chip *chip = ds->priv;
enum dsa_tag_protocol old_protocol;
struct dsa_port *cpu_dp;
int err;
switch (proto) {
@@ -6358,11 +6359,24 @@ static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, int port,
chip->tag_protocol = proto;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_setup_port_mode(chip, port);
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
err = mv88e6xxx_setup_port_mode(chip, cpu_dp->index);
if (err) {
mv88e6xxx_reg_unlock(chip);
goto unwind;
}
}
mv88e6xxx_reg_unlock(chip);
if (err)
chip->tag_protocol = old_protocol;
return 0;
unwind:
chip->tag_protocol = old_protocol;
mv88e6xxx_reg_lock(chip);
dsa_switch_for_each_cpu_port_continue_reverse(cpu_dp, ds)
mv88e6xxx_setup_port_mode(chip, cpu_dp->index);
mv88e6xxx_reg_unlock(chip);
return err;
}
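
The hunk above follows the rollback idiom that the new
dsa_switch_for_each_cpu_port_continue_reverse() helper (added in
include/net/dsa.h below) enables. A minimal sketch of the idiom, with a
hypothetical configure()/restore() pair standing in for
mv88e6xxx_setup_port_mode():

	static int example_setup_all_cpu_ports(struct dsa_switch *ds)
	{
		struct dsa_port *cpu_dp;
		int err;

		dsa_switch_for_each_cpu_port(cpu_dp, ds) {
			err = configure(cpu_dp);	/* hypothetical helper */
			if (err)
				goto unwind;
		}

		return 0;

	unwind:
		/* visits, in reverse, only the CPU ports that were already
		 * configured before the failure
		 */
		dsa_switch_for_each_cpu_port_continue_reverse(cpu_dp, ds)
			restore(cpu_dp);		/* hypothetical helper */

		return err;
	}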

drivers/net/dsa/ocelot/felix.c

@@ -42,43 +42,6 @@ static struct net_device *felix_classify_db(struct dsa_db db)
}
}
static void felix_migrate_pgid_bit(struct dsa_switch *ds, int from, int to,
int pgid)
{
struct ocelot *ocelot = ds->priv;
bool on;
u32 val;
val = ocelot_read_rix(ocelot, ANA_PGID_PGID, pgid);
on = !!(val & BIT(from));
val &= ~BIT(from);
if (on)
val |= BIT(to);
else
val &= ~BIT(to);
ocelot_write_rix(ocelot, val, ANA_PGID_PGID, pgid);
}
static void felix_migrate_flood_to_npi_port(struct dsa_switch *ds, int port)
{
struct ocelot *ocelot = ds->priv;
felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_UC);
felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_MC);
felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_BC);
}
static void
felix_migrate_flood_to_tag_8021q_port(struct dsa_switch *ds, int port)
{
struct ocelot *ocelot = ds->priv;
felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_UC);
felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_MC);
felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_BC);
}
/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
* the tagger can perform RX source port identification.
*/
@@ -313,6 +276,21 @@ static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
mutex_unlock(&ocelot->fwd_domain_lock);
}
static int felix_trap_get_cpu_port(struct dsa_switch *ds,
const struct ocelot_vcap_filter *trap)
{
struct dsa_port *dp;
int first_port;
if (WARN_ON(!trap->ingress_port_mask))
return -1;
first_port = __ffs(trap->ingress_port_mask);
dp = dsa_to_port(ds, first_port);
return dp->cpu_dp->index;
}
/* On switches with no extraction IRQ wired, trapped packets need to be
* replicated over Ethernet as well, otherwise we'd get no notification of
* their arrival when using the ocelot-8021q tagging protocol.
@@ -326,19 +304,12 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds,
struct ocelot_vcap_filter *trap;
enum ocelot_mask_mode mask_mode;
unsigned long port_mask;
struct dsa_port *dp;
bool cpu_copy_ena;
int cpu = -1, err;
int err;
if (!felix->info->quirk_no_xtr_irq)
return 0;
/* Figure out the current CPU port */
dsa_switch_for_each_cpu_port(dp, ds) {
cpu = dp->index;
break;
}
/* We are sure that "cpu" was found, otherwise
* dsa_tree_setup_default_cpu() would have failed earlier.
*/
@@ -356,7 +327,7 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds,
* port module.
*/
mask_mode = OCELOT_MASK_MODE_REDIRECT;
port_mask = BIT(cpu);
port_mask = BIT(felix_trap_get_cpu_port(ds, trap));
cpu_copy_ena = !!trap->take_ts;
} else {
/* Trap packets only to the CPU port module, which is
@@ -384,90 +355,6 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds,
return 0;
}
static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
{
struct ocelot *ocelot = ds->priv;
struct dsa_port *dp;
int err;
felix_8021q_cpu_port_init(ocelot, cpu);
dsa_switch_for_each_available_port(dp, ds) {
/* This overwrites ocelot_init():
* Do not forward BPDU frames to the CPU port module,
* for 2 reasons:
* - When these packets are injected from the tag_8021q
* CPU port, we want them to go out, not loop back
* into the system.
* - STP traffic ingressing on a user port should go to
* the tag_8021q CPU port, not to the hardware CPU
* port module.
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);
}
err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
if (err)
return err;
err = ocelot_migrate_mdbs(ocelot, BIT(ocelot->num_phys_ports),
BIT(cpu));
if (err)
goto out_tag_8021q_unregister;
felix_migrate_flood_to_tag_8021q_port(ds, cpu);
err = felix_update_trapping_destinations(ds, true);
if (err)
goto out_migrate_flood;
/* The ownership of the CPU port module's queues might have just been
* transferred to the tag_8021q tagger from the NPI-based tagger.
* So there might still be all sorts of crap in the queues. On the
* other hand, the MMIO-based matching of PTP frames is very brittle,
* so we need to be careful that there are no extra frames to be
* dequeued over MMIO, since we would never know to discard them.
*/
ocelot_drain_cpu_queue(ocelot, 0);
return 0;
out_migrate_flood:
felix_migrate_flood_to_npi_port(ds, cpu);
ocelot_migrate_mdbs(ocelot, BIT(cpu), BIT(ocelot->num_phys_ports));
out_tag_8021q_unregister:
dsa_tag_8021q_unregister(ds);
return err;
}
static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
{
struct ocelot *ocelot = ds->priv;
struct dsa_port *dp;
int err;
err = felix_update_trapping_destinations(ds, false);
if (err)
dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
err);
dsa_tag_8021q_unregister(ds);
dsa_switch_for_each_available_port(dp, ds) {
/* Restore the logic from ocelot_init:
* do not forward BPDU frames to the front ports.
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
ANA_PORT_CPU_FWD_BPDU_CFG,
dp->index);
}
felix_8021q_cpu_port_deinit(ocelot, cpu);
}
/* The CPU port module is connected to the Node Processor Interface (NPI). This
* is the mode through which frames can be injected from and extracted to an
* external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
@@ -511,117 +398,249 @@ static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
}
static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
static int felix_tag_npi_setup(struct dsa_switch *ds)
{
struct dsa_port *dp, *first_cpu_dp = NULL;
struct ocelot *ocelot = ds->priv;
int err;
err = ocelot_migrate_mdbs(ocelot, BIT(cpu),
BIT(ocelot->num_phys_ports));
if (err)
return err;
dsa_switch_for_each_user_port(dp, ds) {
if (first_cpu_dp && dp->cpu_dp != first_cpu_dp) {
dev_err(ds->dev, "Multiple NPI ports not supported\n");
return -EINVAL;
}
felix_migrate_flood_to_npi_port(ds, cpu);
first_cpu_dp = dp->cpu_dp;
}
felix_npi_port_init(ocelot, cpu);
if (!first_cpu_dp)
return -EINVAL;
felix_npi_port_init(ocelot, first_cpu_dp->index);
return 0;
}
static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
static void felix_tag_npi_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
felix_npi_port_deinit(ocelot, cpu);
felix_npi_port_deinit(ocelot, ocelot->npi);
}
static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu,
enum dsa_tag_protocol proto)
static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
return BIT(ocelot->num_phys_ports);
}
static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = {
.setup = felix_tag_npi_setup,
.teardown = felix_tag_npi_teardown,
.get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask,
};
static int felix_tag_8021q_setup(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct dsa_port *dp, *cpu_dp;
int err;
switch (proto) {
case DSA_TAG_PROTO_SEVILLE:
case DSA_TAG_PROTO_OCELOT:
err = felix_setup_tag_npi(ds, cpu);
err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
if (err)
return err;
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
felix_8021q_cpu_port_init(ocelot, cpu_dp->index);
/* TODO we could support multiple CPU ports in tag_8021q mode */
break;
case DSA_TAG_PROTO_OCELOT_8021Q:
err = felix_setup_tag_8021q(ds, cpu);
break;
default:
err = -EPROTONOSUPPORT;
}
return err;
dsa_switch_for_each_available_port(dp, ds) {
/* This overwrites ocelot_init():
* Do not forward BPDU frames to the CPU port module,
* for 2 reasons:
* - When these packets are injected from the tag_8021q
* CPU port, we want them to go out, not loop back
* into the system.
* - STP traffic ingressing on a user port should go to
* the tag_8021q CPU port, not to the hardware CPU
* port module.
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);
}
/* The ownership of the CPU port module's queues might have just been
* transferred to the tag_8021q tagger from the NPI-based tagger.
* So there might still be all sorts of crap in the queues. On the
* other hand, the MMIO-based matching of PTP frames is very brittle,
* so we need to be careful that there are no extra frames to be
* dequeued over MMIO, since we would never know to discard them.
*/
ocelot_drain_cpu_queue(ocelot, 0);
return 0;
}
static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu,
enum dsa_tag_protocol proto)
static void felix_tag_8021q_teardown(struct dsa_switch *ds)
{
switch (proto) {
case DSA_TAG_PROTO_SEVILLE:
case DSA_TAG_PROTO_OCELOT:
felix_teardown_tag_npi(ds, cpu);
break;
case DSA_TAG_PROTO_OCELOT_8021Q:
felix_teardown_tag_8021q(ds, cpu);
break;
default:
struct ocelot *ocelot = ds->priv;
struct dsa_port *dp, *cpu_dp;
dsa_switch_for_each_available_port(dp, ds) {
/* Restore the logic from ocelot_init:
* do not forward BPDU frames to the front ports.
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
ANA_PORT_CPU_FWD_BPDU_CFG,
dp->index);
}
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
felix_8021q_cpu_port_deinit(ocelot, cpu_dp->index);
/* TODO we could support multiple CPU ports in tag_8021q mode */
break;
}
dsa_tag_8021q_unregister(ds);
}
static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds)
{
return dsa_cpu_ports(ds);
}
static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = {
.setup = felix_tag_8021q_setup,
.teardown = felix_tag_8021q_teardown,
.get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask,
};
static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask,
bool uc, bool mc, bool bc)
{
struct ocelot *ocelot = ds->priv;
unsigned long val;
val = uc ? mask : 0;
ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_UC);
val = mc ? mask : 0;
ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC);
ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4);
ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6);
}
static void
felix_migrate_host_flood(struct dsa_switch *ds,
const struct felix_tag_proto_ops *proto_ops,
const struct felix_tag_proto_ops *old_proto_ops)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
unsigned long mask;
if (old_proto_ops) {
mask = old_proto_ops->get_host_fwd_mask(ds);
felix_set_host_flood(ds, mask, false, false, false);
}
mask = proto_ops->get_host_fwd_mask(ds);
felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask,
!!felix->host_flood_mc_mask, true);
}
static int felix_migrate_mdbs(struct dsa_switch *ds,
const struct felix_tag_proto_ops *proto_ops,
const struct felix_tag_proto_ops *old_proto_ops)
{
struct ocelot *ocelot = ds->priv;
unsigned long from, to;
if (!old_proto_ops)
return 0;
from = old_proto_ops->get_host_fwd_mask(ds);
to = proto_ops->get_host_fwd_mask(ds);
return ocelot_migrate_mdbs(ocelot, from, to);
}
/* Configure the shared hardware resources for a transition between
* @old_proto_ops and @proto_ops.
* Manual migration is needed because as far as DSA is concerned, no change of
* the CPU port is taking place here, just of the tagging protocol.
*/
static int
felix_tag_proto_setup_shared(struct dsa_switch *ds,
const struct felix_tag_proto_ops *proto_ops,
const struct felix_tag_proto_ops *old_proto_ops)
{
bool using_tag_8021q = (proto_ops == &felix_tag_8021q_proto_ops);
int err;
err = felix_migrate_mdbs(ds, proto_ops, old_proto_ops);
if (err)
return err;
felix_update_trapping_destinations(ds, using_tag_8021q);
felix_migrate_host_flood(ds, proto_ops, old_proto_ops);
return 0;
}
/* This always leaves the switch in a consistent state, because although the
* tag_8021q setup can fail, the NPI setup can't. So either the change is made,
* or the restoration is guaranteed to work.
*/
static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
static int felix_change_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
const struct felix_tag_proto_ops *old_proto_ops, *proto_ops;
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
enum dsa_tag_protocol old_proto = felix->tag_proto;
bool cpu_port_active = false;
struct dsa_port *dp;
int err;
if (proto != DSA_TAG_PROTO_SEVILLE &&
proto != DSA_TAG_PROTO_OCELOT &&
proto != DSA_TAG_PROTO_OCELOT_8021Q)
switch (proto) {
case DSA_TAG_PROTO_SEVILLE:
case DSA_TAG_PROTO_OCELOT:
proto_ops = &felix_tag_npi_proto_ops;
break;
case DSA_TAG_PROTO_OCELOT_8021Q:
proto_ops = &felix_tag_8021q_proto_ops;
break;
default:
return -EPROTONOSUPPORT;
/* We don't support multiple CPU ports, yet the DT blob may have
* multiple CPU ports defined. The first CPU port is the active one,
* the others are inactive. In this case, DSA will call
* ->change_tag_protocol() multiple times, once per CPU port.
* Since we implement the tagging protocol change towards "ocelot" or
* "seville" as effectively initializing the NPI port, what we are
* doing is effectively changing who the NPI port is to the last @cpu
* argument passed, which is an unused DSA CPU port and not the one
* that should actively pass traffic.
* Suppress DSA's calls on CPU ports that are inactive.
*/
dsa_switch_for_each_user_port(dp, ds) {
if (dp->cpu_dp->index == cpu) {
cpu_port_active = true;
break;
}
}
if (!cpu_port_active)
return 0;
old_proto_ops = felix->tag_proto_ops;
felix_del_tag_protocol(ds, cpu, old_proto);
err = proto_ops->setup(ds);
if (err)
goto setup_failed;
err = felix_set_tag_protocol(ds, cpu, proto);
if (err) {
felix_set_tag_protocol(ds, cpu, old_proto);
return err;
}
err = felix_tag_proto_setup_shared(ds, proto_ops, old_proto_ops);
if (err)
goto setup_shared_failed;
if (old_proto_ops)
old_proto_ops->teardown(ds);
felix->tag_proto_ops = proto_ops;
felix->tag_proto = proto;
return 0;
setup_shared_failed:
proto_ops->teardown(ds);
setup_failed:
return err;
}
static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
@@ -634,6 +653,28 @@ static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
return felix->tag_proto;
}
static void felix_port_set_host_flood(struct dsa_switch *ds, int port,
bool uc, bool mc)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
unsigned long mask;
if (uc)
felix->host_flood_uc_mask |= BIT(port);
else
felix->host_flood_uc_mask &= ~BIT(port);
if (mc)
felix->host_flood_mc_mask |= BIT(port);
else
felix->host_flood_mc_mask &= ~BIT(port);
mask = felix->tag_proto_ops->get_host_fwd_mask(ds);
felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask,
!!felix->host_flood_mc_mask, true);
}
static int felix_set_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
@@ -668,15 +709,19 @@ static int felix_fdb_add(struct dsa_switch *ds, int port,
struct dsa_db db)
{
struct net_device *bridge_dev = felix_classify_db(db);
struct dsa_port *dp = dsa_to_port(ds, port);
struct ocelot *ocelot = ds->priv;
if (IS_ERR(bridge_dev))
return PTR_ERR(bridge_dev);
if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
if (dsa_port_is_cpu(dp) && !bridge_dev &&
dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
return 0;
if (dsa_port_is_cpu(dp))
port = PGID_CPU;
return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
}
@@ -685,15 +730,19 @@ static int felix_fdb_del(struct dsa_switch *ds, int port,
struct dsa_db db)
{
struct net_device *bridge_dev = felix_classify_db(db);
struct dsa_port *dp = dsa_to_port(ds, port);
struct ocelot *ocelot = ds->priv;
if (IS_ERR(bridge_dev))
return PTR_ERR(bridge_dev);
if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
if (dsa_port_is_cpu(dp) && !bridge_dev &&
dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
return 0;
if (dsa_port_is_cpu(dp))
port = PGID_CPU;
return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev);
}
@@ -737,6 +786,9 @@ static int felix_mdb_add(struct dsa_switch *ds, int port,
dsa_mdb_present_in_other_db(ds, port, mdb, db))
return 0;
if (port == ocelot->npi)
port = ocelot->num_phys_ports;
return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev);
}
@@ -754,6 +806,9 @@ static int felix_mdb_del(struct dsa_switch *ds, int port,
dsa_mdb_present_in_other_db(ds, port, mdb, db))
return 0;
if (port == ocelot->npi)
port = ocelot->num_phys_ports;
return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev);
}
@@ -780,6 +835,9 @@ static int felix_bridge_flags(struct dsa_switch *ds, int port,
{
struct ocelot *ocelot = ds->priv;
if (port == ocelot->npi)
port = ocelot->num_phys_ports;
ocelot_port_bridge_flags(ocelot, port, val);
return 0;
@@ -1293,7 +1351,6 @@ static int felix_setup(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
unsigned long cpu_flood;
struct dsa_port *dp;
int err;
@@ -1327,21 +1384,10 @@ static int felix_setup(struct dsa_switch *ds)
if (err)
goto out_deinit_ports;
dsa_switch_for_each_cpu_port(dp, ds) {
/* The initial tag protocol is NPI which always returns 0, so
* there's no real point in checking for errors.
*/
felix_set_tag_protocol(ds, dp->index, felix->tag_proto);
/* Start off with flooding disabled towards the NPI port
* (actually CPU port module).
*/
cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
break;
}
/* The initial tag protocol is NPI which won't fail during initial
* setup, there's no real point in checking for errors.
*/
felix_change_tag_protocol(ds, felix->tag_proto);
ds->mtu_enforcement_ingress = true;
ds->assisted_learning_on_cpu_port = true;
@@ -1370,10 +1416,8 @@ static void felix_teardown(struct dsa_switch *ds)
struct felix *felix = ocelot_to_felix(ocelot);
struct dsa_port *dp;
dsa_switch_for_each_cpu_port(dp, ds) {
felix_del_tag_protocol(ds, dp->index, felix->tag_proto);
break;
}
if (felix->tag_proto_ops)
felix->tag_proto_ops->teardown(ds);
dsa_switch_for_each_available_port(dp, ds)
ocelot_deinit_port(ocelot, dp->index);
@@ -1859,6 +1903,7 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_get_dscp_prio = felix_port_get_dscp_prio,
.port_add_dscp_prio = felix_port_add_dscp_prio,
.port_del_dscp_prio = felix_port_del_dscp_prio,
.port_set_host_flood = felix_port_set_host_flood,
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
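
A note on felix_set_host_flood() above: ocelot_rmw_rix() writes only the bits
covered by mask, which is what lets the host-forwarding bits be flipped per
tagging protocol without disturbing flooding towards user ports. One such
PGID update is roughly equivalent to the following sketch, assuming the
ocelot_read_rix()/ocelot_write_rix() accessors seen in the removed
felix_migrate_pgid_bit():

	u32 pgid = ocelot_read_rix(ocelot, ANA_PGID_PGID, PGID_UC);

	pgid &= ~mask;		/* clear the host ports' flood bits */
	if (uc)
		pgid |= mask;	/* re-set them if unicast flooding is on */

	ocelot_write_rix(ocelot, pgid, ANA_PGID_PGID, PGID_UC);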

drivers/net/dsa/ocelot/felix.h

@@ -59,6 +59,19 @@ struct felix_info {
struct resource *res);
};
/* Methods for initializing the hardware resources specific to a tagging
* protocol (like the NPI port, for "ocelot" or "seville", or the VCAP TCAMs,
* for "ocelot-8021q").
* It is important that the resources configured here do not have side effects
* for the other tagging protocols. If that is the case, their configuration
* needs to go to felix_tag_proto_setup_shared().
*/
struct felix_tag_proto_ops {
int (*setup)(struct dsa_switch *ds);
void (*teardown)(struct dsa_switch *ds);
unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds);
};
extern const struct dsa_switch_ops felix_switch_ops;
/* DSA glue / front-end for struct ocelot */
@@ -71,7 +84,10 @@ struct felix {
resource_size_t switch_base;
resource_size_t imdio_base;
enum dsa_tag_protocol tag_proto;
const struct felix_tag_proto_ops *tag_proto_ops;
struct kthread_worker *xmit_worker;
unsigned long host_flood_uc_mask;
unsigned long host_flood_mc_mask;
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
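
The two instances of this structure live in felix.c above
(felix_tag_npi_proto_ops and felix_tag_8021q_proto_ops). As a usage sketch,
a hypothetical third tagging protocol would only need to supply its own ops
and be mapped in the felix_change_tag_protocol() switch statement:

	/* illustrative only, not part of the patch */
	static const struct felix_tag_proto_ops felix_tag_foo_proto_ops = {
		.setup			= felix_tag_foo_setup,
		.teardown		= felix_tag_foo_teardown,
		.get_host_fwd_mask	= felix_tag_foo_get_host_fwd_mask,
	};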

drivers/net/dsa/realtek/rtl8365mb.c

@@ -1778,7 +1778,7 @@ static int rtl8365mb_cpu_config(struct realtek_priv *priv)
return 0;
}
static int rtl8365mb_change_tag_protocol(struct dsa_switch *ds, int cpu_index,
static int rtl8365mb_change_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
struct realtek_priv *priv = ds->priv;

drivers/net/ethernet/mscc/ocelot.c

@@ -1349,15 +1349,10 @@ EXPORT_SYMBOL(ocelot_drain_cpu_queue);
int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr,
u16 vid, const struct net_device *bridge)
{
int pgid = port;
if (port == ocelot->npi)
pgid = PGID_CPU;
if (!vid)
vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED);
return ocelot_mact_learn(ocelot, port, addr, vid, ENTRYTYPE_LOCKED);
}
EXPORT_SYMBOL(ocelot_fdb_add);
@@ -2344,9 +2339,6 @@ int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
struct ocelot_pgid *pgid;
u16 vid = mdb->vid;
if (port == ocelot->npi)
port = ocelot->num_phys_ports;
if (!vid)
vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
@@ -2404,9 +2396,6 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
struct ocelot_pgid *pgid;
u16 vid = mdb->vid;
if (port == ocelot->npi)
port = ocelot->num_phys_ports;
if (!vid)
vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
@@ -2954,9 +2943,6 @@ EXPORT_SYMBOL(ocelot_port_pre_bridge_flags);
void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
struct switchdev_brport_flags flags)
{
if (port == ocelot->npi)
port = ocelot->num_phys_ports;
if (flags.mask & BR_LEARNING)
ocelot_port_set_learning(ocelot, port,
!!(flags.val & BR_LEARNING));
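
These hunks move the NPI port fixups out of the common ocelot switch library;
the DSA front-end now translates before calling in, as the felix.c hunks
above show. The resulting convention on the felix side, in short:

	/* FDB: learn/forget on the CPU port module's PGID */
	if (dsa_port_is_cpu(dp))
		port = PGID_CPU;

	/* MDB and bridge flags: use the one-past-the-end port index */
	if (port == ocelot->npi)
		port = ocelot->num_phys_ports;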

include/net/dsa.h

@@ -579,6 +579,10 @@ static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
dsa_switch_for_each_port((_dp), (_ds)) \
if (dsa_port_is_cpu((_dp)))
#define dsa_switch_for_each_cpu_port_continue_reverse(_dp, _ds) \
dsa_switch_for_each_port_continue_reverse((_dp), (_ds)) \
if (dsa_port_is_cpu((_dp)))
static inline u32 dsa_user_ports(struct dsa_switch *ds)
{
struct dsa_port *dp;
@@ -590,6 +594,17 @@ static inline u32 dsa_user_ports(struct dsa_switch *ds)
return mask;
}
static inline u32 dsa_cpu_ports(struct dsa_switch *ds)
{
struct dsa_port *cpu_dp;
u32 mask = 0;
dsa_switch_for_each_cpu_port(cpu_dp, ds)
mask |= BIT(cpu_dp->index);
return mask;
}
/* Return the local port used to reach an arbitrary switch device */
static inline unsigned int dsa_routing_port(struct dsa_switch *ds, int device)
{
@@ -792,7 +807,7 @@ struct dsa_switch_ops {
enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mprot);
int (*change_tag_protocol)(struct dsa_switch *ds, int port,
int (*change_tag_protocol)(struct dsa_switch *ds,
enum dsa_tag_protocol proto);
/*
* Method for switch drivers to connect to the tagging protocol driver
@@ -967,6 +982,8 @@ struct dsa_switch_ops {
int (*port_bridge_flags)(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack);
void (*port_set_host_flood)(struct dsa_switch *ds, int port,
bool uc, bool mc);
/*
* VLAN support
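
As a worked example of the new helper: on a switch whose CPU ports are
ports 4 and 5 (plausible for the LS1028A Felix from the cover letter, but
stated here only for illustration), the mask comes out as:

	u32 mask = dsa_cpu_ports(ds);	/* BIT(4) | BIT(5) == 0x30 */

This is the mask that felix_tag_8021q_get_host_fwd_mask() above returns, in
contrast to the NPI mode's single BIT(ocelot->num_phys_ports).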

net/dsa/dsa2.c

@@ -809,22 +809,18 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
struct dsa_switch_tree *dst = ds->dst;
struct dsa_port *cpu_dp;
int err;
if (tag_ops->proto == dst->default_proto)
goto connect;
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
rtnl_lock();
err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
tag_ops->proto);
rtnl_unlock();
if (err) {
dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
tag_ops->name, ERR_PTR(err));
return err;
}
rtnl_lock();
err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
rtnl_unlock();
if (err) {
dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
tag_ops->name, ERR_PTR(err));
return err;
}
connect:

net/dsa/dsa_priv.h

@@ -291,6 +291,7 @@ int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast);
void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast);
void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc);
/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;

net/dsa/port.c

@@ -920,6 +920,14 @@ int dsa_port_bridge_flags(struct dsa_port *dp,
return 0;
}
void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc)
{
struct dsa_switch *ds = dp->ds;
if (ds->ops->port_set_host_flood)
ds->ops->port_set_host_flood(ds, dp->index, uc, mc);
}
int dsa_port_vlan_msti(struct dsa_port *dp,
const struct switchdev_vlan_msti *msti)
{

net/dsa/slave.c

@@ -262,37 +262,13 @@ static int dsa_slave_close(struct net_device *dev)
return 0;
}
/* Keep flooding enabled towards this port's CPU port as long as it serves at
* least one port in the tree that requires it.
*/
static void dsa_port_manage_cpu_flood(struct dsa_port *dp)
static void dsa_slave_manage_host_flood(struct net_device *dev)
{
struct switchdev_brport_flags flags = {
.mask = BR_FLOOD | BR_MCAST_FLOOD,
};
struct dsa_switch_tree *dst = dp->ds->dst;
struct dsa_port *cpu_dp = dp->cpu_dp;
struct dsa_port *other_dp;
int err;
bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
struct dsa_port *dp = dsa_slave_to_port(dev);
bool uc = dev->flags & IFF_PROMISC;
list_for_each_entry(other_dp, &dst->ports, list) {
if (!dsa_port_is_user(other_dp))
continue;
if (other_dp->cpu_dp != cpu_dp)
continue;
if (other_dp->slave->flags & IFF_ALLMULTI)
flags.val |= BR_MCAST_FLOOD;
if (other_dp->slave->flags & IFF_PROMISC)
flags.val |= BR_FLOOD | BR_MCAST_FLOOD;
}
err = dsa_port_pre_bridge_flags(dp, flags, NULL);
if (err)
return;
dsa_port_bridge_flags(cpu_dp, flags, NULL);
dsa_port_set_host_flood(dp, uc, mc);
}
static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
@@ -310,7 +286,7 @@ static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
if (dsa_switch_supports_uc_filtering(ds) &&
dsa_switch_supports_mc_filtering(ds))
dsa_port_manage_cpu_flood(dp);
dsa_slave_manage_host_flood(dev);
}
static void dsa_slave_set_rx_mode(struct net_device *dev)
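
The simplified path after this change, for a driver that implements the new
operation (felix being the only one in this series):

	dsa_slave_change_rx_flags(dev, IFF_PROMISC)
	    -> dsa_slave_manage_host_flood(dev)
	        -> dsa_port_set_host_flood(dp, uc, mc)
	            -> ds->ops->port_set_host_flood(ds, dp->index, uc, mc)

IFF_PROMISC requests both unicast and multicast host flooding; IFF_ALLMULTI
requests only multicast.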

net/dsa/switch.c

@@ -809,14 +809,12 @@ static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
ASSERT_RTNL();
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
tag_ops->proto);
if (err)
return err;
err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
if (err)
return err;
dsa_switch_for_each_cpu_port(cpu_dp, ds)
dsa_port_set_tag_protocol(cpu_dp, tag_ops);
}
/* Now that changing the tag protocol can no longer fail, let's update
* the remaining bits which are "duplicated for faster access", and the