Merge branch 'mlxsw-Prepare-SPAN-API-for-upcoming-changes'

Ido Schimmel says:

====================
mlxsw: Prepare SPAN API for upcoming changes

Switched Port Analyzer (SPAN) is used for packet mirroring. In mlxsw
this is achieved by attaching a tc-mirred action to either a matchall
or a flower classifier.

The current API used to configure SPAN consists of two functions:
mlxsw_sp_span_mirror_add() and mlxsw_sp_span_mirror_del().

These two functions pack a lot of different operations:

* SPAN agent configuration: Determining the egress port and optional
  headers that need to encapsulate the mirrored packet (when mirroring
  to a gretap, for example)

* Egress mirror buffer configuration: Allocating / freeing a buffer when
  a port is analyzed (inspected) at egress

* SPAN agent binding: Binding the SPAN agent to a trigger, if any. The
  current triggers are incoming / outgoing packets, and they are only
  used for matchall-based mirroring

This non-modular design makes it difficult to extend the API for future
changes, such as new mirror targets (CPU) and new global triggers (early
dropped packets, for example).

Therefore, this patch set gradually adds APIs for the above-mentioned
operations and then converts the two existing users to the new API
instead of the old one. No functional changes intended. Tested with
existing mirroring selftests.
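
A converted caller strings the three new building blocks together in
sequence. Below is a minimal sketch, condensed from the matchall
conversion in this set; span_id, trigger, parms and err are local
variables of the caller, and error unwinding is abbreviated:

	/* 1. SPAN agent: resolve egress port / encapsulation for to_dev */
	err = mlxsw_sp_span_agent_get(mlxsw_sp, to_dev, &span_id);
	if (err)
		return err;

	/* 2. Egress mirror buffer: allocated only when analyzing at egress */
	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
	if (err)
		goto err_analyzed_port_get;

	/* 3. Bind the agent to the per-port ingress / egress trigger */
	trigger = ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
			    MLXSW_SP_SPAN_TRIGGER_EGRESS;
	parms.span_id = span_id;
	err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
				       &parms);
	if (err)
		goto err_agent_bind;

Teardown performs the same steps in reverse: mlxsw_sp_span_agent_unbind(),
mlxsw_sp_span_analyzed_port_put() and, finally, mlxsw_sp_span_agent_put().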

Patch set overview:

Patches #1-#5 gradually add the new API
Patches #6-#8 convert existing users to use the new API
Patch #9 removes the old API
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2020-04-30 13:02:32 -07:00
commit b8afaeddea
4 changed files with 461 additions and 256 deletions


@@ -136,28 +136,35 @@ mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port,
const struct net_device *out_dev,
bool ingress, int *p_span_id)
{
struct mlxsw_sp_port *in_port;
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = priv;
enum mlxsw_sp_span_type type;
int err;
type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
in_port = mlxsw_sp->ports[local_in_port];
err = mlxsw_sp_span_agent_get(mlxsw_sp, out_dev, p_span_id);
if (err)
return err;
return mlxsw_sp_span_mirror_add(in_port, out_dev, type,
false, p_span_id);
mlxsw_sp_port = mlxsw_sp->ports[local_in_port];
err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
if (err)
goto err_analyzed_port_get;
return 0;
err_analyzed_port_get:
mlxsw_sp_span_agent_put(mlxsw_sp, *p_span_id);
return err;
}
static void
mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress)
{
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_port *in_port;
enum mlxsw_sp_span_type type;
type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
in_port = mlxsw_sp->ports[local_in_port];
mlxsw_sp_span_mirror_del(in_port, span_id, type, false);
mlxsw_sp_port = mlxsw_sp->ports[local_in_port];
mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
}
const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {


@@ -48,31 +48,57 @@ static int
mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_mall_entry *mall_entry)
{
enum mlxsw_sp_span_type span_type;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_span_trigger_parms parms;
enum mlxsw_sp_span_trigger trigger;
int err;
if (!mall_entry->mirror.to_dev) {
netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
return -EINVAL;
}
span_type = mall_entry->ingress ? MLXSW_SP_SPAN_INGRESS :
MLXSW_SP_SPAN_EGRESS;
return mlxsw_sp_span_mirror_add(mlxsw_sp_port,
mall_entry->mirror.to_dev,
span_type, true,
&mall_entry->mirror.span_id);
err = mlxsw_sp_span_agent_get(mlxsw_sp, mall_entry->mirror.to_dev,
&mall_entry->mirror.span_id);
if (err)
return err;
err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
mall_entry->ingress);
if (err)
goto err_analyzed_port_get;
trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
MLXSW_SP_SPAN_TRIGGER_EGRESS;
parms.span_id = mall_entry->mirror.span_id;
err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
&parms);
if (err)
goto err_agent_bind;
return 0;
err_agent_bind:
mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
err_analyzed_port_get:
mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
return err;
}
static void
mlxsw_sp_mall_port_mirror_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_mall_entry *mall_entry)
{
enum mlxsw_sp_span_type span_type;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_span_trigger_parms parms;
enum mlxsw_sp_span_trigger trigger;
span_type = mall_entry->ingress ? MLXSW_SP_SPAN_INGRESS :
MLXSW_SP_SPAN_EGRESS;
mlxsw_sp_span_mirror_del(mlxsw_sp_port, mall_entry->mirror.span_id,
span_type, true);
trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
MLXSW_SP_SPAN_TRIGGER_EGRESS;
parms.span_id = mall_entry->mirror.span_id;
mlxsw_sp_span_agent_unbind(mlxsw_sp, trigger, mlxsw_sp_port, &parms);
mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
}
static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,


@@ -3,6 +3,7 @@
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
@@ -20,11 +21,29 @@
struct mlxsw_sp_span {
struct work_struct work;
struct mlxsw_sp *mlxsw_sp;
struct list_head analyzed_ports_list;
struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
struct list_head trigger_entries_list;
atomic_t active_entries_count;
int entries_count;
struct mlxsw_sp_span_entry entries[];
};
struct mlxsw_sp_span_analyzed_port {
struct list_head list; /* Member of analyzed_ports_list */
refcount_t ref_count;
u8 local_port;
bool ingress;
};
struct mlxsw_sp_span_trigger_entry {
struct list_head list; /* Member of trigger_entries_list */
refcount_t ref_count;
u8 local_port;
enum mlxsw_sp_span_trigger trigger;
struct mlxsw_sp_span_trigger_parms parms;
};
static void mlxsw_sp_span_respin_work(struct work_struct *work);
static u64 mlxsw_sp_span_occ_get(void *priv)
@@ -49,15 +68,14 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
return -ENOMEM;
span->entries_count = entries_count;
atomic_set(&span->active_entries_count, 0);
mutex_init(&span->analyzed_ports_lock);
INIT_LIST_HEAD(&span->analyzed_ports_list);
INIT_LIST_HEAD(&span->trigger_entries_list);
span->mlxsw_sp = mlxsw_sp;
mlxsw_sp->span = span;
for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
INIT_LIST_HEAD(&curr->bound_ports_list);
curr->id = i;
}
for (i = 0; i < mlxsw_sp->span->entries_count; i++)
mlxsw_sp->span->entries[i].id = i;
devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
mlxsw_sp_span_occ_get, mlxsw_sp);
@@ -69,16 +87,13 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
cancel_work_sync(&mlxsw_sp->span->work);
devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
}
WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
kfree(mlxsw_sp->span);
}
@@ -751,26 +766,8 @@ static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
return 0;
}
static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
struct mlxsw_sp_span_inspected_port *p;
int i;
for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
list_for_each_entry(p, &curr->bound_ports_list, list)
if (p->local_port == port->local_port &&
p->type == MLXSW_SP_SPAN_EGRESS)
return true;
}
return false;
}
static int
mlxsw_sp_span_port_buffsize_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char sbib_pl[MLXSW_REG_SBIB_LEN];
@@ -789,20 +786,54 @@ mlxsw_sp_span_port_buffsize_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp *mlxsw_sp,
u8 local_port)
{
char sbib_pl[MLXSW_REG_SBIB_LEN];
mlxsw_reg_sbib_pack(sbib_pl, local_port, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port,
bool ingress)
{
struct mlxsw_sp_span_analyzed_port *analyzed_port;
list_for_each_entry(analyzed_port, &span->analyzed_ports_list, list) {
if (analyzed_port->local_port == local_port &&
analyzed_port->ingress == ingress)
return analyzed_port;
}
return NULL;
}
int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
int err = 0;
/* If port is egress mirrored, the shared buffer size should be
* updated according to the mtu value
*/
if (mlxsw_sp_span_is_egress_mirror(port))
return mlxsw_sp_span_port_buffsize_update(port, mtu);
return 0;
mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span, port->local_port,
false))
err = mlxsw_sp_span_port_buffer_update(port, mtu);
mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
return err;
}
void mlxsw_sp_span_speed_update_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp;
mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
span.speed_update_dw);
@@ -810,134 +841,15 @@ void mlxsw_sp_span_speed_update_work(struct work_struct *work)
/* If port is egress mirrored, the shared buffer size should be
* updated according to the speed value.
*/
if (mlxsw_sp_span_is_egress_mirror(mlxsw_sp_port))
mlxsw_sp_span_port_buffsize_update(mlxsw_sp_port,
mlxsw_sp_port->dev->mtu);
}
mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
enum mlxsw_sp_span_type type,
struct mlxsw_sp_port *port,
bool bind)
{
struct mlxsw_sp_span_inspected_port *p;
if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
mlxsw_sp_port->local_port, false))
mlxsw_sp_span_port_buffer_update(mlxsw_sp_port,
mlxsw_sp_port->dev->mtu);
list_for_each_entry(p, &span_entry->bound_ports_list, list)
if (type == p->type &&
port->local_port == p->local_port &&
bind == p->bound)
return p;
return NULL;
}
static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
struct mlxsw_sp_span_entry *span_entry,
enum mlxsw_sp_span_type type,
bool bind)
{
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
char mpar_pl[MLXSW_REG_MPAR_LEN];
int pa_id = span_entry->id;
/* bind the port to the SPAN entry */
mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
(enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}
static int
mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
struct mlxsw_sp_span_entry *span_entry,
enum mlxsw_sp_span_type type,
bool bind)
{
struct mlxsw_sp_span_inspected_port *inspected_port;
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
char sbib_pl[MLXSW_REG_SBIB_LEN];
int i;
int err;
/* A given (source port, direction) can only be bound to one analyzer,
* so if a binding is requested, check for conflicts.
*/
if (bind)
for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
struct mlxsw_sp_span_entry *curr =
&mlxsw_sp->span->entries[i];
if (mlxsw_sp_span_entry_bound_port_find(curr, type,
port, bind))
return -EEXIST;
}
/* if it is an egress SPAN, bind a shared buffer to it */
if (type == MLXSW_SP_SPAN_EGRESS) {
err = mlxsw_sp_span_port_buffsize_update(port, port->dev->mtu);
if (err)
return err;
}
if (bind) {
err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
true);
if (err)
goto err_port_bind;
}
inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
if (!inspected_port) {
err = -ENOMEM;
goto err_inspected_port_alloc;
}
inspected_port->local_port = port->local_port;
inspected_port->type = type;
inspected_port->bound = bind;
list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
return 0;
err_inspected_port_alloc:
if (bind)
mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
false);
err_port_bind:
if (type == MLXSW_SP_SPAN_EGRESS) {
mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
return err;
}
static void
mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
struct mlxsw_sp_span_entry *span_entry,
enum mlxsw_sp_span_type type,
bool bind)
{
struct mlxsw_sp_span_inspected_port *inspected_port;
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
char sbib_pl[MLXSW_REG_SBIB_LEN];
inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
port, bind);
if (!inspected_port)
return;
if (bind)
mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
false);
/* remove the SBIB buffer if it was egress SPAN */
if (type == MLXSW_SP_SPAN_EGRESS) {
mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
list_del(&inspected_port->list);
kfree(inspected_port);
mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}
static const struct mlxsw_sp_span_entry_ops *
@@ -953,57 +865,6 @@ mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
return NULL;
}
int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
const struct net_device *to_dev,
enum mlxsw_sp_span_type type, bool bind,
int *p_span_id)
{
struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
const struct mlxsw_sp_span_entry_ops *ops;
struct mlxsw_sp_span_parms sparms = {NULL};
struct mlxsw_sp_span_entry *span_entry;
int err;
ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
if (!ops) {
netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
return -EOPNOTSUPP;
}
err = ops->parms_set(to_dev, &sparms);
if (err)
return err;
span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
if (!span_entry)
return -ENOBUFS;
err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
if (err)
goto err_port_bind;
*p_span_id = span_entry->id;
return 0;
err_port_bind:
mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
return err;
}
void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
enum mlxsw_sp_span_type type, bool bind)
{
struct mlxsw_sp_span_entry *span_entry;
span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
if (!span_entry) {
netdev_err(from->dev, "no span entry found\n");
return;
}
mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}
static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
struct mlxsw_sp_span *span;
@@ -1039,3 +900,309 @@ void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
return;
mlxsw_core_schedule_work(&mlxsw_sp->span->work);
}
int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev, int *p_span_id)
{
const struct mlxsw_sp_span_entry_ops *ops;
struct mlxsw_sp_span_entry *span_entry;
struct mlxsw_sp_span_parms sparms;
int err;
ASSERT_RTNL();
ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
if (!ops) {
dev_err(mlxsw_sp->bus_info->dev, "Cannot mirror to requested destination\n");
return -EOPNOTSUPP;
}
memset(&sparms, 0, sizeof(sparms));
err = ops->parms_set(to_dev, &sparms);
if (err)
return err;
span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
if (!span_entry)
return -ENOBUFS;
*p_span_id = span_entry->id;
return 0;
}
void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id)
{
struct mlxsw_sp_span_entry *span_entry;
ASSERT_RTNL();
span_entry = mlxsw_sp_span_entry_find_by_id(mlxsw_sp, span_id);
if (WARN_ON_ONCE(!span_entry))
return;
mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
}
static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress)
{
struct mlxsw_sp_span_analyzed_port *analyzed_port;
int err;
analyzed_port = kzalloc(sizeof(*analyzed_port), GFP_KERNEL);
if (!analyzed_port)
return ERR_PTR(-ENOMEM);
refcount_set(&analyzed_port->ref_count, 1);
analyzed_port->local_port = mlxsw_sp_port->local_port;
analyzed_port->ingress = ingress;
list_add_tail(&analyzed_port->list, &span->analyzed_ports_list);
/* An egress mirror buffer should be allocated on the egress port which
* does the mirroring.
*/
if (!ingress) {
u16 mtu = mlxsw_sp_port->dev->mtu;
err = mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, mtu);
if (err)
goto err_buffer_update;
}
return analyzed_port;
err_buffer_update:
list_del(&analyzed_port->list);
kfree(analyzed_port);
return ERR_PTR(err);
}
static void
mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_span *span,
struct mlxsw_sp_span_analyzed_port *
analyzed_port)
{
struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
/* Remove egress mirror buffer now that port is no longer analyzed
* at egress.
*/
if (!analyzed_port->ingress)
mlxsw_sp_span_port_buffer_disable(mlxsw_sp,
analyzed_port->local_port);
list_del(&analyzed_port->list);
kfree(analyzed_port);
}
int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_span_analyzed_port *analyzed_port;
u8 local_port = mlxsw_sp_port->local_port;
int err = 0;
mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
local_port, ingress);
if (analyzed_port) {
refcount_inc(&analyzed_port->ref_count);
goto out_unlock;
}
analyzed_port = mlxsw_sp_span_analyzed_port_create(mlxsw_sp->span,
mlxsw_sp_port,
ingress);
if (IS_ERR(analyzed_port))
err = PTR_ERR(analyzed_port);
out_unlock:
mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
return err;
}
void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_span_analyzed_port *analyzed_port;
u8 local_port = mlxsw_sp_port->local_port;
mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
local_port, ingress);
if (WARN_ON_ONCE(!analyzed_port))
goto out_unlock;
if (!refcount_dec_and_test(&analyzed_port->ref_count))
goto out_unlock;
mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp->span, analyzed_port);
out_unlock:
mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}
static int
__mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
struct mlxsw_sp_span_trigger_entry *
trigger_entry, bool enable)
{
char mpar_pl[MLXSW_REG_MPAR_LEN];
enum mlxsw_reg_mpar_i_e i_e;
switch (trigger_entry->trigger) {
case MLXSW_SP_SPAN_TRIGGER_INGRESS:
i_e = MLXSW_REG_MPAR_TYPE_INGRESS;
break;
case MLXSW_SP_SPAN_TRIGGER_EGRESS:
i_e = MLXSW_REG_MPAR_TYPE_EGRESS;
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
trigger_entry->parms.span_id);
return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}
static int
mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
struct mlxsw_sp_span_trigger_entry *
trigger_entry)
{
return __mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, true);
}
static void
mlxsw_sp_span_trigger_entry_unbind(struct mlxsw_sp_span *span,
struct mlxsw_sp_span_trigger_entry *
trigger_entry)
{
__mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, false);
}
static struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
enum mlxsw_sp_span_trigger trigger,
struct mlxsw_sp_port *mlxsw_sp_port,
const struct mlxsw_sp_span_trigger_parms
*parms)
{
struct mlxsw_sp_span_trigger_entry *trigger_entry;
int err;
trigger_entry = kzalloc(sizeof(*trigger_entry), GFP_KERNEL);
if (!trigger_entry)
return ERR_PTR(-ENOMEM);
refcount_set(&trigger_entry->ref_count, 1);
trigger_entry->local_port = mlxsw_sp_port->local_port;
trigger_entry->trigger = trigger;
memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
list_add_tail(&trigger_entry->list, &span->trigger_entries_list);
err = mlxsw_sp_span_trigger_entry_bind(span, trigger_entry);
if (err)
goto err_trigger_entry_bind;
return trigger_entry;
err_trigger_entry_bind:
list_del(&trigger_entry->list);
kfree(trigger_entry);
return ERR_PTR(err);
}
static void
mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
struct mlxsw_sp_span_trigger_entry *
trigger_entry)
{
mlxsw_sp_span_trigger_entry_unbind(span, trigger_entry);
list_del(&trigger_entry->list);
kfree(trigger_entry);
}
static struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
enum mlxsw_sp_span_trigger trigger,
struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_span_trigger_entry *trigger_entry;
list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
if (trigger_entry->trigger == trigger &&
trigger_entry->local_port == mlxsw_sp_port->local_port)
return trigger_entry;
}
return NULL;
}
int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_span_trigger trigger,
struct mlxsw_sp_port *mlxsw_sp_port,
const struct mlxsw_sp_span_trigger_parms *parms)
{
struct mlxsw_sp_span_trigger_entry *trigger_entry;
int err = 0;
ASSERT_RTNL();
if (!mlxsw_sp_span_entry_find_by_id(mlxsw_sp, parms->span_id))
return -EINVAL;
trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
trigger,
mlxsw_sp_port);
if (trigger_entry) {
if (trigger_entry->parms.span_id != parms->span_id)
return -EINVAL;
refcount_inc(&trigger_entry->ref_count);
goto out;
}
trigger_entry = mlxsw_sp_span_trigger_entry_create(mlxsw_sp->span,
trigger,
mlxsw_sp_port,
parms);
if (IS_ERR(trigger_entry))
err = PTR_ERR(trigger_entry);
out:
return err;
}
void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_span_trigger trigger,
struct mlxsw_sp_port *mlxsw_sp_port,
const struct mlxsw_sp_span_trigger_parms *parms)
{
struct mlxsw_sp_span_trigger_entry *trigger_entry;
ASSERT_RTNL();
if (WARN_ON_ONCE(!mlxsw_sp_span_entry_find_by_id(mlxsw_sp,
parms->span_id)))
return;
trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
trigger,
mlxsw_sp_port);
if (WARN_ON_ONCE(!trigger_entry))
return;
if (!refcount_dec_and_test(&trigger_entry->ref_count))
return;
mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
}


@@ -13,20 +13,6 @@
struct mlxsw_sp;
struct mlxsw_sp_port;
enum mlxsw_sp_span_type {
MLXSW_SP_SPAN_EGRESS,
MLXSW_SP_SPAN_INGRESS
};
struct mlxsw_sp_span_inspected_port {
struct list_head list;
enum mlxsw_sp_span_type type;
u8 local_port;
/* Whether this is a directly bound mirror (port-to-port) or an ACL. */
bool bound;
};
struct mlxsw_sp_span_parms {
struct mlxsw_sp_port *dest_port; /* NULL for unoffloaded SPAN. */
unsigned int ttl;
@@ -37,13 +23,21 @@ struct mlxsw_sp_span_parms {
u16 vid;
};
enum mlxsw_sp_span_trigger {
MLXSW_SP_SPAN_TRIGGER_INGRESS,
MLXSW_SP_SPAN_TRIGGER_EGRESS,
};
struct mlxsw_sp_span_trigger_parms {
int span_id;
};
struct mlxsw_sp_span_entry_ops;
struct mlxsw_sp_span_entry {
const struct net_device *to_dev;
const struct mlxsw_sp_span_entry_ops *ops;
struct mlxsw_sp_span_parms parms;
struct list_head bound_ports_list;
refcount_t ref_count;
int id;
};
@@ -61,12 +55,6 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
const struct net_device *to_dev,
enum mlxsw_sp_span_type type,
bool bind, int *p_span_id);
void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
enum mlxsw_sp_span_type type, bool bind);
struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev);
@@ -77,4 +65,21 @@ void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu);
void mlxsw_sp_span_speed_update_work(struct work_struct *work);
int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev, int *p_span_id);
void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id);
int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress);
void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress);
int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_span_trigger trigger,
struct mlxsw_sp_port *mlxsw_sp_port,
const struct mlxsw_sp_span_trigger_parms *parms);
void
mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_span_trigger trigger,
struct mlxsw_sp_port *mlxsw_sp_port,
const struct mlxsw_sp_span_trigger_parms *parms);
#endif