// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dcbnl.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/netlink.h>

#include "spectrum.h"
#include "core.h"
#include "port.h"
#include "reg.h"
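
/* Cached configuration of a shared buffer pool (SBPR): threshold mode and
 * size, in cells. The freeze_* bits are assumed to mark attributes that user
 * space must not be allowed to change on this pool.
 */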
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
	u8 freeze_mode:1,
	   freeze_size:1;
};
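
/* Current and maximal buffer occupancy, in cells, as last read from the
 * device.
 */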
struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};
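
/* Cached per-{port, TC} quota configuration (SBCM) and its occupancy
 * readouts. The freeze_* bits are again assumed to veto user-space changes.
 */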
struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;
	struct mlxsw_cp_sb_occ occ;
	u8 freeze_pool:1,
	   freeze_thresh:1;
};
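
/* Magic quota sizes: MLXSW_SP_SB_INFI requests an infinite quota, and
 * MLXSW_SP_SB_REST is assumed to assign whatever space remains after all
 * explicitly sized pools are accounted for.
 */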
#define MLXSW_SP_SB_INFI -1U
#define MLXSW_SP_SB_REST -2U

struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};

struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;
};
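
/* Pools are identified throughout the driver by a single pool index, which is
 * also the devlink pool index and the index into the caches above. The
 * descriptor below maps a pool index to the pool number and direction
 * (ingress / egress) that the firmware expects when registers are packed;
 * the split is only needed when talking to the firmware.
 */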
struct mlxsw_sp_sb_pool_des {
	enum mlxsw_reg_sbxx_dir dir;
	u8 pool;
};
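
/* Pool indices (not FW pool numbers) of the first ingress and egress pools,
 * the egress multicast pool and the CPU port pools, in terms of the
 * descriptor tables below.
 */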
#define MLXSW_SP_SB_POOL_ING 0
#define MLXSW_SP_SB_POOL_EGR 4
#define MLXSW_SP_SB_POOL_EGR_MC 8
#define MLXSW_SP_SB_POOL_ING_CPU 9
#define MLXSW_SP_SB_POOL_EGR_CPU 10

static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},
};

static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},
};
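
/* Numbers of per-port quota contexts cached per direction: 8 ingress TCs
 * (priority groups) and 16 egress TCs.
 */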
#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16

struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
	struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
	struct mlxsw_sp_sb_pm *pms;
};

struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr *prs;
	struct mlxsw_sp_sb_port *ports;
	u32 cell_size;
	u32 max_headroom_cells;
	u64 sb_size;
};
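
/* Per-ASIC default shared buffer configuration: the pool descriptor table and
 * the default prs/pms/cms/mms tables, together with their entry counts.
 */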
struct mlxsw_sp_sb_vals {
	unsigned int pool_count;
	const struct mlxsw_sp_sb_pool_des *pool_dess;
	const struct mlxsw_sp_sb_pm *pms;
	const struct mlxsw_sp_sb_pm *pms_cpu;
	const struct mlxsw_sp_sb_pr *prs;
	const struct mlxsw_sp_sb_mm *mms;
	const struct mlxsw_sp_sb_cm *cms_ingress;
	const struct mlxsw_sp_sb_cm *cms_egress;
	const struct mlxsw_sp_sb_cm *cms_cpu;
	unsigned int mms_count;
	unsigned int cms_ingress_count;
	unsigned int cms_egress_count;
	unsigned int cms_cpu_count;
};

struct mlxsw_sp_sb_ops {
	u32 (*int_buf_size_get)(int mtu, u32 speed);
};
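
/* The shared buffer is carved in fixed-size cells; these helpers convert
 * between bytes and cells, rounding the cell count up.
 */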
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
{
	return mlxsw_sp->sb->cell_size * cells;
}

u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
{
	return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
}

static u32 mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
					    u32 size_cells)
{
	/* Ports with eight lanes use two headroom buffers between which the
	 * configured headroom size is split. Therefore, multiply the calculated
	 * headroom size by two.
	 */
	return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
}
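
/* Lookup helpers for the caches above. A CM context exists only for valid
 * (pg_buff, dir) combinations; mlxsw_sp_sb_cm_get() warns when asked for a
 * nonexistent one.
 */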
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
						 u16 pool_index)
{
	return &mlxsw_sp->sb->prs[pool_index];
}

static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
{
	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
	else
		return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
}

static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u8 pg_buff,
						 enum mlxsw_reg_sbxx_dir dir)
{
	struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];

	WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return &sb_port->ing_cms[pg_buff];
	else
		return &sb_port->eg_cms[pg_buff];
}

static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u16 pool_index)
{
	return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
}
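
/* The _write() helpers below translate the pool index to the FW pool number
 * and direction, issue the register write, and on success mirror the new
 * values into the cache. An infinite quota is cached as the total shared
 * buffer size, converted to cells.
 */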
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				enum mlxsw_reg_sbpr_mode mode,
				u32 size, bool infi_size)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	struct mlxsw_sp_sb_pr *pr;
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
			    size, infi_size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	if (infi_size)
		size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pr->mode = mode;
	pr->size = size;
	return 0;
}

static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, u32 min_buff, u32 max_buff,
				bool infi_max, u16 pool_index)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	struct mlxsw_sp_sb_cm *cm;
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
			    min_buff, max_buff, infi_max, des->pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;

	if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
		if (infi_max)
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
							mlxsw_sp->sb->sb_size);

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
					des->dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool_index = pool_index;
	}
	return 0;
}

static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u16 pool_index, u32 min_buff, u32 max_buff)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}
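
/* Ingress quotas of the CPU port are assumed not to be configured in the
 * device, so occupancy clears and queries for (CPU port, ingress) pairs are
 * silently skipped.
 */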
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

	if (local_port == MLXSW_PORT_CPU_PORT &&
	    des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return 0;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}

static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbpm_pl, size_t sbpm_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}

static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	if (local_port == MLXSW_PORT_CPU_PORT &&
	    des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return 0;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}
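
/* Each priority tracks two buffer indices: the one the chip should use
 * (buf_idx) and the one its source produced, either the DCB ETS mapping
 * (ets_buf_idx) or a dcbnl_setbuffer request (set_buf_idx). Recompute
 * buf_idx from the source that corresponds to the current headroom mode.
 */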
void mlxsw_sp_hdroom_prios_reset_buf_idx(struct mlxsw_sp_hdroom *hdroom)
{
	int prio;

	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
		switch (hdroom->mode) {
		case MLXSW_SP_HDROOM_MODE_DCB:
			hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].ets_buf_idx;
			break;
		case MLXSW_SP_HDROOM_MODE_TC:
			hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].set_buf_idx;
			break;
		}
	}
}
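
/* A buffer must be lossless if any lossless priority is directed to it; all
 * other buffers are lossy.
 */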
void mlxsw_sp_hdroom_bufs_reset_lossiness(struct mlxsw_sp_hdroom *hdroom)
{
	int prio;
	int i;

	for (i = 0; i < DCBX_MAX_BUFFERS; i++)
		hdroom->bufs.buf[i].lossy = true;

	for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++) {
		if (!hdroom->prios.prio[prio].lossy)
			hdroom->bufs.buf[hdroom->prios.prio[prio].buf_idx].lossy = false;
	}
}
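
/* The threshold of a PG buffer is two MTUs' worth of cells, which is assumed
 * to cover one maximum-size packet in flight plus one being received.
 */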
static u16 mlxsw_sp_hdroom_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

static void mlxsw_sp_hdroom_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

static u16 mlxsw_sp_hdroom_buf_delay_get(const struct mlxsw_sp *mlxsw_sp,
					 const struct mlxsw_sp_hdroom *hdroom)
{
	u16 delay_cells;

	delay_cells = mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->delay_bytes);

	/* In the worst case scenario the delay will be made up of packets that
	 * are all of size CELL_SIZE + 1, which means each packet will require
	 * almost twice its true size when buffered in the switch. We therefore
	 * multiply this value by the "cell factor", which is close to 2.
	 *
	 * Another MTU is added in case the transmitting host already started
	 * transmitting a maximum length frame when the PFC packet was received.
	 */
	return 2 * delay_cells + mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->mtu);
}
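
/* Note the argument order: the caller passes (speed, mtu) into this helper's
 * (mtu, speed) parameters, and the call below swaps them again, so the
 * sb_ops callback receives (mtu, speed) as its prototype declares.
 */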
static u32 mlxsw_sp_hdroom_int_buf_size_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed)
{
	u32 buffsize = mlxsw_sp->sb_ops->int_buf_size_get(speed, mtu);

	return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
}

static bool mlxsw_sp_hdroom_buf_is_used(const struct mlxsw_sp_hdroom *hdroom, int buf)
{
	int prio;

	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
		if (hdroom->prios.prio[prio].buf_idx == buf)
			return true;
	}
	return false;
}
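
/* Recompute the sizes of the headroom buffers: the reserve for the internal
 * buffer, and the per-PG buffers. A PG buffer that no priority maps to is
 * sized to zero; a lossy one holds just the threshold; a lossless one
 * additionally absorbs the worst-case PFC round-trip delay.
 */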
void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_hdroom *hdroom)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 reserve_cells;
	int i;

	/* Internal buffer. */
	reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mlxsw_sp_port->max_speed,
							 mlxsw_sp_port->max_mtu);
	reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, reserve_cells);
	hdroom->int_buf.reserve_cells = reserve_cells;

	if (hdroom->int_buf.enable)
		hdroom->int_buf.size_cells = reserve_cells;
	else
		hdroom->int_buf.size_cells = 0;

	/* PG buffers. */
	for (i = 0; i < DCBX_MAX_BUFFERS; i++) {
		struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];
		u16 thres_cells;
		u16 delay_cells;

		if (!mlxsw_sp_hdroom_buf_is_used(hdroom, i)) {
			thres_cells = 0;
			delay_cells = 0;
		} else if (buf->lossy) {
			thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
			delay_cells = 0;
		} else {
			thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
			delay_cells = mlxsw_sp_hdroom_buf_delay_get(mlxsw_sp, hdroom);
		}

		thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
		delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);

		buf->thres_cells = thres_cells;
		if (hdroom->mode == MLXSW_SP_HDROOM_MODE_DCB) {
			buf->size_cells = thres_cells + delay_cells;
		} else {
			/* Do not allow going below the minimum size, even if
			 * the user requested it.
			 */
			buf->size_cells = max(buf->set_size_cells, buf->thres_cells);
		}
	}
}

#define MLXSW_SP_PB_UNUSED 8
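
/* The configure helpers below are idempotent: each compares the relevant part
 * of the new headroom configuration with the cached one and only writes the
 * registers (PBMC, PPTB and SBIB, respectively) when they differ, or when the
 * update is forced.
 */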
|
|
|
|
|
|
2020-09-16 09:35:24 +03:00
|
|
|
static int mlxsw_sp_hdroom_configure_buffers(struct mlxsw_sp_port *mlxsw_sp_port,
|
|
|
|
|
const struct mlxsw_sp_hdroom *hdroom, bool force)
|
|
|
|
|
{
|
|
|
|
|
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
|
|
|
|
|
char pbmc_pl[MLXSW_REG_PBMC_LEN];
|
|
|
|
|
bool dirty;
|
|
|
|
|
int err;
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
dirty = memcmp(&mlxsw_sp_port->hdroom->bufs, &hdroom->bufs, sizeof(hdroom->bufs));
|
|
|
|
|
if (!dirty && !force)
|
|
|
|
|
return 0;
|
|
|
|
|
|
2020-09-16 09:35:26 +03:00
|
|
|
mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0xffff, 0xffff / 2);
|
|
|
|
|
for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
|
2020-09-16 09:35:24 +03:00
|
|
|
const struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];
|
|
|
|
|
|
2020-09-16 09:35:26 +03:00
|
|
|
if (i == MLXSW_SP_PB_UNUSED)
|
|
|
|
|
continue;
|
|
|
|
|
|
2020-09-16 09:35:24 +03:00
|
|
|
mlxsw_sp_hdroom_buf_pack(pbmc_pl, i, buf->size_cells, buf->thres_cells, buf->lossy);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
|
|
|
|
|
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
mlxsw_sp_port->hdroom->bufs = hdroom->bufs;
|
|
|
|
|
return 0;
|
|
|
|
|
}
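
/* The same shadow-and-compare pattern repeats in the priomap and internal
 * buffer helpers below: the last configuration successfully written to the
 * device is cached in mlxsw_sp_port->hdroom, the register write is skipped
 * when the relevant part of the new configuration is identical (unless
 * forced), and the cache is updated only after the write succeeds.
 */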

static int mlxsw_sp_hdroom_configure_priomap(struct mlxsw_sp_port *mlxsw_sp_port,
					     const struct mlxsw_sp_hdroom *hdroom, bool force)
{
	char pptb_pl[MLXSW_REG_PPTB_LEN];
	bool dirty;
	int prio;
	int err;

	dirty = memcmp(&mlxsw_sp_port->hdroom->prios, &hdroom->prios, sizeof(hdroom->prios));
	if (!dirty && !force)
		return 0;

	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, prio, hdroom->prios.prio[prio].buf_idx);

	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), pptb_pl);
	if (err)
		return err;

	mlxsw_sp_port->hdroom->prios = hdroom->prios;
	return 0;
}

static int mlxsw_sp_hdroom_configure_int_buf(struct mlxsw_sp_port *mlxsw_sp_port,
					     const struct mlxsw_sp_hdroom *hdroom, bool force)
{
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	bool dirty;
	int err;

	dirty = memcmp(&mlxsw_sp_port->hdroom->int_buf, &hdroom->int_buf, sizeof(hdroom->int_buf));
	if (!dirty && !force)
		return 0;

	mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, hdroom->int_buf.size_cells);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	if (err)
		return err;

	mlxsw_sp_port->hdroom->int_buf = hdroom->int_buf;
	return 0;
}

static bool mlxsw_sp_hdroom_bufs_fit(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_hdroom *hdroom)
{
	u32 taken_headroom_cells = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PB_COUNT; i++)
		taken_headroom_cells += hdroom->bufs.buf[i].size_cells;

	taken_headroom_cells += hdroom->int_buf.reserve_cells;
	return taken_headroom_cells <= mlxsw_sp->sb->max_headroom_cells;
}
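
/* Note that the fit check charges the internal buffer at its full
 * reserve_cells, not at its current size_cells, so later enabling the
 * internal buffer cannot push an already accepted configuration over the
 * headroom limit.
 */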

static int __mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
				       const struct mlxsw_sp_hdroom *hdroom, bool force)
{
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom tmp_hdroom;
	int err;
	int i;

	/* Port buffers need to be configured in three steps. First, all buffers
	 * with non-zero size are configured. Then, prio-to-buffer map is
	 * updated, allowing traffic to flow to the now non-zero buffers.
	 * Finally, zero-sized buffers are configured, because now no traffic
	 * should be directed to them anymore. This way, in a non-congested
	 * system, no packet drops are introduced by the reconfiguration.
	 */

	orig_hdroom = *mlxsw_sp_port->hdroom;
	tmp_hdroom = orig_hdroom;
	for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
		if (hdroom->bufs.buf[i].size_cells)
			tmp_hdroom.bufs.buf[i] = hdroom->bufs.buf[i];
	}

	if (!mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, &tmp_hdroom) ||
	    !mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, hdroom))
		return -ENOBUFS;

	err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, force);
	if (err)
		return err;

	err = mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, hdroom, force);
	if (err)
		goto err_configure_priomap;

	err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, hdroom, false);
	if (err)
		goto err_configure_buffers;

	err = mlxsw_sp_hdroom_configure_int_buf(mlxsw_sp_port, hdroom, false);
	if (err)
		goto err_configure_int_buf;

	*mlxsw_sp_port->hdroom = *hdroom;
	return 0;

err_configure_int_buf:
	mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, false);
err_configure_buffers:
	mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, &tmp_hdroom, false);
err_configure_priomap:
	mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &orig_hdroom, false);
	return err;
}
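
/* Illustrative walk-through: suppose the new configuration moves all
 * priorities from PG buffer 0 to a resized buffer 1 and shrinks buffer 0 to
 * zero. tmp_hdroom keeps buffer 0 at its old size while already growing
 * buffer 1 (step one), the priomap write then redirects traffic to buffer 1
 * (step two), and only the final pass shrinks the now unused buffer 0 (step
 * three).
 */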

int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
			      const struct mlxsw_sp_hdroom *hdroom)
{
	return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, hdroom, false);
}

static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_hdroom hdroom = {};
	u32 size9;
	int prio;

	hdroom.mtu = mlxsw_sp_port->dev->mtu;
	hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
		hdroom.prios.prio[prio].lossy = true;

	mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	/* Buffer 9 is used for control traffic. */
	size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, mlxsw_sp_port->max_mtu);
	hdroom.bufs.buf[9].size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size9);

	return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom, true);
}

static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_sb_port *sb_port)
{
	struct mlxsw_sp_sb_pm *pms;

	pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
		      GFP_KERNEL);
	if (!pms)
		return -ENOMEM;
	sb_port->pms = pms;
	return 0;
}

static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
{
	kfree(sb_port->pms);
}

static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_sb_pr *prs;
	int i;
	int err;

	mlxsw_sp->sb->ports = kcalloc(max_ports,
				      sizeof(struct mlxsw_sp_sb_port),
				      GFP_KERNEL);
	if (!mlxsw_sp->sb->ports)
		return -ENOMEM;

	prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
		      GFP_KERNEL);
	if (!prs) {
		err = -ENOMEM;
		goto err_alloc_prs;
	}
	mlxsw_sp->sb->prs = prs;

	for (i = 0; i < max_ports; i++) {
		err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
		if (err)
			goto err_sb_port_init;
	}

	return 0;

err_sb_port_init:
	for (i--; i >= 0; i--)
		mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
	kfree(mlxsw_sp->sb->prs);
err_alloc_prs:
	kfree(mlxsw_sp->sb->ports);
	return err;
}

static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
	int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	int i;

	for (i = max_ports - 1; i >= 0; i--)
		mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
	kfree(mlxsw_sp->sb->prs);
	kfree(mlxsw_sp->sb->ports);
}

#define MLXSW_SP_SB_PR(_mode, _size) \
	{ \
		.mode = _mode, \
		.size = _size, \
	}

#define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size) \
	{ \
		.mode = _mode, \
		.size = _size, \
		.freeze_mode = _freeze_mode, \
		.freeze_size = _freeze_size, \
	}

#define MLXSW_SP1_SB_PR_CPU_SIZE (256 * 1000)

/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
			   true, false),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
};

#define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000)

/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
			   true, false),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
};

static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
				const struct mlxsw_sp_sb_pr *prs,
				const struct mlxsw_sp_sb_pool_des *pool_dess,
				size_t prs_len)
{
	/* Round down, unlike mlxsw_sp_bytes_cells(). */
	u32 sb_cells = div_u64(mlxsw_sp->sb->sb_size, mlxsw_sp->sb->cell_size);
	u32 rest_cells[2] = {sb_cells, sb_cells};
	int i;
	int err;

	/* Calculate how much space to give to the "REST" pools in either
	 * direction.
	 */
	for (i = 0; i < prs_len; i++) {
		enum mlxsw_reg_sbxx_dir dir = pool_dess[i].dir;
		u32 size = prs[i].size;
		u32 size_cells;

		if (size == MLXSW_SP_SB_INFI || size == MLXSW_SP_SB_REST)
			continue;

		size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
		if (WARN_ON_ONCE(size_cells > rest_cells[dir]))
			continue;

		rest_cells[dir] -= size_cells;
	}

	for (i = 0; i < prs_len; i++) {
		u32 size = prs[i].size;
		u32 size_cells;

		if (size == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   0, true);
		} else if (size == MLXSW_SP_SB_REST) {
			size_cells = rest_cells[pool_dess[i].dir];
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   size_cells, false);
		} else {
			size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   size_cells, false);
		}
		if (err)
			return err;
	}
	return 0;
}
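
/* Numerical illustration (hypothetical values): with a 1 MiB shared buffer
 * and a 96-byte cell, sb_cells = 1048576 / 96 = 10922 (rounded down). A
 * direction whose only sized pool is a 256 kB CPU pool gives up
 * DIV_ROUND_UP(256000, 96) = 2667 cells, leaving 10922 - 2667 = 8255 cells
 * for that direction's MLXSW_SP_SB_REST pool.
 */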

#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \
	{ \
		.min_buff = _min_buff, \
		.max_buff = _max_buff, \
		.pool_index = _pool, \
	}

#define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff) \
	{ \
		.min_buff = _min_buff, \
		.max_buff = _max_buff, \
		.pool_index = MLXSW_SP_SB_POOL_ING, \
	}

#define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff) \
	{ \
		.min_buff = _min_buff, \
		.max_buff = _max_buff, \
		.pool_index = MLXSW_SP_SB_POOL_EGR, \
	}

#define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff) \
	{ \
		.min_buff = _min_buff, \
		.max_buff = _max_buff, \
		.pool_index = MLXSW_SP_SB_POOL_EGR_MC, \
		.freeze_pool = true, \
		.freeze_thresh = true, \
	}

static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(10000, 8),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};

static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(0, 7),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};

static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR(1, 0xff),
};

static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR(1, 0xff),
};

#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)

static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};

static bool
mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
}
2016-04-14 18:19:17 +02:00
|
|
|
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
|
|
|
|
|
enum mlxsw_reg_sbxx_dir dir,
|
|
|
|
|
const struct mlxsw_sp_sb_cm *cms,
|
|
|
|
|
size_t cms_len)
|
2015-10-16 14:01:37 +02:00
|
|
|
{
|
2019-02-20 19:32:16 +00:00
|
|
|
const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
|
2015-10-16 14:01:37 +02:00
|
|
|
int i;
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < cms_len; i++) {
|
|
|
|
|
const struct mlxsw_sp_sb_cm *cm;
|
2017-03-24 08:02:51 +01:00
|
|
|
u32 min_buff;
|
2018-09-20 09:21:31 +03:00
|
|
|
u32 max_buff;
|
2015-10-16 14:01:37 +02:00
|
|
|
|
2016-04-14 18:19:17 +02:00
|
|
|
if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
|
|
|
|
|
continue; /* PG number 8 does not exist, skip it */
|
2015-10-16 14:01:37 +02:00
|
|
|
cm = &cms[i];
|
2019-02-20 19:32:16 +00:00
|
|
|
if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
|
mlxsw: spectrum_buffers: Use devlink pool indices throughout
Currently, mlxsw assumes that each ingress pool has its egress
counterpart, and that pool index for purposes of caching matches the
index with which the hardware should be configured. As we want to expose
the MC pool, both of these assumptions break.
Instead, maintain the pool index as long as possible. Unify ingress and
egress caches and use the pool index as cache index as well. Only
translate to FW pool numbering when actually packing the registers. This
simplifies things considerably, as the pool index is the only quantity
necessary to uniquely identify a pool, and the pool/direction split is
not necessary until firmware is talked to.
To support the mapping between pool indices and pool numbers and
directions, which is not neatly mathematical anymore, introduce a pool
descriptor table, indexed by pool index, to facilitate the translation.
Include the MC pool in the descriptor table as well, so that it can be
referenced from mlxsw_sp_sb_cms_egress.
Signed-off-by: Petr Machata <petrm@mellanox.com>
Reviewed-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-09-20 09:21:25 +03:00
|
|
|
continue;
|
|
|
|
|
|
2017-03-24 08:02:51 +01:00
|
|
|
min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
|
2018-09-20 09:21:31 +03:00
|
|
|
max_buff = cm->max_buff;
|
|
|
|
|
if (max_buff == MLXSW_SP_SB_INFI) {
|
2018-09-20 09:21:29 +03:00
|
|
|
err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
|
|
|
|
|
min_buff, 0,
|
|
|
|
|
true, cm->pool_index);
|
2018-09-20 09:21:31 +03:00
|
|
|
} else {
|
|
|
|
|
if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
|
|
|
|
|
cm->pool_index))
|
|
|
|
|
max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
|
|
|
|
|
max_buff);
|
2018-09-20 09:21:29 +03:00
|
|
|
err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
|
2018-09-20 09:21:31 +03:00
|
|
|
min_buff, max_buff,
|
2018-09-20 09:21:29 +03:00
|
|
|
false, cm->pool_index);
|
2018-09-20 09:21:31 +03:00
|
|
|
}
|
2015-10-16 14:01:37 +02:00
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
return 0;
|
|
|
|
|
}
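
/* Note the asymmetry above: for pools in static mode, cm->max_buff is a byte
 * count and is converted to cells before the write, whereas for pools in
 * dynamic mode it is a scaling index in the
 * MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN..MAX range and is passed through as is.
 */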

static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
				     mlxsw_sp_port->local_port,
				     MLXSW_REG_SBXX_DIR_INGRESS,
				     mlxsw_sp->sb_vals->cms_ingress,
				     mlxsw_sp->sb_vals->cms_ingress_count);
	if (err)
		return err;
	return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				      mlxsw_sp_port->local_port,
				      MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp->sb_vals->cms_egress,
				      mlxsw_sp->sb_vals->cms_egress_count);
}

static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
	return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp->sb_vals->cms_cpu,
				      mlxsw_sp->sb_vals->cms_cpu_count);
}

#define MLXSW_SP_SB_PM(_min_buff, _max_buff) \
	{ \
		.min_buff = _min_buff, \
		.max_buff = _max_buff, \
	}

/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(10000, 90000),
	MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};

/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(10000, 90000),
	MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};

/* Order according to mlxsw_sp*_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 90000),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};

static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				const struct mlxsw_sp_sb_pm *pms,
				bool skip_ingress)
{
	int i, err;

	for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
		const struct mlxsw_sp_sb_pm *pm = &pms[i];
		const struct mlxsw_sp_sb_pool_des *des;
		u32 max_buff;
		u32 min_buff;

		des = &mlxsw_sp->sb_vals->pool_dess[i];
		if (skip_ingress && des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
		max_buff = pm->max_buff;
		if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
		err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, min_buff,
					   max_buff);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_sp_sb_pms_init(mlxsw_sp, mlxsw_sp_port->local_port,
				    mlxsw_sp->sb_vals->pms, false);
}

static int mlxsw_sp_cpu_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_sb_pms_init(mlxsw_sp, 0, mlxsw_sp->sb_vals->pms_cpu,
				    true);
}
2019-04-22 12:08:42 +00:00
|
|
|
#define MLXSW_SP_SB_MM(_min_buff, _max_buff) \
|
2016-04-14 18:19:17 +02:00
|
|
|
{ \
|
|
|
|
|
.min_buff = _min_buff, \
|
|
|
|
|
.max_buff = _max_buff, \
|
2019-04-22 12:08:42 +00:00
|
|
|
.pool_index = MLXSW_SP_SB_POOL_EGR, \
|
2015-10-16 14:01:37 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
|
2019-04-22 12:08:42 +00:00
|
|
|
MLXSW_SP_SB_MM(0, 6),
|
|
|
|
|
MLXSW_SP_SB_MM(0, 6),
|
|
|
|
|
MLXSW_SP_SB_MM(0, 6),
|
|
|
|
|
MLXSW_SP_SB_MM(0, 6),
|
|
|
|
|
MLXSW_SP_SB_MM(0, 6),
|
|
|
|
|
MLXSW_SP_SB_MM(0, 6),
|
|
|
|
|
MLXSW_SP_SB_MM(0, 6),
|
|
|
|
|
MLXSW_SP_SB_MM(0, 6),
|
|
|
|
|
MLXSW_SP_SB_MM(0, 6),
|
|
|
|
|
MLXSW_SP_SB_MM(0, 6),
|
|
|
|
|
MLXSW_SP_SB_MM(0, 6),
|
|
|
|
|
MLXSW_SP_SB_MM(0, 6),
|
|
|
|
|
MLXSW_SP_SB_MM(0, 6),
|
|
|
|
|
MLXSW_SP_SB_MM(0, 6),
|
|
|
|
|
MLXSW_SP_SB_MM(0, 6),
|
2015-10-16 14:01:37 +02:00
|
|
|
};
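
/* mlxsw_sp_sb_mms_init() below packs entry i with i as the SBMM index, so
 * the fifteen identical entries above cover indices 0..14, each with no
 * guaranteed minimum and a dynamic max_buff of 6 against the egress pool.
 */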

static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
		const struct mlxsw_sp_sb_pool_des *des;
		const struct mlxsw_sp_sb_mm *mc;
		u32 min_buff;

		mc = &mlxsw_sp->sb_vals->mms[i];
		des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
		/* All pools used by sb_mm's are initialized using dynamic
		 * thresholds, therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
				    des->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}

static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
				u16 *p_ingress_len, u16 *p_egress_len)
{
	int i;

	for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) {
		if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
		    MLXSW_REG_SBXX_DIR_INGRESS)
			(*p_ingress_len)++;
		else
			(*p_egress_len)++;
	}

	WARN(*p_egress_len == 0, "No egress pools\n");
}

const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
	.pool_dess = mlxsw_sp1_sb_pool_dess,
	.pms = mlxsw_sp1_sb_pms,
	.pms_cpu = mlxsw_sp_cpu_port_sb_pms,
	.prs = mlxsw_sp1_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp1_sb_cms_ingress,
	.cms_egress = mlxsw_sp1_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};

const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
	.pool_dess = mlxsw_sp2_sb_pool_dess,
	.pms = mlxsw_sp2_sb_pms,
	.pms_cpu = mlxsw_sp_cpu_port_sb_pms,
	.prs = mlxsw_sp2_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp2_sb_cms_ingress,
	.cms_egress = mlxsw_sp2_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};

static u32 mlxsw_sp1_pb_int_buf_size_get(int mtu, u32 speed)
{
	return mtu * 5 / 2;
}

static u32 __mlxsw_sp_pb_int_buf_size_get(int mtu, u32 speed, u32 buffer_factor)
{
	return 3 * mtu + buffer_factor * speed / 1000;
}

#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38

static u32 mlxsw_sp2_pb_int_buf_size_get(int mtu, u32 speed)
{
	int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;

	return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
}

#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50

static u32 mlxsw_sp3_pb_int_buf_size_get(int mtu, u32 speed)
{
	int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;

	return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
}
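
/* Worked example (assuming MTU in bytes and speed in Mb/s, as the formulas
 * suggest): on Spectrum-2, a 1518-byte MTU at 100000 Mb/s gives
 * 3 * 1518 + 38 * 100000 / 1000 = 4554 + 3800 = 8354 bytes for the internal
 * mirroring buffer, before the cell conversion and + 1 cell done by
 * mlxsw_sp_hdroom_int_buf_size_get().
 */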

const struct mlxsw_sp_sb_ops mlxsw_sp1_sb_ops = {
	.int_buf_size_get = mlxsw_sp1_pb_int_buf_size_get,
};

const struct mlxsw_sp_sb_ops mlxsw_sp2_sb_ops = {
	.int_buf_size_get = mlxsw_sp2_pb_int_buf_size_get,
};

const struct mlxsw_sp_sb_ops mlxsw_sp3_sb_ops = {
	.int_buf_size_get = mlxsw_sp3_pb_int_buf_size_get,
};
2015-10-16 14:01:37 +02:00
|
|
|
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
|
|
|
|
|
{
|
2019-02-20 19:32:29 +00:00
|
|
|
u32 max_headroom_size;
|
2019-04-22 12:08:50 +00:00
|
|
|
u16 ing_pool_count = 0;
|
|
|
|
|
u16 eg_pool_count = 0;
|
2015-10-16 14:01:37 +02:00
|
|
|
int err;
|
|
|
|
|
|
2017-03-24 08:02:51 +01:00
|
|
|
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
|
|
|
|
|
return -EIO;
|
|
|
|
|
|
2019-10-23 09:04:59 +03:00
|
|
|
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, GUARANTEED_SHARED_BUFFER))
|
2017-03-24 08:02:49 +01:00
|
|
|
return -EIO;
|
|
|
|
|
|
2019-02-20 19:32:29 +00:00
|
|
|
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
|
|
|
|
|
return -EIO;
|
|
|
|
|
|
2017-05-16 19:38:24 +02:00
|
|
|
mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
|
|
|
|
|
if (!mlxsw_sp->sb)
|
|
|
|
|
return -ENOMEM;
|
|
|
|
|
mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
|
2018-09-20 09:21:27 +03:00
|
|
|
mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
|
2019-10-23 09:04:59 +03:00
|
|
|
GUARANTEED_SHARED_BUFFER);
|
2019-02-20 19:32:29 +00:00
|
|
|
max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
|
|
|
|
|
MAX_HEADROOM_SIZE);
|
|
|
|
|
/* Round down, because this limit must not be overstepped. */
|
|
|
|
|
mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
|
|
|
|
|
mlxsw_sp->sb->cell_size;
|
|
|
|
|
|
2017-03-24 08:02:48 +01:00
|
|
|
err = mlxsw_sp_sb_ports_init(mlxsw_sp);
|
2015-10-16 14:01:37 +02:00
|
|
|
if (err)
|
2017-05-16 19:38:24 +02:00
|
|
|
goto err_sb_ports_init;
|
2019-02-20 19:32:20 +00:00
|
|
|
err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
|
2019-10-23 09:05:00 +03:00
|
|
|
mlxsw_sp->sb_vals->pool_dess,
|
2019-02-20 19:32:20 +00:00
|
|
|
mlxsw_sp->sb_vals->pool_count);
|
2017-03-24 08:02:48 +01:00
|
|
|
if (err)
|
|
|
|
|
goto err_sb_prs_init;
|
2015-10-16 14:01:37 +02:00
|
|
|
err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
|
|
|
|
|
if (err)
|
2017-03-24 08:02:48 +01:00
|
|
|
goto err_sb_cpu_port_sb_cms_init;
|
2019-04-22 12:08:56 +00:00
|
|
|
err = mlxsw_sp_cpu_port_sb_pms_init(mlxsw_sp);
|
|
|
|
|
if (err)
|
|
|
|
|
goto err_sb_cpu_port_pms_init;
|
2015-10-16 14:01:37 +02:00
|
|
|
err = mlxsw_sp_sb_mms_init(mlxsw_sp);
|
2016-04-14 18:19:24 +02:00
|
|
|
if (err)
|
2017-03-24 08:02:48 +01:00
|
|
|
goto err_sb_mms_init;
|
2019-02-20 19:32:16 +00:00
|
|
|
mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
|
2018-09-20 09:21:27 +03:00
|
|
|
err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
|
|
|
|
|
mlxsw_sp->sb->sb_size,
|
mlxsw: spectrum_buffers: Use devlink pool indices throughout
Currently, mlxsw assumes that each ingress pool has its egress
counterpart, and that pool index for purposes of caching matches the
index with which the hardware should be configured. As we want to expose
the MC pool, both of these assumptions break.
Instead, maintain the pool index as long as possible. Unify ingress and
egress caches and use the pool index as cache index as well. Only
translate to FW pool numbering when actually packing the registers. This
simplifies things considerably, as the pool index is the only quantity
necessary to uniquely identify a pool, and the pool/direction split is
not necessary until firmware is talked to.
To support the mapping between pool indices and pool numbers and
directions, which is not neatly mathematical anymore, introduce a pool
descriptor table, indexed by pool index, to facilitate the translation.
Include the MC pool in the descriptor table as well, so that it can be
referenced from mlxsw_sp_sb_cms_egress.
Signed-off-by: Petr Machata <petrm@mellanox.com>
Reviewed-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-09-20 09:21:25 +03:00
|
|
|
ing_pool_count,
|
|
|
|
|
eg_pool_count,
|
2018-09-20 09:21:26 +03:00
|
|
|
MLXSW_SP_SB_ING_TC_COUNT,
|
|
|
|
|
MLXSW_SP_SB_EG_TC_COUNT);
|
2017-03-24 08:02:48 +01:00
|
|
|
if (err)
|
|
|
|
|
goto err_devlink_sb_register;
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
err_devlink_sb_register:
|
|
|
|
|
err_sb_mms_init:
|
2019-04-22 12:08:56 +00:00
|
|
|
err_sb_cpu_port_pms_init:
|
2017-03-24 08:02:48 +01:00
|
|
|
err_sb_cpu_port_sb_cms_init:
|
|
|
|
|
err_sb_prs_init:
|
|
|
|
|
mlxsw_sp_sb_ports_fini(mlxsw_sp);
|
2017-05-16 19:38:24 +02:00
|
|
|
err_sb_ports_init:
|
|
|
|
|
kfree(mlxsw_sp->sb);
|
2017-03-24 08:02:48 +01:00
|
|
|
return err;
|
2016-04-14 18:19:24 +02:00
|
|
|
}
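
/* The stacked labels above are the usual kernel unwind idiom: the
 * register-write init steps allocate nothing and so share labels with no
 * cleanup of their own; a failure anywhere after mlxsw_sp_sb_ports_init()
 * only needs to undo the two allocations.
 */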

void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
	kfree(mlxsw_sp->sb);
}

int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	mlxsw_sp_port->hdroom = kzalloc(sizeof(*mlxsw_sp_port->hdroom), GFP_KERNEL);
	if (!mlxsw_sp_port->hdroom)
		return -ENOMEM;
	mlxsw_sp_port->hdroom->mtu = mlxsw_sp_port->dev->mtu;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		goto err_headroom_init;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		goto err_port_sb_cms_init;
	err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
	if (err)
		goto err_port_sb_pms_init;
	return 0;

err_port_sb_pms_init:
err_port_sb_cms_init:
err_headroom_init:
	kfree(mlxsw_sp_port->hdroom);
	return err;
}

void mlxsw_sp_port_buffers_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	kfree(mlxsw_sp_port->hdroom);
}
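
/* The functions below back the devlink shared-buffer API for this driver.
 * As an illustration (device name and values hypothetical), a command such
 * as
 *
 *	devlink sb pool set pci/0000:03:00.0 pool 0 size 12401088 thtype dynamic
 *
 * lands in mlxsw_sp_sb_pool_set(), and "devlink sb pool show" is serviced
 * by mlxsw_sp_sb_pool_get().
 */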

int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	enum mlxsw_reg_sbxx_dir dir;
	struct mlxsw_sp_sb_pr *pr;

	dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
	pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
	pool_info->cell_size = mlxsw_sp->sb->cell_size;
	return 0;
}

int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
	const struct mlxsw_sp_sb_pr *pr;
	enum mlxsw_reg_sbpr_mode mode;

	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
	pr = &mlxsw_sp->sb_vals->prs[pool_index];

	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core,
				      GUARANTEED_SHARED_BUFFER)) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
		return -EINVAL;
	}

	if (pr->freeze_mode && pr->mode != mode) {
		NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
		return -EINVAL;
	}

	if (pr->freeze_size && pr->size != size) {
		NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
		return -EINVAL;
	}

	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
				    pool_size, false);
}

#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
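
/* The offset above converts between the threshold values devlink operates
 * on and the dynamic max_buff ("alpha") encoding of the SBPM/SBCM
 * registers: a devlink threshold of 3 is written to hardware as 1, and 16
 * as 14, as the comment notes. mlxsw_sp_sb_threshold_in() below applies
 * the offset and range-checks the result; mlxsw_sp_sb_threshold_out()
 * undoes it, so a dynamic threshold read back through devlink round-trips
 * unchanged.
 */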
static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				     u32 max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
	return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}

static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				    u32 threshold, u32 *p_max_buff,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
		int val;

		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value");
			return -EINVAL;
		}
		*p_max_buff = val;
	} else {
		*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
	}
	return 0;
}
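
/* Worked example for the dynamic case (register limits per reg.h): with
 * MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN of 1 and ..._MAX of 14, the accepted
 * devlink thresholds span 3 through 16; anything outside that range fails
 * with "Invalid dynamic threshold value". Static thresholds are instead
 * byte values, converted to cell units before being handed to hardware.
 */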

int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool_index);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
						 pm->max_buff);
	return 0;
}

int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold, struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u32 max_buff;
	int err;

	if (local_port == MLXSW_PORT_CPU_PORT) {
		NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's threshold is forbidden");
		return -EINVAL;
	}

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
				       threshold, &max_buff, extack);
	if (err)
		return err;

	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
				    0, max_buff);
}
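
/* For TC binding, the devlink tc_index maps directly onto the hardware
 * entity being configured (the ingress priority group buffer or the egress
 * traffic class), which is why pg_buff below is simply tc_index. Likewise,
 * the devlink pool type enum is cast straight to the register
 * ingress/egress direction enum.
 */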

int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
						 cm->max_buff);
	*p_pool_index = cm->pool_index;
	return 0;
}

int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	const struct mlxsw_sp_sb_cm *cm;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	u32 max_buff;
	int err;

	if (local_port == MLXSW_PORT_CPU_PORT) {
		NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's binding is forbidden");
		return -EINVAL;
	}

	if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) {
		NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden");
		return -EINVAL;
	}

	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
		cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index];
	else
		cm = &mlxsw_sp->sb_vals->cms_egress[tc_index];

	if (cm->freeze_pool && cm->pool_index != pool_index) {
		NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden");
		return -EINVAL;
	}

	if (cm->freeze_thresh && cm->max_buff != threshold) {
		NL_SET_ERR_MSG_MOD(extack, "Changing this TC's threshold is forbidden");
		return -EINVAL;
	}

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
				       threshold, &max_buff, extack);
	if (err)
		return err;

	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
				    0, max_buff, false, pool_index);
}
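
/* What follows is the occupancy (watermark) machinery behind
 * "devlink sb occupancy snapshot" and friends. Ports are queried through
 * the SBSR register in batches, because a single SBSR response carries at
 * most MLXSW_REG_SBSR_REC_MAX_COUNT records and every queried port
 * contributes one record per ingress and egress TC.
 */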
|
2016-04-14 18:19:30 +02:00
|
|
|
|
|
|
|
|
#define MASKED_COUNT_MAX \
|
2018-09-20 09:21:26 +03:00
|
|
|
(MLXSW_REG_SBSR_REC_MAX_COUNT / \
|
|
|
|
|
(MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))
|
2016-04-14 18:19:30 +02:00
|
|
|
|
|
|
|
|
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
|
|
|
|
|
u8 masked_count;
|
|
|
|
|
u8 local_port_1;
|
|
|
|
|
};
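
/* The query context is smuggled through the transaction layer in the
 * unsigned long cb_priv value itself rather than behind a pointer, hence
 * the memcpy() to and from cb_priv below. This relies on the struct
 * fitting into sizeof(unsigned long).
 */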

static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port == MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			masked_count++;
			continue;
		}
		for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}
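
/* mlxsw_sp_sb_occ_snapshot() walks the ports in batches of up to
 * MASKED_COUNT_MAX, issuing one SBSR query per batch; the callback above
 * then unpacks the returned ingress and egress records into the CM cache,
 * visiting ports in the same order the query masked them.
 */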

int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;
	u8 local_port_1;
	u8 local_port;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

	local_port = MLXSW_PORT_CPU_PORT;
next_batch:
	local_port_1 = local_port;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port != MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
							     local_port, 1);
		}
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
		local_port++;
		goto next_batch;
	}

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
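
/* Clearing the maximum watermarks reuses the same batched SBSR walk, but
 * packs the register with the clear bit set (the second argument of
 * mlxsw_reg_sbsr_pack()) and registers no unpacking callback, as there are
 * no results to cache.
 */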

int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

	local_port = MLXSW_PORT_CPU_PORT;
next_batch:
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port != MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
							     local_port, 1);
		}
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
		local_port++;
		goto next_batch;
	}

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
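
/* The getters below serve occupancy reads from the driver-side cache
 * populated by the snapshot machinery above, converting from cell units to
 * bytes on the way out.
 */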

int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool_index);

	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
	return 0;
}

int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
	return 0;
}