bnxt_en: Change IRQ assignment for RDMA driver.

In the current code, the range of MSIX vectors allocated for the RDMA
driver is disjoint from the range used by the network driver.  This
creates a problem for the new firmware ring reservation scheme, which
requires the reserved completion rings/MSIX vectors to be in one
contiguous range.

Change the logic so that, on new firmware using the new reservation
scheme, the RDMA MSIX vectors are allocated contiguously with the
vectors used by bnxt_en.  The new function bnxt_get_num_msix()
calculates the exact number of vectors needed by both drivers.
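
As an illustration (the counts below are made-up example values, not
numbers taken from this patch), with bp->cp_nr_rings == 8 and the RDMA
driver requesting 9 vectors, the two layouts look roughly like this:

    old assignment (top of the MSIX range):
        bnxt_en: 0..7    RDMA: max_idx - 9 .. max_idx - 1
    new assignment (BNXT_FLAG_NEW_RM, contiguous with bnxt_en):
        bnxt_en: 0..7    RDMA: 8..16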

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Michael Chan <michael.chan@broadcom.com>
Date:      2018-03-31 13:54:17 -04:00
Committer: David S. Miller <davem@davemloft.net>
commit 08654eb213
parent 9899bb59ff
3 changed files with 61 additions and 3 deletions

drivers/net/ethernet/broadcom/bnxt/bnxt.c

@@ -4720,6 +4720,21 @@ static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
 	return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
 }
 
+static int bnxt_cp_rings_in_use(struct bnxt *bp)
+{
+	int cp = bp->cp_nr_rings;
+	int ulp_msix, ulp_base;
+
+	ulp_msix = bnxt_get_ulp_msix_num(bp);
+	if (ulp_msix) {
+		ulp_base = bnxt_get_ulp_msix_base(bp);
+		cp += ulp_msix;
+		if ((ulp_base + ulp_msix) > cp)
+			cp = ulp_base + ulp_msix;
+	}
+	return cp;
+}
+
 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
 			   bool shared);
@@ -5893,12 +5908,24 @@ void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
 	bp->hw_resc.max_irqs = max_irqs;
 }
 
+static int bnxt_get_num_msix(struct bnxt *bp)
+{
+	if (!(bp->flags & BNXT_FLAG_NEW_RM))
+		return bnxt_get_max_func_irqs(bp);
+
+	return bnxt_cp_rings_in_use(bp);
+}
+
 static int bnxt_init_msix(struct bnxt *bp)
 {
-	int i, total_vecs, rc = 0, min = 1;
+	int i, total_vecs, max, rc = 0, min = 1;
 	struct msix_entry *msix_ent;
 
-	total_vecs = bnxt_get_max_func_irqs(bp);
+	total_vecs = bnxt_get_num_msix(bp);
+	max = bnxt_get_max_func_irqs(bp);
+	if (total_vecs > max)
+		total_vecs = max;
 	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
 	if (!msix_ent)
 		return -ENOMEM;

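As a quick check of the arithmetic in bnxt_cp_rings_in_use() above,
here is a small standalone user-space model (the helper name and the
ring counts are illustrative, not part of the driver):

#include <stdio.h>

/* Mirrors the logic of bnxt_cp_rings_in_use() with plain ints. */
static int cp_rings_in_use(int cp_nr_rings, int ulp_msix, int ulp_base)
{
	int cp = cp_nr_rings;

	if (ulp_msix) {
		cp += ulp_msix;
		if (ulp_base + ulp_msix > cp)
			cp = ulp_base + ulp_msix;
	}
	return cp;
}

int main(void)
{
	/* RDMA vectors start right after the 8 L2 rings: 8..16 -> prints 17. */
	printf("%d\n", cp_rings_in_use(8, 9, 8));
	/* RDMA vectors placed higher up (base 12): 0..20 must be covered -> prints 21. */
	printf("%d\n", cp_rings_in_use(8, 9, 12));
	return 0;
}

On BNXT_FLAG_NEW_RM firmware, bnxt_get_num_msix() returns this count,
and bnxt_init_msix() caps it at bnxt_get_max_func_irqs() before
allocating the MSIX entries.
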
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c

@@ -116,6 +116,9 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
 	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
 		return -ENODEV;
 
+	if (edev->ulp_tbl[ulp_id].msix_requested)
+		return -EAGAIN;
+
 	max_cp_rings = bnxt_get_max_func_cp_rings(bp);
 	max_idx = min_t(int, bp->total_irqs, max_cp_rings);
 	avail_msix = max_idx - bp->cp_nr_rings;
@@ -124,7 +127,11 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
 	if (avail_msix > num_msix)
 		avail_msix = num_msix;
-	idx = max_idx - avail_msix;
+	if (bp->flags & BNXT_FLAG_NEW_RM)
+		idx = bp->cp_nr_rings;
+	else
+		idx = max_idx - avail_msix;
+	edev->ulp_tbl[ulp_id].msix_base = idx;
 	for (i = 0; i < avail_msix; i++) {
 		ent[i].vector = bp->irq_tbl[idx + i].vector;
 		ent[i].ring_idx = idx + i;
@@ -154,6 +161,27 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
 	return 0;
 }
 
+int bnxt_get_ulp_msix_num(struct bnxt *bp)
+{
+	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+		struct bnxt_en_dev *edev = bp->edev;
+
+		return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
+	}
+	return 0;
+}
+
+int bnxt_get_ulp_msix_base(struct bnxt *bp)
+{
+	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+		struct bnxt_en_dev *edev = bp->edev;
+
+		if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
+			return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
+	}
+	return 0;
+}
+
 void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
 {
 	ASSERT_RTNL();

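With the same illustrative numbers as before (bp->cp_nr_rings = 8,
max_idx = 32, 9 vectors granted; these are example values, not taken
from the patch), the base index chosen by bnxt_req_msix_vecs() works
out as follows:

    BNXT_FLAG_NEW_RM set:    idx = bp->cp_nr_rings      = 8   -> RDMA vectors 8..16
    BNXT_FLAG_NEW_RM clear:  idx = max_idx - avail_msix = 23  -> RDMA vectors 23..31

The chosen idx is saved in edev->ulp_tbl[ulp_id].msix_base so that
bnxt_get_ulp_msix_base() can report it back to bnxt_cp_rings_in_use()
in bnxt.c.
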
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h

@@ -49,6 +49,7 @@ struct bnxt_ulp {
 	unsigned long *async_events_bmap;
 	u16 max_async_event_id;
 	u16 msix_requested;
+	u16 msix_base;
 	atomic_t ref_count;
 };
@@ -84,6 +85,8 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
 	return false;
 }
 
+int bnxt_get_ulp_msix_num(struct bnxt *bp);
+int bnxt_get_ulp_msix_base(struct bnxt *bp);
 void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
 void bnxt_ulp_stop(struct bnxt *bp);
 void bnxt_ulp_start(struct bnxt *bp);