/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
|
|
|
|
|
|
|
|
#ifndef RVU_H
|
|
|
|
#define RVU_H
|
|
|
|
|
2018-10-10 12:44:22 +00:00
|
|
|
#include "rvu_struct.h"
|
2018-10-16 11:27:11 +00:00
|
|
|
#include "common.h"
|
2018-10-10 12:44:25 +00:00
|
|
|
#include "mbox.h"
|
2018-10-10 12:44:22 +00:00
|
|
|
|
2018-10-10 12:44:21 +00:00
|
|
|
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_AF	0xA065

/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM	0	/* AF register space (afreg_base) */
#define PCI_PF_REG_BAR_NUM	2	/* PF register space (pfreg_base) */
#define PCI_MBOX_BAR_NUM	4	/* Mailbox region */

/* Size of the 'name' field of struct rvu_block */
#define NAME_SIZE	32
|
|
|
|
|
2018-10-10 12:44:25 +00:00
|
|
|
/* PF_FUNC: 16-bit RVU function identifier (pcifunc).
 * Bits [15:10] hold the PF number, bits [9:0] the function index
 * (presumably 0 for the PF itself and VF index + 1 for VFs —
 * verify against the attach/detach code).
 */
#define RVU_PFVF_PF_SHIFT	10
#define RVU_PFVF_PF_MASK	0x3F
#define RVU_PFVF_FUNC_SHIFT	0
#define RVU_PFVF_FUNC_MASK	0x3FF
|
|
|
|
|
|
|
|
/* Deferred-work wrapper: carries a back-pointer to the owning AF
 * device so workqueue handlers (e.g. mbox_wrk, mbox_wrk_up) can
 * recover their struct rvu context.
 */
struct rvu_work {
	struct work_struct work;
	struct rvu *rvu;	/* Back-pointer to owning AF device */
};
|
|
|
|
|
2018-10-10 12:44:23 +00:00
|
|
|
/* Generic resource allocator state: a bitmap of 'max' allocatable IDs.
 * Used for block LFs and MSIX vectors; see rvu_alloc_rsrc()/rvu_free_rsrc().
 */
struct rsrc_bmap {
	unsigned long *bmap;	/* Pointer to resource bitmap */
	u16  max;		/* Max resource id or count */
};
|
|
|
|
|
2018-10-10 12:44:22 +00:00
|
|
|
/* Per-hardware-block state tracked by the AF (one entry per
 * RVU_BLOCK_ADDR_E in rvu_hwinfo.block[]).
 */
struct rvu_block {
	struct rsrc_bmap lf;		/* Allocator for this block's LFs */
	struct admin_queue *aq;		/* NIX/NPA AQ */
	u16  *fn_map;			/* LF to pcifunc mapping */
	bool multislot;			/* Block allows multiple LF slots per
					 * RVU func — assumed from name; verify
					 */
	bool implemented;		/* Block exists on this silicon */
	u8   addr;			/* RVU_BLOCK_ADDR_E */
	u8   type;			/* RVU_BLOCK_TYPE_E */
	u8   lfshift;
	/* CSR offsets for LF discovery/config/reset; exact register
	 * semantics are defined by the hardware spec, not visible here.
	 */
	u64  lookup_reg;
	u64  pf_lfcnt_reg;
	u64  vf_lfcnt_reg;
	u64  lfcfg_reg;
	u64  msixcfg_reg;
	u64  lfreset_reg;
	unsigned char name[NAME_SIZE];	/* Human-readable block name */
};
|
|
|
|
|
2018-10-10 12:44:27 +00:00
|
|
|
/* Structure for per RVU func info ie PF/VF */
struct rvu_pfvf {
	bool npalf;	/* Only one NPALF per RVU_FUNC */
	bool nixlf;	/* Only one NIXLF per RVU_FUNC */
	/* Counts of LFs attached to this func per multislot block
	 * (assumed from field names; confirm against attach logic).
	 */
	u16  sso;
	u16  ssow;
	u16  cptlfs;
	u16  timlfs;

	/* Block LF's MSIX vector info */
	struct rsrc_bmap msix;	/* Bitmap for MSIX vector alloc */
/* Encode a (block address, LF) pair into a single u16 key for msix_lfmap */
#define MSIX_BLKLF(blkaddr, lf)	(((blkaddr) << 8) | ((lf) & 0xFF))
	u16 *msix_lfmap;	/* Vector to block LF mapping */

	/* NPA contexts */
	struct qmem *aura_ctx;
	struct qmem *pool_ctx;
	struct qmem *npa_qints_ctx;
};
|
|
|
|
|
2018-10-10 12:44:22 +00:00
|
|
|
/* Hardware capabilities of this RVU instance, discovered at probe. */
struct rvu_hwinfo {
	u8  total_pfs;		/* MAX RVU PFs HW supports */
	u16 total_vfs;		/* Max RVU VFs HW supports */
	u16 max_vfs_per_pf;	/* Max VFs that can be attached to a PF */

	struct rvu_block block[BLK_COUNT];	/* Block info */
};
|
|
|
|
|
2018-10-10 12:44:21 +00:00
|
|
|
/* Top-level driver state: one instance per RVU AF PCI device. */
struct rvu {
	void __iomem *afreg_base;	/* AF register space; accessed via
					 * rvu_read64()/rvu_write64()
					 */
	void __iomem *pfreg_base;	/* PF register space; accessed via
					 * rvupf_read64()/rvupf_write64()
					 */
	struct pci_dev *pdev;
	struct device *dev;
	struct rvu_hwinfo *hw;		/* Discovered HW capabilities */
	struct rvu_pfvf *pf;		/* Per-PF state (indexed by PF id —
					 * assumed; verify at alloc site)
					 */
	struct rvu_pfvf *hwvf;		/* Per-HWVF state */
	spinlock_t rsrc_lock;		/* Serialize resource alloc/free */

	/* Mbox */
	struct otx2_mbox mbox;		/* Mailbox serviced by mbox_wrk */
	struct rvu_work *mbox_wrk;
	struct otx2_mbox mbox_up;	/* Up-direction mailbox, serviced by
					 * mbox_wrk_up
					 */
	struct rvu_work *mbox_wrk_up;
	struct workqueue_struct *mbox_wq;

	/* MSI-X */
	u16  num_vec;			/* Number of MSI-X vectors */
	char *irq_name;			/* Per-vector IRQ name buffer(s) */
	bool *irq_allocated;		/* Per-vector allocation flags */
	dma_addr_t msix_base_iova;	/* IOVA of MSI-X table base */

	/* CGX */
#define PF_CGXMAP_BASE		1 /* PF 0 is reserved for RVU PF */
	u8 cgx_mapped_pfs;		/* Number of PFs mapped to CGX LMACs */
	u8 cgx_cnt;			/* available cgx ports */
	u8 *pf2cgxlmac_map;		/* pf to cgx_lmac map */
	u16 *cgxlmac2pf_map;		/* bitmap of mapped pfs for
					 * every cgx lmac port
					 */
	unsigned long pf_notify_bmap;	/* Flags for PF notification */
	void **cgx_idmap;		/* cgx id to cgx data map table */
	struct work_struct cgx_evh_work;	/* CGX link event handling */
	struct workqueue_struct *cgx_evh_wq;
	spinlock_t cgx_evq_lock;	/* cgx event queue lock */
	struct list_head cgx_evq_head;	/* cgx event queue head */
};
|
|
|
|
|
2018-10-10 12:44:22 +00:00
|
|
|
/* Write a 64-bit value to an AF CSR.  The block address selects bits
 * [..:28] of the offset within the AF register space.
 */
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
{
	void __iomem *addr = rvu->afreg_base + ((block << 28) | offset);

	writeq(val, addr);
}
|
|
|
|
|
|
|
|
/* Read a 64-bit AF CSR; counterpart of rvu_write64(). */
static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset)
{
	void __iomem *addr = rvu->afreg_base + ((block << 28) | offset);

	return readq(addr);
}
|
|
|
|
|
|
|
|
/* Write a 64-bit value to a CSR in the PF register space. */
static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val)
{
	void __iomem *addr = rvu->pfreg_base + offset;

	writeq(val, addr);
}
|
|
|
|
|
|
|
|
/* Read a 64-bit CSR from the PF register space. */
static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
{
	void __iomem *addr = rvu->pfreg_base + offset;

	return readq(addr);
}
|
|
|
|
|
|
|
|
/* Function Prototypes
 * RVU
 */

/* Resource bitmap helpers (struct rsrc_bmap) */
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);

/* pcifunc / PF-VF lookup helpers */
int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);

/* Block and LF management */
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
|
2018-10-10 12:44:22 +00:00
|
|
|
|
2018-10-16 11:27:11 +00:00
|
|
|
/* NPA/NIX AQ APIs */
int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
		 int qsize, int inst_size, int res_size);
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
|
|
|
|
|
2018-10-10 12:44:32 +00:00
|
|
|
/* CGX APIs */
|
2018-10-16 11:27:06 +00:00
|
|
|
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
|
|
|
|
{
|
|
|
|
return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Decode a pf2cgxlmac_map entry: the CGX id lives in the upper nibble,
 * the LMAC id in the lower nibble.
 */
static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
{
	*lmac_id = map & 0xF;
	*cgx_id = (map >> 4) & 0xF;
}
|
|
|
|
|
2018-10-10 12:44:32 +00:00
|
|
|
/* CGX bring-up / teardown and mailbox handlers.  Handler naming follows
 * the mbox convention: rvu_mbox_handler_<MSG_ID>(rvu, req, rsp).
 */
int rvu_cgx_probe(struct rvu *rvu);
void rvu_cgx_wq_destroy(struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp);
int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp);
int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp);
int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp);
int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp);
|
2018-10-16 11:27:11 +00:00
|
|
|
|
|
|
|
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
/* NPA mailbox handlers (AQ enqueue, LF alloc/free) */
int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
				struct npa_aq_enq_req *req,
				struct npa_aq_enq_rsp *rsp);
int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
				  struct npa_lf_alloc_req *req,
				  struct npa_lf_alloc_rsp *rsp);
int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp);
|
2018-10-10 12:44:21 +00:00
|
|
|
#endif /* RVU_H */
|