// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
/* Intel(R) Ethernet Connection E800 Series Linux Driver */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ice.h"
#define DRV_VERSION "ice-0.0.1-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
static struct workqueue_struct *ice_wq;
/**
* ice_search_res - Search the tracker for a block of resources
* @res: pointer to the resource
* @needed: size of the block needed
* @id: identifier to track owner
*
* Returns the base item index of the block, or -ENOMEM for error
*/
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
int start = res->search_hint;
int end = start;
id |= ICE_RES_VALID_BIT;
/* an initial search_hint too close to the end could otherwise run past
 * the tracker; fail fast and let ice_get_res retry from index 0
 */
if ((start + needed) > res->num_entries)
return -ENOMEM;
do {
/* skip already allocated entries */
if (res->list[end++] & ICE_RES_VALID_BIT) {
start = end;
if ((start + needed) > res->num_entries)
break;
}
if (end == (start + needed)) {
int i = start;
/* there was enough, so assign it to the requestor */
while (i != end)
res->list[i++] = id;
if (end == res->num_entries)
end = 0;
res->search_hint = end;
return start;
}
} while (1);
return -ENOMEM;
}
/**
* ice_get_res - get a block of resources
* @pf: board private structure
* @res: pointer to the resource
* @needed: size of the block needed
* @id: identifier to track owner
*
* Returns the base item index of the block, or -ENOMEM for error
* The search_hint trick and lack of advanced fit-finding only work
* because we're highly likely to have all the same-sized requests.
* Linear search time and any fragmentation should be minimal.
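*
* Example usage (an illustrative sketch mirroring ice_req_irq_msix_misc(),
* not a definitive pattern):
*
*	base = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
*	if (base < 0)
*		return base;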
*/
static int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
{
int ret;
if (!res || !pf)
return -EINVAL;
if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
dev_err(&pf->pdev->dev,
"param err: needed=%d, num_entries = %d id=0x%04x\n",
needed, res->num_entries, id);
return -EINVAL;
}
/* search based on search_hint */
ret = ice_search_res(res, needed, id);
if (ret < 0) {
/* previous search failed. Reset search hint and try again */
res->search_hint = 0;
ret = ice_search_res(res, needed, id);
}
return ret;
}
/**
* ice_free_res - free a block of resources
* @res: pointer to the resource
* @index: starting index previously returned by ice_get_res
* @id: identifier to track owner
*
* Returns the number of resources freed, or -EINVAL when given invalid
* parameters
*/
static int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
{
int count = 0;
int i;
if (!res || index >= res->num_entries)
return -EINVAL;
id |= ICE_RES_VALID_BIT;
for (i = index; i < res->num_entries && res->list[i] == id; i++) {
res->list[i] = 0;
count++;
}
return count;
}
/**
* __ice_clean_ctrlq - helper function to clean controlq rings
* @pf: ptr to struct ice_pf
* @q_type: specific Control queue type
*
* Returns non-zero when events are still pending after the work limit
* (ICE_DFLT_IRQ_WORK) has been reached, otherwise 0
*/
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
struct ice_rq_event_info event;
struct ice_hw *hw = &pf->hw;
struct ice_ctl_q_info *cq;
u16 pending, i = 0;
const char *qtype;
u32 oldval, val;
switch (q_type) {
case ICE_CTL_Q_ADMIN:
cq = &hw->adminq;
qtype = "Admin";
break;
default:
dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
q_type);
return 0;
}
/* check for error indications - the PF_xx_AxQLEN register layouts for
 * FW/MBX/SB are identical, so just use the PF_FW_AxQLEN defines
 */
val = rd32(hw, cq->rq.len);
if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
PF_FW_ARQLEN_ARQCRIT_M)) {
oldval = val;
if (val & PF_FW_ARQLEN_ARQVFE_M)
dev_dbg(&pf->pdev->dev,
"%s Receive Queue VF Error detected\n", qtype);
if (val & PF_FW_ARQLEN_ARQOVFL_M) {
dev_dbg(&pf->pdev->dev,
"%s Receive Queue Overflow Error detected\n",
qtype);
}
if (val & PF_FW_ARQLEN_ARQCRIT_M)
dev_dbg(&pf->pdev->dev,
"%s Receive Queue Critical Error detected\n",
qtype);
val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
PF_FW_ARQLEN_ARQCRIT_M);
if (oldval != val)
wr32(hw, cq->rq.len, val);
}
val = rd32(hw, cq->sq.len);
if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
PF_FW_ATQLEN_ATQCRIT_M)) {
oldval = val;
if (val & PF_FW_ATQLEN_ATQVFE_M)
dev_dbg(&pf->pdev->dev,
"%s Send Queue VF Error detected\n", qtype);
if (val & PF_FW_ATQLEN_ATQOVFL_M) {
dev_dbg(&pf->pdev->dev,
"%s Send Queue Overflow Error detected\n",
qtype);
}
if (val & PF_FW_ATQLEN_ATQCRIT_M)
dev_dbg(&pf->pdev->dev,
"%s Send Queue Critical Error detected\n",
qtype);
val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
PF_FW_ATQLEN_ATQCRIT_M);
if (oldval != val)
wr32(hw, cq->sq.len, val);
}
event.buf_len = cq->rq_buf_size;
event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
GFP_KERNEL);
if (!event.msg_buf)
return 0;
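/* drain events from the receive queue, bounded by ICE_DFLT_IRQ_WORK so a
 * busy queue cannot monopolize this context
 */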
do {
enum ice_status ret;
ret = ice_clean_rq_elem(hw, cq, &event, &pending);
if (ret == ICE_ERR_AQ_NO_WORK)
break;
if (ret) {
dev_err(&pf->pdev->dev,
"%s Receive Queue event error %d\n", qtype,
ret);
break;
}
} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
devm_kfree(&pf->pdev->dev, event.msg_buf);
return pending && (i == ICE_DFLT_IRQ_WORK);
}
/**
* ice_clean_adminq_subtask - clean the AdminQ rings
* @pf: board private structure
*/
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
u32 val;
if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
return;
if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
return;
clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
/* re-enable Admin queue interrupt causes */
val = rd32(hw, PFINT_FW_CTL);
wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M));
ice_flush(hw);
}
/**
* ice_service_task_schedule - schedule the service task to wake up
* @pf: board private structure
*
* If not already scheduled, this puts the task into the work queue.
*/
static void ice_service_task_schedule(struct ice_pf *pf)
{
if (!test_bit(__ICE_DOWN, pf->state) &&
!test_and_set_bit(__ICE_SERVICE_SCHED, pf->state))
queue_work(ice_wq, &pf->serv_task);
}
/**
* ice_service_task_complete - finish up the service task
* @pf: board private structure
*/
static void ice_service_task_complete(struct ice_pf *pf)
{
WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));
/* force memory (pf->state) to sync before next service task */
smp_mb__before_atomic();
clear_bit(__ICE_SERVICE_SCHED, pf->state);
}
/**
* ice_service_timer - timer callback to schedule service task
* @t: pointer to timer_list
*/
static void ice_service_timer(struct timer_list *t)
{
struct ice_pf *pf = from_timer(pf, t, serv_tmr);
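/* self-rearm for the next period; round_jiffies() aligns expiry to a
 * whole second so timer wakeups can batch together
 */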
mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
ice_service_task_schedule(pf);
}
/**
* ice_service_task - manage and run subtasks
* @work: pointer to work_struct contained by the PF struct
*/
static void ice_service_task(struct work_struct *work)
{
struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
unsigned long start_time = jiffies;
/* subtasks */
ice_clean_adminq_subtask(pf);
/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
ice_service_task_complete(pf);
/* If the tasks have taken longer than one service timer period
* or there is more work to be done, reset the service timer to
* schedule the service task now.
*/
if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
mod_timer(&pf->serv_tmr, jiffies);
}
/**
* ice_set_ctrlq_len - helper function to set controlq length
* @hw: pointer to the hw instance
*/
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
hw->adminq.num_rq_entries = ICE_AQ_LEN;
hw->adminq.num_sq_entries = ICE_AQ_LEN;
hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
}
/**
* ice_ena_misc_vector - enable the non-queue interrupts
* @pf: board private structure
*/
static void ice_ena_misc_vector(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
u32 val;
/* clear things first */
wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
rd32(hw, PFINT_OICR); /* read to clear */
val = (PFINT_OICR_HLP_RDY_M |
PFINT_OICR_CPM_RDY_M |
PFINT_OICR_ECC_ERR_M |
PFINT_OICR_MAL_DETECT_M |
PFINT_OICR_GRST_M |
PFINT_OICR_PCI_EXCEPTION_M |
PFINT_OICR_GPIO_M |
PFINT_OICR_STORM_DETECT_M |
PFINT_OICR_HMC_ERR_M);
wr32(hw, PFINT_OICR_ENA, val);
/* SW_ITR_IDX = 0, but don't change INTENA */
wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
/**
* ice_misc_intr - misc interrupt handler
* @irq: interrupt number
* @data: pointer to the PF structure (struct ice_pf)
*/
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
struct ice_pf *pf = (struct ice_pf *)data;
struct ice_hw *hw = &pf->hw;
irqreturn_t ret = IRQ_NONE;
u32 oicr, ena_mask;
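/* the Admin queue interrupt cause is routed to this same vector, so
 * assume adminq events are pending and let the service task check
 */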
set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA);
if (!(oicr & PFINT_OICR_INTEVENT_M))
goto ena_intr;
if (oicr & PFINT_OICR_HMC_ERR_M) {
ena_mask &= ~PFINT_OICR_HMC_ERR_M;
dev_dbg(&pf->pdev->dev,
"HMC Error interrupt - info 0x%x, data 0x%x\n",
rd32(hw, PFHMC_ERRORINFO),
rd32(hw, PFHMC_ERRORDATA));
}
/* Report and mask off any remaining unexpected interrupts */
oicr &= ena_mask;
if (oicr) {
dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
oicr);
/* If a critical error is pending there is no choice but to
* reset the device.
*/
if (oicr & (PFINT_OICR_PE_CRITERR_M |
PFINT_OICR_PCI_EXCEPTION_M |
PFINT_OICR_ECC_ERR_M))
set_bit(__ICE_PFR_REQ, pf->state);
ena_mask &= ~oicr;
}
ret = IRQ_HANDLED;
ena_intr:
/* re-enable interrupt causes that are not handled during this pass */
wr32(hw, PFINT_OICR_ENA, ena_mask);
if (!test_bit(__ICE_DOWN, pf->state)) {
ice_service_task_schedule(pf);
ice_irq_dynamic_ena(hw);
}
return ret;
}
/**
* ice_free_irq_msix_misc - Unroll misc vector setup
* @pf: board private structure
*/
static void ice_free_irq_msix_misc(struct ice_pf *pf)
{
/* disable OICR interrupt */
wr32(&pf->hw, PFINT_OICR_ENA, 0);
ice_flush(&pf->hw);
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
devm_free_irq(&pf->pdev->dev,
pf->msix_entries[pf->oicr_idx].vector, pf);
}
ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
}
/**
* ice_req_irq_msix_misc - Set up the misc vector to handle non-queue events
* @pf: board private structure
*
* This sets up the handler for MSIX 0, which is used to manage the
* non-queue interrupts, e.g. AdminQ and errors. This is not used
* when in MSI or Legacy interrupt mode.
*/
static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
int oicr_idx, err = 0;
u8 itr_gran;
u32 val;
if (!pf->int_name[0])
snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
dev_driver_string(&pf->pdev->dev),
dev_name(&pf->pdev->dev));
/* reserve one vector in irq_tracker for misc interrupts */
oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
if (oicr_idx < 0)
return oicr_idx;
pf->oicr_idx = oicr_idx;
err = devm_request_irq(&pf->pdev->dev,
pf->msix_entries[pf->oicr_idx].vector,
ice_misc_intr, 0, pf->int_name, pf);
if (err) {
dev_err(&pf->pdev->dev,
"devm_request_irq for %s failed: %d\n",
pf->int_name, err);
ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
return err;
}
ice_ena_misc_vector(pf);
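/* associate the OICR interrupt cause with the reserved misc vector */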
val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
(ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) |
PFINT_OICR_CTL_CAUSE_ENA_M;
wr32(hw, PFINT_OICR_CTL, val);
/* This enables Admin queue Interrupt causes */
val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
(ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) |
PFINT_FW_CTL_CAUSE_ENA_M;
wr32(hw, PFINT_FW_CTL, val);
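/* program an initial interrupt throttle rate for the misc vector */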
itr_gran = hw->itr_gran_200;
wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
ITR_TO_REG(ICE_ITR_8K, itr_gran));
ice_flush(hw);
ice_irq_dynamic_ena(hw);
return 0;
}
/**
* ice_determine_q_usage - Calculate queue distribution
* @pf: board private structure
*
* Records the Tx/Rx queue split for the PF; initial support uses a single
* queue pair and leaves the remainder in q_left_tx/q_left_rx
*/
static void ice_determine_q_usage(struct ice_pf *pf)
{
u16 q_left_tx, q_left_rx;
q_left_tx = pf->hw.func_caps.common_cap.num_txq;
q_left_rx = pf->hw.func_caps.common_cap.num_rxq;
/* initial support for only 1 tx and 1 rx queue */
pf->num_lan_tx = 1;
pf->num_lan_rx = 1;
pf->q_left_tx = q_left_tx - pf->num_lan_tx;
pf->q_left_rx = q_left_rx - pf->num_lan_rx;
}
/**
* ice_deinit_pf - Unrolls initializations done by ice_init_pf
* @pf: board private structure to initialize
*/
static void ice_deinit_pf(struct ice_pf *pf)
{
if (pf->serv_tmr.function)
del_timer_sync(&pf->serv_tmr);
if (pf->serv_task.func)
cancel_work_sync(&pf->serv_task);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->avail_q_mutex);
}
/**
* ice_init_pf - Initialize general software structures (struct ice_pf)
* @pf: board private structure to initialize
*/
static void ice_init_pf(struct ice_pf *pf)
{
bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
mutex_init(&pf->sw_mutex);
mutex_init(&pf->avail_q_mutex);
/* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
mutex_lock(&pf->avail_q_mutex);
bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
mutex_unlock(&pf->avail_q_mutex);
/* setup service timer and periodic service task */
timer_setup(&pf->serv_tmr, ice_service_timer, 0);
pf->serv_tmr_period = HZ;
INIT_WORK(&pf->serv_task, ice_service_task);
clear_bit(__ICE_SERVICE_SCHED, pf->state);
}
/**
* ice_ena_msix_range - Request a range of MSIX vectors from the OS
* @pf: board private structure
*
* Compute the number of MSI-X vectors required (v_budget) and request them
* from the OS. Returns the number of vectors reserved, or negative on failure
*/
static int ice_ena_msix_range(struct ice_pf *pf)
{
int v_left, v_actual, v_budget = 0;
int needed, err, i;
v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
/* reserve one vector for miscellaneous handler */
needed = 1;
v_budget += needed;
v_left -= needed;
/* reserve vectors for LAN traffic */
pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
v_budget += pf->num_lan_msix;
pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
if (!pf->msix_entries) {
err = -ENOMEM;
goto exit_err;
}
for (i = 0; i < v_budget; i++)
pf->msix_entries[i].entry = i;
/* actually reserve the vectors */
v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
ICE_MIN_MSIX, v_budget);
if (v_actual < 0) {
dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
err = v_actual;
goto msix_err;
}
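/* fall back in stages: keep the misc vector plus however many LAN
 * vectors were granted; with only two vectors run a single LAN vector;
 * with fewer than two, give up on MSI-X entirely
 */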
if (v_actual < v_budget) {
dev_warn(&pf->pdev->dev,
"not enough vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
if (v_actual >= (pf->num_lan_msix + 1)) {
pf->num_avail_msix = v_actual - (pf->num_lan_msix + 1);
} else if (v_actual >= 2) {
pf->num_lan_msix = 1;
pf->num_avail_msix = v_actual - 2;
} else {
pci_disable_msix(pf->pdev);
err = -ERANGE;
goto msix_err;
}
}
return v_actual;
msix_err:
devm_kfree(&pf->pdev->dev, pf->msix_entries);
exit_err:
pf->num_lan_msix = 0;
clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
return err;
}
/**
* ice_dis_msix - Disable MSI-X interrupt setup in OS
* @pf: board private structure
*/
static void ice_dis_msix(struct ice_pf *pf)
{
pci_disable_msix(pf->pdev);
devm_kfree(&pf->pdev->dev, pf->msix_entries);
pf->msix_entries = NULL;
clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
}
/**
* ice_init_interrupt_scheme - Determine proper interrupt scheme
* @pf: board private structure to initialize
*/
static int ice_init_interrupt_scheme(struct ice_pf *pf)
{
int vectors = 0;
ssize_t size;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
vectors = ice_ena_msix_range(pf);
else
return -ENODEV;
if (vectors < 0)
return vectors;
/* set up vector assignment tracking */
size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);
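/* the tracker ends in a flexible list[] array with one entry per vector */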
pf->irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
if (!pf->irq_tracker) {
ice_dis_msix(pf);
return -ENOMEM;
}
pf->irq_tracker->num_entries = vectors;
return 0;
}
/**
* ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
* @pf: board private structure
*/
static void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
ice_dis_msix(pf);
devm_kfree(&pf->pdev->dev, pf->irq_tracker);
pf->irq_tracker = NULL;
}
/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in ice_pci_tbl
*
* Returns 0 on success, negative on failure
*/
static int ice_probe(struct pci_dev *pdev,
const struct pci_device_id __always_unused *ent)
{
struct ice_pf *pf;
struct ice_hw *hw;
int err;
/* this driver uses devres, see Documentation/driver-model/devres.txt */
err = pcim_enable_device(pdev);
if (err)
return err;
err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
if (err) {
dev_err(&pdev->dev, "I/O map error %d\n", err);
return err;
}
pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL);
if (!pf)
return -ENOMEM;
/* set up for high or low DMA */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
return err;
}
pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
pf->pdev = pdev;
pci_set_drvdata(pdev, pf);
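/* mark the device down so the service task will not schedule itself
 * until initialization is far enough along
 */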
set_bit(__ICE_DOWN, pf->state);
hw = &pf->hw;
hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
hw->back = pf;
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
ice_set_ctrlq_len(hw);
pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
#ifndef CONFIG_DYNAMIC_DEBUG
if (debug < -1)
hw->debug_mask = debug;
#endif
err = ice_init_hw(hw);
if (err) {
dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err);
err = -EIO;
goto err_exit_unroll;
}
dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n",
hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
hw->api_maj_ver, hw->api_min_ver);
ice_init_pf(pf);
ice_determine_q_usage(pf);
pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
hw->func_caps.guaranteed_num_vsi);
if (!pf->num_alloc_vsi) {
err = -EIO;
goto err_init_pf_unroll;
}
pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
sizeof(struct ice_vsi *), GFP_KERNEL);
if (!pf->vsi) {
err = -ENOMEM;
goto err_init_pf_unroll;
}
err = ice_init_interrupt_scheme(pf);
if (err) {
dev_err(&pdev->dev,
"ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
goto err_init_interrupt_unroll;
}
/* In case of MSI-X we are going to set up the misc vector right here
 * to handle admin queue events etc. In case of legacy and MSI
 * the misc functionality and queue processing is combined in
 * the same vector and that gets set up at open.
 */
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
err = ice_req_irq_msix_misc(pf);
if (err) {
dev_err(&pdev->dev,
"setup of misc vector failed: %d\n", err);
goto err_init_interrupt_unroll;
}
}
/* create switch struct for the switch element created by FW on boot */
pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw),
GFP_KERNEL);
if (!pf->first_sw) {
err = -ENOMEM;
goto err_msix_misc_unroll;
}
pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
pf->first_sw->pf = pf;
/* record the sw_id available for later use */
pf->first_sw->sw_id = hw->port_info->sw_id;
return 0;
err_msix_misc_unroll:
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
ice_clear_interrupt_scheme(pf);
devm_kfree(&pdev->dev, pf->vsi);
err_init_pf_unroll:
ice_deinit_pf(pf);
ice_deinit_hw(hw);
err_exit_unroll:
pci_disable_pcie_error_reporting(pdev);
return err;
}
/**
* ice_remove - Device removal routine
* @pdev: PCI device information struct
*/
static void ice_remove(struct pci_dev *pdev)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
if (!pf)
return;
set_bit(__ICE_DOWN, pf->state);
ice_free_irq_msix_misc(pf);
ice_clear_interrupt_scheme(pf);
ice_deinit_pf(pf);
ice_deinit_hw(&pf->hw);
pci_disable_pcie_error_reporting(pdev);
}
/* ice_pci_tbl - PCI Device ID Table
*
* Wildcard entries (PCI_ANY_ID) should come last
* Last entry must be all 0s
*
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SGMII), 0 },
/* required last entry */
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
static struct pci_driver ice_driver = {
.name = KBUILD_MODNAME,
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
};
/**
* ice_module_init - Driver registration routine
*
* ice_module_init is the first routine called when the driver is
* loaded. All it does is register with the PCI subsystem.
*/
static int __init ice_module_init(void)
{
int status;
pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
pr_info("%s\n", ice_copyright);
ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;
}
status = pci_register_driver(&ice_driver);
if (status) {
pr_err("failed to register pci driver, err %d\n", status);
destroy_workqueue(ice_wq);
}
return status;
}
module_init(ice_module_init);
/**
* ice_module_exit - Driver exit cleanup routine
*
* ice_module_exit is called just before the driver is removed
* from memory.
*/
static void __exit ice_module_exit(void)
{
pci_unregister_driver(&ice_driver);
destroy_workqueue(ice_wq);
pr_info("module unloaded\n");
}
module_exit(ice_module_exit);