RDMA/irdma: Add irdma Kconfig/Makefile and remove i40iw

Add Kconfig and Makefile to build irdma driver.

Remove i40iw driver and add an alias in irdma.

Remove legacy exported symbols i40e_register_client
and i40e_unregister_client from i40e as they are no
longer used.

irdma is the replacement driver that supports X722.

Link: https://lore.kernel.org/r/20210602205138.889-16-shiraz.saleem@intel.com
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
Shiraz Saleem 2021-06-02 15:51:37 -05:00 committed by Jason Gunthorpe
parent 48d6b3336a
commit fa0cf568fd
38 changed files with 41 additions and 28996 deletions

View File

@ -731,26 +731,6 @@ Description:
is the irq number of "sdma3", and M is irq number of "sdma4" in
the /proc/interrupts file.
sysfs interface for Intel(R) X722 iWARP i40iw driver
----------------------------------------------------
What: /sys/class/infiniband/i40iwX/hw_rev
What: /sys/class/infiniband/i40iwX/hca_type
What: /sys/class/infiniband/i40iwX/board_id
Date: Jan, 2016
KernelVersion: v4.10
Contact: linux-rdma@vger.kernel.org
Description:
=============== ==== ========================
hw_rev: (RO) Hardware revision number
hca_type: (RO) Show HCA type (I40IW)
board_id: (RO) I40IW board ID
=============== ==== ========================
sysfs interface for QLogic qedr NIC Driver
------------------------------------------

View File

@ -9365,14 +9365,6 @@ L: linux-pm@vger.kernel.org
S: Supported
F: drivers/cpufreq/intel_pstate.c
INTEL RDMA RNIC DRIVER
M: Faisal Latif <faisal.latif@intel.com>
M: Shiraz Saleem <shiraz.saleem@intel.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/i40iw/
F: include/uapi/rdma/i40iw-abi.h
INTEL SCU DRIVERS
M: Mika Westerberg <mika.westerberg@linux.intel.com>
S: Maintained

View File

@ -82,7 +82,7 @@ source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/qib/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/efa/Kconfig"
source "drivers/infiniband/hw/i40iw/Kconfig"
source "drivers/infiniband/hw/irdma/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/mlx5/Kconfig"
source "drivers/infiniband/hw/ocrdma/Kconfig"

View File

@ -3,7 +3,7 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
obj-$(CONFIG_INFINIBAND_QIB) += qib/
obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
obj-$(CONFIG_INFINIBAND_EFA) += efa/
obj-$(CONFIG_INFINIBAND_I40IW) += i40iw/
obj-$(CONFIG_INFINIBAND_IRDMA) += irdma/
obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/
obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/

View File

@ -1,9 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_I40IW
tristate "Intel(R) Ethernet X722 iWARP Driver"
depends on INET && I40E
depends on IPV6 || !IPV6
depends on PCI
select GENERIC_ALLOCATOR
help
Intel(R) Ethernet X722 iWARP Driver

View File

@ -1,9 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_I40IW) += i40iw.o
i40iw-objs :=\
i40iw_cm.o i40iw_ctrl.o \
i40iw_hmc.o i40iw_hw.o i40iw_main.o \
i40iw_pble.o i40iw_puda.o i40iw_uk.o i40iw_utils.o \
i40iw_verbs.o i40iw_virtchnl.o i40iw_vf.o

View File

@ -1,602 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_IW_H
#define I40IW_IW_H
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/crc32c.h>
#include <linux/net/intel/i40e_client.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
#include <rdma/rdma_cm.h>
#include <rdma/iw_cm.h>
#include <crypto/hash.h>
#include "i40iw_status.h"
#include "i40iw_osdep.h"
#include "i40iw_d.h"
#include "i40iw_hmc.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include <rdma/i40iw-abi.h>
#include "i40iw_pble.h"
#include "i40iw_verbs.h"
#include "i40iw_cm.h"
#include "i40iw_user.h"
#include "i40iw_puda.h"
#define I40IW_FW_VER_DEFAULT 2
#define I40IW_HW_VERSION 2
#define I40IW_ARP_ADD 1
#define I40IW_ARP_DELETE 2
#define I40IW_ARP_RESOLVE 3
#define I40IW_MACIP_ADD 1
#define I40IW_MACIP_DELETE 2
#define IW_CCQ_SIZE (I40IW_CQP_SW_SQSIZE_2048 + 1)
#define IW_CEQ_SIZE 2048
#define IW_AEQ_SIZE 2048
#define RX_BUF_SIZE (1536 + 8)
#define IW_REG0_SIZE (4 * 1024)
#define IW_TX_TIMEOUT (6 * HZ)
#define IW_FIRST_QPN 1
#define IW_SW_CONTEXT_ALIGN 1024
#define MAX_DPC_ITERATIONS 128
#define I40IW_EVENT_TIMEOUT 100000
#define I40IW_VCHNL_EVENT_TIMEOUT 100000
#define I40IW_NO_VLAN 0xffff
#define I40IW_NO_QSET 0xffff
/* access to mcast filter list */
#define IW_ADD_MCAST false
#define IW_DEL_MCAST true
#define I40IW_DRV_OPT_ENABLE_MPA_VER_0 0x00000001
#define I40IW_DRV_OPT_DISABLE_MPA_CRC 0x00000002
#define I40IW_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
#define I40IW_DRV_OPT_DISABLE_INTF 0x00000008
#define I40IW_DRV_OPT_ENABLE_MSI 0x00000010
#define I40IW_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
#define I40IW_DRV_OPT_NO_INLINE_DATA 0x00000080
#define I40IW_DRV_OPT_DISABLE_INT_MOD 0x00000100
#define I40IW_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
#define I40IW_DRV_OPT_ENABLE_PAU 0x00000400
#define I40IW_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800
#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
#define IW_CFG_FPM_QP_COUNT 32768
#define I40IW_MAX_PAGES_PER_FMR 512
#define I40IW_MIN_PAGES_PER_FMR 1
#define I40IW_CQP_COMPL_RQ_WQE_FLUSHED 2
#define I40IW_CQP_COMPL_SQ_WQE_FLUSHED 3
#define I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED 4
struct i40iw_cqp_compl_info {
u32 op_ret_val;
u16 maj_err_code;
u16 min_err_code;
bool error;
u8 op_code;
};
#define i40iw_pr_err(fmt, args ...) pr_err("%s: "fmt, __func__, ## args)
#define i40iw_pr_info(fmt, args ...) pr_info("%s: " fmt, __func__, ## args)
#define i40iw_pr_warn(fmt, args ...) pr_warn("%s: " fmt, __func__, ## args)
struct i40iw_cqp_request {
struct cqp_commands_info info;
wait_queue_head_t waitq;
struct list_head list;
atomic_t refcount;
void (*callback_fcn)(struct i40iw_cqp_request*, u32);
void *param;
struct i40iw_cqp_compl_info compl_info;
bool waiting;
bool request_done;
bool dynamic;
};
struct i40iw_cqp {
struct i40iw_sc_cqp sc_cqp;
spinlock_t req_lock; /*cqp request list */
wait_queue_head_t waitq;
struct i40iw_dma_mem sq;
struct i40iw_dma_mem host_ctx;
u64 *scratch_array;
struct i40iw_cqp_request *cqp_requests;
struct list_head cqp_avail_reqs;
struct list_head cqp_pending_reqs;
};
struct i40iw_device;
struct i40iw_ccq {
struct i40iw_sc_cq sc_cq;
spinlock_t lock; /* ccq control */
wait_queue_head_t waitq;
struct i40iw_dma_mem mem_cq;
struct i40iw_dma_mem shadow_area;
};
struct i40iw_ceq {
struct i40iw_sc_ceq sc_ceq;
struct i40iw_dma_mem mem;
u32 irq;
u32 msix_idx;
struct i40iw_device *iwdev;
struct tasklet_struct dpc_tasklet;
};
struct i40iw_aeq {
struct i40iw_sc_aeq sc_aeq;
struct i40iw_dma_mem mem;
};
struct i40iw_arp_entry {
u32 ip_addr[4];
u8 mac_addr[ETH_ALEN];
};
enum init_completion_state {
INVALID_STATE = 0,
INITIAL_STATE,
CQP_CREATED,
HMC_OBJS_CREATED,
PBLE_CHUNK_MEM,
CCQ_CREATED,
AEQ_CREATED,
CEQ_CREATED,
ILQ_CREATED,
IEQ_CREATED,
IP_ADDR_REGISTERED,
RDMA_DEV_REGISTERED
};
struct i40iw_msix_vector {
u32 idx;
u32 irq;
u32 cpu_affinity;
u32 ceq_id;
cpumask_t mask;
};
struct l2params_work {
struct work_struct work;
struct i40iw_device *iwdev;
struct i40iw_l2params l2params;
};
#define I40IW_MSIX_TABLE_SIZE 65
struct virtchnl_work {
struct work_struct work;
union {
struct i40iw_cqp_request *cqp_request;
struct i40iw_virtchnl_work_info work_info;
};
};
struct i40e_qvlist_info;
struct i40iw_device {
struct i40iw_ib_device *iwibdev;
struct net_device *netdev;
wait_queue_head_t vchnl_waitq;
struct i40iw_sc_dev sc_dev;
struct i40iw_sc_vsi vsi;
struct i40iw_handler *hdl;
struct i40e_info *ldev;
struct i40e_client *client;
struct i40iw_hw hw;
struct i40iw_cm_core cm_core;
u8 *mem_resources;
unsigned long *allocated_qps;
unsigned long *allocated_cqs;
unsigned long *allocated_mrs;
unsigned long *allocated_pds;
unsigned long *allocated_arps;
struct i40iw_qp **qp_table;
bool msix_shared;
u32 msix_count;
struct i40iw_msix_vector *iw_msixtbl;
struct i40e_qvlist_info *iw_qvlist;
struct i40iw_hmc_pble_rsrc *pble_rsrc;
struct i40iw_arp_entry *arp_table;
struct i40iw_cqp cqp;
struct i40iw_ccq ccq;
u32 ceqs_count;
struct i40iw_ceq *ceqlist;
struct i40iw_aeq aeq;
u32 arp_table_size;
u32 next_arp_index;
spinlock_t resource_lock; /* hw resource access */
spinlock_t qptable_lock;
u32 vendor_id;
u32 vendor_part_id;
u32 of_device_registered;
u32 device_cap_flags;
unsigned long db_start;
u8 resource_profile;
u8 max_rdma_vfs;
u8 max_enabled_vfs;
u8 max_sge;
u8 iw_status;
u8 send_term_ok;
/* x710 specific */
struct mutex pbl_mutex;
struct tasklet_struct dpc_tasklet;
struct workqueue_struct *virtchnl_wq;
struct virtchnl_work virtchnl_w[I40IW_MAX_PE_ENABLED_VF_COUNT];
struct i40iw_dma_mem obj_mem;
struct i40iw_dma_mem obj_next;
u8 *hmc_info_mem;
u32 sd_type;
struct workqueue_struct *param_wq;
atomic_t params_busy;
enum init_completion_state init_state;
u16 mac_ip_table_idx;
atomic_t vchnl_msgs;
u32 max_mr;
u32 max_qp;
u32 max_cq;
u32 max_pd;
u32 next_qp;
u32 next_cq;
u32 next_pd;
u32 max_mr_size;
u32 max_qp_wr;
u32 max_cqe;
u32 mr_stagmask;
u32 mpa_version;
bool dcb;
bool closing;
bool reset;
u32 used_pds;
u32 used_cqs;
u32 used_mrs;
u32 used_qps;
wait_queue_head_t close_wq;
atomic64_t use_count;
};
struct i40iw_ib_device {
struct ib_device ibdev;
struct i40iw_device *iwdev;
};
struct i40iw_handler {
struct list_head list;
struct i40e_client *client;
struct i40iw_device device;
struct i40e_info ldev;
};
/**
* i40iw_fw_major_ver - get firmware major version
* @dev: iwarp device
**/
static inline u64 i40iw_fw_major_ver(struct i40iw_sc_dev *dev)
{
return RS_64(dev->feature_info[I40IW_FEATURE_FW_INFO],
I40IW_FW_VER_MAJOR);
}
/**
* i40iw_fw_minor_ver - get firmware minor version
* @dev: iwarp device
**/
static inline u64 i40iw_fw_minor_ver(struct i40iw_sc_dev *dev)
{
return RS_64(dev->feature_info[I40IW_FEATURE_FW_INFO],
I40IW_FW_VER_MINOR);
}
/**
* to_iwdev - get device
* @ibdev: ib device
**/
static inline struct i40iw_device *to_iwdev(struct ib_device *ibdev)
{
return container_of(ibdev, struct i40iw_ib_device, ibdev)->iwdev;
}
/**
* to_ucontext - get user context
* @ibucontext: ib user context
**/
static inline struct i40iw_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
{
return container_of(ibucontext, struct i40iw_ucontext, ibucontext);
}
/**
* to_iwpd - get protection domain
* @ibpd: ib pd
**/
static inline struct i40iw_pd *to_iwpd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct i40iw_pd, ibpd);
}
/**
 * to_iwmr - get device memory region
 * @ibmr: ib memory region
 *
 * Returns the driver-private i40iw_mr embedding the core ib_mr.
 **/
static inline struct i40iw_mr *to_iwmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct i40iw_mr, ibmr);
}
/**
* to_iwmw - get device memory window
* @ibmw: ib memory window
**/
static inline struct i40iw_mr *to_iwmw(struct ib_mw *ibmw)
{
return container_of(ibmw, struct i40iw_mr, ibmw);
}
/**
 * to_iwcq - get completion queue
 * @ibcq: ib completion queue
 *
 * Returns the driver-private i40iw_cq embedding the core ib_cq.
 **/
static inline struct i40iw_cq *to_iwcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct i40iw_cq, ibcq);
}
/**
* to_iwqp - get device qp
* @ibqp: ib qp
**/
static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct i40iw_qp, ibqp);
}
/* i40iw.c */
void i40iw_qp_add_ref(struct ib_qp *ibqp);
void i40iw_qp_rem_ref(struct ib_qp *ibqp);
struct ib_qp *i40iw_get_qp(struct ib_device *, int);
void i40iw_flush_wqes(struct i40iw_device *iwdev,
struct i40iw_qp *qp);
void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
unsigned char *mac_addr,
u32 *ip_addr,
bool ipv4,
u32 action);
int i40iw_manage_apbvt(struct i40iw_device *iwdev,
u16 accel_local_port,
bool add_port);
struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait);
void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);
void i40iw_put_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);
/**
 * i40iw_alloc_resource - claim a free id from a resource bitmap
 * @iwdev: device pointer (supplies the resource spinlock)
 * @resource_array: bitmap tracking in-use resource ids
 * @max_resources: number of ids covered by the bitmap
 * @req_resource_num: on success, receives the allocated id
 * @next: hint for where to start searching; advanced (with wrap) on success
 *
 * Searches for a clear bit starting at the hint and wraps to the start
 * of the bitmap if nothing is free past it. Returns 0 on success or
 * -EOVERFLOW when every id is in use.
 **/
static inline int i40iw_alloc_resource(struct i40iw_device *iwdev,
				       unsigned long *resource_array,
				       u32 max_resources,
				       u32 *req_resource_num,
				       u32 *next)
{
	unsigned long flags;
	u32 bit;

	spin_lock_irqsave(&iwdev->resource_lock, flags);
	bit = find_next_zero_bit(resource_array, max_resources, *next);
	if (bit >= max_resources)
		bit = find_first_zero_bit(resource_array, max_resources);
	if (bit >= max_resources) {
		spin_unlock_irqrestore(&iwdev->resource_lock, flags);
		return -EOVERFLOW;
	}
	set_bit(bit, resource_array);
	*next = (bit + 1 == max_resources) ? 0 : bit + 1;
	*req_resource_num = bit;
	spin_unlock_irqrestore(&iwdev->resource_lock, flags);

	return 0;
}
/**
 * i40iw_is_resource_allocated - determine whether a resource id is in use
 * @iwdev: device pointer (supplies the resource spinlock)
 * @resource_array: bitmap tracking in-use resource ids
 * @resource_num: id to test
 *
 * Returns true when the id's bit is set in the bitmap.
 **/
static inline bool i40iw_is_resource_allocated(struct i40iw_device *iwdev,
					       unsigned long *resource_array,
					       u32 resource_num)
{
	unsigned long flags;
	bool in_use;

	spin_lock_irqsave(&iwdev->resource_lock, flags);
	in_use = test_bit(resource_num, resource_array);
	spin_unlock_irqrestore(&iwdev->resource_lock, flags);

	return in_use;
}
/**
 * i40iw_free_resource - release a previously allocated resource id
 * @iwdev: device pointer (supplies the resource spinlock)
 * @resource_array: bitmap tracking in-use resource ids
 * @resource_num: id to release
 *
 * Clears the id's bit under the resource lock so it can be reused.
 **/
static inline void i40iw_free_resource(struct i40iw_device *iwdev,
				       unsigned long *resource_array,
				       u32 resource_num)
{
	unsigned long lock_flags;

	spin_lock_irqsave(&iwdev->resource_lock, lock_flags);
	clear_bit(resource_num, resource_array);
	spin_unlock_irqrestore(&iwdev->resource_lock, lock_flags);
}
struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev);
/**
 * i40iw_initialize_hw_resources - initialize hw resource tracking for the device
 */
u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev);
int i40iw_register_rdma_device(struct i40iw_device *iwdev);
void i40iw_port_ibevent(struct i40iw_device *iwdev);
void i40iw_cm_disconn(struct i40iw_qp *iwqp);
void i40iw_cm_disconn_worker(void *);
int mini_cm_recv_pkt(struct i40iw_cm_core *, struct i40iw_device *,
struct sk_buff *);
enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
struct i40iw_cqp_request *cqp_request);
enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
u8 *mac_addr, u8 *mac_index);
int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);
void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev);
void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
void i40iw_rem_devusecount(struct i40iw_device *iwdev);
void i40iw_add_devusecount(struct i40iw_device *iwdev);
void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
struct i40iw_modify_qp_info *info, bool wait);
void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev,
struct i40iw_sc_qp *qp,
bool suspend);
enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
struct i40iw_cm_info *cminfo,
enum i40iw_quad_entry_type etype,
enum i40iw_quad_hash_manage_type mtype,
void *cmnode,
bool wait);
void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);
void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);
void i40iw_free_qp_resources(struct i40iw_qp *iwqp);
enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
struct i40iw_dma_mem *memptr,
u32 size, u32 mask);
void i40iw_request_reset(struct i40iw_device *iwdev);
void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev);
int i40iw_setup_cm_core(struct i40iw_device *iwdev);
void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core);
void i40iw_process_ceq(struct i40iw_device *, struct i40iw_ceq *iwceq);
void i40iw_process_aeq(struct i40iw_device *);
void i40iw_next_iw_state(struct i40iw_qp *iwqp,
u8 state, u8 del_hash,
u8 term, u8 term_len);
int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack);
int i40iw_send_reset(struct i40iw_cm_node *cm_node);
struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
u16 rem_port,
u32 *rem_addr,
u16 loc_port,
u32 *loc_addr,
bool add_refcnt,
bool accelerated_list);
enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
struct i40iw_sc_qp *qp,
struct i40iw_qp_flush_info *info,
bool wait);
void i40iw_gen_ae(struct i40iw_device *iwdev,
struct i40iw_sc_qp *qp,
struct i40iw_gen_ae_info *info,
bool wait);
void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src);
struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *ib_pd,
u64 addr,
u64 size,
int acc,
u64 *iova_start);
int i40iw_inetaddr_event(struct notifier_block *notifier,
unsigned long event,
void *ptr);
int i40iw_inet6addr_event(struct notifier_block *notifier,
unsigned long event,
void *ptr);
int i40iw_net_event(struct notifier_block *notifier,
unsigned long event,
void *ptr);
int i40iw_netdevice_event(struct notifier_block *notifier,
unsigned long event,
void *ptr);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -1,462 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_CM_H
#define I40IW_CM_H
#define QUEUE_EVENTS
#define I40IW_MANAGE_APBVT_DEL 0
#define I40IW_MANAGE_APBVT_ADD 1
#define I40IW_MPA_REQUEST_ACCEPT 1
#define I40IW_MPA_REQUEST_REJECT 2
/* IETF MPA -- defines, enums, structs */
#define IEFT_MPA_KEY_REQ "MPA ID Req Frame"
#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
#define IETF_MPA_KEY_SIZE 16
#define IETF_MPA_VERSION 1
#define IETF_MAX_PRIV_DATA_LEN 512
#define IETF_MPA_FRAME_SIZE 20
#define IETF_RTR_MSG_SIZE 4
#define IETF_MPA_V2_FLAG 0x10
#define SNDMARKER_SEQNMASK 0x000001FF
#define I40IW_MAX_IETF_SIZE 32
/* IETF RTR MSG Fields */
#define IETF_PEER_TO_PEER 0x8000
#define IETF_FLPDU_ZERO_LEN 0x4000
#define IETF_RDMA0_WRITE 0x8000
#define IETF_RDMA0_READ 0x4000
#define IETF_NO_IRD_ORD 0x3FFF
/* HW-supported IRD sizes*/
#define I40IW_HW_IRD_SETTING_2 2
#define I40IW_HW_IRD_SETTING_4 4
#define I40IW_HW_IRD_SETTING_8 8
#define I40IW_HW_IRD_SETTING_16 16
#define I40IW_HW_IRD_SETTING_32 32
#define I40IW_HW_IRD_SETTING_64 64
#define MAX_PORTS 65536
#define I40IW_VLAN_PRIO_SHIFT 13
enum ietf_mpa_flags {
IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
IETF_MPA_FLAGS_CRC = 0x40, /* receive Markers */
IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */
};
struct ietf_mpa_v1 {
u8 key[IETF_MPA_KEY_SIZE];
u8 flags;
u8 rev;
__be16 priv_data_len;
u8 priv_data[];
};
#define ietf_mpa_req_resp_frame ietf_mpa_frame
struct ietf_rtr_msg {
__be16 ctrl_ird;
__be16 ctrl_ord;
};
struct ietf_mpa_v2 {
u8 key[IETF_MPA_KEY_SIZE];
u8 flags;
u8 rev;
__be16 priv_data_len;
struct ietf_rtr_msg rtr_msg;
u8 priv_data[];
};
struct i40iw_cm_node;
enum i40iw_timer_type {
I40IW_TIMER_TYPE_SEND,
I40IW_TIMER_TYPE_RECV,
I40IW_TIMER_NODE_CLEANUP,
I40IW_TIMER_TYPE_CLOSE,
};
#define I40IW_PASSIVE_STATE_INDICATED 0
#define I40IW_DO_NOT_SEND_RESET_EVENT 1
#define I40IW_SEND_RESET_EVENT 2
#define MAX_I40IW_IFS 4
#define SET_ACK 0x1
#define SET_SYN 0x2
#define SET_FIN 0x4
#define SET_RST 0x8
#define TCP_OPTIONS_PADDING 3
struct option_base {
u8 optionnum;
u8 length;
};
enum option_numbers {
OPTION_NUMBER_END,
OPTION_NUMBER_NONE,
OPTION_NUMBER_MSS,
OPTION_NUMBER_WINDOW_SCALE,
OPTION_NUMBER_SACK_PERM,
OPTION_NUMBER_SACK,
OPTION_NUMBER_WRITE0 = 0xbc
};
struct option_mss {
u8 optionnum;
u8 length;
__be16 mss;
};
struct option_windowscale {
u8 optionnum;
u8 length;
u8 shiftcount;
};
union all_known_options {
char as_end;
struct option_base as_base;
struct option_mss as_mss;
struct option_windowscale as_windowscale;
};
struct i40iw_timer_entry {
struct list_head list;
unsigned long timetosend; /* jiffies */
struct i40iw_puda_buf *sqbuf;
u32 type;
u32 retrycount;
u32 retranscount;
u32 context;
u32 send_retrans;
int close_when_complete;
};
#define I40IW_DEFAULT_RETRYS 64
#define I40IW_DEFAULT_RETRANS 8
#define I40IW_DEFAULT_TTL 0x40
#define I40IW_DEFAULT_RTT_VAR 0x6
#define I40IW_DEFAULT_SS_THRESH 0x3FFFFFFF
#define I40IW_DEFAULT_REXMIT_THRESH 8
#define I40IW_RETRY_TIMEOUT HZ
#define I40IW_SHORT_TIME 10
#define I40IW_LONG_TIME (2 * HZ)
#define I40IW_MAX_TIMEOUT ((unsigned long)(12 * HZ))
#define I40IW_CM_HASHTABLE_SIZE 1024
#define I40IW_CM_TCP_TIMER_INTERVAL 3000
#define I40IW_CM_DEFAULT_MTU 1540
#define I40IW_CM_DEFAULT_FRAME_CNT 10
#define I40IW_CM_THREAD_STACK_SIZE 256
#define I40IW_CM_DEFAULT_RCV_WND 64240
#define I40IW_CM_DEFAULT_RCV_WND_SCALED 0x3fffc
#define I40IW_CM_DEFAULT_RCV_WND_SCALE 2
#define I40IW_CM_DEFAULT_FREE_PKTS 0x000A
#define I40IW_CM_FREE_PKT_LO_WATERMARK 2
#define I40IW_CM_DEFAULT_MSS 536
#define I40IW_CM_DEF_SEQ 0x159bf75f
#define I40IW_CM_DEF_LOCAL_ID 0x3b47
#define I40IW_CM_DEF_SEQ2 0x18ed5740
#define I40IW_CM_DEF_LOCAL_ID2 0xb807
#define MAX_CM_BUFFER (I40IW_MAX_IETF_SIZE + IETF_MAX_PRIV_DATA_LEN)
typedef u32 i40iw_addr_t;
#define i40iw_cm_tsa_context i40iw_qp_context
struct i40iw_qp;
/* cm node transition states */
enum i40iw_cm_node_state {
I40IW_CM_STATE_UNKNOWN,
I40IW_CM_STATE_INITED,
I40IW_CM_STATE_LISTENING,
I40IW_CM_STATE_SYN_RCVD,
I40IW_CM_STATE_SYN_SENT,
I40IW_CM_STATE_ONE_SIDE_ESTABLISHED,
I40IW_CM_STATE_ESTABLISHED,
I40IW_CM_STATE_ACCEPTING,
I40IW_CM_STATE_MPAREQ_SENT,
I40IW_CM_STATE_MPAREQ_RCVD,
I40IW_CM_STATE_MPAREJ_RCVD,
I40IW_CM_STATE_OFFLOADED,
I40IW_CM_STATE_FIN_WAIT1,
I40IW_CM_STATE_FIN_WAIT2,
I40IW_CM_STATE_CLOSE_WAIT,
I40IW_CM_STATE_TIME_WAIT,
I40IW_CM_STATE_LAST_ACK,
I40IW_CM_STATE_CLOSING,
I40IW_CM_STATE_LISTENER_DESTROYED,
I40IW_CM_STATE_CLOSED
};
enum mpa_frame_version {
IETF_MPA_V1 = 1,
IETF_MPA_V2 = 2
};
enum mpa_frame_key {
MPA_KEY_REQUEST,
MPA_KEY_REPLY
};
enum send_rdma0 {
SEND_RDMA_READ_ZERO = 1,
SEND_RDMA_WRITE_ZERO = 2
};
enum i40iw_tcpip_pkt_type {
I40IW_PKT_TYPE_UNKNOWN,
I40IW_PKT_TYPE_SYN,
I40IW_PKT_TYPE_SYNACK,
I40IW_PKT_TYPE_ACK,
I40IW_PKT_TYPE_FIN,
I40IW_PKT_TYPE_RST
};
/* CM context params */
struct i40iw_cm_tcp_context {
u8 client;
u32 loc_seq_num;
u32 loc_ack_num;
u32 rem_ack_num;
u32 rcv_nxt;
u32 loc_id;
u32 rem_id;
u32 snd_wnd;
u32 max_snd_wnd;
u32 rcv_wnd;
u32 mss;
u8 snd_wscale;
u8 rcv_wscale;
};
enum i40iw_cm_listener_state {
I40IW_CM_LISTENER_PASSIVE_STATE = 1,
I40IW_CM_LISTENER_ACTIVE_STATE = 2,
I40IW_CM_LISTENER_EITHER_STATE = 3
};
struct i40iw_cm_listener {
struct list_head list;
struct i40iw_cm_core *cm_core;
u8 loc_mac[ETH_ALEN];
u32 loc_addr[4];
u16 loc_port;
struct iw_cm_id *cm_id;
atomic_t ref_count;
struct i40iw_device *iwdev;
atomic_t pend_accepts_cnt;
int backlog;
enum i40iw_cm_listener_state listener_state;
u32 reused_node;
u8 user_pri;
u8 tos;
u16 vlan_id;
bool qhash_set;
bool ipv4;
struct list_head child_listen_list;
};
struct i40iw_kmem_info {
void *addr;
u32 size;
};
/* per connection node and node state information */
struct i40iw_cm_node {
u32 loc_addr[4], rem_addr[4];
u16 loc_port, rem_port;
u16 vlan_id;
enum i40iw_cm_node_state state;
u8 loc_mac[ETH_ALEN];
u8 rem_mac[ETH_ALEN];
atomic_t ref_count;
struct i40iw_qp *iwqp;
struct i40iw_device *iwdev;
struct i40iw_sc_dev *dev;
struct i40iw_cm_tcp_context tcp_cntxt;
struct i40iw_cm_core *cm_core;
struct i40iw_cm_node *loopbackpartner;
struct i40iw_timer_entry *send_entry;
struct i40iw_timer_entry *close_entry;
spinlock_t retrans_list_lock; /* cm transmit packet */
enum send_rdma0 send_rdma0_op;
u16 ird_size;
u16 ord_size;
u16 mpav2_ird_ord;
struct iw_cm_id *cm_id;
struct list_head list;
bool accelerated;
struct i40iw_cm_listener *listener;
int apbvt_set;
int accept_pend;
struct list_head timer_entry;
struct list_head reset_entry;
struct list_head teardown_entry;
atomic_t passive_state;
bool qhash_set;
u8 user_pri;
u8 tos;
bool ipv4;
bool snd_mark_en;
u16 lsmm_size;
enum mpa_frame_version mpa_frame_rev;
struct i40iw_kmem_info pdata;
union {
struct ietf_mpa_v1 mpa_frame;
struct ietf_mpa_v2 mpa_v2_frame;
};
u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];
struct i40iw_kmem_info mpa_hdr;
bool ack_rcvd;
};
/* structure for client or CM to fill when making CM api calls. */
/* - only need to set relevant data, based on op. */
struct i40iw_cm_info {
struct iw_cm_id *cm_id;
u16 loc_port;
u16 rem_port;
u32 loc_addr[4];
u32 rem_addr[4];
u16 vlan_id;
int backlog;
u8 user_pri;
u8 tos;
bool ipv4;
};
/* CM event codes */
enum i40iw_cm_event_type {
I40IW_CM_EVENT_UNKNOWN,
I40IW_CM_EVENT_ESTABLISHED,
I40IW_CM_EVENT_MPA_REQ,
I40IW_CM_EVENT_MPA_CONNECT,
I40IW_CM_EVENT_MPA_ACCEPT,
I40IW_CM_EVENT_MPA_REJECT,
I40IW_CM_EVENT_MPA_ESTABLISHED,
I40IW_CM_EVENT_CONNECTED,
I40IW_CM_EVENT_RESET,
I40IW_CM_EVENT_ABORTED
};
/* event to post to CM event handler */
struct i40iw_cm_event {
enum i40iw_cm_event_type type;
struct i40iw_cm_info cm_info;
struct work_struct event_work;
struct i40iw_cm_node *cm_node;
};
struct i40iw_cm_core {
struct i40iw_device *iwdev;
struct i40iw_sc_dev *dev;
struct list_head listen_nodes;
struct list_head accelerated_list;
struct list_head non_accelerated_list;
struct timer_list tcp_timer;
struct workqueue_struct *event_wq;
struct workqueue_struct *disconn_wq;
spinlock_t ht_lock; /* manage hash table */
spinlock_t listen_list_lock; /* listen list */
spinlock_t apbvt_lock; /*manage apbvt entries*/
unsigned long ports_in_use[BITS_TO_LONGS(MAX_PORTS)];
u64 stats_nodes_created;
u64 stats_nodes_destroyed;
u64 stats_listen_created;
u64 stats_listen_destroyed;
u64 stats_listen_nodes_created;
u64 stats_listen_nodes_destroyed;
u64 stats_loopbacks;
u64 stats_accepts;
u64 stats_rejects;
u64 stats_connect_errs;
u64 stats_passive_errs;
u64 stats_pkt_retrans;
u64 stats_backlog_drops;
};
int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
struct i40iw_puda_buf *sqbuf,
enum i40iw_timer_type type,
int send_retrans,
int close_when_complete);
int i40iw_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
int i40iw_reject(struct iw_cm_id *, const void *, u8);
int i40iw_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
int i40iw_create_listen(struct iw_cm_id *, int);
int i40iw_destroy_listen(struct iw_cm_id *);
int i40iw_cm_start(struct i40iw_device *);
int i40iw_cm_stop(struct i40iw_device *);
int i40iw_arp_table(struct i40iw_device *iwdev,
u32 *ip_addr,
bool ipv4,
u8 *mac_addr,
u32 action);
void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
u32 *ipaddr, bool ipv4, bool ifup);
void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
struct i40iw_cm_info *nfo,
bool disconnect_all);
bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port);
#endif /* I40IW_CM_H */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,821 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"
#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_vf.h"
#include "i40iw_virtchnl.h"
/**
 * i40iw_find_sd_index_limit - finds segment descriptor index limit
 * @hmc_info: pointer to the HMC configuration information structure
 * @type: type of HMC resources we're searching
 * @idx: starting index for the object
 * @cnt: number of objects we're trying to create
 * @sd_idx: pointer to return index of the segment descriptor in question
 * @sd_limit: pointer to return the maximum number of segment descriptors
 *
 * This function calculates the segment descriptor index and index limit
 * for the resource defined by i40iw_hmc_rsrc_type.
 */
static inline void i40iw_find_sd_index_limit(struct i40iw_hmc_info *hmc_info,
					     u32 type,
					     u32 idx,
					     u32 cnt,
					     u32 *sd_idx,
					     u32 *sd_limit)
{
	u64 fpm_start, fpm_end;

	/* FPM byte span covered by [idx, idx + cnt) objects of this type */
	fpm_start = hmc_info->hmc_obj[type].base +
		    hmc_info->hmc_obj[type].size * idx;
	fpm_end = fpm_start + hmc_info->hmc_obj[type].size * cnt;

	/* first SD touched, then one past the last SD touched */
	*sd_idx = (u32)(fpm_start / I40IW_HMC_DIRECT_BP_SIZE);
	*sd_limit = (u32)((fpm_end - 1) / I40IW_HMC_DIRECT_BP_SIZE) + 1;
}
/**
 * i40iw_find_pd_index_limit - finds page descriptor index limit
 * @hmc_info: pointer to the HMC configuration information struct
 * @type: HMC resource type we're examining
 * @idx: starting index for the object
 * @cnt: number of objects we're trying to create
 * @pd_idx: pointer to return page descriptor index
 * @pd_limit: pointer to return page descriptor index limit
 *
 * Same calculation as i40iw_find_sd_index_limit() but over 4K paged
 * backing pages: maps the object range onto page descriptors and
 * returns [*pd_idx, *pd_limit).
 */
static inline void i40iw_find_pd_index_limit(struct i40iw_hmc_info *hmc_info,
					     u32 type,
					     u32 idx,
					     u32 cnt,
					     u32 *pd_idx,
					     u32 *pd_limit)
{
	u64 obj_size = hmc_info->hmc_obj[type].size;
	u64 start = hmc_info->hmc_obj[type].base + obj_size * idx;
	u64 end = start + obj_size * cnt;

	*pd_idx = (u32)(start / I40IW_HMC_PAGED_BP_SIZE);
	*pd_limit = (u32)((end - 1) / I40IW_HMC_PAGED_BP_SIZE) + 1;
}
/**
 * i40iw_set_sd_entry - setup entry for sd programming
 * @pa: physical addr
 * @idx: sd index
 * @type: paged or direct sd
 * @entry: sd entry ptr
 *
 * Builds the SDDATA/SDCMD register pair for programming one valid sd:
 * the backing-page physical address, the max bp count, the paged/direct
 * type bit and the valid bit.
 */
static inline void i40iw_set_sd_entry(u64 pa,
				      u32 idx,
				      enum i40iw_sd_entry_type type,
				      struct update_sd_entry *entry)
{
	u64 sd_type = (type == I40IW_SD_TYPE_PAGED) ? 0 : 1;

	entry->data = pa |
		      (I40IW_HMC_MAX_BP_COUNT << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
		      (sd_type << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |
		      (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);
	/* bit 15 in the command word is set alongside the write bit —
	 * presumably a partition-select bit; mirrors the clear path
	 */
	entry->cmd = idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15);
}
/**
 * i40iw_clr_sd_entry - setup entry for sd clear
 * @idx: sd index
 * @type: paged or direct sd
 * @entry: sd entry ptr
 *
 * Same command layout as i40iw_set_sd_entry() but with no backing
 * address and the valid bit left clear, which tears the sd down.
 */
static inline void i40iw_clr_sd_entry(u32 idx, enum i40iw_sd_entry_type type,
				      struct update_sd_entry *entry)
{
	u64 sd_type = (type == I40IW_SD_TYPE_PAGED) ? 0 : 1;

	entry->data = (I40IW_HMC_MAX_BP_COUNT << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
		      (sd_type << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);
	entry->cmd = idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15);
}
/**
 * i40iw_hmc_sd_one - setup 1 sd entry for cqp
 * @dev: pointer to the device structure
 * @hmc_fn_id: hmc's function id
 * @pa: physical addr
 * @sd_idx: sd index
 * @type: paged or direct sd
 * @setsd: flag to set or clear sd
 *
 * Builds a single-entry sd update (set or clear) and hands it to the
 * cqp's sd-programming op.  Returns whatever process_cqp_sds reports.
 */
enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev,
					u8 hmc_fn_id,
					u64 pa, u32 sd_idx,
					enum i40iw_sd_entry_type type,
					bool setsd)
{
	struct i40iw_update_sds_info sdinfo = {
		.cnt = 1,
		.hmc_fn_id = hmc_fn_id,
	};

	if (setsd)
		i40iw_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
	else
		i40iw_clr_sd_entry(sd_idx, type, sdinfo.entry);

	return dev->cqp->process_cqp_sds(dev, &sdinfo);
}
/**
 * i40iw_hmc_sd_grp - setup group of sd entries for cqp
 * @dev: pointer to the device structure
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: sd index
 * @sd_cnt: number of sd entries
 * @setsd: flag to set or clear sd
 *
 * Walks sd_cnt segment descriptors starting at sd_index and batches
 * set/clear commands to the cqp, submitting a full batch every
 * I40IW_MAX_SD_ENTRIES and a final partial batch at the end.
 */
static enum i40iw_status_code i40iw_hmc_sd_grp(struct i40iw_sc_dev *dev,
					       struct i40iw_hmc_info *hmc_info,
					       u32 sd_index,
					       u32 sd_cnt,
					       bool setsd)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	struct i40iw_update_sds_info sdinfo;
	u64 pa;
	u32 i;
	enum i40iw_status_code ret_code = 0;
	memset(&sdinfo, 0, sizeof(sdinfo));
	sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
	for (i = sd_index; i < sd_index + sd_cnt; i++) {
		sd_entry = &hmc_info->sd_table.sd_entry[i];
		/* skip entries already in the requested state; note the
		 * !sd_entry test can never fire (address of an array element)
		 */
		if (!sd_entry ||
		    (!sd_entry->valid && setsd) ||
		    (sd_entry->valid && !setsd))
			continue;
		if (setsd) {
			/* paged sds program the pd page address,
			 * direct sds the backing page itself
			 */
			pa = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
			    sd_entry->u.pd_table.pd_page_addr.pa :
			    sd_entry->u.bp.addr.pa;
			i40iw_set_sd_entry(pa, i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		} else {
			i40iw_clr_sd_entry(i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		}
		sdinfo.cnt++;
		/* batch is full — flush it to the cqp before continuing */
		if (sdinfo.cnt == I40IW_MAX_SD_ENTRIES) {
			ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
			if (ret_code) {
				i40iw_debug(dev, I40IW_DEBUG_HMC,
					    "i40iw_hmc_sd_grp: sd_programming failed err=%d\n",
					    ret_code);
				return ret_code;
			}
			sdinfo.cnt = 0;
		}
	}
	/* submit any leftover partial batch */
	if (sdinfo.cnt)
		ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
	return ret_code;
}
/**
 * i40iw_vfdev_from_fpm - return vf dev ptr for hmc function id
 * @dev: pointer to the device structure
 * @hmc_fn_id: hmc's function id
 *
 * Linear search of the enabled-VF slots for the one whose pmf_index
 * matches @hmc_fn_id.  Returns NULL when no slot matches.
 */
struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
{
	u16 i;

	for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
		struct i40iw_vfdev *vf = dev->vf_dev[i];

		if (vf && (u8)vf->pmf_index == hmc_fn_id)
			return vf;
	}
	return NULL;
}
/**
 * i40iw_vf_hmcinfo_from_fpm - get ptr to hmc for func_id
 * @dev: pointer to the device structure
 * @hmc_fn_id: hmc's function id
 *
 * Same lookup as i40iw_vfdev_from_fpm() but hands back the embedded
 * hmc_info of the matching VF, or NULL when none matches.
 */
struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,
						 u8 hmc_fn_id)
{
	u16 i;

	for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
		struct i40iw_vfdev *vf = dev->vf_dev[i];

		if (vf && (u8)vf->pmf_index == hmc_fn_id)
			return &vf->hmc_info;
	}
	return NULL;
}
/**
* i40iw_hmc_finish_add_sd_reg - program sd entries for objects
* @dev: pointer to the device structure
* @info: create obj info
*/
static enum i40iw_status_code i40iw_hmc_finish_add_sd_reg(struct i40iw_sc_dev *dev,
struct i40iw_hmc_create_obj_info *info)
{
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
if ((info->start_idx + info->count) >
info->hmc_info->hmc_obj[info->rsrc_type].cnt)
return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
if (!info->add_sd_cnt)
return 0;
return i40iw_hmc_sd_grp(dev, info->hmc_info,
info->hmc_info->sd_indexes[0],
info->add_sd_cnt, true);
}
/**
 * i40iw_sc_create_hmc_obj - allocate backing store for hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to i40iw_hmc_iw_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.  On a VF the request is forwarded over the
 * virtual channel instead.  On any mid-way failure the sds/pds created
 * so far are unwound before returning.
 */
enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,
					       struct i40iw_hmc_create_obj_info *info)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	u32 sd_idx, sd_lmt;
	u32 pd_idx = 0, pd_lmt = 0;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 i, j;
	bool pd_error = false;
	enum i40iw_status_code ret_code = 0;
	/* range-check the request against the provisioned object count */
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error type %u, start = %u, req cnt %u, cnt = %u\n",
			    __func__, info->rsrc_type, info->start_idx, info->count,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
	}
	/* VFs cannot program sds directly; ask the PF over the vchnl */
	if (!dev->is_pf)
		return i40iw_vchnl_vf_add_hmc_objs(dev, info->rsrc_type, 0, info->count);
	i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count,
				  &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		return I40IW_ERR_INVALID_SD_INDEX;
	}
	i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx, &pd_lmt);
	/* for each sd in range: ensure it exists, then back its pds */
	for (j = sd_idx; j < sd_lmt; j++) {
		ret_code = i40iw_add_sd_table_entry(dev->hw, info->hmc_info,
						    j,
						    info->entry_type,
						    I40IW_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		/* populate pds only for paged sds owned by this function;
		 * PBLE pages are backed elsewhere
		 */
		if ((sd_entry->entry_type == I40IW_SD_TYPE_PAGED) &&
		    ((dev->hmc_info == info->hmc_info) &&
		     (info->rsrc_type != I40IW_HMC_IW_PBLE))) {
			/* clip the pd range to the pds belonging to sd j */
			pd_idx1 = max(pd_idx, (j * I40IW_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      (j + 1) * I40IW_HMC_MAX_BP_COUNT);
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40iw_add_pd_table_entry(dev->hw, info->hmc_info,
								    i, NULL);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				/* roll back the pds added for this sd */
				while (i && (i > pd_idx1)) {
					i40iw_remove_pd_bp(dev->hw, info->hmc_info, (i - 1),
							   info->is_pf);
					i--;
				}
			}
		}
		if (sd_entry->valid)
			continue;
		/* stage this new sd for cqp programming in finish_add */
		info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
		info->add_sd_cnt++;
		sd_entry->valid = true;
	}
	return i40iw_hmc_finish_add_sd_reg(dev, info);
exit_sd_error:
	/* unwind every sd created in this call, newest first */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40IW_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      (j - 1) * I40IW_HMC_MAX_BP_COUNT);
			pd_lmt1 = min(pd_lmt, (j * I40IW_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				i40iw_prep_remove_pd_page(info->hmc_info, i);
			break;
		case I40IW_SD_TYPE_DIRECT:
			i40iw_prep_remove_pd_page(info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40IW_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}
	return ret_code;
}
/**
 * i40iw_finish_del_sd_reg - delete sd entries for objects
 * @dev: pointer to the device structure
 * @info: dele obj info
 * @reset: true if called before reset
 *
 * Clears the staged sds in hardware via the cqp (PF only, and skipped
 * when the device is about to reset) and frees the dma memory backing
 * each staged sd.
 */
static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev,
						      struct i40iw_hmc_del_obj_info *info,
						      bool reset)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	enum i40iw_status_code ret_code = 0;
	u32 i, sd_idx;
	struct i40iw_dma_mem *mem;
	/* hardware teardown is pointless right before a reset */
	if (dev->is_pf && !reset)
		ret_code = i40iw_hmc_sd_grp(dev, info->hmc_info,
					    info->hmc_info->sd_indexes[0],
					    info->del_sd_cnt, false);
	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd sd_grp\n", __func__);
	/* free the backing memory even if the cqp clear failed */
	for (i = 0; i < info->del_sd_cnt; i++) {
		sd_idx = info->hmc_info->sd_indexes[i];
		sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
		/* this NULL test can never fire (address of array element) */
		if (!sd_entry)
			continue;
		mem = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
			&sd_entry->u.pd_table.pd_page_addr :
			&sd_entry->u.bp.addr;
		if (!mem || !mem->va)
			i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd mem\n", __func__);
		else
			i40iw_free_dma_mem(dev->hw, mem);
	}
	return ret_code;
}
/**
 * i40iw_sc_del_hmc_obj - remove pe hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to i40iw_hmc_del_obj_info struct
 * @reset: true if called before reset
 *
 * This will de-populate the SDs and PDs. It frees
 * the memory for PDS and backing storage. After this function is returned,
 * caller should deallocate memory allocated previously for
 * book-keeping information about PDs and backing storage.
 */
enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_del_obj_info *info,
					    bool reset)
{
	struct i40iw_hmc_pd_table *pd_table;
	u32 sd_idx, sd_lmt;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 i, j;
	enum i40iw_status_code ret_code = 0;
	/* validate the object range against the provisioned count */
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
			    __func__, info->start_idx, info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
	}
	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
			    __func__, info->start_idx, info->count,
			    info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
	}
	/* VFs forward to the PF; PBLE continues locally even on a VF */
	if (!dev->is_pf) {
		ret_code = i40iw_vchnl_vf_del_hmc_obj(dev, info->rsrc_type, 0,
						      info->count);
		if (info->rsrc_type != I40IW_HMC_IW_PBLE)
			return ret_code;
	}
	/* pass 1: remove the backing pages of every valid pd in range */
	i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx, &pd_lmt);
	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40IW_HMC_PD_CNT_IN_SD;
		/* direct sds have no pds to remove */
		if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
		    I40IW_SD_TYPE_PAGED)
			continue;
		rel_pd_idx = j % I40IW_HMC_PD_CNT_IN_SD;
		pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40iw_remove_pd_bp(dev->hw, info->hmc_info, j,
						      info->is_pf);
			if (ret_code) {
				i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error\n", __func__);
				return ret_code;
			}
		}
	}
	/* pass 2: stage the now-empty sds for deletion */
	i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error invalid sd_idx\n", __func__);
		return I40IW_ERR_INVALID_SD_INDEX;
	}
	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40IW_SD_TYPE_DIRECT:
			ret_code = i40iw_prep_remove_sd_bp(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
				info->del_sd_cnt++;
			}
			break;
		case I40IW_SD_TYPE_PAGED:
			ret_code = i40iw_prep_remove_pd_page(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
				info->del_sd_cnt++;
			}
			break;
		default:
			break;
		}
	}
	/* pass 3: clear staged sds in hardware and free their memory */
	return i40iw_finish_del_sd_reg(dev, info, reset);
}
/**
 * i40iw_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 *
 * Allocates and installs the sd if it is not yet valid: a 4K pd page
 * plus a shadow pd-entry array for paged sds, or a @direct_mode_sz
 * backing page for direct sds.  Bumps the sd table refcount on first
 * install and the bp refcount for direct sds on every call.
 */
enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
						struct i40iw_hmc_info *hmc_info,
						u32 sd_index,
						enum i40iw_sd_entry_type type,
						u64 direct_mode_sz)
{
	enum i40iw_status_code ret_code = 0;
	struct i40iw_hmc_sd_entry *sd_entry;
	bool dma_mem_alloc_done = false;
	struct i40iw_dma_mem mem;
	u64 alloc_len;
	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (type == I40IW_SD_TYPE_PAGED)
			alloc_len = I40IW_HMC_PAGED_BP_SIZE;
		else
			alloc_len = direct_mode_sz;
		/* allocate a 4K pd page or 2M backing page */
		ret_code = i40iw_allocate_dma_mem(hw, &mem, alloc_len,
						  I40IW_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;
		dma_mem_alloc_done = true;
		if (type == I40IW_SD_TYPE_PAGED) {
			/* shadow array tracking each of the 512 pds in this sd */
			ret_code = i40iw_allocate_virt_mem(hw,
							   &sd_entry->u.pd_table.pd_entry_virt_mem,
							   sizeof(struct i40iw_hmc_pd_entry) * 512);
			if (ret_code)
				goto exit;
			sd_entry->u.pd_table.pd_entry = (struct i40iw_hmc_pd_entry *)
							sd_entry->u.pd_table.pd_entry_virt_mem.va;
			memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, sizeof(struct i40iw_dma_mem));
		} else {
			memcpy(&sd_entry->u.bp.addr, &mem, sizeof(struct i40iw_dma_mem));
			sd_entry->u.bp.sd_pd_index = sd_index;
		}
		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
		I40IW_INC_SD_REFCNT(&hmc_info->sd_table);
	}
	if (sd_entry->entry_type == I40IW_SD_TYPE_DIRECT)
		I40IW_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
	/* release the dma page if the virt-mem allocation failed after it */
	if (ret_code)
		if (dma_mem_alloc_done)
			i40iw_free_dma_mem(hw, &mem);
	return ret_code;
}
/**
 * i40iw_add_pd_table_entry - Adds page descriptor to the specified table
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
 *
 * This function:
 *	1. Initializes the pd entry
 *	2. Adds pd_entry in the pd_table
 *	3. Mark the entry valid in i40iw_hmc_pd_entry structure
 *	4. Initializes the pd_entry's ref count to 1
 * assumptions:
 *	1. The memory for pd should be pinned down, physically contiguous and
 *	   aligned on 4K boundary and zeroed memory.
 *	2. It should be 4K in size.
 */
enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,
						struct i40iw_hmc_info *hmc_info,
						u32 pd_index,
						struct i40iw_dma_mem *rsrc_pg)
{
	enum i40iw_status_code ret_code = 0;
	struct i40iw_hmc_pd_table *pd_table;
	struct i40iw_hmc_pd_entry *pd_entry;
	struct i40iw_dma_mem mem;
	struct i40iw_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;
	if (pd_index / I40IW_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
		return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
	sd_idx = (pd_index / I40IW_HMC_PD_CNT_IN_SD);
	/* direct sds have no pd table; nothing to add */
	if (hmc_info->sd_table.sd_entry[sd_idx].entry_type != I40IW_SD_TYPE_PAGED)
		return 0;
	rel_pd_idx = (pd_index % I40IW_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		/* use the caller's page if supplied, else allocate one */
		if (rsrc_pg) {
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			ret_code = i40iw_allocate_dma_mem(hw, page,
							  I40IW_HMC_PAGED_BP_SIZE,
							  I40IW_HMC_PD_BP_BUF_ALIGNMENT);
			if (ret_code)
				return ret_code;
			pd_entry->rsrc_pg = false;
		}
		memcpy(&pd_entry->bp.addr, page, sizeof(struct i40iw_dma_mem));
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = I40IW_SD_TYPE_PAGED;
		/* write pa | valid bit into the pd slot of the sd's pd page */
		page_desc = page->pa | 0x1;
		pd_addr = (u64 *)pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;
		memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		I40IW_INC_PD_REFCNT(pd_table);
		/* invalidate the hardware's cached copy of this pd */
		if (hmc_info->hmc_fn_id < I40IW_FIRST_VF_FPM_ID)
			I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, rel_pd_idx);
		else if (hw->hmc.hmc_fn_id != hmc_info->hmc_fn_id)
			I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, rel_pd_idx,
						   hmc_info->hmc_fn_id);
	}
	I40IW_INC_BP_REFCNT(&pd_entry->bp);
	return 0;
}
/**
 * i40iw_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: distinguishes a VF from a PF
 *
 * This function:
 *	1. Marks the entry in pd table (for paged address mode) or in sd table
 *	   (for direct address mode) invalid.
 *	2. Write to register PMPDINV to invalidate the backing page in FV cache
 *	3. Decrement the ref count for the pd _entry
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 */
enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
					  struct i40iw_hmc_info *hmc_info,
					  u32 idx,
					  bool is_pf)
{
	struct i40iw_hmc_pd_entry *pd_entry;
	struct i40iw_hmc_pd_table *pd_table;
	struct i40iw_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	struct i40iw_dma_mem *mem;
	u64 *pd_addr;
	sd_idx = idx / I40IW_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40IW_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt)
		return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (sd_entry->entry_type != I40IW_SD_TYPE_PAGED)
		return I40IW_ERR_INVALID_SD_TYPE;
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	/* other users still reference this backing page — keep it */
	I40IW_DEC_BP_REFCNT(&pd_entry->bp);
	if (pd_entry->bp.ref_cnt)
		return 0;
	pd_entry->valid = false;
	I40IW_DEC_PD_REFCNT(pd_table);
	/* zero the pd slot in the sd's pd page */
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));
	/* invalidate the hardware's cached copy of this pd */
	if (is_pf)
		I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
	else
		I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx,
					   hmc_info->hmc_fn_id);
	/* only free pages we allocated; caller-supplied pages are theirs */
	if (!pd_entry->rsrc_pg) {
		mem = &pd_entry->bp.addr;
		/* the !mem test can never fire (address of a member) */
		if (!mem || !mem->va)
			return I40IW_ERR_PARAM;
		i40iw_free_dma_mem(hw, mem);
	}
	/* last pd gone — release the shadow pd-entry array too */
	if (!pd_table->ref_cnt)
		i40iw_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
	return 0;
}
/**
 * i40iw_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * Drops one bp reference on the direct sd; if other users remain the
 * sd is left alone and I40IW_ERR_NOT_READY is returned.  Otherwise the
 * sd is marked invalid and the sd-table refcount is dropped.
 */
enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx)
{
	struct i40iw_hmc_sd_entry *sd_entry = &hmc_info->sd_table.sd_entry[idx];

	I40IW_DEC_BP_REFCNT(&sd_entry->u.bp);
	if (sd_entry->u.bp.ref_cnt)
		return I40IW_ERR_NOT_READY;

	I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
	sd_entry->valid = false;
	return 0;
}
/**
 * i40iw_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 *
 * Refuses with I40IW_ERR_NOT_READY while any pd in the table is still
 * in use; otherwise invalidates the paged sd and drops the sd-table
 * refcount.
 */
enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info,
						 u32 idx)
{
	struct i40iw_hmc_sd_entry *sd_entry = &hmc_info->sd_table.sd_entry[idx];

	if (sd_entry->u.pd_table.ref_cnt)
		return I40IW_ERR_NOT_READY;

	sd_entry->valid = false;
	I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
	return 0;
}
/**
 * i40iw_pf_init_vfhmc - initialize hmc_info for a vf instance
 * @dev: pointer to i40iw_dev struct
 * @vf_hmc_fn_id: hmc function id of vf driver
 * @vf_cnt_array: array of cnt values of iwarp hmc objects, or NULL to
 *		  use the profile maximums
 *
 * Called by pf driver to initialize hmc_info for vf driver instance.
 */
enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev,
					   u8 vf_hmc_fn_id,
					   u32 *vf_cnt_array)
{
	struct i40iw_hmc_info *hmc_info;
	enum i40iw_status_code ret_code;
	u32 i;

	/* vf fpm ids occupy a fixed window above the pf ids */
	if (vf_hmc_fn_id < I40IW_FIRST_VF_FPM_ID ||
	    vf_hmc_fn_id >= I40IW_FIRST_VF_FPM_ID + I40IW_MAX_PE_ENABLED_VF_COUNT) {
		i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: invalid vf_hmc_fn_id  0x%x\n",
			    __func__, vf_hmc_fn_id);
		return I40IW_ERR_INVALID_HMCFN_ID;
	}

	ret_code = i40iw_sc_init_iw_hmc(dev, vf_hmc_fn_id);
	if (ret_code)
		return ret_code;

	hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, vf_hmc_fn_id);
	for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) {
		if (vf_cnt_array)
			hmc_info->hmc_obj[i].cnt = vf_cnt_array[i - I40IW_HMC_IW_QP];
		else
			hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
	}
	return 0;
}

View File

@ -1,241 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_HMC_H
#define I40IW_HMC_H
#include "i40iw_d.h"
struct i40iw_hw;
enum i40iw_status_code;
#define I40IW_HMC_MAX_BP_COUNT 512
#define I40IW_MAX_SD_ENTRIES 11
#define I40IW_HW_DBG_HMC_INVALID_BP_MARK 0xCA
#define I40IW_HMC_INFO_SIGNATURE 0x484D5347
#define I40IW_HMC_PD_CNT_IN_SD 512
#define I40IW_HMC_DIRECT_BP_SIZE 0x200000
#define I40IW_HMC_MAX_SD_COUNT 4096
#define I40IW_HMC_PAGED_BP_SIZE 4096
#define I40IW_HMC_PD_BP_BUF_ALIGNMENT 4096
#define I40IW_FIRST_VF_FPM_ID 16
#define FPM_MULTIPLIER 1024
#define I40IW_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
#define I40IW_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
#define I40IW_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
#define I40IW_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
#define I40IW_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
#define I40IW_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
/**
* I40IW_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
* @hw: pointer to our hw struct
* @sd_idx: segment descriptor index
* @pd_idx: page descriptor index
*/
#define I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
i40iw_wr32((hw), I40E_PFHMC_PDINV, \
(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
(0x1 << I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) | \
((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
/**
* I40IW_INVALIDATE_VF_HMC_PD - Invalidates the pd cache in the hardware
* @hw: pointer to our hw struct
* @sd_idx: segment descriptor index
* @pd_idx: page descriptor index
* @hmc_fn_id: VF's function id
*/
#define I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
i40iw_wr32(hw, I40E_GLHMC_VFPDINV(hmc_fn_id - I40IW_FIRST_VF_FPM_ID), \
((sd_idx << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
(pd_idx << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
/* per-resource-type HMC object accounting */
struct i40iw_hmc_obj_info {
	u64 base;	/* base FPM address of this object type */
	u32 max_cnt;	/* max objects supported by the function profile */
	u32 cnt;	/* objects actually provisioned */
	u64 size;	/* size of one object in bytes */
};
enum i40iw_sd_entry_type {
	I40IW_SD_TYPE_INVALID = 0,
	I40IW_SD_TYPE_PAGED = 1,	/* sd points at a 4K page of pds */
	I40IW_SD_TYPE_DIRECT = 2	/* sd maps a 2M backing page directly */
};
/* one backing page and its bookkeeping */
struct i40iw_hmc_bp {
	enum i40iw_sd_entry_type entry_type;
	struct i40iw_dma_mem addr;	/* dma memory of the backing page */
	u32 sd_pd_index;		/* owning sd/pd slot */
	u32 ref_cnt;			/* current users of this page */
};
/* shadow state for one pd inside a paged sd */
struct i40iw_hmc_pd_entry {
	struct i40iw_hmc_bp bp;
	u32 sd_index;
	bool rsrc_pg;	/* true when the page was supplied by the caller */
	bool valid;
};
/* the pd page of a paged sd plus its shadow entry array */
struct i40iw_hmc_pd_table {
	struct i40iw_dma_mem pd_page_addr;	/* the 4K pd page itself */
	struct i40iw_hmc_pd_entry *pd_entry;	/* shadow array (512 entries) */
	struct i40iw_virt_mem pd_entry_virt_mem;	/* allocation backing pd_entry */
	u32 ref_cnt;	/* valid pds in this table */
	u32 sd_index;
};
/* one segment descriptor: a pd table (paged) or a direct bp */
struct i40iw_hmc_sd_entry {
	enum i40iw_sd_entry_type entry_type;
	bool valid;
	union {
		struct i40iw_hmc_pd_table pd_table;
		struct i40iw_hmc_bp bp;
	} u;
};
struct i40iw_hmc_sd_table {
	struct i40iw_virt_mem addr;	/* allocation backing sd_entry */
	u32 sd_cnt;			/* total sds in the table */
	u32 ref_cnt;			/* sds currently in use */
	struct i40iw_hmc_sd_entry *sd_entry;
};
/* per-function HMC configuration and sd/pd bookkeeping */
struct i40iw_hmc_info {
	u32 signature;
	u8 hmc_fn_id;		/* >= I40IW_FIRST_VF_FPM_ID for VFs */
	u16 first_sd_index;
	struct i40iw_hmc_obj_info *hmc_obj;	/* indexed by i40iw_hmc_rsrc_type */
	struct i40iw_virt_mem hmc_obj_virt_mem;
	struct i40iw_hmc_sd_table sd_table;
	u16 sd_indexes[I40IW_HMC_MAX_SD_COUNT];	/* sds staged for cqp add/del */
};
/* one sd-program command/data register pair for the cqp */
struct update_sd_entry {
	u64 cmd;
	u64 data;
};
struct i40iw_update_sds_info {
	u32 cnt;	/* valid entries in entry[] */
	u8 hmc_fn_id;
	struct update_sd_entry entry[I40IW_MAX_SD_ENTRIES];
};
struct i40iw_ccq_cqe_info;
/* completion bookkeeping for hmc function manipulation */
struct i40iw_hmc_fcn_info {
	void (*callback_fcn)(struct i40iw_sc_dev *, void *,
			     struct i40iw_ccq_cqe_info *);
	void *cqp_callback_param;
	u32 vf_id;
	u16 iw_vf_idx;
	bool free_fcn;
};
/* HMC object types; values index the hmc_obj info array */
enum i40iw_hmc_rsrc_type {
	I40IW_HMC_IW_QP = 0,
	I40IW_HMC_IW_CQ = 1,
	I40IW_HMC_IW_SRQ = 2,
	I40IW_HMC_IW_HTE = 3,
	I40IW_HMC_IW_ARP = 4,
	I40IW_HMC_IW_APBVT_ENTRY = 5,
	I40IW_HMC_IW_MR = 6,
	I40IW_HMC_IW_XF = 7,
	I40IW_HMC_IW_XFFL = 8,
	I40IW_HMC_IW_Q1 = 9,
	I40IW_HMC_IW_Q1FL = 10,
	I40IW_HMC_IW_TIMER = 11,
	I40IW_HMC_IW_FSIMC = 12,
	I40IW_HMC_IW_FSIAV = 13,
	I40IW_HMC_IW_PBLE = 14,
	I40IW_HMC_IW_MAX = 15,
};
/* request block for i40iw_sc_create_hmc_obj() */
struct i40iw_hmc_create_obj_info {
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_virt_mem add_sd_virt_mem;
	u32 rsrc_type;	/* enum i40iw_hmc_rsrc_type */
	u32 start_idx;	/* first object index to back */
	u32 count;	/* number of objects */
	u32 add_sd_cnt;	/* out: sds newly staged in sd_indexes[] */
	enum i40iw_sd_entry_type entry_type;
	bool is_pf;
};
/* request block for i40iw_sc_del_hmc_obj() */
struct i40iw_hmc_del_obj_info {
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_virt_mem del_sd_virt_mem;
	u32 rsrc_type;
	u32 start_idx;
	u32 count;
	u32 del_sd_cnt;	/* out: sds staged for deletion */
	bool is_pf;
};
enum i40iw_status_code i40iw_copy_dma_mem(struct i40iw_hw *hw, void *dest_buf,
struct i40iw_dma_mem *src_mem, u64 src_offset, u64 size);
enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,
struct i40iw_hmc_create_obj_info *info);
enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,
struct i40iw_hmc_del_obj_info *info,
bool reset);
enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev, u8 hmc_fn_id,
u64 pa, u32 sd_idx, enum i40iw_sd_entry_type type,
bool setsd);
enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
struct i40iw_update_sds_info *info);
struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id);
struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,
u8 hmc_fn_id);
enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
struct i40iw_hmc_info *hmc_info, u32 sd_index,
enum i40iw_sd_entry_type type, u64 direct_mode_sz);
enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,
struct i40iw_hmc_info *hmc_info, u32 pd_index,
struct i40iw_dma_mem *rsrc_pg);
enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
struct i40iw_hmc_info *hmc_info, u32 idx, bool is_pf);
enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx);
enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info, u32 idx);
#define ENTER_SHARED_FUNCTION()
#define EXIT_SHARED_FUNCTION()
#endif /* I40IW_HMC_H */

View File

@ -1,851 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "i40iw.h"
/**
 * i40iw_initialize_hw_resources - initialize hw resource during open
 * @iwdev: iwarp device
 *
 * Sizes the qp/cq/mr/pd/arp resource tables from the HMC object counts,
 * allocates one contiguous zeroed region carved up into the arp table,
 * the allocation bitmaps and the qp pointer table, and reserves the
 * indexes used internally (0 everywhere, plus ILQ/IEQ slots).
 *
 * Returns 0 on success.  NOTE(review): the failure path returns -ENOMEM
 * through a u32 return type; callers appear to treat any non-zero value
 * as failure, so this works, but the sign is lost — confirm at call sites.
 */
u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev)
{
	unsigned long num_pds;
	u32 resources_size;
	u32 max_mr;
	u32 max_qp;
	u32 max_cq;
	u32 arp_table_size;
	u32 mrdrvbits;
	void *resource_ptr;
	/* limits come from the HMC profile negotiated for this function */
	max_qp = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt;
	max_cq = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	max_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt;
	arp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt;
	iwdev->max_cqe = 0xFFFFF;
	num_pds = I40IW_MAX_PDS;
	/* single allocation: arp table + five bitmaps + qp pointer table */
	resources_size = sizeof(struct i40iw_arp_entry) * arp_table_size;
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq);
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds);
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size);
	resources_size += sizeof(struct i40iw_qp **) * max_qp;
	iwdev->mem_resources = kzalloc(resources_size, GFP_KERNEL);
	if (!iwdev->mem_resources)
		return -ENOMEM;
	iwdev->max_qp = max_qp;
	iwdev->max_mr = max_mr;
	iwdev->max_cq = max_cq;
	iwdev->max_pd = num_pds;
	iwdev->arp_table_size = arp_table_size;
	iwdev->arp_table = (struct i40iw_arp_entry *)iwdev->mem_resources;
	resource_ptr = iwdev->mem_resources + (sizeof(struct i40iw_arp_entry) * arp_table_size);
	iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
	    IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_MGT_EXTENSIONS;
	/* carve the bitmaps and qp table out of the single allocation */
	iwdev->allocated_qps = resource_ptr;
	iwdev->allocated_cqs = &iwdev->allocated_qps[BITS_TO_LONGS(max_qp)];
	iwdev->allocated_mrs = &iwdev->allocated_cqs[BITS_TO_LONGS(max_cq)];
	iwdev->allocated_pds = &iwdev->allocated_mrs[BITS_TO_LONGS(max_mr)];
	iwdev->allocated_arps = &iwdev->allocated_pds[BITS_TO_LONGS(num_pds)];
	iwdev->qp_table = (struct i40iw_qp **)(&iwdev->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
	/* index 0 is reserved in every resource space */
	set_bit(0, iwdev->allocated_mrs);
	set_bit(0, iwdev->allocated_qps);
	set_bit(0, iwdev->allocated_cqs);
	set_bit(0, iwdev->allocated_pds);
	set_bit(0, iwdev->allocated_arps);
	/* Following for ILQ/IEQ */
	set_bit(1, iwdev->allocated_qps);
	set_bit(1, iwdev->allocated_cqs);
	set_bit(1, iwdev->allocated_pds);
	set_bit(2, iwdev->allocated_cqs);
	set_bit(2, iwdev->allocated_pds);
	spin_lock_init(&iwdev->resource_lock);
	spin_lock_init(&iwdev->qptable_lock);
	/* stag index mask has a minimum of 14 bits */
	mrdrvbits = 24 - max(get_count_order(iwdev->max_mr), 14);
	iwdev->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
	return 0;
}
/**
 * i40iw_cqp_ce_handler - handle cqp completions
 * @iwdev: iwarp device
 * @cq: cq for cqp completions
 * @arm: flag to arm after completions
 *
 * Drains all available cqes from the cqp completion queue, copies the
 * completion status into each originating cqp_request, wakes blocked
 * waiters or runs the async callback, and optionally re-arms the cq.
 */
static void i40iw_cqp_ce_handler(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq, bool arm)
{
	struct i40iw_cqp_request *cqp_request;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	u32 cqe_count = 0;
	struct i40iw_ccq_cqe_info info;
	int ret;
	do {
		memset(&info, 0, sizeof(info));
		/* non-zero here means the queue is drained — stop */
		ret = dev->ccq_ops->ccq_get_cqe_info(cq, &info);
		if (ret)
			break;
		/* scratch carries the originating request pointer */
		cqp_request = (struct i40iw_cqp_request *)(unsigned long)info.scratch;
		if (info.error)
			i40iw_pr_err("opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
				     info.op_code, info.maj_err_code, info.min_err_code);
		if (cqp_request) {
			cqp_request->compl_info.maj_err_code = info.maj_err_code;
			cqp_request->compl_info.min_err_code = info.min_err_code;
			cqp_request->compl_info.op_ret_val = info.op_ret_val;
			cqp_request->compl_info.error = info.error;
			if (cqp_request->waiting) {
				/* synchronous caller is blocked — wake it */
				cqp_request->request_done = true;
				wake_up(&cqp_request->waitq);
				i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
			} else {
				if (cqp_request->callback_fcn)
					cqp_request->callback_fcn(cqp_request, 1);
				i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
			}
		}
		cqe_count++;
	} while (1);
	/* only re-arm if we actually consumed completions */
	if (arm && cqe_count) {
		i40iw_process_bh(dev);
		dev->ccq_ops->ccq_arm(cq);
	}
}
/**
 * i40iw_iwarp_ce_handler - handle iwarp completions
 * @iwdev: iwarp device
 * @iwcq: iwarp cq receiving event
 *
 * Dispatch the completion event to the verbs completion handler
 * registered on the CQ, if one is installed.
 */
static void i40iw_iwarp_ce_handler(struct i40iw_device *iwdev,
				   struct i40iw_sc_cq *iwcq)
{
	struct i40iw_cq *cq = iwcq->back_cq;

	if (!cq->ibcq.comp_handler)
		return;

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
/**
 * i40iw_puda_ce_handler - handle puda completion events
 * @iwdev: iwarp device
 * @cq: puda completion q for event
 *
 * Poll PUDA (ILQ/IEQ) completions until the queue is empty or an
 * error is seen, then re-arm the CQ.
 */
static void i40iw_puda_ce_handler(struct i40iw_device *iwdev,
				  struct i40iw_sc_cq *cq)
{
	/* No cast needed: sc_dev is embedded in i40iw_device, matching
	 * how the sibling CE handlers in this file take the pointer.
	 */
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;
	u32 compl_error;

	do {
		status = i40iw_puda_poll_completion(dev, cq, &compl_error);
		if (status == I40IW_ERR_QUEUE_EMPTY)
			break;
		if (status) {
			i40iw_pr_err("puda status = %d\n", status);
			break;
		}
		if (compl_error) {
			i40iw_pr_err("puda compl_err =0x%x\n", compl_error);
			break;
		}
	} while (1);

	dev->ccq_ops->ccq_arm(cq);
}
/**
 * i40iw_process_ceq - handle ceq for completions
 * @iwdev: iwarp device
 * @ceq: ceq having cq for completion
 *
 * Walk all CQs reported on this CEQ and route each one to the
 * handler that matches its type (control, iWARP, or PUDA ILQ/IEQ).
 */
void i40iw_process_ceq(struct i40iw_device *iwdev, struct i40iw_ceq *ceq)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_sc_ceq *sc_ceq = &ceq->sc_ceq;
	struct i40iw_sc_cq *cq;

	for (;;) {
		cq = dev->ceq_ops->process_ceq(dev, sc_ceq);
		if (!cq)
			break;

		switch (cq->cq_type) {
		case I40IW_CQ_TYPE_CQP:
			/* control path: arm after draining */
			i40iw_cqp_ce_handler(iwdev, cq, true);
			break;
		case I40IW_CQ_TYPE_IWARP:
			i40iw_iwarp_ce_handler(iwdev, cq);
			break;
		case I40IW_CQ_TYPE_ILQ:
		case I40IW_CQ_TYPE_IEQ:
			i40iw_puda_ce_handler(iwdev, cq);
			break;
		default:
			break;
		}
	}
}
/**
 * i40iw_next_iw_state - modify qp state
 * @iwqp: iwarp qp to modify
 * @state: next state for qp
 * @del_hash: del hash
 * @term: term message flags (I40IWQP_TERM_SEND_TERM_ONLY / _FIN_ONLY)
 * @termlen: length of term message
 *
 * Build a modify-QP request moving the QP to @state and submit it
 * (non-waiting) to the hardware.
 */
void i40iw_next_iw_state(struct i40iw_qp *iwqp,
			 u8 state,
			 u8 del_hash,
			 u8 term,
			 u8 termlen)
{
	struct i40iw_modify_qp_info info;

	memset(&info, 0, sizeof(info));
	info.next_iwarp_state = state;
	info.remove_hash_idx = del_hash;
	info.cq_num_valid = true;
	info.arp_cache_idx_valid = true;
	info.termlen = termlen;
	/* suppress TERM/FIN unless the corresponding flag requests it */
	info.dont_send_term = !(term & I40IWQP_TERM_SEND_TERM_ONLY);
	info.dont_send_fin = !(term & I40IWQP_TERM_SEND_FIN_ONLY);
	/* a terminated QP moving to ERROR also resets the TCP connection */
	info.reset_tcp_conn = iwqp->sc_qp.term_flags &&
			      (state == I40IW_QP_STATE_ERROR);

	iwqp->hw_iwarp_state = state;
	i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
}
/**
 * i40iw_process_aeq - handle aeq events
 * @iwdev: iwarp device
 *
 * Drain the asynchronous event queue. For QP-affiliated events the QP
 * is looked up under qptable_lock and referenced for the duration of
 * the event handling; non-QP events are ignored except for CQ errors.
 * All consumed entries are reposted to hardware at the end.
 */
void i40iw_process_aeq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;
	struct i40iw_sc_aeq *sc_aeq = &aeq->sc_aeq;
	struct i40iw_aeqe_info aeinfo;
	struct i40iw_aeqe_info *info = &aeinfo;
	int ret;
	struct i40iw_qp *iwqp = NULL;
	struct i40iw_sc_cq *cq = NULL;
	struct i40iw_cq *iwcq = NULL;
	struct i40iw_sc_qp *qp = NULL;
	struct i40iw_qp_host_ctx_info *ctx_info = NULL;
	unsigned long flags;

	u32 aeqcnt = 0;

	/* AEQ not created/sized yet - nothing to drain */
	if (!sc_aeq->size)
		return;

	do {
		memset(info, 0, sizeof(*info));
		ret = dev->aeq_ops->get_next_aeqe(sc_aeq, info);
		if (ret)
			break;	/* queue empty */

		aeqcnt++;
		i40iw_debug(dev, I40IW_DEBUG_AEQ,
			    "%s ae_id = 0x%x bool qp=%d qp_cq_id = %d\n",
			    __func__, info->ae_id, info->qp, info->qp_cq_id);
		if (info->qp) {
			/*
			 * Look up the QP under qptable_lock and pin it with a
			 * reference so it cannot be freed while the event is
			 * processed; released at the bottom of the loop.
			 */
			spin_lock_irqsave(&iwdev->qptable_lock, flags);
			iwqp = iwdev->qp_table[info->qp_cq_id];
			if (!iwqp) {
				spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
				i40iw_debug(dev, I40IW_DEBUG_AEQ,
					    "%s qp_id %d is already freed\n",
					    __func__, info->qp_cq_id);
				continue;
			}
			i40iw_qp_add_ref(&iwqp->ibqp);
			spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
			qp = &iwqp->sc_qp;
			/* snapshot HW-reported states under the QP lock */
			spin_lock_irqsave(&iwqp->lock, flags);
			iwqp->hw_tcp_state = info->tcp_state;
			iwqp->hw_iwarp_state = info->iwarp_state;
			iwqp->last_aeq = info->ae_id;
			spin_unlock_irqrestore(&iwqp->lock, flags);
			ctx_info = &iwqp->ctx_info;
			ctx_info->err_rq_idx_valid = true;
		} else {
			/* only CQ errors are handled without a QP */
			if (info->ae_id != I40IW_AE_CQ_OPERATION_ERROR)
				continue;
		}
		switch (info->ae_id) {
		case I40IW_AE_LLP_FIN_RECEIVED:
			if (qp->term_flags)
				break;
			if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
				iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
				/* NOTE(review): the first half of this condition is
				 * always true due to the assignment just above -
				 * presumably left over from an earlier check against
				 * the HW-reported state; confirm before simplifying.
				 */
				if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
				    (iwqp->ibqp_state == IB_QPS_RTS)) {
					i40iw_next_iw_state(iwqp,
							    I40IW_QP_STATE_CLOSING, 0, 0, 0);
					i40iw_cm_disconn(iwqp);
				}
				iwqp->cm_id->add_ref(iwqp->cm_id);
				i40iw_schedule_cm_timer(iwqp->cm_node,
							(struct i40iw_puda_buf *)iwqp,
							I40IW_TIMER_TYPE_CLOSE, 1, 0);
			}
			break;
		case I40IW_AE_LLP_CLOSE_COMPLETE:
			if (qp->term_flags)
				i40iw_terminate_done(qp, 0);
			else
				i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_BAD_CLOSE:
		case I40IW_AE_RESET_SENT:
			i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0);
			i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_LLP_CONNECTION_RESET:
			if (atomic_read(&iwqp->close_timer_started))
				break;
			i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_QP_SUSPEND_COMPLETE:
			i40iw_qp_suspend_resume(dev, &iwqp->sc_qp, false);
			break;
		case I40IW_AE_TERMINATE_SENT:
			i40iw_terminate_send_fin(qp);
			break;
		case I40IW_AE_LLP_TERMINATE_RECEIVED:
			i40iw_terminate_received(qp, info);
			break;
		case I40IW_AE_CQ_OPERATION_ERROR:
			i40iw_pr_err("Processing an iWARP related AE for CQ misc = 0x%04X\n",
				     info->ae_id);
			cq = (struct i40iw_sc_cq *)(unsigned long)info->compl_ctx;
			iwcq = (struct i40iw_cq *)cq->back_cq;

			if (iwcq->ibcq.event_handler) {
				struct ib_event ibevent;

				ibevent.device = iwcq->ibcq.device;
				ibevent.event = IB_EVENT_CQ_ERR;
				ibevent.element.cq = &iwcq->ibcq;
				iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context);
			}
			break;
		case I40IW_AE_LLP_DOUBT_REACHABILITY:
			break;
		/* the following AEs carry no valid RQ index; clear the flag
		 * and fall through to the common terminate path
		 */
		case I40IW_AE_PRIV_OPERATION_DENIED:
		case I40IW_AE_STAG_ZERO_INVALID:
		case I40IW_AE_IB_RREQ_AND_Q1_FULL:
		case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
		case I40IW_AE_DDP_UBE_INVALID_MO:
		case I40IW_AE_DDP_UBE_INVALID_QN:
		case I40IW_AE_DDP_NO_L_BIT:
		case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
		case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
		case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
		case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
		case I40IW_AE_INVALID_ARP_ENTRY:
		case I40IW_AE_INVALID_TCP_OPTION_RCVD:
		case I40IW_AE_STALE_ARP_ENTRY:
		case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
		case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
		case I40IW_AE_LLP_SYN_RECEIVED:
		case I40IW_AE_LLP_TOO_MANY_RETRIES:
		case I40IW_AE_LCE_QP_CATASTROPHIC:
		case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
		case I40IW_AE_LCE_CQ_CATASTROPHIC:
		case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
		case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
			ctx_info->err_rq_idx_valid = false;
			fallthrough;
		default:
			/* RQ-side error: record the failing WQE index in the
			 * host context before terminating the connection
			 */
			if (!info->sq && ctx_info->err_rq_idx_valid) {
				ctx_info->err_rq_idx = info->wqe_idx;
				ctx_info->tcp_info_valid = false;
				ctx_info->iwarp_info_valid = false;
				ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
								     iwqp->host_ctx.va,
								     ctx_info);
			}
			i40iw_terminate_connection(qp, info);
			break;
		}
		/* drop the reference taken at lookup */
		if (info->qp)
			i40iw_qp_rem_ref(&iwqp->ibqp);
	} while (1);

	if (aeqcnt)
		dev->aeq_ops->repost_aeq_entries(dev, aeqcnt);
}
/**
 * i40iw_cqp_manage_abvpt_cmd - send cqp command manage abpvt
 * @iwdev: iwarp device
 * @accel_local_port: port for apbvt
 * @add_port: add or delete port
 *
 * Build and post an OP_MANAGE_APBVT_ENTRY control command. Adds wait
 * for completion; deletes are posted fire-and-forget.
 */
static enum i40iw_status_code
i40iw_cqp_manage_abvpt_cmd(struct i40iw_device *iwdev,
			   u16 accel_local_port,
			   bool add_port)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_apbvt_info *info;
	enum i40iw_status_code ret;

	/* only the add path waits for the CQP completion */
	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.manage_apbvt_entry.info;
	memset(info, 0, sizeof(*info));
	info->add = add_port;
	info->port = cpu_to_le16(accel_local_port);

	cqp_info->cqp_cmd = OP_MANAGE_APBVT_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;

	ret = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (ret)
		i40iw_pr_err("CQP-OP Manage APBVT entry fail");
	return ret;
}
/**
 * i40iw_manage_apbvt - add or delete tcp port
 * @iwdev: iwarp device
 * @accel_local_port: port for apbvt
 * @add_port: add or delete port
 *
 * Maintain the accelerated-port bitmap and issue the HW APBVT command
 * only on the first add / last delete of a given port.
 */
enum i40iw_status_code i40iw_manage_apbvt(struct i40iw_device *iwdev,
					  u16 accel_local_port,
					  bool add_port)
{
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
	enum i40iw_status_code status;
	unsigned long flags;
	bool in_use;

	/* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
	 * protect against race where add APBVT CQP can race ahead of the delete
	 * APBVT for same port.
	 */
	if (add_port) {
		/* first user of this port issues the HW add */
		spin_lock_irqsave(&cm_core->apbvt_lock, flags);
		in_use = __test_and_set_bit(accel_local_port,
					    cm_core->ports_in_use);
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
		if (in_use)
			return 0;
		return i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,
						  true);
	} else {
		spin_lock_irqsave(&cm_core->apbvt_lock, flags);
		in_use = i40iw_port_in_use(cm_core, accel_local_port);
		if (in_use) {
			/* port still referenced elsewhere - keep the HW entry */
			spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
			return 0;
		}
		__clear_bit(accel_local_port, cm_core->ports_in_use);
		/* lock intentionally held across the (non-waiting) delete;
		 * see the comment above - do not move the unlock earlier
		 */
		status = i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,
						    false);
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
		return status;
	}
}
/**
 * i40iw_manage_arp_cache - manage hw arp cache
 * @iwdev: iwarp device
 * @mac_addr: mac address ptr
 * @ip_addr: ip addr for arp cache
 * @ipv4: flag indicating IPv4 when true
 * @action: add, delete or modify
 *
 * Update the software ARP table, then mirror the change to the HW
 * ARP cache via a non-waiting CQP command.
 */
void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
			    unsigned char *mac_addr,
			    u32 *ip_addr,
			    bool ipv4,
			    u32 action)
{
	struct i40iw_add_arp_cache_entry_info *info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	int arp_index;

	/* resolve/allocate the SW table slot first; bail if none */
	arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
	if (arp_index < 0)
		return;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	if (action == I40IW_ARP_ADD) {
		cqp_info->cqp_cmd = OP_ADD_ARP_CACHE_ENTRY;
		info = &cqp_info->in.u.add_arp_cache_entry.info;
		memset(info, 0, sizeof(*info));
		info->arp_index = cpu_to_le16((u16)arp_index);
		info->permanent = true;
		ether_addr_copy(info->mac_addr, mac_addr);
		cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
		cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
	} else {
		cqp_info->cqp_cmd = OP_DELETE_ARP_CACHE_ENTRY;
		cqp_info->in.u.del_arp_cache_entry.scratch = (uintptr_t)cqp_request;
		cqp_info->in.u.del_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
		cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
	}
	/* NOTE(review): these two writes go through the add_arp_cache_entry
	 * union member even on the delete path, re-setting cqp/scratch that
	 * both branches above already set. Harmless only if the cqp/scratch
	 * fields of the add and del union members share offsets - confirm
	 * the cqp_commands_info layout before removing them.
	 */
	cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->post_sq = 1;
	if (i40iw_handle_cqp_op(iwdev, cqp_request))
		i40iw_pr_err("CQP-OP Add/Del Arp Cache entry fail");
}
/**
 * i40iw_send_syn_cqp_callback - do syn/ack after qhash
 * @cqp_request: qhash cqp completion
 * @send_ack: flag send ack
 *
 * CQP completion callback installed by i40iw_manage_qhash();
 * cqp_request->param carries the cm_node supplied there.
 */
static void i40iw_send_syn_cqp_callback(struct i40iw_cqp_request *cqp_request, u32 send_ack)
{
	i40iw_send_syn(cqp_request->param, send_ack);
}
/**
 * i40iw_manage_qhash - add or modify qhash
 * @iwdev: iwarp device
 * @cminfo: cm info for qhash
 * @etype: type (syn or quad)
 * @mtype: type of qhash
 * @cmnode: cmnode associated with connection
 * @wait: wait for completion
 *
 * Build and post an OP_MANAGE_QHASH_TABLE_ENTRY command. For
 * established (quad-hash) entries the remote address/port is filled
 * in as the source tuple. When @cmnode is given, a completion
 * callback sends the SYN/ACK once the qhash entry is in place.
 */
enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
					  struct i40iw_cm_info *cminfo,
					  enum i40iw_quad_entry_type etype,
					  enum i40iw_quad_hash_manage_type mtype,
					  void *cmnode,
					  bool wait)
{
	struct i40iw_qhash_table_info *info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_sc_vsi *vsi = &iwdev->vsi;
	enum i40iw_status_code status;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(iwcqp, wait);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.manage_qhash_table_entry.info;
	memset(info, 0, sizeof(*info));

	info->vsi = &iwdev->vsi;
	info->manage = mtype;
	info->entry_type = etype;
	/* 0xFFFF means "no VLAN" */
	if (cminfo->vlan_id != 0xFFFF) {
		info->vlan_valid = true;
		info->vlan_id = cpu_to_le16(cminfo->vlan_id);
	} else {
		info->vlan_valid = false;
	}

	info->ipv4_valid = cminfo->ipv4;
	info->user_pri = cminfo->user_pri;
	ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
	/* all qhash traffic lands on the ILQ's QP */
	info->qp_num = cpu_to_le32(vsi->ilq->qp_id);
	/* dest = local tuple from the adapter's point of view */
	info->dest_port = cpu_to_le16(cminfo->loc_port);
	info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]);
	info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]);
	info->dest_ip[2] = cpu_to_le32(cminfo->loc_addr[2]);
	info->dest_ip[3] = cpu_to_le32(cminfo->loc_addr[3]);
	/* established entries additionally match on the remote tuple */
	if (etype == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
		info->src_port = cpu_to_le16(cminfo->rem_port);
		info->src_ip[0] = cpu_to_le32(cminfo->rem_addr[0]);
		info->src_ip[1] = cpu_to_le32(cminfo->rem_addr[1]);
		info->src_ip[2] = cpu_to_le32(cminfo->rem_addr[2]);
		info->src_ip[3] = cpu_to_le32(cminfo->rem_addr[3]);
	}
	if (cmnode) {
		cqp_request->callback_fcn = i40iw_send_syn_cqp_callback;
		cqp_request->param = (void *)cmnode;
	}
	if (info->ipv4_valid)
		i40iw_debug(dev, I40IW_DEBUG_CM,
			    "%s:%s IP=%pI4, port=%d, mac=%pM, vlan_id=%d\n",
			    __func__, (!mtype) ? "DELETE" : "ADD",
			    info->dest_ip,
			    info->dest_port, info->mac_addr, cminfo->vlan_id);
	else
		i40iw_debug(dev, I40IW_DEBUG_CM,
			    "%s:%s IP=%pI6, port=%d, mac=%pM, vlan_id=%d\n",
			    __func__, (!mtype) ? "DELETE" : "ADD",
			    info->dest_ip,
			    info->dest_port, info->mac_addr, cminfo->vlan_id);
	cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->cqp_cmd = OP_MANAGE_QHASH_TABLE_ENTRY;
	cqp_info->post_sq = 1;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Manage Qhash Entry fail");
	return status;
}
/**
 * i40iw_hw_flush_wqes - flush qp's wqe
 * @iwdev: iwarp device
 * @qp: hardware control qp
 * @info: info for flush
 * @wait: flag wait for completion
 *
 * Post an OP_QP_FLUSH_WQES command. On submission failure both drain
 * completions fire so any waiters in drain_sq/drain_rq are released.
 */
enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
					   struct i40iw_sc_qp *qp,
					   struct i40iw_qp_flush_info *info,
					   bool wait)
{
	enum i40iw_status_code status;
	struct i40iw_qp_flush_info *hw_info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
	memcpy(hw_info, info, sizeof(*hw_info));

	cqp_info->cqp_cmd = OP_QP_FLUSH_WQES;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_flush_wqes.qp = qp;
	cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		i40iw_pr_err("CQP-OP Flush WQE's fail");
		/* unblock any drain waiters - the flush never ran */
		complete(&iwqp->sq_drained);
		complete(&iwqp->rq_drained);
		return status;
	}
	if (!cqp_request->compl_info.maj_err_code) {
		/* NOTE(review): the mapping below completes sq_drained on the
		 * RQ_WQE_FLUSHED minor code and rq_drained on SQ_WQE_FLUSHED,
		 * which looks inverted relative to the names - presumably the
		 * minor code reports the queue that had WQEs flushed, meaning
		 * the *other* queue was already empty; confirm against the
		 * CQP completion spec before changing.
		 */
		switch (cqp_request->compl_info.min_err_code) {
		case I40IW_CQP_COMPL_RQ_WQE_FLUSHED:
			complete(&iwqp->sq_drained);
			break;
		case I40IW_CQP_COMPL_SQ_WQE_FLUSHED:
			complete(&iwqp->rq_drained);
			break;
		case I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED:
			break;
		default:
			complete(&iwqp->sq_drained);
			complete(&iwqp->rq_drained);
			break;
		}
	}
	return 0;
}
/**
 * i40iw_gen_ae - generate AE
 * @iwdev: iwarp device
 * @qp: qp associated with AE
 * @info: info for ae
 * @wait: wait for completion
 *
 * Post an OP_GEN_AE command asking the hardware to raise the
 * asynchronous event described by @info on @qp.
 */
void i40iw_gen_ae(struct i40iw_device *iwdev,
		  struct i40iw_sc_qp *qp,
		  struct i40iw_gen_ae_info *info,
		  bool wait)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	memcpy(&cqp_info->in.u.gen_ae.info, info,
	       sizeof(cqp_info->in.u.gen_ae.info));
	cqp_info->cqp_cmd = OP_GEN_AE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.gen_ae.qp = qp;
	cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;

	if (i40iw_handle_cqp_op(iwdev, cqp_request))
		i40iw_pr_err("CQP OP failed attempting to generate ae_code=0x%x\n",
			     info->ae_code);
}
/**
* i40iw_hw_manage_vf_pble_bp - manage vf pbles
* @iwdev: iwarp device
* @info: info for managing pble
* @wait: flag wait for completion
*/
enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
struct i40iw_manage_vf_pble_info *info,
bool wait)
{
enum i40iw_status_code status;
struct i40iw_manage_vf_pble_info *hw_info;
struct i40iw_cqp_request *cqp_request;
struct cqp_commands_info *cqp_info;
if ((iwdev->init_state < CCQ_CREATED) && wait)
wait = false;
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
if (!cqp_request)
return I40IW_ERR_NO_MEMORY;
cqp_info = &cqp_request->info;
hw_info = &cqp_request->info.in.u.manage_vf_pble_bp.info;
memcpy(hw_info, info, sizeof(*hw_info));
cqp_info->cqp_cmd = OP_MANAGE_VF_PBLE_BP;
cqp_info->post_sq = 1;
cqp_info->in.u.manage_vf_pble_bp.cqp = &iwdev->cqp.sc_cqp;
cqp_info->in.u.manage_vf_pble_bp.scratch = (uintptr_t)cqp_request;
status = i40iw_handle_cqp_op(iwdev, cqp_request);
if (status)
i40iw_pr_err("CQP-OP Manage VF pble_bp fail");
return status;
}
/**
 * i40iw_get_ib_wc - return change flush code to IB's
 * @opcode: iwarp flush code
 *
 * Translate a device flush opcode into the corresponding ib_wc_status;
 * any unrecognized code (and FLUSH_FATAL_ERR) maps to IB_WC_FATAL_ERR.
 */
static enum ib_wc_status i40iw_get_ib_wc(enum i40iw_flush_opcode opcode)
{
	switch (opcode) {
	case FLUSH_LOC_LEN_ERR:
		return IB_WC_LOC_LEN_ERR;
	case FLUSH_LOC_QP_OP_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case FLUSH_PROT_ERR:
		return IB_WC_LOC_PROT_ERR;
	case FLUSH_REM_ACCESS_ERR:
		return IB_WC_REM_ACCESS_ERR;
	case FLUSH_REM_OP_ERR:
		return IB_WC_REM_OP_ERR;
	case FLUSH_GENERAL_ERR:
		return IB_WC_GENERAL_ERR;
	case FLUSH_FATAL_ERR:
	default:
		return IB_WC_FATAL_ERR;
	}
}
/**
 * i40iw_set_flush_info - set flush info
 * @pinfo: flush info structure to mark as user-flushed
 * @min: out: minor error code (IB wc status for @opcode)
 * @maj: out: major error code (always CQE_MAJOR_DRV)
 * @opcode: flush error code
 *
 * Record a driver-originated flush code pair derived from @opcode.
 */
static void i40iw_set_flush_info(struct i40iw_qp_flush_info *pinfo,
				 u16 *min,
				 u16 *maj,
				 enum i40iw_flush_opcode opcode)
{
	pinfo->userflushcode = true;
	*maj = CQE_MAJOR_DRV;
	*min = (u16)i40iw_get_ib_wc(opcode);
}
/**
 * i40iw_flush_wqes - flush wqe for qp
 * @iwdev: iwarp device
 * @iwqp: qp to flush wqes
 *
 * Flush both SQ and RQ of the QP, tagging the CQEs with the QP's
 * terminate flush code when a terminate is in progress, and wait
 * for the operation to complete.
 */
void i40iw_flush_wqes(struct i40iw_device *iwdev, struct i40iw_qp *iwqp)
{
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;
	struct i40iw_qp_flush_info info;

	memset(&info, 0, sizeof(info));
	info.sq = true;
	info.rq = true;
	if (qp->term_flags) {
		i40iw_set_flush_info(&info, &info.sq_minor_code,
				     &info.sq_major_code, qp->flush_code);
		i40iw_set_flush_info(&info, &info.rq_minor_code,
				     &info.rq_major_code, qp->flush_code);
	}
	(void)i40iw_hw_flush_wqes(iwdev, qp, &info, true);
}

File diff suppressed because it is too large Load Diff

View File

@ -1,195 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_OSDEP_H
#define I40IW_OSDEP_H
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <net/tcp.h>
#include <crypto/hash.h>
/* get readq/writeq support for 32 bit kernels, use the low-first version */
#include <linux/io-64-nonatomic-lo-hi.h>
#define STATS_TIMER_DELAY 1000
/**
 * set_64bit_val - write 64 bit value to wqe
 * @wqe_words: wqe addr to write to
 * @byte_index: byte offset to write at (divided by 8 to index u64 words)
 * @value: value to write
 **/
static inline void set_64bit_val(u64 *wqe_words, u32 byte_index, u64 value)
{
	wqe_words[byte_index >> 3] = value;
}
/**
 * get_64bit_val - read 64 bit value from wqe
 * @wqe_words: wqe addr
 * @byte_index: byte offset to read from (divided by 8 to index u64 words)
 * @value: out: location receiving the value read
 **/
static inline void get_64bit_val(u64 *wqe_words, u32 byte_index, u64 *value)
{
	*value = wqe_words[byte_index / sizeof(u64)];
}
/* DMA-coherent buffer descriptor: CPU address, bus address and size.
 * NOTE(review): __packed on a struct holding a dma_addr_t is unusual -
 * presumably for a HW-shared layout; confirm before touching.
 */
struct i40iw_dma_mem {
	void *va;	/* kernel virtual address */
	dma_addr_t pa;	/* device/bus address */
	u32 size;	/* allocation size in bytes */
} __packed;
/* Plain virtual-memory allocation descriptor (no DMA mapping). */
struct i40iw_virt_mem {
	void *va;	/* kernel virtual address */
	u32 size;	/* allocation size in bytes */
} __packed;
/* Conditional debug print: emits via pr_info() only when mask bit(s) @m
 * are enabled in @h->debug_mask; @s is a printf format string.
 */
#define i40iw_debug(h, m, s, ...)                               \
do {                                                            \
	if (((m) & (h)->debug_mask))                            \
		pr_info("i40iw " s, ##__VA_ARGS__);             \
} while (0)
#define i40iw_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)
#define I40E_GLHMC_VFSDCMD(_i) (0x000C8000 + ((_i) * 4)) \
/* _i=0...31 */
#define I40E_GLHMC_VFSDCMD_MAX_INDEX 31
#define I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT 0
#define I40E_GLHMC_VFSDCMD_PMSDIDX_MASK (0xFFF \
<< I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT)
#define I40E_GLHMC_VFSDCMD_PF_SHIFT 16
#define I40E_GLHMC_VFSDCMD_PF_MASK (0xF << I40E_GLHMC_VFSDCMD_PF_SHIFT)
#define I40E_GLHMC_VFSDCMD_VF_SHIFT 20
#define I40E_GLHMC_VFSDCMD_VF_MASK (0x1FF << I40E_GLHMC_VFSDCMD_VF_SHIFT)
#define I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT 29
#define I40E_GLHMC_VFSDCMD_PMF_TYPE_MASK (0x3 \
<< I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT)
#define I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT 31
#define I40E_GLHMC_VFSDCMD_PMSDWR_MASK (0x1 << I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT)
#define I40E_GLHMC_VFSDDATAHIGH(_i) (0x000C8200 + ((_i) * 4)) \
/* _i=0...31 */
#define I40E_GLHMC_VFSDDATAHIGH_MAX_INDEX 31
#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT 0
#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF \
<< I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT)
#define I40E_GLHMC_VFSDDATALOW(_i) (0x000C8100 + ((_i) * 4)) \
/* _i=0...31 */
#define I40E_GLHMC_VFSDDATALOW_MAX_INDEX 31
#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT 0
#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_MASK (0x1 \
<< I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT)
#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT 1
#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_MASK (0x1 \
<< I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT)
#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT 2
#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_MASK (0x3FF \
<< I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT)
#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT 12
#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_MASK (0xFFFFF \
<< I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT)
#define I40E_GLPE_FWLDSTATUS 0x0000D200
#define I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT 0
#define I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_MASK (0x1 \
<< I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT)
#define I40E_GLPE_FWLDSTATUS_DONE_SHIFT 1
#define I40E_GLPE_FWLDSTATUS_DONE_MASK (0x1 << I40E_GLPE_FWLDSTATUS_DONE_SHIFT)
#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT 2
#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_MASK (0x1 \
<< I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT)
#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT 3
#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_MASK (0x1 \
<< I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT)
#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT 4
#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_MASK (0x1 \
<< I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT)
struct i40iw_sc_dev;
struct i40iw_sc_qp;
struct i40iw_puda_buf;
struct i40iw_puda_completion_info;
struct i40iw_update_sds_info;
struct i40iw_hmc_fcn_info;
struct i40iw_virtchnl_work_info;
struct i40iw_manage_vf_pble_info;
struct i40iw_device;
struct i40iw_hmc_info;
struct i40iw_hw;
u8 __iomem *i40iw_get_hw_addr(void *dev);
void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev);
bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev);
enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc, void *addr,
u32 length, u32 value);
struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *buf);
void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum);
void i40iw_free_hash_desc(struct shash_desc *);
enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **);
enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
struct i40iw_puda_buf *buf);
enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
struct i40iw_update_sds_info *info);
enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
struct i40iw_hmc_fcn_info *hmcfcninfo);
enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,
struct i40iw_dma_mem *values_mem,
u8 hmc_fn_id);
enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
struct i40iw_dma_mem *values_mem,
u8 hmc_fn_id);
enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,
struct i40iw_dma_mem *mem);
enum i40iw_status_code i40iw_cqp_manage_vf_pble_bp(struct i40iw_sc_dev *dev,
struct i40iw_manage_vf_pble_info *info);
void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
void *i40iw_remove_head(struct list_head *list);
void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);
void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp);
void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp);
enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
struct i40iw_manage_vf_pble_info *info,
bool wait);
struct i40iw_sc_vsi;
void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi);
void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi);
#define i40iw_mmiowb() do { } while (0)
void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value);
u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg);
#endif /* _I40IW_OSDEP_H_ */

View File

@ -1,129 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_P_H
#define I40IW_P_H
#define PAUSE_TIMER_VALUE 0xFFFF
#define REFRESH_THRESHOLD 0x7FFF
#define HIGH_THRESHOLD 0x800
#define LOW_THRESHOLD 0x200
#define ALL_TC2PFC 0xFF
#define CQP_COMPL_WAIT_TIME 0x3E8
#define CQP_TIMEOUT_THRESHOLD 5
void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask,
char *desc, u64 *buf, u32 size);
/* init operations */
enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
struct i40iw_device_init_info *info);
void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp);
u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch);
void i40iw_check_cqp_progress(struct i40iw_cqp_timeout *cqp_timeout, struct i40iw_sc_dev *dev);
enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
struct i40iw_fast_reg_stag_info *info,
bool post_sq);
void i40iw_insert_wqe_hdr(u64 *wqe, u64 header);
/* HMC/FPM functions */
/* HMC initialization entry points (PF programs HMC for itself and its VFs) */
enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
					    u8 hmc_fn_id);
enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, u8 vf_hmc_fn_id,
					   u32 *vf_cnt_array);
/* stats functions */
void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats);
void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats, struct i40iw_dev_hw_stats *stats_values);
void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
			    enum i40iw_hw_stats_index_32b index,
			    u64 *value);
void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
			    enum i40iw_hw_stats_index_64b index,
			    u64 *value);
void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 index, bool is_pf);
/* vsi misc functions */
enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info);
void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi);
void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info);
void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params);
void i40iw_qp_add_qos(struct i40iw_sc_qp *qp);
void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp);
/* QP terminate/suspend/resume control */
void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp);
void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info);
void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info);
enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
					   struct i40iw_sc_qp *qp, u64 scratch);
enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
					  struct i40iw_sc_qp *qp, u64 scratch);
enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(struct i40iw_sc_cqp *cqp,
							   u64 scratch, u8 hmc_fn_id,
							   bool post_sq,
							   bool poll_registers);
enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count);
enum i40iw_status_code i40iw_get_rdma_features(struct i40iw_sc_dev *dev);
void free_sd_mem(struct i40iw_sc_dev *dev);
/* CQP command dispatch (deferred commands processed in bottom half) */
enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
					     struct cqp_commands_info *pcmdinfo);
enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev);
/* prototype for functions used for dynamic memory allocation */
enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
					      struct i40iw_dma_mem *mem, u64 size,
					      u32 alignment);
void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem);
enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
					       struct i40iw_virt_mem *mem, u32 size);
enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
					   struct i40iw_virt_mem *mem);
u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq);
void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev);
#endif

View File

@ -1,611 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_status.h"
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_hmc.h"
#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include <linux/pci.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#include "i40iw_pble.h"
#include "i40iw.h"
struct i40iw_device;
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
struct i40iw_hmc_pble_rsrc *pble_rsrc);
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk);
/**
 * i40iw_destroy_pble_pool - destroy pool during module unload
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 *
 * Frees every chunk on the pool's chunk list (unmapping vmalloc-backed
 * chunks first) and then destroys the gen_pool itself.  A never-created
 * pool is a no-op.
 */
void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
	struct i40iw_pble_pool *pool_info = &pble_rsrc->pinfo;
	struct i40iw_chunk *pchunk;
	struct list_head *entry;
	struct list_head *next;

	if (!pool_info->pool)
		return;

	list_for_each_safe(entry, next, &pool_info->clist) {
		pchunk = list_entry(entry, struct i40iw_chunk, list);
		if (pchunk->type == I40IW_VMALLOC)
			i40iw_free_vmalloc_mem(dev->hw, pchunk);
		kfree(pchunk);
	}
	gen_pool_destroy(pool_info->pool);
}
/**
 * i40iw_hmc_init_pble - Initialize pble resources during module load
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 *
 * Anchors the pble region at the HMC PBLE object base (rounded up to the
 * next 4K boundary), creates the backing gen_pool, and seeds it with the
 * first chunk.  Returns 0 or I40IW_ERR_NO_MEMORY.
 */
enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
					   struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
	struct i40iw_hmc_info *hmc_info = dev->hmc_info;
	u32 align_idx = 0;

	pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].base;
	/* start handing out pbles on a 4k boundary */
	if (pble_rsrc->fpm_base_addr & 0xfff)
		align_idx = (PAGE_SIZE - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;

	pble_rsrc->unallocated_pble =
	    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt - align_idx;
	pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (align_idx << 3);
	pble_rsrc->pinfo.pool_shift = POOL_SHIFT;
	INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
	pble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1);

	if (!pble_rsrc->pinfo.pool || add_pble_pool(dev, pble_rsrc)) {
		i40iw_destroy_pble_pool(dev, pble_rsrc);
		return I40IW_ERR_NO_MEMORY;
	}
	return 0;
}
/**
* get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
* @pble_rsrc: structure containing fpm address
* @idx: where to return indexes
*/
static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc,
struct sd_pd_idx *idx)
{
idx->sd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_DIRECT_BP_SIZE;
idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE;
idx->rel_pd_idx = (idx->pd_idx % I40IW_HMC_PD_CNT_IN_SD);
}
/**
 * add_sd_direct - add sd direct for pble
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource ptr
 * @info: page info for sd
 *
 * Points the chunk at a window inside the SD's single direct backing
 * page and records its fpm address.  Returns 0 or an error from
 * i40iw_add_sd_table_entry().
 */
static enum i40iw_status_code add_sd_direct(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_pble_rsrc *pble_rsrc,
					    struct i40iw_add_page_info *info)
{
	enum i40iw_status_code ret_code = 0;
	struct sd_pd_idx *idx = &info->idx;
	struct i40iw_chunk *chunk = info->chunk;
	struct i40iw_hmc_info *hmc_info = info->hmc_info;
	struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
	u32 offset = 0;

	if (!sd_entry->valid) {
		/* only the PF programs a new direct SD entry here; note
		 * chunk->type is set to DMA_COHERENT only on this PF path
		 * (a VF chunk keeps its prior/zero type).
		 */
		if (dev->is_pf) {
			ret_code = i40iw_add_sd_table_entry(dev->hw, hmc_info,
							    info->idx.sd_idx,
							    I40IW_SD_TYPE_DIRECT,
							    I40IW_HMC_DIRECT_BP_SIZE);
			if (ret_code)
				return ret_code;
			chunk->type = I40IW_DMA_COHERENT;
		}
	}
	/* carve this chunk's virtual window out of the SD backing page */
	offset = idx->rel_pd_idx << I40IW_HMC_PAGED_BP_SHIFT;
	chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT;
	chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset);
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	i40iw_debug(dev, I40IW_DEBUG_PBLE, "chunk_size[%d] = 0x%x vaddr=%p fpm_addr = %llx\n",
		    chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
	return 0;
}
/**
 * i40iw_free_vmalloc_mem - free vmalloc during close
 * @hw: hw struct
 * @chunk: chunk information for vmalloc
 *
 * Unmaps every page that was DMA-mapped for the chunk, then releases the
 * dma address array and the vmalloc region, and clears the chunk type.
 */
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk)
{
	struct pci_dev *pcidev = hw->pcidev;
	int pg;

	/* pg_cnt holds the number of successfully mapped pages */
	for (pg = 0; pg < chunk->pg_cnt; pg++)
		dma_unmap_page(&pcidev->dev, chunk->dmaaddrs[pg], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

	kfree(chunk->dmaaddrs);
	chunk->dmaaddrs = NULL;
	vfree(chunk->vaddr);
	chunk->vaddr = NULL;
	chunk->type = 0;
}
/**
 * i40iw_get_vmalloc_mem - get 2M page for sd
 * @hw: hardware address
 * @chunk: chunk to add
 * @pg_cnt: #of 4 K pages
 *
 * vmalloc's a virtually contiguous region and DMA-maps each backing 4K
 * page individually; on any failure everything mapped so far is released
 * via i40iw_free_vmalloc_mem().  Returns 0 or I40IW_ERR_NO_MEMORY.
 */
static enum i40iw_status_code i40iw_get_vmalloc_mem(struct i40iw_hw *hw,
						    struct i40iw_chunk *chunk,
						    int pg_cnt)
{
	struct pci_dev *pcidev = hw->pcidev;
	struct page *page;
	u8 *addr;
	u32 size;
	int i;

	/* size the array from the element type instead of the old
	 * "pg_cnt << 3", which silently assumed sizeof(dma_addr_t) == 8
	 * (wrong with a 32-bit dma_addr_t); kcalloc also checks the
	 * multiplication for overflow.
	 */
	chunk->dmaaddrs = kcalloc(pg_cnt, sizeof(*chunk->dmaaddrs), GFP_KERNEL);
	if (!chunk->dmaaddrs)
		return I40IW_ERR_NO_MEMORY;
	size = PAGE_SIZE * pg_cnt;
	chunk->vaddr = vmalloc(size);
	if (!chunk->vaddr) {
		kfree(chunk->dmaaddrs);
		chunk->dmaaddrs = NULL;
		return I40IW_ERR_NO_MEMORY;
	}
	chunk->size = size;
	addr = (u8 *)chunk->vaddr;
	for (i = 0; i < pg_cnt; i++) {
		page = vmalloc_to_page((void *)addr);
		if (!page)
			break;
		chunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0,
						  PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&pcidev->dev, chunk->dmaaddrs[i]))
			break;
		addr += PAGE_SIZE;
	}
	/* record how many pages actually mapped so cleanup is exact */
	chunk->pg_cnt = i;
	chunk->type = I40IW_VMALLOC;
	if (i == pg_cnt)
		return 0;
	i40iw_free_vmalloc_mem(hw, chunk);
	return I40IW_ERR_NO_MEMORY;
}
/**
 * fpm_to_idx - given fpm address, get pble index
 * @pble_rsrc: pble resource management
 * @addr: fpm address for index
 */
static inline u32 fpm_to_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
	u64 offset = addr - pble_rsrc->fpm_base_addr;

	return offset >> 3;	/* 8 bytes per pble */
}
/**
 * add_bp_pages - add backing pages for sd
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 * @info: page info for sd
 *
 * Builds a paged SD: vmallocs and DMA-maps the pages, adds the SD table
 * entry, programs one PD entry per page, and — on a VF — asks the PF
 * over the virtual channel to commit the HMC objects / PBLE backing.
 * On any failure the vmalloc'd chunk is released.
 */
static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,
					   struct i40iw_hmc_pble_rsrc *pble_rsrc,
					   struct i40iw_add_page_info *info)
{
	u8 *addr;
	struct i40iw_dma_mem mem;
	struct i40iw_hmc_pd_entry *pd_entry;
	struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
	struct i40iw_hmc_info *hmc_info = info->hmc_info;
	struct i40iw_chunk *chunk = info->chunk;
	struct i40iw_manage_vf_pble_info vf_pble_info;
	enum i40iw_status_code status = 0;
	u32 rel_pd_idx = info->idx.rel_pd_idx;
	u32 pd_idx = info->idx.pd_idx;
	u32 i;

	/* backing memory: vmalloc region with each 4K page DMA-mapped */
	status = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages);
	if (status)
		return I40IW_ERR_NO_MEMORY;
	status = i40iw_add_sd_table_entry(dev->hw, hmc_info,
					  info->idx.sd_idx, I40IW_SD_TYPE_PAGED,
					  I40IW_HMC_DIRECT_BP_SIZE);
	if (status)
		goto error;
	/* a VF must have the PF allocate the PBLE HMC objects on its behalf */
	if (!dev->is_pf) {
		status = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE,
						     fpm_to_idx(pble_rsrc,
								pble_rsrc->next_fpm_addr),
						     (info->pages << PBLE_512_SHIFT));
		if (status) {
			i40iw_pr_err("allocate PBLEs in the PF. Error %i\n", status);
			goto error;
		}
	}
	/* program one PD entry per mapped 4K page */
	addr = chunk->vaddr;
	for (i = 0; i < info->pages; i++) {
		mem.pa = chunk->dmaaddrs[i];
		mem.size = PAGE_SIZE;
		mem.va = (void *)(addr);
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
		if (!pd_entry->valid) {
			status = i40iw_add_pd_table_entry(dev->hw, hmc_info, pd_idx++, &mem);
			if (status)
				goto error;
			addr += PAGE_SIZE;
		} else {
			/* unexpected: entry already programmed; skipped, not fatal */
			i40iw_pr_err("pd entry is valid expecting to be invalid\n");
		}
	}
	/* VF: have the PF push the PD backing pages via a CQP command */
	if (!dev->is_pf) {
		vf_pble_info.first_pd_index = info->idx.rel_pd_idx;
		vf_pble_info.inv_pd_ent = false;
		vf_pble_info.pd_entry_cnt = PBLE_PER_PAGE;
		vf_pble_info.pd_pl_pba = sd_entry->u.pd_table.pd_page_addr.pa;
		vf_pble_info.sd_index = info->idx.sd_idx;
		status = i40iw_hw_manage_vf_pble_bp(dev->back_dev,
						    &vf_pble_info, true);
		if (status) {
			i40iw_pr_err("CQP manage VF PBLE BP failed. %i\n", status);
			goto error;
		}
	}
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	return 0;
error:
	i40iw_free_vmalloc_mem(dev->hw, chunk);
	return status;
}
/**
 * add_pble_pool - add a sd entry for pble resoure
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 *
 * Grows the pble pool by one segment descriptor (SD): chooses direct or
 * paged backing, allocates and maps the memory, publishes the chunk to
 * the gen_pool, and commits a not-yet-valid SD to hardware on the PF.
 */
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_chunk *chunk;
	struct i40iw_add_page_info info;
	struct sd_pd_idx *idx = &info.idx;
	enum i40iw_status_code ret_code = 0;
	enum i40iw_sd_entry_type sd_entry_type;
	u64 sd_reg_val = 0;
	u32 pages;

	if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
		return I40IW_ERR_NO_MEMORY;
	/* growth must start on a 4K-aligned fpm address */
	if (pble_rsrc->next_fpm_addr & 0xfff) {
		i40iw_pr_err("next fpm_addr %llx\n", pble_rsrc->next_fpm_addr);
		return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
	}
	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (!chunk)
		return I40IW_ERR_NO_MEMORY;
	hmc_info = dev->hmc_info;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	get_sd_pd_idx(pble_rsrc, idx);
	sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
	/* fill only to the end of the current SD, capped by what is left */
	pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
			idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
	pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
	info.chunk = chunk;
	info.hmc_info = hmc_info;
	info.pages = pages;
	info.sd_entry = sd_entry;
	if (!sd_entry->valid) {
		/* a full, SD-aligned, PF-owned span can use one direct page */
		sd_entry_type = (!idx->rel_pd_idx &&
				 (pages == I40IW_HMC_PD_CNT_IN_SD) &&
				 dev->is_pf) ? I40IW_SD_TYPE_DIRECT : I40IW_SD_TYPE_PAGED;
	} else {
		sd_entry_type = sd_entry->entry_type;
	}
	i40iw_debug(dev, I40IW_DEBUG_PBLE,
		    "pages = %d, unallocated_pble[%u] current_fpm_addr = %llx\n",
		    pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);
	i40iw_debug(dev, I40IW_DEBUG_PBLE, "sd_entry_type = %d sd_entry valid = %d\n",
		    sd_entry_type, sd_entry->valid);
	if (sd_entry_type == I40IW_SD_TYPE_DIRECT) {
		ret_code = add_sd_direct(dev, pble_rsrc, &info);
		/* bug fix: count a direct SD only when one was actually
		 * added.  The previous flat "if (ret_code)" test after this
		 * call also ran on the initially-paged path (where ret_code
		 * was still 0) and wrongly bumped stats_direct_sds there.
		 * On failure, fall back to paged backing.
		 */
		if (ret_code)
			sd_entry_type = I40IW_SD_TYPE_PAGED;
		else
			pble_rsrc->stats_direct_sds++;
	}
	if (sd_entry_type == I40IW_SD_TYPE_PAGED) {
		ret_code = add_bp_pages(dev, pble_rsrc, &info);
		if (ret_code)
			goto error;
		else
			pble_rsrc->stats_paged_sds++;
	}
	/* make the chunk's virtual range allocatable from the gen_pool */
	if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr,
			      (phys_addr_t)chunk->fpm_addr, chunk->size, -1)) {
		i40iw_pr_err("could not allocate memory by gen_pool_addr_virt()\n");
		ret_code = I40IW_ERR_NO_MEMORY;
		goto error;
	}
	pble_rsrc->next_fpm_addr += chunk->size;
	i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
		    pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
	pble_rsrc->unallocated_pble -= (chunk->size >> 3);
	sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
			sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
	/* the PF commits a newly created SD to hardware via CQP */
	if (dev->is_pf && !sd_entry->valid) {
		ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
					    sd_reg_val, idx->sd_idx,
					    sd_entry->entry_type, true);
		if (ret_code) {
			i40iw_pr_err("cqp cmd failed for sd (pbles)\n");
			goto error;
		}
	}
	sd_entry->valid = true;
	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
	return 0;
error:
	kfree(chunk);
	return ret_code;
}
/**
 * free_lvl2 - fee level 2 pble
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 *
 * Returns every allocated leaf (stopping at the first unused slot) and
 * the root page to the gen_pool, then frees the leaf array.
 */
static void free_lvl2(struct i40iw_hmc_pble_rsrc *pble_rsrc,
		      struct i40iw_pble_alloc *palloc)
{
	struct i40iw_pble_level2 *lvl2 = &palloc->level2;
	struct gen_pool *pool = pble_rsrc->pinfo.pool;
	struct i40iw_pble_info *root = &lvl2->root;
	struct i40iw_pble_info *leaf = lvl2->leaf;
	u32 lf;

	for (lf = 0; lf < lvl2->leaf_cnt; lf++, leaf++) {
		if (!leaf->addr)
			break;
		gen_pool_free(pool, leaf->addr, (leaf->cnt << 3));
	}

	if (root->addr)
		gen_pool_free(pool, root->addr, (root->cnt << 3));

	kfree(lvl2->leaf);
	lvl2->leaf = NULL;
}
/**
 * get_lvl2_pble - get level 2 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 * @pool: pool pointer
 *
 * Splits the request into up to 512-pble leaf pages, allocates a root
 * page whose entries hold each leaf's fpm index, then allocates the
 * leaves.  Partial failures roll back through free_lvl2().
 */
static enum i40iw_status_code get_lvl2_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
					    struct i40iw_pble_alloc *palloc,
					    struct gen_pool *pool)
{
	u32 lf4k, lflast, total, i;
	u32 pblcnt = PBLE_PER_PAGE;
	u64 *addr;
	struct i40iw_pble_level2 *lvl2 = &palloc->level2;
	struct i40iw_pble_info *root = &lvl2->root;
	struct i40iw_pble_info *leaf;

	/* number of full 512 (4K) leafs) */
	lf4k = palloc->total_cnt >> 9;
	lflast = palloc->total_cnt % PBLE_PER_PAGE;
	total = (lflast == 0) ? lf4k : lf4k + 1;
	lvl2->leaf_cnt = total;

	/* GFP_ATOMIC — presumably reachable from non-sleeping context;
	 * NOTE(review): confirm against callers before relaxing.
	 */
	leaf = kzalloc((sizeof(*leaf) * total), GFP_ATOMIC);
	if (!leaf)
		return I40IW_ERR_NO_MEMORY;
	lvl2->leaf = leaf;
	/* allocate pbles for the root */
	root->addr = gen_pool_alloc(pool, (total << 3));
	if (!root->addr) {
		kfree(lvl2->leaf);
		lvl2->leaf = NULL;
		return I40IW_ERR_NO_MEMORY;
	}
	root->idx = fpm_to_idx(pble_rsrc,
			       (u64)gen_pool_virt_to_phys(pool, root->addr));
	root->cnt = total;
	addr = (u64 *)root->addr;
	/* each root entry stores the fpm index of one leaf allocation;
	 * only the last leaf may be shorter than a full page
	 */
	for (i = 0; i < total; i++, leaf++) {
		pblcnt = (lflast && ((i + 1) == total)) ? lflast : PBLE_PER_PAGE;
		leaf->addr = gen_pool_alloc(pool, (pblcnt << 3));
		if (!leaf->addr)
			goto error;
		leaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr));
		leaf->cnt = pblcnt;
		*addr = (u64)leaf->idx;
		addr++;
	}
	palloc->level = I40IW_LEVEL_2;
	pble_rsrc->stats_lvl2++;
	return 0;
error:
	free_lvl2(pble_rsrc, palloc);
	return I40IW_ERR_NO_MEMORY;
}
/**
 * get_lvl1_pble - get level 1 pble resource
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 * @palloc: level 1 pble allocation
 *
 * Tries a single contiguous gen_pool allocation covering the whole
 * request.  Returns 0 on success, I40IW_ERR_NO_MEMORY otherwise.
 */
static enum i40iw_status_code get_lvl1_pble(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_pble_rsrc *pble_rsrc,
					    struct i40iw_pble_alloc *palloc)
{
	struct i40iw_pble_info *lvl1 = &palloc->level1;
	struct gen_pool *pool = pble_rsrc->pinfo.pool;
	unsigned long vaddr;

	vaddr = gen_pool_alloc(pool, (palloc->total_cnt << 3));
	if (!vaddr)
		return I40IW_ERR_NO_MEMORY;

	palloc->level = I40IW_LEVEL_1;
	lvl1->addr = vaddr;
	lvl1->idx = fpm_to_idx(pble_rsrc,
			       (u64)gen_pool_virt_to_phys(pool, vaddr));
	lvl1->cnt = palloc->total_cnt;
	pble_rsrc->stats_lvl1++;
	return 0;
}
/**
 * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 * @palloc: contains all inforamtion regarding pble (idx + pble addr)
 * @pool: pointer to general purpose special memory pool descriptor
 *
 * Prefers a flat level-1 allocation; falls back to level 2 only when the
 * flat attempt fails and the request spans more than one leaf page.
 */
static inline enum i40iw_status_code get_lvl1_lvl2_pble(struct i40iw_sc_dev *dev,
							struct i40iw_hmc_pble_rsrc *pble_rsrc,
							struct i40iw_pble_alloc *palloc,
							struct gen_pool *pool)
{
	enum i40iw_status_code status;

	status = get_lvl1_pble(dev, pble_rsrc, palloc);
	if (!status || palloc->total_cnt <= PBLE_PER_PAGE)
		return status;

	return get_lvl2_pble(pble_rsrc, palloc, pool);
}
/**
 * i40iw_get_pble - allocate pbles from the pool
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 * @palloc: contains all inforamtion regarding pble (idx + pble addr)
 * @pble_cnt: #of pbles requested
 *
 * Tries to satisfy the request from the existing pool first; if that
 * fails, grows the pool one SD at a time (bounded by the request size)
 * and retries.  Success/failure statistics are updated either way.
 */
enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
				      struct i40iw_hmc_pble_rsrc *pble_rsrc,
				      struct i40iw_pble_alloc *palloc,
				      u32 pble_cnt)
{
	struct gen_pool *pool = pble_rsrc->pinfo.pool;
	enum i40iw_status_code status;
	u32 max_sds;
	u32 tries;

	palloc->total_cnt = pble_cnt;
	palloc->level = I40IW_LEVEL_0;

	/* first attempt: no additional SDs acquired */
	status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);

	/* otherwise grow and retry, up to one SD per 2^18 pbles requested */
	max_sds = (palloc->total_cnt >> 18) + 1;
	for (tries = 0; status && tries < max_sds; tries++) {
		status = add_pble_pool(dev, pble_rsrc);
		if (status)
			break;
		status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
	}

	if (!status)
		pble_rsrc->stats_alloc_ok++;
	else
		pble_rsrc->stats_alloc_fail++;
	return status;
}
/**
 * i40iw_free_pble - put pbles back into pool
 * @pble_rsrc: pble resources
 * @palloc: contains all inforamtion regarding pble resource being freed
 *
 * Level-2 allocations are unwound leaf-by-leaf; level-1 allocations are
 * one contiguous gen_pool region.
 */
void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
		     struct i40iw_pble_alloc *palloc)
{
	struct gen_pool *pool = pble_rsrc->pinfo.pool;

	if (palloc->level == I40IW_LEVEL_2) {
		free_lvl2(pble_rsrc, palloc);
	} else {
		gen_pool_free(pool, palloc->level1.addr,
			      (palloc->level1.cnt << 3));
	}
	pble_rsrc->stats_alloc_freed++;
}

View File

@ -1,131 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_PBLE_H
#define I40IW_PBLE_H

/* minimum gen_pool allocation order (2^6 bytes) */
#define POOL_SHIFT 6
/* 8-byte pbles per 4K page */
#define PBLE_PER_PAGE 512
#define I40IW_HMC_PAGED_BP_SHIFT 12
/* log2(PBLE_PER_PAGE): converts page counts to pble counts */
#define PBLE_512_SHIFT 9

/* depth of a pble allocation: 0 = none, 1 = flat, 2 = root + leaves */
enum i40iw_pble_level {
	I40IW_LEVEL_0 = 0,
	I40IW_LEVEL_1 = 1,
	I40IW_LEVEL_2 = 2
};

/* how a chunk's backing memory was obtained (selects the teardown path) */
enum i40iw_alloc_type {
	I40IW_NO_ALLOC = 0,
	I40IW_DMA_COHERENT = 1,
	I40IW_VMALLOC = 2
};

/* one contiguous run of pbles */
struct i40iw_pble_info {
	unsigned long addr;	/* virtual address inside the gen_pool */
	u32 idx;		/* fpm index of the first pble */
	u32 cnt;		/* number of pbles */
};

/* two-level allocation: root page entries index the leaf allocations */
struct i40iw_pble_level2 {
	struct i40iw_pble_info root;
	struct i40iw_pble_info *leaf;
	u32 leaf_cnt;
};

struct i40iw_pble_alloc {
	u32 total_cnt;			/* pbles requested */
	enum i40iw_pble_level level;
	union {
		struct i40iw_pble_info level1;
		struct i40iw_pble_level2 level2;
	};
};

/* sd/pd coordinates derived from an fpm address */
struct sd_pd_idx {
	u32 sd_idx;
	u32 pd_idx;
	u32 rel_pd_idx;
};

/* shared arguments for the add_sd_direct/add_bp_pages helpers */
struct i40iw_add_page_info {
	struct i40iw_chunk *chunk;
	struct i40iw_hmc_sd_entry *sd_entry;
	struct i40iw_hmc_info *hmc_info;
	struct sd_pd_idx idx;
	u32 pages;
};

/* one growth increment of the pble pool */
struct i40iw_chunk {
	struct list_head list;
	u32 size;		/* bytes covered by this chunk */
	void *vaddr;
	u64 fpm_addr;
	u32 pg_cnt;		/* pages DMA-mapped (vmalloc chunks only) */
	dma_addr_t *dmaaddrs;
	enum i40iw_alloc_type type;
};

struct i40iw_pble_pool {
	struct gen_pool *pool;
	struct list_head clist;	/* list of struct i40iw_chunk */
	u32 total_pble_alloc;
	u32 free_pble_cnt;
	u32 pool_shift;
};

struct i40iw_hmc_pble_rsrc {
	u32 unallocated_pble;
	u64 fpm_base_addr;
	u64 next_fpm_addr;
	struct i40iw_pble_pool pinfo;
	/* counters below are diagnostics */
	u32 stats_direct_sds;
	u32 stats_paged_sds;
	u64 stats_alloc_ok;
	u64 stats_alloc_fail;
	u64 stats_alloc_freed;
	u64 stats_lvl1;
	u64 stats_lvl2;
};

void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc);
enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
					   struct i40iw_hmc_pble_rsrc *pble_rsrc);
void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc, struct i40iw_pble_alloc *palloc);
enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
				      struct i40iw_hmc_pble_rsrc *pble_rsrc,
				      struct i40iw_pble_alloc *palloc,
				      u32 pble_cnt);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -1,188 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_PUDA_H
#define I40IW_PUDA_H

#define I40IW_IEQ_MPA_FRAMING 6

struct i40iw_sc_dev;
struct i40iw_sc_qp;
struct i40iw_sc_cq;

/* the two privileged resource flavors this module manages */
enum puda_resource_type {
	I40IW_PUDA_RSRC_TYPE_ILQ = 1,
	I40IW_PUDA_RSRC_TYPE_IEQ
};

/* setup milestones recorded in i40iw_puda_rsrc::completion */
enum puda_rsrc_complete {
	PUDA_CQ_CREATED = 1,
	PUDA_QP_CREATED,
	PUDA_TX_COMPLETE,
	PUDA_RX_COMPLETE,
	PUDA_HASH_CRC_COMPLETE
};

/* parsed fields of one receive completion */
struct i40iw_puda_completion_info {
	struct i40iw_qp_uk *qp;
	u8 q_type;
	u8 vlan_valid;
	u8 l3proto;
	u8 l4proto;
	u16 payload_len;
	u32 compl_error; /* No_err=0, else major and minor err code */
	u32 qp_id;
	u32 wqe_idx;
};

/* per-send parameters passed to i40iw_puda_send() */
struct i40iw_puda_send_info {
	u64 paddr; /* Physical address */
	u32 len;
	u8 tcplen;
	u8 maclen;
	bool ipv4;
	bool doloopback;
	void *scratch;
};

struct i40iw_puda_buf {
	struct list_head list; /* MUST be first entry */
	struct i40iw_dma_mem mem; /* DMA memory for the buffer */
	struct i40iw_puda_buf *next; /* for alloclist in rsrc struct */
	struct i40iw_virt_mem buf_mem; /* Buffer memory for this buffer */
	void *scratch;
	u8 *iph;
	u8 *tcph;
	u8 *data;
	u16 datalen;
	u16 vlan_id;
	u8 tcphlen; /* tcp length in bytes */
	u8 maclen; /* mac length in bytes */
	u32 totallen; /* machlen+iphlen+tcphlen+datalen */
	atomic_t refcount;
	u8 hdrlen;
	bool ipv4;
	u32 seqnum;
};

/* creation parameters for an ILQ or IEQ resource */
struct i40iw_puda_rsrc_info {
	enum puda_resource_type type; /* ILQ or IEQ */
	u32 count;
	u16 pd_id;
	u32 cq_id;
	u32 qp_id;
	u32 sq_size;
	u32 rq_size;
	u16 buf_size;
	u16 mss;
	u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
	void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);
	void (*xmit_complete)(struct i40iw_sc_vsi *, void *);
};

/* runtime state of one ILQ/IEQ resource: its CQ/QP, buffer pools,
 * optional CRC hashing state, and diagnostic counters
 */
struct i40iw_puda_rsrc {
	struct i40iw_sc_cq cq;
	struct i40iw_sc_qp qp;
	struct i40iw_sc_pd sc_pd;
	struct i40iw_sc_dev *dev;
	struct i40iw_sc_vsi *vsi;
	struct i40iw_dma_mem cqmem;
	struct i40iw_dma_mem qpmem;
	struct i40iw_virt_mem ilq_mem;
	enum puda_rsrc_complete completion;
	enum puda_resource_type type;
	u16 buf_size; /*buffer must be max datalen + tcpip hdr + mac */
	u16 mss;
	u32 cq_id;
	u32 qp_id;
	u32 sq_size;
	u32 rq_size;
	u32 cq_size;
	struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
	u64 *rq_wrid_array;
	u32 compl_rxwqe_idx;
	u32 rx_wqe_idx;
	u32 rxq_invalid_cnt;
	u32 tx_wqe_avail_cnt;
	bool check_crc;
	struct shash_desc *hash_desc;
	struct list_head txpend;
	struct list_head bufpool; /* free buffers pool list for recv and xmit */
	u32 alloc_buf_count;
	u32 avail_buf_count; /* snapshot of currently available buffers */
	spinlock_t bufpool_lock;
	struct i40iw_puda_buf *alloclist;
	void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);
	void (*xmit_complete)(struct i40iw_sc_vsi *, void *);
	/* puda stats */
	u64 stats_buf_alloc_fail;
	u64 stats_pkt_rcvd;
	u64 stats_pkt_sent;
	u64 stats_rcvd_pkt_err;
	u64 stats_sent_pkt_q;
	u64 stats_bad_qp_id;
};

struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc);
void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
			    struct i40iw_puda_buf *buf);
void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc,
			 struct i40iw_puda_buf *buf);
enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
				       struct i40iw_puda_send_info *info);
enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
					      struct i40iw_puda_rsrc_info *info);
void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
			       enum puda_resource_type type,
			       bool reset);
enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
						  struct i40iw_sc_cq *cq, u32 *compl_err);
struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
				     struct i40iw_puda_buf *buf);
enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
						 struct i40iw_puda_buf *buf);
enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,
					      void *addr, u32 length, u32 value);
enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc);
void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
void i40iw_free_hash_desc(struct shash_desc *desc);
void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length,
				 u32 seqnum);
enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -1,101 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_STATUS_H
#define I40IW_STATUS_H

/* Error Codes */
/* Driver-internal status values: 0 is success, all failures are
 * negative.  Numbering is intentionally non-contiguous (gaps at -3,
 * -25, -52..-60) — do not renumber existing entries.
 */
enum i40iw_status_code {
	I40IW_SUCCESS = 0,
	I40IW_ERR_NVM = -1,
	I40IW_ERR_NVM_CHECKSUM = -2,
	I40IW_ERR_CONFIG = -4,
	I40IW_ERR_PARAM = -5,
	I40IW_ERR_DEVICE_NOT_SUPPORTED = -6,
	I40IW_ERR_RESET_FAILED = -7,
	I40IW_ERR_SWFW_SYNC = -8,
	I40IW_ERR_NO_MEMORY = -9,
	I40IW_ERR_BAD_PTR = -10,
	I40IW_ERR_INVALID_PD_ID = -11,
	I40IW_ERR_INVALID_QP_ID = -12,
	I40IW_ERR_INVALID_CQ_ID = -13,
	I40IW_ERR_INVALID_CEQ_ID = -14,
	I40IW_ERR_INVALID_AEQ_ID = -15,
	I40IW_ERR_INVALID_SIZE = -16,
	I40IW_ERR_INVALID_ARP_INDEX = -17,
	I40IW_ERR_INVALID_FPM_FUNC_ID = -18,
	I40IW_ERR_QP_INVALID_MSG_SIZE = -19,
	I40IW_ERR_QP_TOOMANY_WRS_POSTED = -20,
	I40IW_ERR_INVALID_FRAG_COUNT = -21,
	I40IW_ERR_QUEUE_EMPTY = -22,
	I40IW_ERR_INVALID_ALIGNMENT = -23,
	I40IW_ERR_FLUSHED_QUEUE = -24,
	I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26,
	I40IW_ERR_TIMEOUT = -27,
	I40IW_ERR_OPCODE_MISMATCH = -28,
	I40IW_ERR_CQP_COMPL_ERROR = -29,
	I40IW_ERR_INVALID_VF_ID = -30,
	I40IW_ERR_INVALID_HMCFN_ID = -31,
	I40IW_ERR_BACKING_PAGE_ERROR = -32,
	I40IW_ERR_NO_PBLCHUNKS_AVAILABLE = -33,
	I40IW_ERR_INVALID_PBLE_INDEX = -34,
	I40IW_ERR_INVALID_SD_INDEX = -35,
	I40IW_ERR_INVALID_PAGE_DESC_INDEX = -36,
	I40IW_ERR_INVALID_SD_TYPE = -37,
	I40IW_ERR_MEMCPY_FAILED = -38,
	I40IW_ERR_INVALID_HMC_OBJ_INDEX = -39,
	I40IW_ERR_INVALID_HMC_OBJ_COUNT = -40,
	I40IW_ERR_INVALID_SRQ_ARM_LIMIT = -41,
	I40IW_ERR_SRQ_ENABLED = -42,
	I40IW_ERR_BUF_TOO_SHORT = -43,
	I40IW_ERR_BAD_IWARP_CQE = -44,
	I40IW_ERR_NVM_BLANK_MODE = -45,
	I40IW_ERR_NOT_IMPLEMENTED = -46,
	I40IW_ERR_PE_DOORBELL_NOT_ENABLED = -47,
	I40IW_ERR_NOT_READY = -48,
	I40IW_NOT_SUPPORTED = -49,
	I40IW_ERR_FIRMWARE_API_VERSION = -50,
	I40IW_ERR_RING_FULL = -51,
	I40IW_ERR_MPA_CRC = -61,
	I40IW_ERR_NO_TXBUFS = -62,
	I40IW_ERR_SEQ_NUM = -63,
	I40IW_ERR_list_empty = -64,
	I40IW_ERR_INVALID_MAC_ADDR = -65,
	I40IW_ERR_BAD_STAG = -66,
	I40IW_ERR_CQ_COMPL_ERROR = -67,
	I40IW_ERR_QUEUE_DESTROYED = -68,
	I40IW_ERR_INVALID_FEAT_CNT = -69
};
#endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,422 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_USER_H
#define I40IW_USER_H
/* Device capability limits for the X722 iWARP function.
 * These mirror the hardware/firmware interface; sizes are expressed in
 * 8-byte quanta where the name says SIZE and in entries where it says
 * ENTRIES/COUNT. Do not tune — they are fixed by the device.
 */
enum i40iw_device_capabilities_const {
	I40IW_WQE_SIZE = 4,			/* SQ WQE size in 8-byte quanta */
	I40IW_CQP_WQE_SIZE = 8,			/* control QP WQE size in quanta */
	I40IW_CQE_SIZE = 4,			/* normal CQE size in quanta */
	I40IW_EXTENDED_CQE_SIZE = 8,		/* extended CQE size in quanta */
	I40IW_AEQE_SIZE = 2,
	I40IW_CEQE_SIZE = 1,
	I40IW_CQP_CTX_SIZE = 8,
	I40IW_SHADOW_AREA_SIZE = 8,
	I40IW_CEQ_MAX_COUNT = 256,
	I40IW_QUERY_FPM_BUF_SIZE = 128,
	I40IW_COMMIT_FPM_BUF_SIZE = 128,
	I40IW_MIN_IW_QP_ID = 1,			/* QP 0 is reserved */
	I40IW_MAX_IW_QP_ID = 262143,
	I40IW_MIN_CEQID = 0,
	I40IW_MAX_CEQID = 256,
	I40IW_MIN_CQID = 0,
	I40IW_MAX_CQID = 131071,
	I40IW_MIN_AEQ_ENTRIES = 1,
	I40IW_MAX_AEQ_ENTRIES = 524287,
	I40IW_MIN_CEQ_ENTRIES = 1,
	I40IW_MAX_CEQ_ENTRIES = 131071,
	I40IW_MIN_CQ_SIZE = 1,
	I40IW_MAX_CQ_SIZE = 1048575,
	I40IW_DB_ID_ZERO = 0,
	I40IW_MAX_WQ_FRAGMENT_COUNT = 3,	/* max SGEs per send/recv WQE */
	I40IW_MAX_SGE_RD = 1,			/* max SGEs for an RDMA read */
	I40IW_MAX_OUTBOUND_MESSAGE_SIZE = 2147483647,	/* 2GB - 1 */
	I40IW_MAX_INBOUND_MESSAGE_SIZE = 2147483647,	/* 2GB - 1 */
	I40IW_MAX_PE_ENABLED_VF_COUNT = 32,
	I40IW_MAX_VF_FPM_ID = 47,
	I40IW_MAX_VF_PER_PF = 127,
	I40IW_MAX_SQ_PAYLOAD_SIZE = 2145386496,
	I40IW_MAX_INLINE_DATA_SIZE = 48,	/* bytes of inline WQE data */
	I40IW_MAX_IRD_SIZE = 64,
	I40IW_MAX_ORD_SIZE = 127,
	I40IW_MAX_WQ_ENTRIES = 2048,
	I40IW_Q2_BUFFER_SIZE = (248 + 100),
	I40IW_MAX_WQE_SIZE_RQ = 128,
	I40IW_QP_CTX_SIZE = 248,
	I40IW_MAX_PDS = 32768
};
/* Opaque handle and scalar aliases shared with user-space/provider code.
 * Kept as #defines (not typedefs) to match the original provider ABI.
 */
#define i40iw_handle void *
#define i40iw_adapter_handle i40iw_handle
#define i40iw_qp_handle i40iw_handle
#define i40iw_cq_handle i40iw_handle
#define i40iw_srq_handle i40iw_handle
#define i40iw_pd_id i40iw_handle
#define i40iw_stag_handle i40iw_handle
#define i40iw_stag_index u32
#define i40iw_stag u32
#define i40iw_stag_key u8
#define i40iw_tagged_offset u64
#define i40iw_access_privileges u32
#define i40iw_physical_fragment u64
#define i40iw_address_list u64 *

#define I40IW_MAX_MR_SIZE 0x10000000000L	/* 1TB max memory region */
#define I40IW_MAX_RQ_WQE_SHIFT 2

struct i40iw_qp_uk;
struct i40iw_cq_uk;
struct i40iw_srq_uk;
struct i40iw_qp_uk_init_info;
struct i40iw_cq_uk_init_info;
struct i40iw_srq_uk_init_info;

/* Scatter/gather element: tagged offset + length + STag */
struct i40iw_sge {
	i40iw_tagged_offset tag_off;
	u32 len;
	i40iw_stag stag;
};

#define i40iw_sgl struct i40iw_sge *

/* Generic producer/consumer ring bookkeeping (indices, not bytes) */
struct i40iw_ring {
	u32 head;
	u32 tail;
	u32 size;
};

/* Raw hardware descriptor images; sizes are in u64 quanta per the
 * capability constants above.
 */
struct i40iw_cqe {
	u64 buf[I40IW_CQE_SIZE];
};

struct i40iw_extended_cqe {
	u64 buf[I40IW_EXTENDED_CQE_SIZE];
};

struct i40iw_wqe {
	u64 buf[I40IW_WQE_SIZE];
};
struct i40iw_qp_uk_ops;

/* Addressing mode for memory windows/regions */
enum i40iw_addressing_type {
	I40IW_ADDR_TYPE_ZERO_BASED = 0,
	I40IW_ADDR_TYPE_VA_BASED = 1,
};

/* Memory access-privilege bits (combinable) */
#define I40IW_ACCESS_FLAGS_LOCALREAD		0x01
#define I40IW_ACCESS_FLAGS_LOCALWRITE		0x02
#define I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY	0x04
#define I40IW_ACCESS_FLAGS_REMOTEREAD		0x05	/* remote + local read */
#define I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY	0x08
#define I40IW_ACCESS_FLAGS_REMOTEWRITE		0x0a	/* remote + local write */
#define I40IW_ACCESS_FLAGS_BIND_WINDOW		0x10
#define I40IW_ACCESS_FLAGS_ALL			0x1F

/* WQE operation types as encoded in the hardware descriptor */
#define I40IW_OP_TYPE_RDMA_WRITE	0
#define I40IW_OP_TYPE_RDMA_READ	1
#define I40IW_OP_TYPE_SEND	3
#define I40IW_OP_TYPE_SEND_INV	4
#define I40IW_OP_TYPE_SEND_SOL	5
#define I40IW_OP_TYPE_SEND_SOL_INV	6
#define I40IW_OP_TYPE_REC	7
#define I40IW_OP_TYPE_BIND_MW	8
#define I40IW_OP_TYPE_FAST_REG_NSMR	9
#define I40IW_OP_TYPE_INV_STAG	10
#define I40IW_OP_TYPE_RDMA_READ_INV_STAG	11
#define I40IW_OP_TYPE_NOP	12

/* Completion status reported in struct i40iw_cq_poll_info::comp_status */
enum i40iw_completion_status {
	I40IW_COMPL_STATUS_SUCCESS = 0,
	I40IW_COMPL_STATUS_FLUSHED,
	I40IW_COMPL_STATUS_INVALID_WQE,
	I40IW_COMPL_STATUS_QP_CATASTROPHIC,
	I40IW_COMPL_STATUS_REMOTE_TERMINATION,
	I40IW_COMPL_STATUS_INVALID_STAG,
	I40IW_COMPL_STATUS_BASE_BOUND_VIOLATION,
	I40IW_COMPL_STATUS_ACCESS_VIOLATION,
	I40IW_COMPL_STATUS_INVALID_PD_ID,
	I40IW_COMPL_STATUS_WRAP_ERROR,
	I40IW_COMPL_STATUS_STAG_INVALID_PDID,
	I40IW_COMPL_STATUS_RDMA_READ_ZERO_ORD,
	I40IW_COMPL_STATUS_QP_NOT_PRIVLEDGED,
	I40IW_COMPL_STATUS_STAG_NOT_INVALID,
	I40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_SIZE,
	I40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_ENTRY,
	I40IW_COMPL_STATUS_INVALID_FBO,
	I40IW_COMPL_STATUS_INVALID_LENGTH,
	I40IW_COMPL_STATUS_INVALID_ACCESS,
	I40IW_COMPL_STATUS_PHYS_BUFFER_LIST_TOO_LONG,
	I40IW_COMPL_STATUS_INVALID_VIRT_ADDRESS,
	I40IW_COMPL_STATUS_INVALID_REGION,
	I40IW_COMPL_STATUS_INVALID_WINDOW,
	I40IW_COMPL_STATUS_INVALID_TOTAL_LENGTH
};

/* CQ arm mode: notify on any completion vs. solicited completions only */
enum i40iw_completion_notify {
	IW_CQ_COMPL_EVENT = 0,
	IW_CQ_COMPL_SOLICITED = 1
};
/* Per-opcode payloads for a send-queue work request. One of these is
 * selected by i40iw_post_sq_info::op_type and stored in its union.
 */
struct i40iw_post_send {
	i40iw_sgl sg_list;
	u32 num_sges;
};

struct i40iw_post_inline_send {
	void *data;	/* inline payload, copied into the WQE */
	u32 len;
};

struct i40iw_rdma_write {
	i40iw_sgl lo_sg_list;		/* local source fragments */
	u32 num_lo_sges;
	struct i40iw_sge rem_addr;	/* remote destination */
};

struct i40iw_inline_rdma_write {
	void *data;
	u32 len;
	struct i40iw_sge rem_addr;
};

struct i40iw_rdma_read {
	struct i40iw_sge lo_addr;	/* local destination */
	struct i40iw_sge rem_addr;	/* remote source */
};

struct i40iw_bind_window {
	i40iw_stag mr_stag;		/* parent MR's STag */
	u64 bind_length;
	void *va;
	enum i40iw_addressing_type addressing_type;
	bool enable_reads;
	bool enable_writes;
	i40iw_stag mw_stag;		/* window STag being bound */
};

struct i40iw_inv_local_stag {
	i40iw_stag target_stag;
};

/* Caller-supplied description of one SQ work request */
struct i40iw_post_sq_info {
	u64 wr_id;		/* opaque id returned in the completion */
	u8 op_type;		/* I40IW_OP_TYPE_* */
	bool signaled;		/* generate a CQE on completion */
	bool read_fence;
	bool local_fence;
	bool inline_data;
	bool defer_flag;
	union {
		struct i40iw_post_send send;
		struct i40iw_rdma_write rdma_write;
		struct i40iw_rdma_read rdma_read;
		struct i40iw_rdma_read rdma_read_inv;
		struct i40iw_bind_window bind_window;
		struct i40iw_inv_local_stag inv_local_stag;
		struct i40iw_inline_rdma_write inline_rdma_write;
		struct i40iw_post_inline_send inline_send;
	} op;
};

/* Caller-supplied description of one RQ work request */
struct i40iw_post_rq_info {
	u64 wr_id;
	i40iw_sgl sg_list;
	u32 num_sges;
};

/* Decoded completion returned by the CQ poll routine */
struct i40iw_cq_poll_info {
	u64 wr_id;
	i40iw_qp_handle qp_handle;
	u32 bytes_xfered;
	u32 tcp_seq_num;
	u32 qp_id;
	i40iw_stag inv_stag;
	enum i40iw_completion_status comp_status;
	u16 major_err;
	u16 minor_err;
	u8 op_type;
	bool stag_invalid_set;	/* inv_stag is valid when set */
	bool error;
	bool is_srq;
	bool solicited_event;
};
/* Function table for posting work requests on a user-level QP.
 * The trailing bool on most ops is post_sq: ring the doorbell after
 * writing the WQE.
 */
struct i40iw_qp_uk_ops {
	void (*iw_qp_post_wr)(struct i40iw_qp_uk *);
	enum i40iw_status_code (*iw_rdma_write)(struct i40iw_qp_uk *,
						struct i40iw_post_sq_info *, bool);
	enum i40iw_status_code (*iw_rdma_read)(struct i40iw_qp_uk *,
					       struct i40iw_post_sq_info *, bool, bool);
	enum i40iw_status_code (*iw_send)(struct i40iw_qp_uk *,
					  struct i40iw_post_sq_info *, u32, bool);
	enum i40iw_status_code (*iw_inline_rdma_write)(struct i40iw_qp_uk *,
						       struct i40iw_post_sq_info *, bool);
	enum i40iw_status_code (*iw_inline_send)(struct i40iw_qp_uk *,
						 struct i40iw_post_sq_info *, u32, bool);
	enum i40iw_status_code (*iw_stag_local_invalidate)(struct i40iw_qp_uk *,
							   struct i40iw_post_sq_info *, bool);
	enum i40iw_status_code (*iw_mw_bind)(struct i40iw_qp_uk *,
					     struct i40iw_post_sq_info *, bool);
	enum i40iw_status_code (*iw_post_receive)(struct i40iw_qp_uk *,
						  struct i40iw_post_rq_info *);
	enum i40iw_status_code (*iw_post_nop)(struct i40iw_qp_uk *, u64, bool, bool);
};

/* Function table for user-level CQ operations */
struct i40iw_cq_ops {
	void (*iw_cq_request_notification)(struct i40iw_cq_uk *,
					   enum i40iw_completion_notify);
	enum i40iw_status_code (*iw_cq_poll_completion)(struct i40iw_cq_uk *,
							struct i40iw_cq_poll_info *);
	enum i40iw_status_code (*iw_cq_post_entries)(struct i40iw_cq_uk *, u8 count);
	void (*iw_cq_clean)(void *, struct i40iw_cq_uk *);
};

struct i40iw_dev_uk;

/* Initializers for user-level QP/CQ objects */
struct i40iw_device_uk_ops {
	enum i40iw_status_code (*iwarp_cq_uk_init)(struct i40iw_cq_uk *,
						   struct i40iw_cq_uk_init_info *);
	enum i40iw_status_code (*iwarp_qp_uk_init)(struct i40iw_qp_uk *,
						   struct i40iw_qp_uk_init_info *);
};

struct i40iw_dev_uk {
	struct i40iw_device_uk_ops ops_uk;
};

/* Per-SQ-WQE tracking entry; indexed by WQE slot to map completions
 * back to the caller's wr_id and length.
 */
struct i40iw_sq_uk_wr_trk_info {
	u64 wrid;
	u32 wr_len;
	u8 wqe_size;
	u8 reserved[3];		/* pad to 16 bytes */
};

/* One WQE quantum: I40IW_WQE_SIZE u64s */
struct i40iw_qp_quanta {
	u64 elem[I40IW_WQE_SIZE];
};
/* User-level (provider-visible) QP state: ring buffers, doorbell
 * mapping, and WQE-polarity bookkeeping for the valid bit.
 */
struct i40iw_qp_uk {
	struct i40iw_qp_quanta *sq_base;
	struct i40iw_qp_quanta *rq_base;
	u32 __iomem *wqe_alloc_reg;		/* doorbell register */
	struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;	/* wr_id per SQ slot */
	u64 *rq_wrid_array;			/* wr_id per RQ slot */
	u64 *shadow_area;
	struct i40iw_ring sq_ring;
	struct i40iw_ring rq_ring;
	struct i40iw_ring initial_ring;
	u32 qp_id;
	u32 sq_size;
	u32 rq_size;
	u32 max_sq_frag_cnt;
	u32 max_rq_frag_cnt;
	struct i40iw_qp_uk_ops ops;
	bool use_srq;
	u8 swqe_polarity;			/* current SQ valid-bit polarity */
	u8 swqe_polarity_deferred;
	u8 rwqe_polarity;			/* current RQ valid-bit polarity */
	u8 rq_wqe_size;
	u8 rq_wqe_size_multiplier;
	bool first_sq_wq;
	bool deferred_flag;
};

/* User-level CQ state */
struct i40iw_cq_uk {
	struct i40iw_cqe *cq_base;
	u32 __iomem *cqe_alloc_reg;		/* CQ doorbell register */
	u64 *shadow_area;
	u32 cq_id;
	u32 cq_size;
	struct i40iw_ring cq_ring;
	u8 polarity;
	bool avoid_mem_cflct;	/* use extended CQEs when set */
	struct i40iw_cq_ops ops;
};

/* Caller-supplied parameters for i40iw_qp_uk_init() */
struct i40iw_qp_uk_init_info {
	struct i40iw_qp_quanta *sq;
	struct i40iw_qp_quanta *rq;
	u32 __iomem *wqe_alloc_reg;
	u64 *shadow_area;
	struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
	u64 *rq_wrid_array;
	u32 qp_id;
	u32 sq_size;
	u32 rq_size;
	u32 max_sq_frag_cnt;
	u32 max_rq_frag_cnt;
	u32 max_inline_data;
	int abi_ver;		/* user/kernel ABI version negotiated at alloc */
};

/* Caller-supplied parameters for i40iw_cq_uk_init() */
struct i40iw_cq_uk_init_info {
	u32 __iomem *cqe_alloc_reg;
	struct i40iw_cqe *cq_base;
	u64 *shadow_area;
	u32 cq_size;
	u32 cq_id;
	bool avoid_mem_cflct;
};
/* Device/QP/CQ initialization and doorbell entry points */
void i40iw_device_init_uk(struct i40iw_dev_uk *dev);
void i40iw_qp_post_wr(struct i40iw_qp_uk *qp);

/* WQE slot allocators; return a pointer into the ring (NULL when full)
 * and the chosen index via *wqe_idx.
 */
u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx,
				u8 wqe_size,
				u32 total_size,
				u64 wr_id
				);
u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx);
u64 *i40iw_qp_get_next_srq_wqe(struct i40iw_srq_uk *srq, u32 *wqe_idx);

enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
					struct i40iw_cq_uk_init_info *info);
enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
					struct i40iw_qp_uk_init_info *info);
void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq);
enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp, u64 wr_id,
				 bool signaled, bool post_sq);

/* Sizing helpers: translate fragment counts / inline sizes into WQE
 * sizes and ring depths.
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size);
enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size);
enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
							 u8 *wqe_size);
void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift);
enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth);
enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth);
#endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,179 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_VERBS_H
#define I40IW_VERBS_H
/* Per-process (verbs user context) state; registered-memory lists are
 * consulted when mapping user CQ/QP buffers.
 */
struct i40iw_ucontext {
	struct ib_ucontext ibucontext;
	struct i40iw_device *iwdev;
	struct list_head cq_reg_mem_list;
	spinlock_t cq_reg_mem_list_lock; /* memory list for cq's */
	struct list_head qp_reg_mem_list;
	spinlock_t qp_reg_mem_list_lock; /* memory list for qp's */
	int abi_ver;		/* user/kernel ABI version from alloc_ucontext */
};

/* Protection domain; usecount tracks objects allocated against it */
struct i40iw_pd {
	struct ib_pd ibpd;
	struct i40iw_sc_pd sc_pd;
	atomic_t usecount;
};

/* PBLE reference: either an index into the PBLE pool or a DMA address */
struct i40iw_hmc_pble {
	union {
		u32 idx;
		dma_addr_t addr;
	};
};
/* Pinned-memory layout for a user CQ: CQ ring plus shadow area */
struct i40iw_cq_mr {
	struct i40iw_hmc_pble cq_pbl;
	dma_addr_t shadow;
};

/* Pinned-memory layout for a user QP: SQ/RQ rings plus shadow area */
struct i40iw_qp_mr {
	struct i40iw_hmc_pble sq_pbl;
	struct i40iw_hmc_pble rq_pbl;
	dma_addr_t shadow;
	struct page *sq_page;
};

/* Physical buffer list bookkeeping for a registered QP or CQ region;
 * linked on the ucontext's qp/cq_reg_mem_list.
 */
struct i40iw_pbl {
	struct list_head list;
	union {
		struct i40iw_qp_mr qp_mr;
		struct i40iw_cq_mr cq_mr;
	};
	bool pbl_allocated;	/* pble_alloc holds pool PBLEs when set */
	bool on_list;		/* currently linked on a reg_mem_list */
	u64 user_base;		/* user VA the region was registered at */
	struct i40iw_pble_alloc pble_alloc;
	struct i40iw_mr *iwmr;
};

#define MAX_SAVE_PAGE_ADDRS 4
/* Memory region / memory window; ibmr and ibmw share storage since a
 * registration is only ever one of the two.
 */
struct i40iw_mr {
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};
	struct ib_umem *region;
	u16 type;
	u32 page_cnt;
	u64 page_size;
	u32 npages;
	u32 stag;
	u64 length;
	u64 pgaddrmem[MAX_SAVE_PAGE_ADDRS];	/* small regions avoid a PBL */
	struct i40iw_pbl iwpbl;
};
/* Completion queue; kmem backs kernel-mode CQs, iwpbl user-mode ones */
struct i40iw_cq {
	struct ib_cq ibcq;
	struct i40iw_sc_cq sc_cq;
	u16 cq_head;
	u16 cq_size;
	u16 cq_number;
	bool user_mode;
	u32 polled_completions;
	u32 cq_mem_size;
	struct i40iw_dma_mem kmem;
	spinlock_t lock; /* for poll cq */
	struct i40iw_pbl *iwpbl;
};

/* Deferred-work wrapper for handling a QP disconnect off the hot path */
struct disconn_work {
	struct work_struct work;
	struct i40iw_qp *iwqp;
};

struct iw_cm_id;
struct ietf_mpa_frame;
struct i40iw_ud_file;

/* Kernel-mode QP ring memory and the wr_id tracking arrays */
struct i40iw_qp_kmode {
	struct i40iw_dma_mem dma_mem;
	u64 *wrid_mem;
};
/* Queue pair: verbs object, HW context, CM linkage, and teardown
 * synchronization. Lifetime is governed by refcount; the three
 * completions at the end gate drain and final destroy.
 */
struct i40iw_qp {
	struct ib_qp ibqp;
	struct i40iw_sc_qp sc_qp;		/* HW-facing QP context */
	struct i40iw_device *iwdev;
	struct i40iw_cq *iwscq;			/* send CQ */
	struct i40iw_cq *iwrcq;			/* receive CQ */
	struct i40iw_pd *iwpd;
	struct i40iw_qp_host_ctx_info ctx_info;
	struct i40iwarp_offload_info iwarp_info;
	void *allocated_buffer;			/* base of the QP allocation */
	refcount_t refcount;
	struct iw_cm_id *cm_id;			/* iWARP connection manager id */
	void *cm_node;
	struct ib_mr *lsmm_mr;
	struct work_struct work;
	enum ib_qp_state ibqp_state;		/* verbs-visible state */
	u32 iwarp_state;			/* device iWARP state */
	u32 qp_mem_size;
	u32 last_aeq;				/* last async event code seen */
	atomic_t close_timer_started;
	spinlock_t lock; /* for post work requests */
	struct i40iw_qp_context *iwqp_context;
	void *pbl_vbase;
	dma_addr_t pbl_pbase;
	struct page *page;
	u8 active_conn:1;			/* we initiated the connection */
	u8 user_mode:1;
	u8 hte_added:1;				/* in the hash-table entry */
	u8 flush_issued:1;
	u8 destroyed:1;
	u8 sig_all:1;				/* every WR generates a CQE */
	u8 pau_mode:1;
	u8 rsvd:1;
	u16 term_sq_flush_code;
	u16 term_rq_flush_code;
	u8 hw_iwarp_state;
	u8 hw_tcp_state;
	struct i40iw_qp_kmode kqp;		/* kernel-mode ring memory */
	struct i40iw_dma_mem host_ctx;
	struct timer_list terminate_timer;
	struct i40iw_pbl iwpbl;
	struct i40iw_dma_mem q2_ctx_mem;
	struct i40iw_dma_mem ietf_mem;
	struct completion sq_drained;
	struct completion rq_drained;
	struct completion free_qp;
};
#endif

View File

@ -1,85 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"
#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_vf.h"
/**
* i40iw_manage_vf_pble_bp - manage vf pble
* @cqp: cqp for cqp' sq wqe
* @info: pble info
* @scratch: pointer for completion
* @post_sq: to post and ring
*/
enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,
					       struct i40iw_manage_vf_pble_info *info,
					       u64 scratch,
					       bool post_sq)
{
	u64 *wqe;
	u64 temp, header, pd_pl_pba = 0;

	/* Claim the next CQP SQ slot; scratch is returned in the CCQ
	 * completion so the caller can match it up.
	 */
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	/* Qword 2: PD entry count, first PD index and SD index packed
	 * into their register fields.
	 */
	temp = LS_64(info->pd_entry_cnt, I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT) |
	       LS_64(info->first_pd_index, I40IW_CQPSQ_MVPBP_FIRST_PD_INX) |
	       LS_64(info->sd_index, I40IW_CQPSQ_MVPBP_SD_INX);
	set_64bit_val(wqe, 16, temp);

	/* Qword 3: opcode + valid bit (current ring polarity) */
	header = LS_64((info->inv_pd_ent ? 1 : 0), I40IW_CQPSQ_MVPBP_INV_PD_ENT) |
		 LS_64(I40IW_CQP_OP_MANAGE_VF_PBLE_BP, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	set_64bit_val(wqe, 24, header);

	/* Qword 4: PD PL physical base address, stored >> 3 per HW format */
	pd_pl_pba = LS_64(info->pd_pl_pba >> 3, I40IW_CQPSQ_MVPBP_PD_PLPBA);
	set_64bit_val(wqe, 32, pd_pl_pba);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE VF_PBLE_BP WQE", wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);	/* ring the CQP doorbell */
	return 0;
}
/* Exported VF CQP op table (declared in i40iw_vf.h) */
const struct i40iw_vf_cqp_ops iw_vf_cqp_ops = {
	i40iw_manage_vf_pble_bp
};

View File

@ -1,62 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_VF_H
#define I40IW_VF_H
struct i40iw_sc_cqp;

/* Parameters for the MANAGE_VF_PBLE_BP CQP operation */
struct i40iw_manage_vf_pble_info {
	u32 sd_index;
	u16 first_pd_index;
	u16 pd_entry_cnt;
	u8 inv_pd_ent;		/* non-zero: invalidate the PD entries */
	u64 pd_pl_pba;		/* PD PL physical base address */
};

/* Op table through which PF code issues VF PBLE backing-page requests */
struct i40iw_vf_cqp_ops {
	enum i40iw_status_code (*manage_vf_pble_bp)(struct i40iw_sc_cqp *,
						    struct i40iw_manage_vf_pble_info *,
						    u64,
						    bool);
};

enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,
					       struct i40iw_manage_vf_pble_info *info,
					       u64 scratch,
					       bool post_sq);

extern const struct i40iw_vf_cqp_ops iw_vf_cqp_ops;
#endif

View File

@ -1,759 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"
#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_virtchnl.h"
/**
* vchnl_vf_send_get_ver_req - Request Channel version
* @dev: IWARP device pointer
* @vchnl_req: Virtual channel message request pointer
*/
static enum i40iw_status_code vchnl_vf_send_get_ver_req(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_req *vchnl_req)
{
enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
if (!dev->vchnl_up)
return ret_code;
memset(vchnl_msg, 0, sizeof(*vchnl_msg));
vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg);
vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_VER;
vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_VER_V0;
ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
if (ret_code)
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s: virt channel send failed 0x%x\n", __func__, ret_code);
return ret_code;
}
/**
* vchnl_vf_send_get_hmc_fcn_req - Request HMC Function from VF
* @dev: IWARP device pointer
* @vchnl_req: Virtual channel message request pointer
*/
static enum i40iw_status_code vchnl_vf_send_get_hmc_fcn_req(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_req *vchnl_req)
{
enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
if (!dev->vchnl_up)
return ret_code;
memset(vchnl_msg, 0, sizeof(*vchnl_msg));
vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg);
vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_HMC_FCN;
vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_HMC_FCN_V0;
ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
if (ret_code)
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s: virt channel send failed 0x%x\n", __func__, ret_code);
return ret_code;
}
/**
* vchnl_vf_send_get_pe_stats_req - Request PE stats from VF
* @dev: IWARP device pointer
* @vchnl_req: Virtual channel message request pointer
*/
static enum i40iw_status_code vchnl_vf_send_get_pe_stats_req(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_req *vchnl_req)
{
enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
if (!dev->vchnl_up)
return ret_code;
memset(vchnl_msg, 0, sizeof(*vchnl_msg));
vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_dev_hw_stats) - 1;
vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_STATS;
vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_STATS_V0;
ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
if (ret_code)
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s: virt channel send failed 0x%x\n", __func__, ret_code);
return ret_code;
}
/**
 * vchnl_vf_send_add_hmc_objs_req - Add HMC objects
 * @dev: IWARP device pointer
 * @vchnl_req: Virtual channel message request pointer
 * @rsrc_type: HMC resource type to add
 * @start_index: starting index for the resource range
 * @rsrc_count: number of objects of @rsrc_type to add
 */
static enum i40iw_status_code vchnl_vf_send_add_hmc_objs_req(struct i40iw_sc_dev *dev,
							     struct i40iw_virtchnl_req *vchnl_req,
							     enum i40iw_hmc_rsrc_type rsrc_type,
							     u32 start_index,
							     u32 rsrc_count)
{
	struct i40iw_virtchnl_op_buf *msg = vchnl_req->vchnl_msg;
	struct i40iw_virtchnl_hmc_obj_range *range;
	enum i40iw_status_code status;

	if (!dev->vchnl_up)
		return I40IW_ERR_NOT_READY;

	/* The object range payload lives in the message's trailing buffer */
	range = (struct i40iw_virtchnl_hmc_obj_range *)msg->iw_chnl_buf;
	memset(msg, 0, sizeof(*msg));
	memset(range, 0, sizeof(*range));

	msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
	msg->iw_chnl_buf_len = sizeof(*msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1;
	msg->iw_op_code = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE;
	msg->iw_op_ver = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0;
	range->obj_type = (u16)rsrc_type;
	range->start_index = start_index;
	range->obj_count = rsrc_count;

	status = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)msg, msg->iw_chnl_buf_len);
	if (status)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, status);
	return status;
}
/**
* vchnl_vf_send_del_hmc_objs_req - del HMC objects
* @dev: IWARP device pointer
* @vchnl_req: Virtual channel message request pointer
* @rsrc_type: resource type to delete
* @start_index: starting index for resource
* @rsrc_count: number of resource type to delete
*/
static enum i40iw_status_code vchnl_vf_send_del_hmc_objs_req(struct i40iw_sc_dev *dev,
							     struct i40iw_virtchnl_req *vchnl_req,
							     enum i40iw_hmc_rsrc_type rsrc_type,
							     u32 start_index,
							     u32 rsrc_count)
{
	struct i40iw_virtchnl_op_buf *msg = vchnl_req->vchnl_msg;
	struct i40iw_virtchnl_hmc_obj_range *range;
	enum i40iw_status_code status;

	if (!dev->vchnl_up)
		return I40IW_ERR_NOT_READY;

	/* The object range to delete lives in the message's trailing buffer */
	range = (struct i40iw_virtchnl_hmc_obj_range *)msg->iw_chnl_buf;
	memset(msg, 0, sizeof(*msg));
	memset(range, 0, sizeof(*range));

	msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
	msg->iw_chnl_buf_len = sizeof(*msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1;
	msg->iw_op_code = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE;
	msg->iw_op_ver = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0;
	range->obj_type = (u16)rsrc_type;
	range->start_index = start_index;
	range->obj_count = rsrc_count;

	status = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)msg, msg->iw_chnl_buf_len);
	if (status)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, status);
	return status;
}
/**
* vchnl_pf_send_get_ver_resp - Send channel version to VF
* @dev: IWARP device pointer
* @vf_id: Virtual function ID associated with the message
* @vchnl_msg: Virtual channel message buffer pointer
*/
static void vchnl_pf_send_get_ver_resp(struct i40iw_sc_dev *dev,
				       u32 vf_id,
				       struct i40iw_virtchnl_op_buf *vchnl_msg)
{
	enum i40iw_status_code ret_code;
	u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u32) - 1];
	struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;

	/* Zero the entire response. The previous sizeof(*resp_buffer)
	 * cleared only one byte (resp_buffer is a u8 array), leaving the
	 * rest of the on-stack response uninitialized.
	 */
	memset(resp_buffer, 0, sizeof(resp_buffer));
	/* Echo the requester's op context so the VF can match the reply */
	vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
	vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
	vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
	/* Payload: the supported channel version */
	*((u32 *)vchnl_msg_resp->iw_chnl_buf) = I40IW_VCHNL_CHNL_VER_V0;
	ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, ret_code);
}
/**
* vchnl_pf_send_get_hmc_fcn_resp - Send HMC Function to VF
* @dev: IWARP device pointer
* @vf_id: Virtual function ID associated with the message
* @vchnl_msg: Virtual channel message buffer pointer
* @hmc_fcn: HMC function index pointer
*/
static void vchnl_pf_send_get_hmc_fcn_resp(struct i40iw_sc_dev *dev,
					   u32 vf_id,
					   struct i40iw_virtchnl_op_buf *vchnl_msg,
					   u16 hmc_fcn)
{
	enum i40iw_status_code ret_code;
	u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u16) - 1];
	struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;

	/* Zero the entire response buffer; sizeof(*resp_buffer) would only
	 * clear the first byte since resp_buffer is a u8 array.
	 */
	memset(resp_buffer, 0, sizeof(resp_buffer));
	vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
	vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
	vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
	/* Payload: the HMC function index assigned to this VF */
	*((u16 *)vchnl_msg_resp->iw_chnl_buf) = hmc_fcn;
	ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, ret_code);
}
/**
* vchnl_pf_send_get_pe_stats_resp - Send PE Stats to VF
* @dev: IWARP device pointer
* @vf_id: Virtual function ID associated with the message
* @vchnl_msg: Virtual channel message buffer pointer
* @hw_stats: HW Stats struct
*/
static void vchnl_pf_send_get_pe_stats_resp(struct i40iw_sc_dev *dev,
					    u32 vf_id,
					    struct i40iw_virtchnl_op_buf *vchnl_msg,
					    struct i40iw_dev_hw_stats *hw_stats)
{
	enum i40iw_status_code ret_code;
	u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(struct i40iw_dev_hw_stats) - 1];
	struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;

	/* Zero the entire response buffer; sizeof(*resp_buffer) would only
	 * clear the first byte since resp_buffer is a u8 array.
	 */
	memset(resp_buffer, 0, sizeof(resp_buffer));
	vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
	vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
	vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
	/* Payload: copy of the PF-maintained HW stats for this VF */
	*((struct i40iw_dev_hw_stats *)vchnl_msg_resp->iw_chnl_buf) = *hw_stats;
	ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, ret_code);
}
/**
* vchnl_pf_send_error_resp - Send an error response to VF
* @dev: IWARP device pointer
* @vf_id: Virtual function ID associated with the message
* @vchnl_msg: Virtual channel message buffer pointer
* @op_ret_code: I40IW_ERR_* status code
*/
static void vchnl_pf_send_error_resp(struct i40iw_sc_dev *dev, u32 vf_id,
				     struct i40iw_virtchnl_op_buf *vchnl_msg,
				     u16 op_ret_code)
{
	u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf)];
	struct i40iw_virtchnl_resp_buf *resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
	enum i40iw_status_code status;

	/* Header-only response: echo the op context and carry the error */
	memset(resp_buffer, 0, sizeof(resp_buffer));
	resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
	resp->iw_chnl_buf_len = sizeof(resp_buffer);
	resp->iw_op_ret_code = (u16)op_ret_code;

	status = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
	if (status)
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: virt channel send failed 0x%x\n", __func__, status);
}
/**
* pf_cqp_get_hmc_fcn_callback - Callback for Get HMC Fcn
* @dev: IWARP device pointer
* @callback_param: unused CQP callback parameter
* @cqe_info: CQE information pointer
*/
static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback_param,
					struct i40iw_ccq_cqe_info *cqe_info)
{
	struct i40iw_vfdev *vf_dev = callback_param;
	struct i40iw_virt_mem vf_dev_mem;

	if (cqe_info->error) {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "CQP Completion Error on Get HMC Function. Maj = 0x%04x, Minor = 0x%04x\n",
			    cqe_info->maj_err_code, cqe_info->min_err_code);
		/* On error the vf_dev is torn down here: unhook it from the
		 * device's VF table, report the failure back over the
		 * virtual channel, then free the vf_dev allocation itself.
		 */
		dev->vf_dev[vf_dev->iw_vf_idx] = NULL;
		vchnl_pf_send_error_resp(dev, vf_dev->vf_id, &vf_dev->vf_msg_buffer.vchnl_msg,
					 (u16)I40IW_ERR_CQP_COMPL_ERROR);
		vf_dev_mem.va = vf_dev;
		vf_dev_mem.size = sizeof(*vf_dev);
		i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
	} else {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "CQP Completion Operation Return information = 0x%08x\n",
			    cqe_info->op_ret_val);
		/* Success: record the PMF index from the CQE and answer the
		 * VF's pending GET_HMC_FCN request with it.
		 */
		vf_dev->pmf_index = (u16)cqe_info->op_ret_val;
		vf_dev->msg_count--;
		vchnl_pf_send_get_hmc_fcn_resp(dev,
					       vf_dev->vf_id,
					       &vf_dev->vf_msg_buffer.vchnl_msg,
					       vf_dev->pmf_index);
	}
}
/**
* pf_add_hmc_obj_callback - Callback for Add HMC Object
* @work_vf_dev: pointer to the VF Device
*/
static void pf_add_hmc_obj_callback(void *work_vf_dev)
{
	struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;
	struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;
	struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;
	struct i40iw_hmc_create_obj_info info;
	struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;
	enum i40iw_status_code ret_code;

	/* Lazily initialize the PF-side HMC for this VF on first use */
	if (!vf_dev->pf_hmc_initialized) {
		ret_code = i40iw_pf_init_vfhmc(vf_dev->pf_dev, (u8)vf_dev->pmf_index, NULL);
		if (ret_code)
			goto add_out;
		vf_dev->pf_hmc_initialized = true;
	}

	/* The requested object range rides in the message's trailing buffer */
	add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;

	memset(&info, 0, sizeof(info));
	info.hmc_info = hmc_info;
	info.is_pf = false;
	info.rsrc_type = (u32)add_hmc_obj->obj_type;
	/* PBLE objects use paged SDs; everything else is direct-mapped */
	info.entry_type = (info.rsrc_type == I40IW_HMC_IW_PBLE) ? I40IW_SD_TYPE_PAGED : I40IW_SD_TYPE_DIRECT;
	info.start_idx = add_hmc_obj->start_index;
	info.count = add_hmc_obj->obj_count;
	i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,
		    "I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE. Add %u type %u objects\n",
		    info.count, info.rsrc_type);
	ret_code = i40iw_sc_create_hmc_obj(vf_dev->pf_dev, &info);
	if (!ret_code)
		vf_dev->hmc_info.hmc_obj[add_hmc_obj->obj_type].cnt = add_hmc_obj->obj_count;

add_out:
	vf_dev->msg_count--;
	/* ret_code == 0 here means success; the "error" resp doubles as the
	 * generic status reply for this op.
	 */
	vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);
}
/**
 * pf_del_hmc_obj_callback - Callback for delete HMC Object
 * @work_vf_dev: pointer to the VF Device
 *
 * Worker-context handler for I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE: deletes
 * the requested range of HMC objects (only if the PF-side HMC was ever
 * set up for this VF) and sends the status back over the virtual
 * channel.
 */
static void pf_del_hmc_obj_callback(void *work_vf_dev)
{
	struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;
	struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;
	struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;
	struct i40iw_hmc_del_obj_info info;
	struct i40iw_virtchnl_hmc_obj_range *del_hmc_obj;
	enum i40iw_status_code ret_code = I40IW_SUCCESS;

	/* Nothing to delete if HMC was never initialized for this VF;
	 * still reply with success below.
	 */
	if (!vf_dev->pf_hmc_initialized)
		goto del_out;

	/* The requested object range rides in the message payload. */
	del_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;

	memset(&info, 0, sizeof(info));
	info.hmc_info = hmc_info;
	info.is_pf = false;
	info.rsrc_type = (u32)del_hmc_obj->obj_type;
	info.start_idx = del_hmc_obj->start_index;
	info.count = del_hmc_obj->obj_count;
	i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,
		    "I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE. Delete %u type %u objects\n",
		    info.count, info.rsrc_type);
	ret_code = i40iw_sc_del_hmc_obj(vf_dev->pf_dev, &info, false);
del_out:
	vf_dev->msg_count--;
	/* ret_code == 0 turns this into a success response. */
	vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);
}
/**
 * i40iw_vf_init_pestat - set up a VF's per-VSI statistics block
 * @dev: device the VF hangs off (supplies the hw back-pointer)
 * @stats: statistics structure to initialize
 * @index: hardware stats index assigned to this VF
 */
static void i40iw_vf_init_pestat(struct i40iw_sc_dev *dev, struct i40iw_vsi_pestat *stats, u16 index)
{
	spin_lock_init(&stats->lock);
	stats->hw = dev->hw;
	i40iw_hw_stats_init(stats, (u8)index, false);
}
/**
 * i40iw_vchnl_recv_pf - Receive PF virtual channel messages
 * @dev: IWARP device pointer
 * @vf_id: Virtual function ID associated with the message
 * @msg: Virtual channel message buffer pointer
 * @len: Length of the virtual channels message
 *
 * PF-side dispatcher for virtual channel requests arriving from VFs.
 * GET_VER is answered inline with no per-VF state; all other ops need a
 * per-VF device structure (allocated on first GET_HMC_FCN) and are
 * either answered directly or handed off to a CQP worker.
 */
enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
					   u32 vf_id,
					   u8 *msg,
					   u16 len)
{
	struct i40iw_virtchnl_op_buf *vchnl_msg = (struct i40iw_virtchnl_op_buf *)msg;
	struct i40iw_vfdev *vf_dev = NULL;
	struct i40iw_hmc_fcn_info hmc_fcn_info;
	u16 iw_vf_idx;
	u16 first_avail_iw_vf = I40IW_MAX_PE_ENABLED_VF_COUNT;
	struct i40iw_virt_mem vf_dev_mem;
	struct i40iw_virtchnl_work_info work_info;
	struct i40iw_vsi_pestat *stats;
	enum i40iw_status_code ret_code;

	if (!dev || !msg || !len)
		return I40IW_ERR_PARAM;

	if (!dev->vchnl_up)
		return I40IW_ERR_NOT_READY;

	/* Version query needs no per-VF state; reply immediately. */
	if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {
		vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
		return I40IW_SUCCESS;
	}

	/* Look up this VF's device struct; remember the first empty slot
	 * in case the VF is new and one must be allocated below.
	 */
	for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
		if (!dev->vf_dev[iw_vf_idx]) {
			if (first_avail_iw_vf == I40IW_MAX_PE_ENABLED_VF_COUNT)
				first_avail_iw_vf = iw_vf_idx;
			continue;
		}
		if (dev->vf_dev[iw_vf_idx]->vf_id == vf_id) {
			vf_dev = dev->vf_dev[iw_vf_idx];
			break;
		}
	}

	/* Only one in-flight message per VF; extras are silently dropped. */
	if (vf_dev) {
		if (!vf_dev->msg_count) {
			vf_dev->msg_count++;
		} else {
			i40iw_debug(dev, I40IW_DEBUG_VIRT,
				    "VF%u already has a channel message in progress.\n",
				    vf_id);
			return I40IW_SUCCESS;
		}
	}

	switch (vchnl_msg->iw_op_code) {
	case I40IW_VCHNL_OP_GET_HMC_FCN:
		if (!vf_dev &&
		    (first_avail_iw_vf != I40IW_MAX_PE_ENABLED_VF_COUNT)) {
			/* First contact from this VF: allocate its device
			 * struct with the HMC object table appended.
			 */
			ret_code = i40iw_allocate_virt_mem(dev->hw, &vf_dev_mem, sizeof(struct i40iw_vfdev) +
							   (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX));
			if (!ret_code) {
				vf_dev = vf_dev_mem.va;
				vf_dev->stats_initialized = false;
				vf_dev->pf_dev = dev;
				vf_dev->msg_count = 1;	/* accounts for this message */
				vf_dev->vf_id = vf_id;
				vf_dev->iw_vf_idx = first_avail_iw_vf;
				vf_dev->pf_hmc_initialized = false;
				/* hmc_obj table lives in the tail of the allocation */
				vf_dev->hmc_info.hmc_obj = (struct i40iw_hmc_obj_info *)(&vf_dev[1]);
				i40iw_debug(dev, I40IW_DEBUG_VIRT,
					    "vf_dev %p, hmc_info %p, hmc_obj %p\n",
					    vf_dev, &vf_dev->hmc_info, vf_dev->hmc_info.hmc_obj);
				dev->vf_dev[first_avail_iw_vf] = vf_dev;
				iw_vf_idx = first_avail_iw_vf;
			} else {
				i40iw_debug(dev, I40IW_DEBUG_VIRT,
					    "VF%u Unable to allocate a VF device structure.\n",
					    vf_id);
				vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg, (u16)I40IW_ERR_NO_MEMORY);
				return I40IW_SUCCESS;
			}
			/* NOTE(review): len comes from the wire and is not
			 * bounded against vf_msg_buffer here - presumably
			 * the transport caps it; verify against the caller.
			 */
			memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
			hmc_fcn_info.callback_fcn = pf_cqp_get_hmc_fcn_callback;
			hmc_fcn_info.vf_id = vf_id;
			hmc_fcn_info.iw_vf_idx = vf_dev->iw_vf_idx;
			hmc_fcn_info.cqp_callback_param = vf_dev;
			hmc_fcn_info.free_fcn = false;
			ret_code = i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
			if (ret_code)
				i40iw_debug(dev, I40IW_DEBUG_VIRT,
					    "VF%u error CQP HMC Function operation.\n",
					    vf_id);
			/* NOTE(review): pmf_index is filled in by the CQP
			 * completion callback, which may not have run yet -
			 * confirm the stats index used here is intentional.
			 */
			i40iw_vf_init_pestat(dev, &vf_dev->pestat, vf_dev->pmf_index);
			vf_dev->stats_initialized = true;
		} else {
			if (vf_dev) {
				/* Known VF re-asking: reply with the cached index. */
				vf_dev->msg_count--;
				vchnl_pf_send_get_hmc_fcn_resp(dev, vf_id, vchnl_msg, vf_dev->pmf_index);
			} else {
				/* No free VF slots remain. */
				vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg,
							 (u16)I40IW_ERR_NO_MEMORY);
			}
		}
		break;
	case I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE:
		if (!vf_dev)
			return I40IW_ERR_BAD_PTR;
		/* Defer to a worker; the CQP operation can block. */
		work_info.worker_vf_dev = vf_dev;
		work_info.callback_fcn = pf_add_hmc_obj_callback;
		memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
		i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
		break;
	case I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE:
		if (!vf_dev)
			return I40IW_ERR_BAD_PTR;
		work_info.worker_vf_dev = vf_dev;
		work_info.callback_fcn = pf_del_hmc_obj_callback;
		memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
		i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
		break;
	case I40IW_VCHNL_OP_GET_STATS:
		if (!vf_dev)
			return I40IW_ERR_BAD_PTR;
		stats = &vf_dev->pestat;
		i40iw_hw_stats_read_all(stats, &stats->hw_stats);
		vf_dev->msg_count--;
		vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, &stats->hw_stats);
		break;
	default:
		/* message string retains historical "40iw" typo */
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "40iw_vchnl_recv_pf: Invalid OpCode 0x%x\n",
			    vchnl_msg->iw_op_code);
		vchnl_pf_send_error_resp(dev, vf_id,
					 vchnl_msg, (u16)I40IW_ERR_NOT_IMPLEMENTED);
	}

	return I40IW_SUCCESS;
}
/**
 * i40iw_vchnl_recv_vf - Receive VF virtual channel messages
 * @dev: IWARP device pointer
 * @vf_id: Virtual function ID associated with the message
 * @msg: Virtual channel message buffer pointer
 * @len: Length of the virtual channels message
 *
 * VF-side handler for a response from the PF.  The original request is
 * recovered from the opaque context echoed back in the response, its
 * return code is recorded, and any payload is copied out to the
 * requester's parameter buffer.
 */
enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev,
					   u32 vf_id,
					   u8 *msg,
					   u16 len)
{
	struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)msg;
	struct i40iw_virtchnl_req *vchnl_req;

	/* NOTE(review): iw_chnl_op_ctx from the PF is trusted as a
	 * pointer to our outstanding request - assumes the PF echoes the
	 * context unmodified; confirm against the PF-side sender.
	 */
	vchnl_req = (struct i40iw_virtchnl_req *)(uintptr_t)vchnl_msg_resp->iw_chnl_op_ctx;
	vchnl_req->ret_code = (enum i40iw_status_code)vchnl_msg_resp->iw_op_ret_code;
	/* "- 1" accounts for the one-byte iw_chnl_buf[1] placeholder. */
	if (len == (sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1)) {
		if (vchnl_req->parm_len && vchnl_req->parm)
			memcpy(vchnl_req->parm, vchnl_msg_resp->iw_chnl_buf, vchnl_req->parm_len);
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: Got response, data size %u\n", __func__,
			    vchnl_req->parm_len);
	} else {
		/* Length mismatch: leave the payload uncopied but still
		 * report success with the PF's return code recorded.
		 */
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s: error length on response, Got %u, expected %u\n", __func__,
			    len, (u32)(sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1));
	}
	return I40IW_SUCCESS;
}
/**
 * i40iw_vchnl_vf_get_ver - Request Channel version
 * @dev: IWARP device pointer
 * @vchnl_ver: Virtual channel message version pointer
 *
 * Asks the PF for the virtual channel protocol version and stores it
 * in @vchnl_ver.
 */
enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev,
					      u32 *vchnl_ver)
{
	struct i40iw_virtchnl_req req = { 0 };
	enum i40iw_status_code status;

	/* Bail out early if the channel is not usable. */
	if (!i40iw_vf_clear_to_send(dev))
		return I40IW_ERR_TIMEOUT;

	req.dev = dev;
	req.parm = vchnl_ver;
	req.parm_len = sizeof(*vchnl_ver);
	req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;

	status = vchnl_vf_send_get_ver_req(dev, &req);
	if (status) {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s Send message failed 0x%0x\n", __func__, status);
		return status;
	}

	/* Transport errors take precedence over the PF's return code. */
	status = i40iw_vf_wait_vchnl_resp(dev);
	return status ? status : req.ret_code;
}
/**
 * i40iw_vchnl_vf_get_hmc_fcn - Request HMC Function
 * @dev: IWARP device pointer
 * @hmc_fcn: HMC function index pointer
 *
 * Asks the PF for the HMC function index assigned to this VF and
 * stores it in @hmc_fcn.
 */
enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev,
						  u16 *hmc_fcn)
{
	struct i40iw_virtchnl_req req = { 0 };
	enum i40iw_status_code status;

	/* Bail out early if the channel is not usable. */
	if (!i40iw_vf_clear_to_send(dev))
		return I40IW_ERR_TIMEOUT;

	req.dev = dev;
	req.parm = hmc_fcn;
	req.parm_len = sizeof(*hmc_fcn);
	req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;

	status = vchnl_vf_send_get_hmc_fcn_req(dev, &req);
	if (status) {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s Send message failed 0x%0x\n", __func__, status);
		return status;
	}

	/* Transport errors take precedence over the PF's return code. */
	status = i40iw_vf_wait_vchnl_resp(dev);
	return status ? status : req.ret_code;
}
/**
 * i40iw_vchnl_vf_add_hmc_objs - Add HMC Object
 * @dev: IWARP device pointer
 * @rsrc_type: HMC Resource type
 * @start_index: Starting index of the objects to be added
 * @rsrc_count: Number of resources to be added
 *
 * Requests that the PF create a range of HMC objects on this VF's
 * behalf; no response payload is expected.
 */
enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev,
						   enum i40iw_hmc_rsrc_type rsrc_type,
						   u32 start_index,
						   u32 rsrc_count)
{
	struct i40iw_virtchnl_req req = { 0 };
	enum i40iw_status_code status;

	/* Bail out early if the channel is not usable. */
	if (!i40iw_vf_clear_to_send(dev))
		return I40IW_ERR_TIMEOUT;

	req.dev = dev;
	req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;

	status = vchnl_vf_send_add_hmc_objs_req(dev, &req, rsrc_type,
						start_index, rsrc_count);
	if (status) {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s Send message failed 0x%0x\n", __func__, status);
		return status;
	}

	/* Transport errors take precedence over the PF's return code. */
	status = i40iw_vf_wait_vchnl_resp(dev);
	return status ? status : req.ret_code;
}
/**
 * i40iw_vchnl_vf_del_hmc_obj - del HMC obj
 * @dev: IWARP device pointer
 * @rsrc_type: HMC Resource type
 * @start_index: Starting index of the object to delete
 * @rsrc_count: Number of resources to be delete
 *
 * Requests that the PF delete a range of HMC objects on this VF's
 * behalf; no response payload is expected.
 */
enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev,
						  enum i40iw_hmc_rsrc_type rsrc_type,
						  u32 start_index,
						  u32 rsrc_count)
{
	struct i40iw_virtchnl_req req = { 0 };
	enum i40iw_status_code status;

	/* Bail out early if the channel is not usable. */
	if (!i40iw_vf_clear_to_send(dev))
		return I40IW_ERR_TIMEOUT;

	req.dev = dev;
	req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;

	status = vchnl_vf_send_del_hmc_objs_req(dev, &req, rsrc_type,
						start_index, rsrc_count);
	if (status) {
		i40iw_debug(dev, I40IW_DEBUG_VIRT,
			    "%s Send message failed 0x%0x\n", __func__, status);
		return status;
	}

	/* Transport errors take precedence over the PF's return code. */
	status = i40iw_vf_wait_vchnl_resp(dev);
	return status ? status : req.ret_code;
}
/**
* i40iw_vchnl_vf_get_pe_stats - Get PE stats
* @dev: IWARP device pointer
* @hw_stats: HW stats struct
*/
enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev,
struct i40iw_dev_hw_stats *hw_stats)
{
struct i40iw_virtchnl_req vchnl_req;
enum i40iw_status_code ret_code;
if (!i40iw_vf_clear_to_send(dev))
return I40IW_ERR_TIMEOUT;
memset(&vchnl_req, 0, sizeof(vchnl_req));
vchnl_req.dev = dev;
vchnl_req.parm = hw_stats;
vchnl_req.parm_len = sizeof(*hw_stats);
vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
ret_code = vchnl_vf_send_get_pe_stats_req(dev, &vchnl_req);
if (ret_code) {
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s Send message failed 0x%0x\n", __func__, ret_code);
return ret_code;
}
ret_code = i40iw_vf_wait_vchnl_resp(dev);
if (ret_code)
return ret_code;
else
return vchnl_req.ret_code;
}

View File

@ -1,124 +0,0 @@
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#ifndef I40IW_VIRTCHNL_H
#define I40IW_VIRTCHNL_H

#include "i40iw_hmc.h"

/* The structures below are exchanged verbatim between PF and VF
 * drivers, so they are byte-packed to rule out compiler padding.
 */
#pragma pack(push, 1)

/* Request message sent over the virtual channel (header + payload). */
struct i40iw_virtchnl_op_buf {
	u16 iw_op_code;		/* enum i40iw_virtchnl_ops */
	u16 iw_op_ver;		/* per-op version, I40IW_VCHNL_OP_*_V0 */
	u16 iw_chnl_buf_len;	/* length of the channel buffer */
	u16 rsvd;
	u64 iw_chnl_op_ctx;	/* opaque sender context, echoed in the response */
	/* Member alignment MUST be maintained above this location */
	u8 iw_chnl_buf[1];	/* variable-length payload (pre-C99 [1] placeholder;
				 * size arithmetic elsewhere relies on the extra byte)
				 */
};

/* Response message returned by the PF. */
struct i40iw_virtchnl_resp_buf {
	u64 iw_chnl_op_ctx;	/* context copied back from the request */
	u16 iw_chnl_buf_len;	/* length of the channel buffer */
	s16 iw_op_ret_code;	/* i40iw_status_code of the operation */
	/* Member alignment MUST be maintained above this location */
	u16 rsvd[2];
	u8 iw_chnl_buf[1];	/* variable-length response payload */
};

/* Operations supported over the virtual channel. */
enum i40iw_virtchnl_ops {
	I40IW_VCHNL_OP_GET_VER = 0,
	I40IW_VCHNL_OP_GET_HMC_FCN,
	I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE,
	I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE,
	I40IW_VCHNL_OP_GET_STATS
};

/* Per-op message versions (all at their initial revision). */
#define I40IW_VCHNL_OP_GET_VER_V0 0
#define I40IW_VCHNL_OP_GET_HMC_FCN_V0 0
#define I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0 0
#define I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0 0
#define I40IW_VCHNL_OP_GET_STATS_V0 0
#define I40IW_VCHNL_CHNL_VER_V0 0

struct i40iw_dev_hw_stats;

/* Payload of ADD/DEL_HMC_OBJ_RANGE: one contiguous range of HMC objects. */
struct i40iw_virtchnl_hmc_obj_range {
	u16 obj_type;		/* HMC resource type of the range */
	u16 rsvd;
	u32 start_index;	/* first object index in the range */
	u32 obj_count;		/* number of objects in the range */
};

enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
					   u32 vf_id,
					   u8 *msg,
					   u16 len);

enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev,
					   u32 vf_id,
					   u8 *msg,
					   u16 len);

/* Bookkeeping for one in-flight VF request; the response handler finds
 * this through the echoed iw_chnl_op_ctx.
 */
struct i40iw_virtchnl_req {
	struct i40iw_sc_dev *dev;
	struct i40iw_virtchnl_op_buf *vchnl_msg;
	void *parm;		/* caller buffer for the response payload (may be NULL) */
	u32 vf_id;
	u16 parm_len;		/* size of *parm; 0 when no payload is expected */
	s16 ret_code;		/* filled from iw_op_ret_code on response */
};

#pragma pack(pop)

enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev,
					      u32 *vchnl_ver);

enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev,
						  u16 *hmc_fcn);

enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev,
						   enum i40iw_hmc_rsrc_type rsrc_type,
						   u32 start_index,
						   u32 rsrc_count);

enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev,
						  enum i40iw_hmc_rsrc_type rsrc_type,
						  u32 start_index,
						  u32 rsrc_count);

enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev,
						   struct i40iw_dev_hw_stats *hw_stats);

#endif

View File

@ -0,0 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_IRDMA
	tristate "Intel(R) Ethernet Protocol Driver for RDMA"
	depends on INET
	depends on IPV6 || !IPV6
	depends on PCI
	depends on ICE && I40E
	select GENERIC_ALLOCATOR
	# Kconfig symbols are referenced WITHOUT the CONFIG_ prefix;
	# "select CONFIG_AUXILIARY_BUS" would select a nonexistent symbol
	# and leave the auxiliary bus disabled.
	select AUXILIARY_BUS
	help
	  This is an Intel(R) Ethernet Protocol Driver for RDMA driver
	  that supports E810 (iWARP/RoCE) and X722 (iWARP) network devices.

View File

@ -0,0 +1,27 @@
# SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
# Copyright (c) 2019, Intel Corporation.
#
# Makefile for the Intel(R) Ethernet Connection RDMA Linux Driver
#

obj-$(CONFIG_INFINIBAND_IRDMA) += irdma.o

# Note: no continuation backslash on the final object - a trailing "\"
# after ws.o would splice the CFLAGS_trace.o line into this assignment,
# corrupting the object list and dropping the trace include flag.
irdma-objs := cm.o \
	      ctrl.o \
	      hmc.o \
	      hw.o \
	      i40iw_hw.o \
	      i40iw_if.o \
	      icrdma_hw.o \
	      main.o \
	      pble.o \
	      puda.o \
	      trace.o \
	      uda.o \
	      uk.o \
	      utils.o \
	      verbs.o \
	      ws.o

CFLAGS_trace.o = -I$(src)

View File

@ -8,8 +8,6 @@
#include "i40e.h"
#include "i40e_prototype.h"
static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR;
static struct i40e_client *registered_client;
static LIST_HEAD(i40e_devices);
static DEFINE_MUTEX(i40e_device_mutex);
DEFINE_IDA(i40e_client_ida);
@ -367,7 +365,6 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
else
dev_err(&pf->pdev->dev, "MAC address list is empty!\n");
cdev->client = registered_client;
pf->cinst = cdev;
cdev->lan_info.msix_count = pf->num_iwarp_msix;
@ -525,69 +522,6 @@ int i40e_lan_del_device(struct i40e_pf *pf)
return ret;
}
/**
 * i40e_client_release - release client specific resources
 * @client: pointer to the registered client
 *
 * Walks every known i40e PF, closes the client's opened instance on it
 * (if any) and deletes the instance.  Called from unregister to undo
 * i40e_client_prepare().
 **/
static void i40e_client_release(struct i40e_client *client)
{
	struct i40e_client_instance *cdev;
	struct i40e_device *ldev;
	struct i40e_pf *pf;

	mutex_lock(&i40e_device_mutex);
	list_for_each_entry(ldev, &i40e_devices, list) {
		pf = ldev->pf;
		cdev = pf->cinst;
		if (!cdev)
			continue;
		/* Busy-wait for the service task bit so the service task
		 * cannot run the client subtask while we tear down.
		 */
		while (test_and_set_bit(__I40E_SERVICE_SCHED,
					pf->state))
			usleep_range(500, 1000);
		if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
			if (client->ops && client->ops->close)
				client->ops->close(&cdev->lan_info, client,
						   false);
			i40e_client_release_qvlist(&cdev->lan_info);
			clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);

			dev_warn(&pf->pdev->dev,
				 "Client %s instance for PF id %d closed\n",
				 client->name, pf->hw.pf_id);
		}
		/* delete the client instance */
		i40e_client_del_instance(pf);
		dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
			 client->name);
		clear_bit(__I40E_SERVICE_SCHED, pf->state);
	}
	mutex_unlock(&i40e_device_mutex);
}
/**
 * i40e_client_prepare - prepare client specific resources
 * @client: pointer to the registered client
 *
 * Creates a client instance on every known i40e PF and kicks the
 * service task so the client subtask runs and opens the instances.
 **/
static void i40e_client_prepare(struct i40e_client *client)
{
	struct i40e_device *ldev;
	struct i40e_pf *pf;

	mutex_lock(&i40e_device_mutex);
	list_for_each_entry(ldev, &i40e_devices, list) {
		pf = ldev->pf;
		i40e_client_add_instance(pf);
		/* Start the client subtask */
		set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	}
	mutex_unlock(&i40e_device_mutex);
}
/**
* i40e_client_virtchnl_send - TBD
* @ldev: pointer to L2 context
@ -817,86 +751,3 @@ void i40e_client_device_unregister(struct i40e_info *ldev)
clear_bit(__I40E_SERVICE_SCHED, pf->state);
}
EXPORT_SYMBOL_GPL(i40e_client_device_unregister);
/* Retain these legacy global registration/unregistration calls till i40iw is
* removed from the kernel. The irdma unified driver does not use these
* exported symbols.
*/
/**
 * i40e_register_client - Register a i40e client driver with the L2 driver
 * @client: pointer to the i40e_client struct
 *
 * Validates the client (non-NULL, named, matching interface version,
 * not already registered), records it as the single registered client
 * and prepares instances on all known PFs.
 *
 * Returns 0 on success or non-0 on error
 **/
int i40e_register_client(struct i40e_client *client)
{
	if (!client)
		return -EIO;

	if (strlen(client->name) == 0) {
		pr_info("i40e: Failed to register client with no name\n");
		return -EIO;
	}

	/* Only one legacy client may be registered at a time. */
	if (registered_client) {
		pr_info("i40e: Client %s has already been registered!\n",
			client->name);
		return -EEXIST;
	}

	if (client->version.major != I40E_CLIENT_VERSION_MAJOR ||
	    client->version.minor != I40E_CLIENT_VERSION_MINOR) {
		pr_info("i40e: Failed to register client %s due to mismatched client interface version\n",
			client->name);
		pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
			client->version.major, client->version.minor,
			client->version.build,
			i40e_client_interface_version_str);
		return -EIO;
	}

	registered_client = client;
	i40e_client_prepare(client);

	pr_info("i40e: Registered client %s\n", client->name);
	return 0;
}
EXPORT_SYMBOL(i40e_register_client);
/**
 * i40e_unregister_client - Unregister a i40e client driver with the L2 driver
 * @client: pointer to the i40e_client struct
 *
 * Clears the registered-client slot and closes/deletes the client's
 * instances on every PF via i40e_client_release().
 *
 * Returns 0 on success or non-0 on error
 **/
int i40e_unregister_client(struct i40e_client *client)
{
	if (registered_client != client) {
		pr_info("i40e: Client %s has not been registered\n",
			client->name);
		return -ENODEV;
	}

	registered_client = NULL;
	/* When a unregister request comes through we would have to send
	 * a close for each of the client instances that were opened.
	 * client_release function is called to handle this.
	 */
	i40e_client_release(client);

	pr_info("i40e: Unregistered client %s\n", client->name);
	return 0;
}
EXPORT_SYMBOL(i40e_unregister_client);

View File

@ -197,8 +197,5 @@ static inline bool i40e_client_is_registered(struct i40e_client *client)
void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client);
void i40e_client_device_unregister(struct i40e_info *ldev);
/* used by clients */
int i40e_register_client(struct i40e_client *client);
int i40e_unregister_client(struct i40e_client *client);
#endif /* _I40E_CLIENT_H_ */

View File

@ -1,107 +0,0 @@
/*
* Copyright (c) 2006 - 2016 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef I40IW_ABI_H
#define I40IW_ABI_H

#include <linux/types.h>

/* Version of the user/kernel ABI described by this header. */
#define I40IW_ABI_VER 5

/* Userspace -> kernel: allocate a user context. */
struct i40iw_alloc_ucontext_req {
	__u32 reserved32;
	__u8 userspace_ver;	/* ABI version the user library was built against */
	__u8 reserved8[3];
};

/* Kernel -> userspace: limits for the new user context. */
struct i40iw_alloc_ucontext_resp {
	__u32 max_pds; /* maximum pds allowed for this user process */
	__u32 max_qps; /* maximum qps allowed for this user process */
	__u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
	__u8 kernel_ver;	/* ABI version supported by the kernel driver */
	__u8 reserved[3];
};

/* Kernel -> userspace: result of PD allocation. */
struct i40iw_alloc_pd_resp {
	__u32 pd_id;
	__u8 reserved[4];
};

/* Userspace -> kernel: user-space buffers backing a new CQ. */
struct i40iw_create_cq_req {
	__aligned_u64 user_cq_buffer;
	__aligned_u64 user_shadow_area;
};

/* Userspace -> kernel: user-space buffers backing a new QP. */
struct i40iw_create_qp_req {
	__aligned_u64 user_wqe_buffers;
	__aligned_u64 user_compl_ctx;

	/* UDA QP PHB */
	__aligned_u64 user_sq_phb;	/* place for VA of the sq phb buff */
	__aligned_u64 user_rq_phb;	/* place for VA of the rq phb buff */
};

/* Distinguishes what a registered memory region is used for. */
enum i40iw_memreg_type {
	IW_MEMREG_TYPE_MEM = 0x0000,
	IW_MEMREG_TYPE_QP = 0x0001,
	IW_MEMREG_TYPE_CQ = 0x0002,
};

/* Userspace -> kernel: parameters for memory registration. */
struct i40iw_mem_reg_req {
	__u16 reg_type;		/* Memory, QP or CQ */
	__u16 cq_pages;
	__u16 rq_pages;
	__u16 sq_pages;
};

/* Kernel -> userspace: properties of the created CQ. */
struct i40iw_create_cq_resp {
	__u32 cq_id;
	__u32 cq_size;
	__u32 mmap_db_index;
	__u32 reserved;
};

/* Kernel -> userspace: properties of the created QP. */
struct i40iw_create_qp_resp {
	__u32 qp_id;
	__u32 actual_sq_size;
	__u32 actual_rq_size;
	__u32 i40iw_drv_opt;
	__u16 push_idx;
	__u8 lsmm;
	__u8 rsvd2;
};

#endif