Merge branches 'cma', 'cxgb4', 'misc', 'mlx4-sriov', 'mlx-cleanups', 'ocrdma' and 'qib' into for-linus

Committed by Roland Dreier on 2012-07-22 23:26:17 -07:00
55 changed files with 2343 additions and 899 deletions

drivers/infiniband/core/addr.c

@@ -129,7 +129,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 		dev_put(dev);
 		break;

-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
 		rcu_read_lock();
 		for_each_netdev_rcu(&init_net, dev) {
@@ -243,7 +243,7 @@ out:
 	return ret;
 }

-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int addr6_resolve(struct sockaddr_in6 *src_in,
			  struct sockaddr_in6 *dst_in,
			  struct rdma_dev_addr *addr)
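Several hunks in this merge (addr.c, cma.c, ocrdma) make the same substitution, so it is worth spelling out: IS_ENABLED(), from <linux/kconfig.h>, replaces the two-test defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) idiom with a single macro that is true for both built-in and modular configurations. A minimal sketch of the pattern (the helper name is hypothetical):

```c
#include <linux/kconfig.h>

/*
 * Old guard:  #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 * New guard:  IS_ENABLED(CONFIG_IPV6) expands to 1 when the option is
 *             built in (=y) or built as a module (=m), 0 otherwise.
 */
#if IS_ENABLED(CONFIG_IPV6)
static void ipv6_only_setup(void)	/* hypothetical helper */
{
	/* IPv6-specific initialization would go here */
}
#endif
```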

drivers/infiniband/core/cm.c

@@ -3848,24 +3848,28 @@ static int __init ib_cm_init(void)
 	INIT_LIST_HEAD(&cm.timewait_list);

 	ret = class_register(&cm_class);
-	if (ret)
-		return -ENOMEM;
-
-	cm.wq = create_workqueue("ib_cm");
-	if (!cm.wq) {
+	if (ret) {
 		ret = -ENOMEM;
 		goto error1;
 	}

+	cm.wq = create_workqueue("ib_cm");
+	if (!cm.wq) {
+		ret = -ENOMEM;
+		goto error2;
+	}
+
 	ret = ib_register_client(&cm_client);
 	if (ret)
-		goto error2;
+		goto error3;

 	return 0;
-error2:
+error3:
 	destroy_workqueue(cm.wq);
-error1:
+error2:
 	class_unregister(&cm_class);
+error1:
+	idr_destroy(&cm.local_id_table);
 	return ret;
 }
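The cm.c hunk above is the standard kernel unwind ladder: the labels are renumbered so each failure jumps to exactly the cleanups for the steps that already succeeded, and idr_destroy() now runs on every error path. A generic sketch of the idiom (all names hypothetical):

```c
static int example_init(void)
{
	int ret;

	ret = register_thing_a();	/* e.g. class_register() */
	if (ret)
		goto err1;

	ret = create_thing_b();		/* e.g. create_workqueue() */
	if (ret)
		goto err2;

	ret = register_thing_c();	/* e.g. ib_register_client() */
	if (ret)
		goto err3;

	return 0;

err3:
	destroy_thing_b();		/* undo in strict reverse order */
err2:
	unregister_thing_a();
err1:
	cleanup_always();		/* e.g. idr_destroy() */
	return ret;
}
```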

drivers/infiniband/core/cm_msgs.h

@@ -44,18 +44,6 @@

 #define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */

-#define CM_REQ_ATTR_ID		cpu_to_be16(0x0010)
-#define CM_MRA_ATTR_ID		cpu_to_be16(0x0011)
-#define CM_REJ_ATTR_ID		cpu_to_be16(0x0012)
-#define CM_REP_ATTR_ID		cpu_to_be16(0x0013)
-#define CM_RTU_ATTR_ID		cpu_to_be16(0x0014)
-#define CM_DREQ_ATTR_ID		cpu_to_be16(0x0015)
-#define CM_DREP_ATTR_ID		cpu_to_be16(0x0016)
-#define CM_SIDR_REQ_ATTR_ID	cpu_to_be16(0x0017)
-#define CM_SIDR_REP_ATTR_ID	cpu_to_be16(0x0018)
-#define CM_LAP_ATTR_ID		cpu_to_be16(0x0019)
-#define CM_APR_ATTR_ID		cpu_to_be16(0x001A)
-
 enum cm_msg_sequence {
	CM_MSG_SEQUENCE_REQ,
	CM_MSG_SEQUENCE_LAP,

drivers/infiniband/core/cma.c

@@ -2311,7 +2311,7 @@ static int cma_get_port(struct rdma_id_private *id_priv)
 static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
			        struct sockaddr *addr)
 {
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 *sin6;

	if (addr->sa_family != AF_INET6)

drivers/infiniband/core/sa_query.c

@@ -94,6 +94,12 @@ struct ib_sa_path_query {
	struct ib_sa_query sa_query;
 };

+struct ib_sa_guidinfo_query {
+	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
+	void *context;
+	struct ib_sa_query sa_query;
+};
+
 struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
@@ -347,6 +353,34 @@ static const struct ib_field service_rec_table[] = {
	  .size_bits    = 2*64 },
 };

+#define GUIDINFO_REC_FIELD(field) \
+	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
+	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
+	.field_name          = "sa_guidinfo_rec:" #field
+
+static const struct ib_field guidinfo_rec_table[] = {
+	{ GUIDINFO_REC_FIELD(lid),
+	  .offset_words = 0,
+	  .offset_bits  = 0,
+	  .size_bits    = 16 },
+	{ GUIDINFO_REC_FIELD(block_num),
+	  .offset_words = 0,
+	  .offset_bits  = 16,
+	  .size_bits    = 8 },
+	{ GUIDINFO_REC_FIELD(res1),
+	  .offset_words = 0,
+	  .offset_bits  = 24,
+	  .size_bits    = 8 },
+	{ GUIDINFO_REC_FIELD(res2),
+	  .offset_words = 1,
+	  .offset_bits  = 0,
+	  .size_bits    = 32 },
+	{ GUIDINFO_REC_FIELD(guid_info_list),
+	  .offset_words = 2,
+	  .offset_bits  = 0,
+	  .size_bits    = 512 },
+};
+
 static void free_sm_ah(struct kref *kref)
 {
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
@@ -945,6 +979,105 @@ err1:
	return ret;
 }

+/* Support GuidInfoRecord */
+static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
+					int status,
+					struct ib_sa_mad *mad)
+{
+	struct ib_sa_guidinfo_query *query =
+		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);
+
+	if (mad) {
+		struct ib_sa_guidinfo_rec rec;
+
+		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
+			  mad->data, &rec);
+		query->callback(status, &rec, query->context);
+	} else
+		query->callback(status, NULL, query->context);
+}
+
+static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
+{
+	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
+}
+
+int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
+			      struct ib_device *device, u8 port_num,
+			      struct ib_sa_guidinfo_rec *rec,
+			      ib_sa_comp_mask comp_mask, u8 method,
+			      int timeout_ms, gfp_t gfp_mask,
+			      void (*callback)(int status,
+					       struct ib_sa_guidinfo_rec *resp,
+					       void *context),
+			      void *context,
+			      struct ib_sa_query **sa_query)
+{
+	struct ib_sa_guidinfo_query *query;
+	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
+	struct ib_sa_port *port;
+	struct ib_mad_agent *agent;
+	struct ib_sa_mad *mad;
+	int ret;
+
+	if (!sa_dev)
+		return -ENODEV;
+
+	if (method != IB_MGMT_METHOD_GET &&
+	    method != IB_MGMT_METHOD_SET &&
+	    method != IB_SA_METHOD_DELETE) {
+		return -EINVAL;
+	}
+
+	port  = &sa_dev->port[port_num - sa_dev->start_port];
+	agent = port->agent;
+
+	query = kmalloc(sizeof *query, gfp_mask);
+	if (!query)
+		return -ENOMEM;
+
+	query->sa_query.port = port;
+	ret = alloc_mad(&query->sa_query, gfp_mask);
+	if (ret)
+		goto err1;
+
+	ib_sa_client_get(client);
+	query->sa_query.client = client;
+	query->callback        = callback;
+	query->context         = context;
+
+	mad = query->sa_query.mad_buf->mad;
+	init_mad(mad, agent);
+
+	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
+	query->sa_query.release  = ib_sa_guidinfo_rec_release;
+
+	mad->mad_hdr.method	 = method;
+	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
+	mad->sa_hdr.comp_mask	 = comp_mask;
+
+	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
+		mad->data);
+
+	*sa_query = &query->sa_query;
+
+	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
+	if (ret < 0)
+		goto err2;
+
+	return ret;
+
+err2:
+	*sa_query = NULL;
+	ib_sa_client_put(query->sa_query.client);
+	free_mad(&query->sa_query);
+
+err1:
+	kfree(query);
+	return ret;
+}
+EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
+
 static void send_handler(struct ib_mad_agent *agent,
			  struct ib_mad_send_wc *mad_send_wc)
 {
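For context, a minimal sketch of how a consumer might drive the new export. The callback and wrapper names are hypothetical, it assumes the caller has already done ib_sa_register_client(), and it assumes the IB_SA_GUIDINFO_REC_BLOCK_NUM comp-mask bit added to <rdma/ib_sa.h> alongside this interface:

```c
static void guidinfo_done(int status, struct ib_sa_guidinfo_rec *resp,
			  void *context)
{
	if (status)
		pr_warn("GuidInfoRecord query failed: %d\n", status);
	/* resp is only valid here when status == 0 */
}

static int query_guid_block0(struct ib_sa_client *client,
			     struct ib_device *device, u8 port)
{
	struct ib_sa_guidinfo_rec rec = { .block_num = 0 };
	struct ib_sa_query *query;

	/* returns a query id (>= 0) or a negative errno */
	return ib_sa_guid_info_rec_query(client, device, port, &rec,
					 IB_SA_GUIDINFO_REC_BLOCK_NUM,
					 IB_MGMT_METHOD_GET,
					 1000 /* ms */, GFP_KERNEL,
					 guidinfo_done, NULL, &query);
}
```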

drivers/infiniband/hw/cxgb4/cm.c

@@ -548,8 +548,8 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
	}

	if (mpa_rev_to_use == 2) {
-		mpa->private_data_size +=
-			htons(sizeof(struct mpa_v2_conn_params));
+		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
+					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
@@ -635,8 +635,8 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
-		mpa->private_data_size +=
-			htons(sizeof(struct mpa_v2_conn_params));
+		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
+					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
						       0));
@@ -715,8 +715,8 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
-		mpa->private_data_size +=
-			htons(sizeof(struct mpa_v2_conn_params));
+		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
+					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
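The three cxgb4 hunks all fix the same endianness bug: private_data_size is a __be16, and `+= htons(x)` adds the byte-swapped representations, which miscarries on little-endian hosts once the sum crosses a byte boundary. The replacement decodes to host order, adds, and re-encodes. A user-space demonstration of the difference:

```c
#include <arpa/inet.h>	/* htons/ntohs for the user-space demo */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t wire = htons(200);	/* big-endian field holding 200 */

	/* Wrong: on a little-endian host this adds 0xC800 + 0x6400,
	 * drops the carry out of 16 bits, and decodes as 44, not 300. */
	uint16_t bad = wire + htons(100);

	/* Right: decode, add in host order, re-encode. */
	uint16_t good = htons(ntohs(wire) + 100);

	printf("bad=%u good=%u\n", ntohs(bad), ntohs(good));
	return 0;
}
```

On a big-endian machine both lines print 300, which is why bugs like this tend to survive review.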

drivers/infiniband/hw/mlx4/mad.c

@@ -147,47 +147,51 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
 }

 /*
- * Snoop SM MADs for port info and P_Key table sets, so we can
- * synthesize LID change and P_Key change events.
+ * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
+ * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
  */
 static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
		       u16 prev_lid)
 {
-	struct ib_event event;
+	struct ib_port_info *pinfo;
+	u16 lid;
+	struct mlx4_ib_dev *dev = to_mdev(ibdev);

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
-	    mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
-		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
-			struct ib_port_info *pinfo =
-				(struct ib_port_info *) ((struct ib_smp *) mad)->data;
-			u16 lid = be16_to_cpu(pinfo->lid);
-
-			update_sm_ah(to_mdev(ibdev), port_num,
-				     be16_to_cpu(pinfo->sm_lid),
-				     pinfo->neighbormtu_mastersmsl & 0xf);
-
-			event.device	       = ibdev;
-			event.element.port_num = port_num;
-
-			if (pinfo->clientrereg_resv_subnetto & 0x80) {
-				event.event    = IB_EVENT_CLIENT_REREGISTER;
-				ib_dispatch_event(&event);
-			}
-
-			if (prev_lid != lid) {
-				event.event    = IB_EVENT_LID_CHANGE;
-				ib_dispatch_event(&event);
-			}
-		}
-
-		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
-			event.device	       = ibdev;
-			event.event	       = IB_EVENT_PKEY_CHANGE;
-			event.element.port_num = port_num;
-			ib_dispatch_event(&event);
+	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
+		switch (mad->mad_hdr.attr_id) {
+		case IB_SMP_ATTR_PORT_INFO:
+			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
+			lid = be16_to_cpu(pinfo->lid);
+
+			update_sm_ah(dev, port_num,
+				     be16_to_cpu(pinfo->sm_lid),
+				     pinfo->neighbormtu_mastersmsl & 0xf);
+
+			if (pinfo->clientrereg_resv_subnetto & 0x80)
+				mlx4_ib_dispatch_event(dev, port_num,
+						       IB_EVENT_CLIENT_REREGISTER);
+
+			if (prev_lid != lid)
+				mlx4_ib_dispatch_event(dev, port_num,
+						       IB_EVENT_LID_CHANGE);
+			break;
+
+		case IB_SMP_ATTR_PKEY_TABLE:
+			mlx4_ib_dispatch_event(dev, port_num,
+					       IB_EVENT_PKEY_CHANGE);
+			break;
+
+		case IB_SMP_ATTR_GUID_INFO:
+			/* paravirtualized master's guid is guid 0 -- does not change */
+			if (!mlx4_is_master(dev->dev))
+				mlx4_ib_dispatch_event(dev, port_num,
+						       IB_EVENT_GID_CHANGE);
+			break;
+
+		default:
+			break;
		}
-	}
 }

 static void node_desc_override(struct ib_device *dev,
@@ -242,6 +246,25 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
	int err;
	struct ib_port_attr pattr;

+	if (in_wc && in_wc->qp->qp_num) {
+		pr_debug("received MAD: slid:%d sqpn:%d "
+			"dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
+			in_wc->slid, in_wc->src_qp,
+			in_wc->dlid_path_bits,
+			in_wc->qp->qp_num,
+			in_wc->wc_flags,
+			in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
+			be16_to_cpu(in_mad->mad_hdr.attr_id));
+		if (in_wc->wc_flags & IB_WC_GRH) {
+			pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
+				 be64_to_cpu(in_grh->sgid.global.subnet_prefix),
+				 be64_to_cpu(in_grh->sgid.global.interface_id));
+			pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
+				 be64_to_cpu(in_grh->dgid.global.subnet_prefix),
+				 be64_to_cpu(in_grh->dgid.global.interface_id));
+		}
+	}
+
	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
@@ -286,7 +309,8 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		return IB_MAD_RESULT_FAILURE;

	if (!out_mad->mad_hdr.status) {
-		smp_snoop(ibdev, port_num, in_mad, prev_lid);
+		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
+			smp_snoop(ibdev, port_num, in_mad, prev_lid);
		node_desc_override(ibdev, out_mad);
	}
@@ -427,3 +451,64 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
			ib_destroy_ah(dev->sm_ah[p]);
	}
 }
+
+void handle_port_mgmt_change_event(struct work_struct *work)
+{
+	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
+	struct mlx4_ib_dev *dev = ew->ib_dev;
+	struct mlx4_eqe *eqe = &(ew->ib_eqe);
+	u8 port = eqe->event.port_mgmt_change.port;
+	u32 changed_attr;
+
+	switch (eqe->subtype) {
+	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
+		changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);
+
+		/* Update the SM ah - This should be done before handling
+		   the other changed attributes so that MADs can be sent to the SM */
+		if (changed_attr & MSTR_SM_CHANGE_MASK) {
+			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
+			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
+			update_sm_ah(dev, port, lid, sl);
+		}
+
+		/* Check if it is a lid change event */
+		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
+			mlx4_ib_dispatch_event(dev, port, IB_EVENT_LID_CHANGE);
+
+		/* Generate GUID changed event */
+		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK)
+			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
+
+		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
+			mlx4_ib_dispatch_event(dev, port,
+					       IB_EVENT_CLIENT_REREGISTER);
+		break;
+
+	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
+		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
+		break;
+	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
+		/* paravirtualized master's guid is guid 0 -- does not change */
+		if (!mlx4_is_master(dev->dev))
+			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
+		break;
+	default:
+		pr_warn("Unsupported subtype 0x%x for "
+			"Port Management Change event\n", eqe->subtype);
+	}
+
+	kfree(ew);
+}
+
+void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
+			    enum ib_event_type type)
+{
+	struct ib_event event;
+
+	event.device		= &dev->ib_dev;
+	event.element.port_num	= port_num;
+	event.event		= type;
+
+	ib_dispatch_event(&event);
+}

drivers/infiniband/hw/mlx4/main.c

@@ -50,7 +50,7 @@
 #include "mlx4_ib.h"
 #include "user.h"

-#define DRV_NAME	"mlx4_ib"
+#define DRV_NAME	MLX4_IB_DRV_NAME
 #define DRV_VERSION	"1.0"
 #define DRV_RELDATE	"April 4, 2008"
@@ -157,7 +157,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
-	props->masked_atomic_cap   = IB_ATOMIC_HCA;
+	props->masked_atomic_cap   = props->atomic_cap;
	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
@@ -898,7 +898,6 @@ static void update_gids_task(struct work_struct *work)
	union ib_gid *gids;
	int err;
	struct mlx4_dev	*dev = gw->dev->dev;
-	struct ib_event event;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
@@ -916,10 +915,7 @@ static void update_gids_task(struct work_struct *work)
		pr_warn("set port command failed\n");
	else {
		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
-		event.device = &gw->dev->ib_dev;
-		event.element.port_num = gw->port;
-		event.event    = IB_EVENT_GID_CHANGE;
-		ib_dispatch_event(&event);
+		mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
@@ -1383,10 +1379,18 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 }

 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
-			  enum mlx4_dev_event event, int port)
+			  enum mlx4_dev_event event, unsigned long param)
 {
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
+	struct mlx4_eqe *eqe = NULL;
+	struct ib_event_work *ew;
+	int port = 0;
+
+	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
+		eqe = (struct mlx4_eqe *)param;
+	else
+		port = (u8)param;

	if (port > ibdev->num_ports)
		return;
@@ -1405,6 +1409,19 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

+	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
+		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
+		if (!ew) {
+			pr_err("failed to allocate memory for events work\n");
+			break;
+		}
+
+		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
+		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
+		ew->ib_dev = ibdev;
+		handle_port_mgmt_change_event(&ew->work);
+		return;
+
	default:
		return;
	}
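mlx4_ib_event() can be called in atomic context, so the PORT_MGMT_CHANGE branch copies the transient EQE into a GFP_ATOMIC allocation wrapped in a work item; here it then invokes the handler synchronously, but the same wrapper allows the work to be queued instead. A generic sketch of the copy-and-defer pattern (names are placeholders, not the driver's):

```c
#include <linux/slab.h>
#include <linux/workqueue.h>

struct event_work {			/* stand-in for struct ib_event_work */
	struct work_struct work;
	u32 payload;			/* stand-in for the copied mlx4_eqe */
};

static void event_worker(struct work_struct *work)
{
	struct event_work *ew = container_of(work, struct event_work, work);

	/* process ew->payload; the worker owns and frees the copy */
	kfree(ew);
}

static void on_async_event(u32 payload)	/* may run in atomic context */
{
	struct event_work *ew = kmalloc(sizeof(*ew), GFP_ATOMIC);

	if (!ew)
		return;			/* cannot sleep or retry here */

	INIT_WORK(&ew->work, event_worker);
	ew->payload = payload;		/* copy: the source event is transient */
	schedule_work(&ew->work);	/* or call event_worker(&ew->work) directly */
}
```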

drivers/infiniband/hw/mlx4/mlx4_ib.h

@@ -44,6 +44,16 @@
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>

+#define MLX4_IB_DRV_NAME	"mlx4_ib"
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt)	"<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__
+
+#define mlx4_ib_warn(ibdev, format, arg...) \
+	dev_warn((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg)
+
 enum {
	MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
	MLX4_IB_MAX_HEADROOM	 = 2048
@@ -214,6 +224,12 @@ struct mlx4_ib_dev {
	int eq_added;
 };

+struct ib_event_work {
+	struct work_struct	work;
+	struct mlx4_ib_dev	*ib_dev;
+	struct mlx4_eqe		ib_eqe;
+};
+
 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
 {
	return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
@@ -371,4 +387,7 @@ static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		    union ib_gid *gid);

+void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
+			    enum ib_event_type type);
+
 #endif /* MLX4_IB_H */

drivers/infiniband/hw/mlx4/qp.c

@@ -1335,11 +1335,21 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

-	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
+	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+		pr_debug("qpn 0x%x: invalid attribute mask specified "
+			 "for transition %d to %d. qp_type %d,"
+			 " attr_mask 0x%x\n",
+			 ibqp->qp_num, cur_state, new_state,
+			 ibqp->qp_type, attr_mask);
		goto out;
+	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
+		pr_debug("qpn 0x%x: invalid port number (%d) specified "
+			 "for transition %d to %d. qp_type %d\n",
+			 ibqp->qp_num, attr->port_num, cur_state,
+			 new_state, ibqp->qp_type);
		goto out;
	}
@@ -1350,17 +1360,30 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
	if (attr_mask & IB_QP_PKEY_INDEX) {
		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
-		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p])
+		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
+			pr_debug("qpn 0x%x: invalid pkey index (%d) specified "
+				 "for transition %d to %d. qp_type %d\n",
+				 ibqp->qp_num, attr->pkey_index, cur_state,
+				 new_state, ibqp->qp_type);
			goto out;
+		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
+		pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. "
+			 "Transition %d to %d. qp_type %d\n",
+			 ibqp->qp_num, attr->max_rd_atomic, cur_state,
+			 new_state, ibqp->qp_type);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
+		pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. "
+			 "Transition %d to %d. qp_type %d\n",
+			 ibqp->qp_num, attr->max_dest_rd_atomic, cur_state,
+			 new_state, ibqp->qp_type);
		goto out;
	}

drivers/infiniband/hw/mthca/mthca_qp.c

@@ -247,7 +247,8 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
-		mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
+		mthca_warn(dev, "Async event %d for bogus QP %08x\n",
+			   event_type, qpn);
		return;
	}
@@ -501,6 +502,7 @@ done:
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->cap	     = qp_attr->cap;
+	qp_init_attr->sq_sig_type    = qp->sq_policy;

 out_mailbox:
	mthca_free_mailbox(dev, mailbox);

drivers/infiniband/hw/ocrdma/ocrdma_main.c

@@ -202,8 +202,7 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
	return 0;
 }

-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) || \
-defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_VLAN_8021Q)

 static int ocrdma_inet6addr_event(struct notifier_block *notifier,
				   unsigned long event, void *ptr)
@@ -549,7 +548,7 @@ static struct ocrdma_driver ocrdma_drv = {

 static void ocrdma_unregister_inet6addr_notifier(void)
 {
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
	unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier);
 #endif
 }
@@ -558,7 +557,7 @@ static int __init ocrdma_init_module(void)
 {
	int status;

-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
	status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier);
	if (status)
		return status;

drivers/infiniband/hw/ocrdma/ocrdma_verbs.c

@@ -97,7 +97,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
		min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
	attr->max_srq = (dev->attr.max_qp - 1);
-	attr->max_srq_sge = attr->max_srq_sge;
+	attr->max_srq_sge = dev->attr.max_srq_sge;
	attr->max_srq_wr = dev->attr.max_rqe;
	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
	attr->max_fast_reg_page_list_len = 0;

drivers/infiniband/hw/qib/qib.h

@@ -1,8 +1,8 @@
 #ifndef _QIB_KERNEL_H
 #define _QIB_KERNEL_H
 /*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
+ * Copyright (c) 2012 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -519,6 +519,7 @@ struct qib_pportdata {
	struct qib_devdata *dd;
	struct qib_chippport_specific *cpspec; /* chip-specific per-port */
	struct kobject pport_kobj;
+	struct kobject pport_cc_kobj;
	struct kobject sl2vl_kobj;
	struct kobject diagc_kobj;
@@ -544,6 +545,7 @@ struct qib_pportdata {
	/* read mostly */
	struct qib_sdma_desc *sdma_descq;
+	struct workqueue_struct *qib_wq;
	struct qib_sdma_state sdma_state;
	dma_addr_t       sdma_descq_phys;
	volatile __le64 *sdma_head_dma; /* DMA'ed by chip */
@@ -637,6 +639,39 @@ struct qib_pportdata {
	struct timer_list led_override_timer;
	struct xmit_wait cong_stats;
	struct timer_list symerr_clear_timer;
+
+	/* Synchronize access between driver writes and sysfs reads */
+	spinlock_t cc_shadow_lock
+		____cacheline_aligned_in_smp;
+
+	/* Shadow copy of the congestion control table */
+	struct cc_table_shadow *ccti_entries_shadow;
+
+	/* Shadow copy of the congestion control entries */
+	struct ib_cc_congestion_setting_attr_shadow *congestion_entries_shadow;
+
+	/* List of congestion control table entries */
+	struct ib_cc_table_entry_shadow *ccti_entries;
+
+	/* 16 congestion entries with each entry corresponding to a SL */
+	struct ib_cc_congestion_entry_shadow *congestion_entries;
+
+	/* Total number of congestion control table entries */
+	u16 total_cct_entry;
+
+	/* Bit map identifying service level */
+	u16 cc_sl_control_map;
+
+	/* maximum congestion control table index */
+	u16 ccti_limit;
+
+	/* CA's max number of 64 entry units in the congestion control table */
+	u8 cc_max_table_entries;
+
+	/* Maximum number of congestion control entries that the agent expects
+	 * the manager to send.
+	 */
+	u8 cc_supported_table_entries;
 };

 /* Observers. Not to be taken lightly, possibly not to ship. */
@@ -1077,6 +1112,7 @@ extern u32 qib_cpulist_count;
 extern unsigned long *qib_cpulist;

 extern unsigned qib_wc_pat;
+extern unsigned qib_cc_table_size;
 int qib_init(struct qib_devdata *, int);
 int init_chip_wc_pat(struct qib_devdata *dd, u32);
 int qib_enable_wc(struct qib_devdata *dd);
@@ -1267,6 +1303,11 @@ int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
 /* ppd->sdma_lock should be locked before calling this. */
 int qib_sdma_make_progress(struct qib_pportdata *dd);

+static inline int qib_sdma_empty(const struct qib_pportdata *ppd)
+{
+	return ppd->sdma_descq_added == ppd->sdma_descq_removed;
+}
+
 /* must be called under qib_sdma_lock */
 static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
 {
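The new qib_sdma_empty() helper leans on the driver's two monotonically increasing counters, sdma_descq_added and sdma_descq_removed: their unsigned difference is the number of in-flight descriptors, and wraparound is harmless as long as both counters share a width. A generic sketch of the idiom, separate from the qib structures:

```c
/* Two free-running counters describe a ring without extra state. */
struct ring_state {
	u16 added;	/* bumped by the producer */
	u16 removed;	/* bumped by the consumer */
};

static inline u16 ring_inflight(const struct ring_state *r)
{
	return (u16)(r->added - r->removed);	/* wrap-safe in u16 */
}

static inline int ring_empty(const struct ring_state *r)
{
	return r->added == r->removed;
}
```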

drivers/infiniband/hw/qib/qib_diag.c

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2010 QLogic Corporation. All rights reserved.
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2012 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -53,6 +53,9 @@
 #include "qib.h"
 #include "qib_common.h"

+#undef pr_fmt
+#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
+
 /*
  * Each client that opens the diag device must read then write
  * offset 0, to prevent lossage from random cat or od. diag_state
@@ -598,8 +601,8 @@ static ssize_t qib_diagpkt_write(struct file *fp,
	}
	tmpbuf = vmalloc(plen);
	if (!tmpbuf) {
-		qib_devinfo(dd->pcidev, "Unable to allocate tmp buffer, "
-			 "failing\n");
+		qib_devinfo(dd->pcidev,
+			    "Unable to allocate tmp buffer, failing\n");
		ret = -ENOMEM;
		goto bail;
	}
@@ -693,7 +696,7 @@ int qib_register_observer(struct qib_devdata *dd,
	ret = -ENOMEM;
	olp = vmalloc(sizeof *olp);
	if (!olp) {
-		printk(KERN_ERR QIB_DRV_NAME ": vmalloc for observer failed\n");
+		pr_err("vmalloc for observer failed\n");
		goto bail;
	}
	if (olp) {

drivers/infiniband/hw/qib/qib_init.c

@@ -764,8 +764,9 @@ int qib_reset_device(int unit)
	qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
-		qib_devinfo(dd->pcidev, "Invalid unit number %u or "
-			    "not initialized or not present\n", unit);
+		qib_devinfo(dd->pcidev,
+			    "Invalid unit number %u or not initialized or not present\n",
+			    unit);
		ret = -ENXIO;
		goto bail;
	}
@@ -802,11 +803,13 @@ int qib_reset_device(int unit)
	else
		ret = -EAGAIN;
	if (ret)
-		qib_dev_err(dd, "Reinitialize unit %u after "
-			    "reset failed with %d\n", unit, ret);
+		qib_dev_err(dd,
+			    "Reinitialize unit %u after reset failed with %d\n",
+			    unit, ret);
	else
-		qib_devinfo(dd->pcidev, "Reinitialized unit %u after "
-			    "resetting\n", unit);
+		qib_devinfo(dd->pcidev,
+			    "Reinitialized unit %u after resetting\n",
+			    unit);

 bail:
	return ret;

drivers/infiniband/hw/qib/qib_eeprom.c

@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2012 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -160,10 +161,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
		if (oguid > bguid[7]) {
			if (bguid[6] == 0xff) {
				if (bguid[5] == 0xff) {
-					qib_dev_err(dd, "Can't set %s GUID"
-						    " from base, wraps to"
-						    " OUI!\n",
-						    qib_get_unit_name(t));
+					qib_dev_err(dd,
+						"Can't set %s GUID from base, wraps to OUI!\n",
+						qib_get_unit_name(t));
					dd->base_guid = 0;
					goto bail;
				}
@@ -182,8 +182,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
	len = sizeof(struct qib_flash);
	buf = vmalloc(len);
	if (!buf) {
-		qib_dev_err(dd, "Couldn't allocate memory to read %u "
-			    "bytes from eeprom for GUID\n", len);
+		qib_dev_err(dd,
+			"Couldn't allocate memory to read %u bytes from eeprom for GUID\n",
+			len);
		goto bail;
	}
@@ -201,23 +202,25 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
	csum = flash_csum(ifp, 0);
	if (csum != ifp->if_csum) {
-		qib_devinfo(dd->pcidev, "Bad I2C flash checksum: "
-			 "0x%x, not 0x%x\n", csum, ifp->if_csum);
+		qib_devinfo(dd->pcidev,
+			"Bad I2C flash checksum: 0x%x, not 0x%x\n",
+			csum, ifp->if_csum);
		goto done;
	}

	if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
	    *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
-		qib_dev_err(dd, "Invalid GUID %llx from flash; ignoring\n",
-			    *(unsigned long long *) ifp->if_guid);
+		qib_dev_err(dd,
+			"Invalid GUID %llx from flash; ignoring\n",
+			*(unsigned long long *) ifp->if_guid);
		/* don't allow GUID if all 0 or all 1's */
		goto done;
	}

	/* complain, but allow it */
	if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
-		qib_devinfo(dd->pcidev, "Warning, GUID %llx is "
-			 "default, probably not correct!\n",
-			 *(unsigned long long *) ifp->if_guid);
+		qib_devinfo(dd->pcidev,
+			"Warning, GUID %llx is default, probably not correct!\n",
+			*(unsigned long long *) ifp->if_guid);

	bguid = ifp->if_guid;
	if (!bguid[0] && !bguid[1] && !bguid[2]) {
@@ -260,8 +263,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
	memcpy(dd->serial, ifp->if_serial,
	       sizeof ifp->if_serial);
	if (!strstr(ifp->if_comment, "Tested successfully"))
-		qib_dev_err(dd, "Board SN %s did not pass functional "
-			    "test: %s\n", dd->serial, ifp->if_comment);
+		qib_dev_err(dd,
+			"Board SN %s did not pass functional test: %s\n",
+			dd->serial, ifp->if_comment);

	memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
	/*
@@ -323,8 +327,9 @@ int qib_update_eeprom_log(struct qib_devdata *dd)
	buf = vmalloc(len);
	ret = 1;
	if (!buf) {
-		qib_dev_err(dd, "Couldn't allocate memory to read %u "
-			    "bytes from eeprom for logging\n", len);
+		qib_dev_err(dd,
+			"Couldn't allocate memory to read %u bytes from eeprom for logging\n",
+			len);
		goto bail;
	}

drivers/infiniband/hw/qib/qib_file_ops.c

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
+ * Copyright (c) 2012 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -49,6 +49,9 @@
 #include "qib_common.h"
 #include "qib_user_sdma.h"

+#undef pr_fmt
+#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
+
 static int qib_open(struct inode *, struct file *);
 static int qib_close(struct inode *, struct file *);
 static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
@@ -315,8 +318,9 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
	}
	if (cnt > tidcnt) {
		/* make sure it all fits in tid_pg_list */
-		qib_devinfo(dd->pcidev, "Process tried to allocate %u "
-			 "TIDs, only trying max (%u)\n", cnt, tidcnt);
+		qib_devinfo(dd->pcidev,
+			"Process tried to allocate %u TIDs, only trying max (%u)\n",
+			cnt, tidcnt);
		cnt = tidcnt;
	}
	pagep = (struct page **) rcd->tid_pg_list;
@@ -750,9 +754,9 @@ static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			      len, vma->vm_page_prot);
	if (ret)
-		qib_devinfo(dd->pcidev, "%s ctxt%u mmap of %lx, %x "
-			 "bytes failed: %d\n", what, rcd->ctxt,
-			 pfn, len, ret);
+		qib_devinfo(dd->pcidev,
+			"%s ctxt%u mmap of %lx, %x bytes failed: %d\n",
+			what, rcd->ctxt, pfn, len, ret);
 bail:
	return ret;
 }
@@ -771,8 +775,9 @@ static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
	 */
	sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
	if ((vma->vm_end - vma->vm_start) > sz) {
-		qib_devinfo(dd->pcidev, "FAIL mmap userreg: reqlen "
-			 "%lx > PAGE\n", vma->vm_end - vma->vm_start);
+		qib_devinfo(dd->pcidev,
+			"FAIL mmap userreg: reqlen %lx > PAGE\n",
+			vma->vm_end - vma->vm_start);
		ret = -EFAULT;
	} else {
		phys = dd->physaddr + ureg;
@@ -802,8 +807,8 @@ static int mmap_piobufs(struct vm_area_struct *vma,
	 * for it.
	 */
	if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
-		qib_devinfo(dd->pcidev, "FAIL mmap piobufs: "
-			 "reqlen %lx > PAGE\n",
+		qib_devinfo(dd->pcidev,
+			"FAIL mmap piobufs: reqlen %lx > PAGE\n",
			 vma->vm_end - vma->vm_start);
		ret = -EINVAL;
		goto bail;
@@ -847,8 +852,8 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
	size = rcd->rcvegrbuf_size;
	total_size = rcd->rcvegrbuf_chunks * size;
	if ((vma->vm_end - vma->vm_start) > total_size) {
-		qib_devinfo(dd->pcidev, "FAIL on egr bufs: "
-			 "reqlen %lx > actual %lx\n",
+		qib_devinfo(dd->pcidev,
+			"FAIL on egr bufs: reqlen %lx > actual %lx\n",
			 vma->vm_end - vma->vm_start,
			 (unsigned long) total_size);
		ret = -EINVAL;
@@ -856,8 +861,9 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
	}

	if (vma->vm_flags & VM_WRITE) {
-		qib_devinfo(dd->pcidev, "Can't map eager buffers as "
-			 "writable (flags=%lx)\n", vma->vm_flags);
+		qib_devinfo(dd->pcidev,
+			"Can't map eager buffers as writable (flags=%lx)\n",
+			vma->vm_flags);
		ret = -EPERM;
		goto bail;
	}
@@ -1270,8 +1276,8 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
			  GFP_KERNEL);

	if (!rcd || !ptmp) {
-		qib_dev_err(dd, "Unable to allocate ctxtdata "
-			    "memory, failing open\n");
+		qib_dev_err(dd,
+			"Unable to allocate ctxtdata memory, failing open\n");
		ret = -ENOMEM;
		goto bailerr;
	}
@@ -1560,10 +1566,10 @@ done_chk_sdma:
		} else if (weight == 1 &&
			test_bit(cpumask_first(tsk_cpus_allowed(current)),
				 qib_cpulist))
-			qib_devinfo(dd->pcidev, "%s PID %u affinity "
-				    "set to cpu %d; already allocated\n",
-				    current->comm, current->pid,
-				    cpumask_first(tsk_cpus_allowed(current)));
+			qib_devinfo(dd->pcidev,
+				"%s PID %u affinity set to cpu %d; already allocated\n",
+				current->comm, current->pid,
+				cpumask_first(tsk_cpus_allowed(current)));
	}

	mutex_unlock(&qib_mutex);
@@ -2185,8 +2191,7 @@ int qib_cdev_init(int minor, const char *name,
	cdev = cdev_alloc();
	if (!cdev) {
-		printk(KERN_ERR QIB_DRV_NAME
-		       ": Could not allocate cdev for minor %d, %s\n",
+		pr_err("Could not allocate cdev for minor %d, %s\n",
		       minor, name);
		ret = -ENOMEM;
		goto done;
@@ -2198,8 +2203,7 @@ int qib_cdev_init(int minor, const char *name,
	ret = cdev_add(cdev, dev, 1);
	if (ret < 0) {
-		printk(KERN_ERR QIB_DRV_NAME
-		       ": Could not add cdev for minor %d, %s (err %d)\n",
+		pr_err("Could not add cdev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}
@@ -2209,8 +2213,7 @@ int qib_cdev_init(int minor, const char *name,
		goto done;
	ret = PTR_ERR(device);
	device = NULL;
-	printk(KERN_ERR QIB_DRV_NAME ": Could not create "
-	       "device for minor %d, %s (err %d)\n",
+	pr_err("Could not create device for minor %d, %s (err %d)\n",
	       minor, name, -ret);
 err_cdev:
	cdev_del(cdev);
@@ -2245,16 +2248,14 @@ int __init qib_dev_init(void)
	ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
	if (ret < 0) {
-		printk(KERN_ERR QIB_DRV_NAME ": Could not allocate "
-		       "chrdev region (err %d)\n", -ret);
+		pr_err("Could not allocate chrdev region (err %d)\n", -ret);
		goto done;
	}

	qib_class = class_create(THIS_MODULE, "ipath");
	if (IS_ERR(qib_class)) {
		ret = PTR_ERR(qib_class);
-		printk(KERN_ERR QIB_DRV_NAME ": Could not create "
-		       "device class (err %d)\n", -ret);
+		pr_err("Could not create device class (err %d)\n", -ret);
		unregister_chrdev_region(qib_dev, QIB_NMINORS);
	}

drivers/infiniband/hw/qib/qib_fs.c

@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2012 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -382,7 +383,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
	ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
			  &simple_dir_operations, dd);
	if (ret) {
-		printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
+		pr_err("create_file(%s) failed: %d\n", unit, ret);
		goto bail;
	}
@@ -390,21 +391,21 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
	ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp,
			  &cntr_ops[0], dd);
	if (ret) {
-		printk(KERN_ERR "create_file(%s/counters) failed: %d\n",
+		pr_err("create_file(%s/counters) failed: %d\n",
		       unit, ret);
		goto bail;
	}
	ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp,
			  &cntr_ops[1], dd);
	if (ret) {
-		printk(KERN_ERR "create_file(%s/counter_names) failed: %d\n",
+		pr_err("create_file(%s/counter_names) failed: %d\n",
		       unit, ret);
		goto bail;
	}
	ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp,
			  &portcntr_ops[0], dd);
	if (ret) {
-		printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+		pr_err("create_file(%s/%s) failed: %d\n",
		       unit, "portcounter_names", ret);
		goto bail;
	}
@@ -416,7 +417,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
		ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
				  &portcntr_ops[i], dd);
		if (ret) {
-			printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+			pr_err("create_file(%s/%s) failed: %d\n",
			       unit, fname, ret);
			goto bail;
		}
@@ -426,7 +427,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
		ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
				  &qsfp_ops[i - 1], dd);
		if (ret) {
-			printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+			pr_err("create_file(%s/%s) failed: %d\n",
			       unit, fname, ret);
			goto bail;
		}
@@ -435,7 +436,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
	ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
			  &flash_ops, dd);
	if (ret)
-		printk(KERN_ERR "create_file(%s/flash) failed: %d\n",
+		pr_err("create_file(%s/flash) failed: %d\n",
		       unit, ret);
 bail:
	return ret;
@@ -486,7 +487,7 @@ static int remove_device_files(struct super_block *sb,
	if (IS_ERR(dir)) {
		ret = PTR_ERR(dir);
-		printk(KERN_ERR "Lookup of %s failed\n", unit);
+		pr_err("Lookup of %s failed\n", unit);
		goto bail;
	}
@@ -532,7 +533,7 @@ static int qibfs_fill_super(struct super_block *sb, void *data, int silent)
	ret = simple_fill_super(sb, QIBFS_MAGIC, files);
	if (ret) {
-		printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
+		pr_err("simple_fill_super failed: %d\n", ret);
		goto bail;
	}

drivers/infiniband/hw/qib/qib_iba6120.c

@@ -753,8 +753,8 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
	if (!hwerrs)
		return;
	if (hwerrs == ~0ULL) {
-		qib_dev_err(dd, "Read of hardware error status failed "
-			    "(all bits set); ignoring\n");
+		qib_dev_err(dd,
+			"Read of hardware error status failed (all bits set); ignoring\n");
		return;
	}
	qib_stats.sps_hwerrs++;
@@ -779,13 +779,14 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
	 * or it's occurred within the last 5 seconds.
	 */
	if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID))
-		qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
-			 "(cleared)\n", (unsigned long long) hwerrs);
+		qib_devinfo(dd->pcidev,
+			"Hardware error: hwerr=0x%llx (cleared)\n",
+			(unsigned long long) hwerrs);

	if (hwerrs & ~IB_HWE_BITSEXTANT)
-		qib_dev_err(dd, "hwerror interrupt with unknown errors "
-			    "%llx set\n", (unsigned long long)
-			    (hwerrs & ~IB_HWE_BITSEXTANT));
+		qib_dev_err(dd,
+			"hwerror interrupt with unknown errors %llx set\n",
+			(unsigned long long)(hwerrs & ~IB_HWE_BITSEXTANT));

	ctrl = qib_read_kreg32(dd, kr_control);
	if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
@@ -815,8 +816,9 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
		isfatal = 1;
-		strlcat(msg, "[Memory BIST test failed, InfiniPath hardware"
-			" unusable]", msgl);
+		strlcat(msg,
+			"[Memory BIST test failed, InfiniPath hardware unusable]",
+			msgl);
		/* ignore from now on, so disable until driver reloaded */
		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
@@ -868,8 +870,9 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
		*msg = 0; /* recovered from all of them */

	if (isfatal && !dd->diag_client) {
-		qib_dev_err(dd, "Fatal Hardware Error, no longer"
-			    " usable, SN %.16s\n", dd->serial);
+		qib_dev_err(dd,
+			"Fatal Hardware Error, no longer usable, SN %.16s\n",
+			dd->serial);
		/*
		 * for /sys status file and user programs to print; if no
		 * trailing brace is copied, we'll know it was truncated.
@@ -1017,9 +1020,9 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
			qib_inc_eeprom_err(dd, log_idx, 1);

	if (errs & ~IB_E_BITSEXTANT)
-		qib_dev_err(dd, "error interrupt with unknown errors "
-			    "%llx set\n",
-			    (unsigned long long) (errs & ~IB_E_BITSEXTANT));
+		qib_dev_err(dd,
+			"error interrupt with unknown errors %llx set\n",
+			(unsigned long long) (errs & ~IB_E_BITSEXTANT));

	if (errs & E_SUM_ERRS) {
		qib_disarm_6120_senderrbufs(ppd);
@@ -1089,8 +1092,8 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
	}

	if (errs & ERR_MASK(ResetNegated)) {
-		qib_dev_err(dd, "Got reset, requires re-init "
-			    "(unload and reload driver)\n");
+		qib_dev_err(dd,
+			"Got reset, requires re-init (unload and reload driver)\n");
		dd->flags &= ~QIB_INITTED;  /* needs re-init */
		/* mark as having had error */
		*dd->devstatusp |= QIB_STATUS_HWERROR;
@@ -1541,8 +1544,9 @@ static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat)
		qib_stats.sps_errints++;
		estat = qib_read_kreg64(dd, kr_errstatus);
		if (!estat)
-			qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
-				 "but no error bits set!\n", istat);
+			qib_devinfo(dd->pcidev,
+				"error interrupt (%Lx), but no error bits set!\n",
+				istat);
		handle_6120_errors(dd, estat);
	}
@@ -1715,16 +1719,16 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd)
	}

	if (!dd->cspec->irq)
-		qib_dev_err(dd, "irq is 0, BIOS error?  Interrupts won't "
-			    "work\n");
+		qib_dev_err(dd,
+			"irq is 0, BIOS error?  Interrupts won't work\n");
	else {
		int ret;
		ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
				  QIB_DRV_NAME, dd);
		if (ret)
-			qib_dev_err(dd, "Couldn't setup interrupt "
-				    "(irq=%d): %d\n", dd->cspec->irq,
-				    ret);
+			qib_dev_err(dd,
+				"Couldn't setup interrupt (irq=%d): %d\n",
+				dd->cspec->irq, ret);
	}
 }
@@ -1759,8 +1763,9 @@ static void pe_boardname(struct qib_devdata *dd)
	snprintf(dd->boardname, namelen, "%s", n);

	if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
-		qib_dev_err(dd, "Unsupported InfiniPath hardware revision "
-			    "%u.%u!\n", dd->majrev, dd->minrev);
+		qib_dev_err(dd,
+			"Unsupported InfiniPath hardware revision %u.%u!\n",
+			dd->majrev, dd->minrev);

	snprintf(dd->boardversion, sizeof(dd->boardversion),
		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
@@ -1833,8 +1838,8 @@ static int qib_6120_setup_reset(struct qib_devdata *dd)
 bail:
	if (ret) {
		if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
-			qib_dev_err(dd, "Reset failed to setup PCIe or "
-				    "interrupts; continuing anyway\n");
+			qib_dev_err(dd,
+				"Reset failed to setup PCIe or interrupts; continuing anyway\n");
		/* clear the reset error, init error/hwerror mask */
		qib_6120_init_hwerrors(dd);
		/* for Rev2 error interrupts; nop for rev 1 */
@@ -1876,8 +1881,9 @@ static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
	}
	pa >>= 11;
	if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
-		qib_dev_err(dd, "Physical page address 0x%lx "
-			    "larger than supported\n", pa);
+		qib_dev_err(dd,
+			"Physical page address 0x%lx larger than supported\n",
+			pa);
		return;
	}
@@ -1941,8 +1947,9 @@ static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
	}
	pa >>= 11;
	if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
-		qib_dev_err(dd, "Physical page address 0x%lx "
-			    "larger than supported\n", pa);
+		qib_dev_err(dd,
+			"Physical page address 0x%lx larger than supported\n",
+			pa);
		return;
	}
@@ -2928,8 +2935,9 @@ static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
			 ppd->dd->unit, ppd->port);
	} else if (!strncmp(what, "off", 3)) {
		ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
-		qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
-			 "(normal)\n", ppd->dd->unit, ppd->port);
+		qib_devinfo(ppd->dd->pcidev,
+			"Disabling IB%u:%u IBC loopback (normal)\n",
+			ppd->dd->unit, ppd->port);
	} else
		ret = -EINVAL;
	if (!ret) {
@@ -3186,11 +3194,10 @@ static int qib_late_6120_initreg(struct qib_devdata *dd)
	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
	if (val != dd->pioavailregs_phys) {
-		qib_dev_err(dd, "Catastrophic software error, "
-			    "SendPIOAvailAddr written as %lx, "
-			    "read back as %llx\n",
-			    (unsigned long) dd->pioavailregs_phys,
-			    (unsigned long long) val);
+		qib_dev_err(dd,
+			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
+			(unsigned long) dd->pioavailregs_phys,
+			(unsigned long long) val);
		ret = -EINVAL;
	}
	return ret;
@@ -3218,8 +3225,8 @@ static int init_6120_variables(struct qib_devdata *dd)
	dd->revision = readq(&dd->kregbase[kr_revision]);
	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
-		qib_dev_err(dd, "Revision register read failure, "
-			    "giving up initialization\n");
+		qib_dev_err(dd,
+			"Revision register read failure, giving up initialization\n");
		ret = -ENODEV;
		goto bail;
	}
@@ -3551,8 +3558,8 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
		goto bail;

	if (qib_pcie_params(dd, 8, NULL, NULL))
-		qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
-			    "continuing anyway\n");
+		qib_dev_err(dd,
+			"Failed to setup PCIe or interrupts; continuing anyway\n");
	dd->cspec->irq = pdev->irq; /* save IRQ */

	/* clear diagctrl register, in case diags were running and crashed */

drivers/infiniband/hw/qib/qib_iba7220.c

@@ -1111,9 +1111,9 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
 		sdma_7220_errors(ppd, errs);
 
 	if (errs & ~IB_E_BITSEXTANT)
-		qib_dev_err(dd, "error interrupt with unknown errors "
-			    "%llx set\n", (unsigned long long)
-			    (errs & ~IB_E_BITSEXTANT));
+		qib_dev_err(dd,
+			"error interrupt with unknown errors %llx set\n",
+			(unsigned long long) (errs & ~IB_E_BITSEXTANT));
 
 	if (errs & E_SUM_ERRS) {
 		qib_disarm_7220_senderrbufs(ppd);
@@ -1192,8 +1192,8 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
 	}
 
 	if (errs & ERR_MASK(ResetNegated)) {
-		qib_dev_err(dd, "Got reset, requires re-init "
-			    "(unload and reload driver)\n");
+		qib_dev_err(dd,
+			"Got reset, requires re-init (unload and reload driver)\n");
 		dd->flags &= ~QIB_INITTED;  /* needs re-init */
 		/* mark as having had error */
 		*dd->devstatusp |= QIB_STATUS_HWERROR;
@@ -1305,8 +1305,8 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
 	if (!hwerrs)
 		goto bail;
 	if (hwerrs == ~0ULL) {
-		qib_dev_err(dd, "Read of hardware error status failed "
-			    "(all bits set); ignoring\n");
+		qib_dev_err(dd,
+			"Read of hardware error status failed (all bits set); ignoring\n");
 		goto bail;
 	}
 	qib_stats.sps_hwerrs++;
@@ -1329,13 +1329,14 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
 			qib_inc_eeprom_err(dd, log_idx, 1);
 	if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
 		       RXE_PARITY))
-		qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
-			 "(cleared)\n", (unsigned long long) hwerrs);
+		qib_devinfo(dd->pcidev,
+			"Hardware error: hwerr=0x%llx (cleared)\n",
+			(unsigned long long) hwerrs);
 
 	if (hwerrs & ~IB_HWE_BITSEXTANT)
-		qib_dev_err(dd, "hwerror interrupt with unknown errors "
-			    "%llx set\n", (unsigned long long)
-			    (hwerrs & ~IB_HWE_BITSEXTANT));
+		qib_dev_err(dd,
+			"hwerror interrupt with unknown errors %llx set\n",
+			(unsigned long long) (hwerrs & ~IB_HWE_BITSEXTANT));
 
 	if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR)
 		qib_sd7220_clr_ibpar(dd);
@@ -1362,8 +1363,9 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
 
 	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
 		isfatal = 1;
-		strlcat(msg, "[Memory BIST test failed, "
-			"InfiniPath hardware unusable]", msgl);
+		strlcat(msg,
+			"[Memory BIST test failed, InfiniPath hardware unusable]",
+			msgl);
 		/* ignore from now on, so disable until driver reloaded */
 		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
 		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
@@ -1409,8 +1411,9 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
 		qib_dev_err(dd, "%s hardware error\n", msg);
 
 	if (isfatal && !dd->diag_client) {
-		qib_dev_err(dd, "Fatal Hardware Error, no longer"
-			    " usable, SN %.16s\n", dd->serial);
+		qib_dev_err(dd,
+			"Fatal Hardware Error, no longer usable, SN %.16s\n",
+			dd->serial);
 		/*
 		 * For /sys status file and user programs to print; if no
 		 * trailing brace is copied, we'll know it was truncated.
@@ -1918,8 +1921,9 @@ static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat)
 		qib_stats.sps_errints++;
 		estat = qib_read_kreg64(dd, kr_errstatus);
 		if (!estat)
-			qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
-				 "but no error bits set!\n", istat);
+			qib_devinfo(dd->pcidev,
+				"error interrupt (%Lx), but no error bits set!\n",
+				istat);
 		else
 			handle_7220_errors(dd, estat);
 	}
@@ -2023,17 +2027,18 @@ bail:
 static void qib_setup_7220_interrupt(struct qib_devdata *dd)
 {
 	if (!dd->cspec->irq)
-		qib_dev_err(dd, "irq is 0, BIOS error?  Interrupts won't "
-			    "work\n");
+		qib_dev_err(dd,
+			"irq is 0, BIOS error?  Interrupts won't work\n");
 	else {
 		int ret = request_irq(dd->cspec->irq, qib_7220intr,
 				      dd->msi_lo ? 0 : IRQF_SHARED,
 				      QIB_DRV_NAME, dd);
 
 		if (ret)
-			qib_dev_err(dd, "Couldn't setup %s interrupt "
-				    "(irq=%d): %d\n", dd->msi_lo ?
-				    "MSI" : "INTx", dd->cspec->irq, ret);
+			qib_dev_err(dd,
+				"Couldn't setup %s interrupt (irq=%d): %d\n",
+				dd->msi_lo ? "MSI" : "INTx",
+				dd->cspec->irq, ret);
 	}
 }
@@ -2072,9 +2077,9 @@ static void qib_7220_boardname(struct qib_devdata *dd)
 	snprintf(dd->boardname, namelen, "%s", n);
 
 	if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
-		qib_dev_err(dd, "Unsupported InfiniPath hardware "
-			    "revision %u.%u!\n",
-			    dd->majrev, dd->minrev);
+		qib_dev_err(dd,
+			"Unsupported InfiniPath hardware revision %u.%u!\n",
+			dd->majrev, dd->minrev);
 
 	snprintf(dd->boardversion, sizeof(dd->boardversion),
 		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
@@ -2146,8 +2151,8 @@ static int qib_setup_7220_reset(struct qib_devdata *dd)
 bail:
 	if (ret) {
 		if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
-			qib_dev_err(dd, "Reset failed to setup PCIe or "
-				    "interrupts; continuing anyway\n");
+			qib_dev_err(dd,
+				"Reset failed to setup PCIe or interrupts; continuing anyway\n");
 
 		/* hold IBC in reset, no sends, etc till later */
 		qib_write_kreg(dd, kr_control, 0ULL);
@@ -2187,8 +2192,9 @@ static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
 		return;
 	}
 	if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
-		qib_dev_err(dd, "Physical page address 0x%lx "
-			    "larger than supported\n", pa);
+		qib_dev_err(dd,
+			"Physical page address 0x%lx larger than supported\n",
+			pa);
 		return;
 	}
@@ -2706,8 +2712,9 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
 		ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
 		/* enable heart beat again */
 		val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
-		qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
-			    "(normal)\n", ppd->dd->unit, ppd->port);
+		qib_devinfo(ppd->dd->pcidev,
+			"Disabling IB%u:%u IBC loopback (normal)\n",
+			ppd->dd->unit, ppd->port);
 	} else
 		ret = -EINVAL;
 	if (!ret) {
@@ -3307,8 +3314,8 @@ static int qib_7220_intr_fallback(struct qib_devdata *dd)
 	if (!dd->msi_lo)
 		return 0;
 
-	qib_devinfo(dd->pcidev, "MSI interrupt not detected,"
-		 " trying INTx interrupts\n");
+	qib_devinfo(dd->pcidev,
+		"MSI interrupt not detected, trying INTx interrupts\n");
 	qib_7220_free_irq(dd);
 	qib_enable_intx(dd->pcidev);
 	/*
@@ -3980,11 +3987,10 @@ static int qib_late_7220_initreg(struct qib_devdata *dd)
 	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
 	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
 	if (val != dd->pioavailregs_phys) {
-		qib_dev_err(dd, "Catastrophic software error, "
-			    "SendPIOAvailAddr written as %lx, "
-			    "read back as %llx\n",
-			    (unsigned long) dd->pioavailregs_phys,
-			    (unsigned long long) val);
+		qib_dev_err(dd,
+			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
+			(unsigned long) dd->pioavailregs_phys,
+			(unsigned long long) val);
 		ret = -EINVAL;
 	}
 
 	qib_register_observer(dd, &sendctrl_observer);
@@ -4014,8 +4020,8 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
 	dd->revision = readq(&dd->kregbase[kr_revision]);
 	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
-		qib_dev_err(dd, "Revision register read failure, "
-			    "giving up initialization\n");
+		qib_dev_err(dd,
+			"Revision register read failure, giving up initialization\n");
 		ret = -ENODEV;
 		goto bail;
 	}
@@ -4613,8 +4619,8 @@ struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
 		break;
 	}
 	if (qib_pcie_params(dd, minwidth, NULL, NULL))
-		qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
-			    "continuing anyway\n");
+		qib_dev_err(dd,
+			"Failed to setup PCIe or interrupts; continuing anyway\n");
 
 	/* save IRQ for possible later use */
 	dd->cspec->irq = pdev->irq;

View File

@ -1,5 +1,6 @@
/* /*
* Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved. * Copyright (c) 2012 Intel Corporation. All rights reserved.
* Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
* *
* This software is available to you under a choice of one of two * This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU * licenses. You may choose to be licensed under the terms of the GNU
@ -49,6 +50,10 @@
#include "qib_qsfp.h" #include "qib_qsfp.h"
#include "qib_mad.h" #include "qib_mad.h"
#include "qib_verbs.h"
#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME " " fmt
static void qib_setup_7322_setextled(struct qib_pportdata *, u32); static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t); static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
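The pr_fmt override above is what lets the later printk() to pr_info()/pr_err() conversions in this file drop their explicit QIB_DRV_NAME prefixes: pr_info(fmt, ...) expands to printk(KERN_INFO pr_fmt(fmt), ...), so the driver name is prepended at compile time. A minimal user-space sketch of the same macro trick (printf stands in for printk here; the real pr_info comes from <linux/printk.h>):

#include <stdio.h>

#define QIB_DRV_NAME "ib_qib"

/* mirror of the kernel idiom: every pr_info() in this translation
 * unit picks up the driver-name prefix automatically */
#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME " " fmt
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* prints "ib_qib IB1:1 Turning LOS on" */
	pr_info("IB%u:%u Turning LOS on\n", 1, 1);
	return 0;
}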
@@ -1575,8 +1580,8 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
 	qib_stats.sps_errints++;
 	errs = qib_read_kreg64(dd, kr_errstatus);
 	if (!errs) {
-		qib_devinfo(dd->pcidev, "device error interrupt, "
-			 "but no error bits set!\n");
+		qib_devinfo(dd->pcidev,
+			"device error interrupt, but no error bits set!\n");
 		goto done;
 	}
@@ -1622,8 +1627,8 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
 	if (errs & QIB_E_RESET) {
 		int pidx;
 
-		qib_dev_err(dd, "Got reset, requires re-init "
-			    "(unload and reload driver)\n");
+		qib_dev_err(dd,
+			"Got reset, requires re-init (unload and reload driver)\n");
 		dd->flags &= ~QIB_INITTED;  /* needs re-init */
 		/* mark as having had error */
 		*dd->devstatusp |= QIB_STATUS_HWERROR;
@@ -1760,9 +1765,9 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
 				ppd->dd->cspec->r1 ?
 				QDR_STATIC_ADAPT_DOWN_R1 :
 				QDR_STATIC_ADAPT_DOWN);
-			printk(KERN_INFO QIB_DRV_NAME
-				" IB%u:%u re-enabled QDR adaptation "
-				"ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
+			pr_info(
+				"IB%u:%u re-enabled QDR adaptation ibclt %x\n",
+				ppd->dd->unit, ppd->port, ibclt);
 		}
 	}
 }
@@ -1804,9 +1809,9 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
 		if (!*msg)
 			snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
 				 "no others");
-		qib_dev_porterr(dd, ppd->port, "error interrupt with unknown"
-				" errors 0x%016Lx set (and %s)\n",
-				(errs & ~QIB_E_P_BITSEXTANT), msg);
+		qib_dev_porterr(dd, ppd->port,
+			"error interrupt with unknown errors 0x%016Lx set (and %s)\n",
+			(errs & ~QIB_E_P_BITSEXTANT), msg);
 		*msg = '\0';
 	}
@@ -2024,8 +2029,8 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
 	if (!hwerrs)
 		goto bail;
 	if (hwerrs == ~0ULL) {
-		qib_dev_err(dd, "Read of hardware error status failed "
-			    "(all bits set); ignoring\n");
+		qib_dev_err(dd,
+			"Read of hardware error status failed (all bits set); ignoring\n");
 		goto bail;
 	}
 	qib_stats.sps_hwerrs++;
@@ -2039,8 +2044,9 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
 	/* no EEPROM logging, yet */
 
 	if (hwerrs)
-		qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
-			 "(cleared)\n", (unsigned long long) hwerrs);
+		qib_devinfo(dd->pcidev,
+			"Hardware error: hwerr=0x%llx (cleared)\n",
+			(unsigned long long) hwerrs);
 
 	ctrl = qib_read_kreg32(dd, kr_control);
 	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
@@ -2064,8 +2070,9 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
 	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
 		isfatal = 1;
-		strlcpy(msg, "[Memory BIST test failed, "
-			"InfiniPath hardware unusable]", msgl);
+		strlcpy(msg,
+			"[Memory BIST test failed, InfiniPath hardware unusable]",
+			msgl);
 		/* ignore from now on, so disable until driver reloaded */
 		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
 		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
@@ -2078,8 +2085,9 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
 		qib_dev_err(dd, "%s hardware error\n", msg);
 
 	if (isfatal && !dd->diag_client) {
-		qib_dev_err(dd, "Fatal Hardware Error, no longer"
-			    " usable, SN %.16s\n", dd->serial);
+		qib_dev_err(dd,
+			"Fatal Hardware Error, no longer usable, SN %.16s\n",
+			dd->serial);
 		/*
 		 * for /sys status file and user programs to print; if no
 		 * trailing brace is copied, we'll know it was truncated.
@@ -2667,8 +2675,9 @@ static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
 	char msg[128];
 
 	kills = istat & ~QIB_I_BITSEXTANT;
-	qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx:"
-		    " %s\n", (unsigned long long) kills, msg);
+	qib_dev_err(dd,
+		"Clearing reserved interrupt(s) 0x%016llx: %s\n",
+		(unsigned long long) kills, msg);
 	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
 }
@@ -3101,16 +3110,16 @@ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
 		/* Try to get INTx interrupt */
 try_intx:
 		if (!dd->pcidev->irq) {
-			qib_dev_err(dd, "irq is 0, BIOS error?  "
-				    "Interrupts won't work\n");
+			qib_dev_err(dd,
+				"irq is 0, BIOS error?  Interrupts won't work\n");
 			goto bail;
 		}
 		ret = request_irq(dd->pcidev->irq, qib_7322intr,
 				  IRQF_SHARED, QIB_DRV_NAME, dd);
 		if (ret) {
-			qib_dev_err(dd, "Couldn't setup INTx "
-				    "interrupt (irq=%d): %d\n",
-				    dd->pcidev->irq, ret);
+			qib_dev_err(dd,
+				"Couldn't setup INTx interrupt (irq=%d): %d\n",
+				dd->pcidev->irq, ret);
 			goto bail;
 		}
 		dd->cspec->irq = dd->pcidev->irq;
@@ -3185,8 +3194,9 @@ try_intx:
 			 * Shouldn't happen since the enable said we could
 			 * have as many as we are trying to setup here.
 			 */
-			qib_dev_err(dd, "Couldn't setup MSIx "
-				    "interrupt (vec=%d, irq=%d): %d\n", msixnum,
+			qib_dev_err(dd,
+				"Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
+				msixnum,
 				dd->cspec->msix_entries[msixnum].msix.vector,
 				ret);
 			qib_7322_nomsix(dd);
@@ -3305,8 +3315,9 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
 		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
 
 	if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
-		qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode"
-			    " by module parameter\n", dd->unit);
+		qib_devinfo(dd->pcidev,
+			"IB%u: Forced to single port mode by module parameter\n",
+			dd->unit);
 		features &= PORT_SPD_CAP;
 	}
@@ -3400,8 +3411,8 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
 		if (val == dd->revision)
 			break;
 		if (i == 5) {
-			qib_dev_err(dd, "Failed to initialize after reset, "
-				    "unusable\n");
+			qib_dev_err(dd,
+				"Failed to initialize after reset, unusable\n");
 			ret = 0;
 			goto  bail;
 		}
@@ -3432,8 +3443,8 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
 	if (qib_pcie_params(dd, dd->lbus_width,
 			    &dd->cspec->num_msix_entries,
 			    dd->cspec->msix_entries))
-		qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; "
-			    "continuing anyway\n");
+		qib_dev_err(dd,
+			"Reset failed to setup PCIe or interrupts; continuing anyway\n");
 
 	qib_setup_7322_interrupt(dd, 1);
@@ -3474,8 +3485,9 @@ static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
 		return;
 	}
 	if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
-		qib_dev_err(dd, "Physical page address 0x%lx "
-			    "larger than supported\n", pa);
+		qib_dev_err(dd,
+			"Physical page address 0x%lx larger than supported\n",
+			pa);
 		return;
 	}
@@ -4029,8 +4041,9 @@ static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
 			Loopback);
 		/* enable heart beat again */
 		val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
-		qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
-			    "(normal)\n", ppd->dd->unit, ppd->port);
+		qib_devinfo(ppd->dd->pcidev,
+			"Disabling IB%u:%u IBC loopback (normal)\n",
+			ppd->dd->unit, ppd->port);
 	} else
 		ret = -EINVAL;
 	if (!ret) {
@@ -4714,8 +4727,8 @@ static void init_7322_cntrnames(struct qib_devdata *dd)
 		dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
 			* sizeof(u64), GFP_KERNEL);
 		if (!dd->pport[i].cpspec->portcntrs)
-			qib_dev_err(dd, "Failed allocation for"
-				    " portcounters\n");
+			qib_dev_err(dd,
+				"Failed allocation for portcounters\n");
 	}
 }
@@ -4865,8 +4878,8 @@ static int qib_7322_intr_fallback(struct qib_devdata *dd)
 	if (!dd->cspec->num_msix_entries)
 		return 0; /* already using INTx */
 
-	qib_devinfo(dd->pcidev, "MSIx interrupt not detected,"
-		 " trying INTx interrupts\n");
+	qib_devinfo(dd->pcidev,
+		"MSIx interrupt not detected, trying INTx interrupts\n");
 	qib_7322_nomsix(dd);
 	qib_enable_intx(dd->pcidev);
 	qib_setup_7322_interrupt(dd, 0);
@@ -5151,15 +5164,11 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
 		goto retry;
 
 	if (!ibp->smi_ah) {
-		struct ib_ah_attr attr;
 		struct ib_ah *ah;
 
-		memset(&attr, 0, sizeof attr);
-		attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE);
-		attr.port_num = ppd->port;
-		ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
+		ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
 		if (IS_ERR(ah))
-			ret = -EINVAL;
+			ret = PTR_ERR(ah);
 		else {
 			send_buf->ah = ah;
 			ibp->smi_ah = to_iah(ah);
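Note the error-handling change in this hunk: the old code flattened any ib_create_ah() failure to -EINVAL, while the new qib_create_qp0_ah() call site propagates the real errno with PTR_ERR(). A sketch of the ERR_PTR/IS_ERR/PTR_ERR idiom this relies on; make_thing() is a hypothetical producer, not a driver function:

#include <linux/err.h>

/* hypothetical producer: returns an object pointer or an encoded errno */
static struct ib_ah *make_thing(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);	/* errno packed into the pointer */
	return NULL;				/* placeholder for a real object */
}

static int consumer(void)
{
	struct ib_ah *ah = make_thing(1);

	if (IS_ERR(ah))
		return PTR_ERR(ah);	/* recovers -ENOMEM instead of a generic -EINVAL */
	return 0;
}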
@@ -5844,22 +5853,21 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
 {
 	struct qib_devdata *dd;
 	unsigned long val;
-	char *n;
+	int ret;
+
 	if (strlen(str) >= MAX_ATTEN_LEN) {
-		printk(KERN_INFO QIB_DRV_NAME " txselect_values string "
-		       "too long\n");
+		pr_info("txselect_values string too long\n");
 		return -ENOSPC;
 	}
-	val = simple_strtoul(str, &n, 0);
-	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+	ret = kstrtoul(str, 0, &val);
+	if (ret || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
 				TXDDS_MFG_SZ)) {
-		printk(KERN_INFO QIB_DRV_NAME
-		       "txselect_values must start with a number < %d\n",
+		pr_info("txselect_values must start with a number < %d\n",
 			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
-		return -EINVAL;
+		return ret ? ret : -EINVAL;
 	}
-	strcpy(txselect_list, str);
 
+	strcpy(txselect_list, str);
 	list_for_each_entry(dd, &qib_dev_list, list)
 		if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
 			set_no_qsfp_atten(dd, 1);
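The parsing change here is the part worth copying: simple_strtoul() forces the caller to compare end pointers to detect a bad string, while kstrtoul() rejects trailing junk and overflow itself and returns 0 or a negative errno, which is why the error path can simply propagate ret. A user-space stand-in showing the same contract (the real kstrtoul lives in the kernel; this sketch approximates it with strtoul and, unlike the real one, does not tolerate a trailing newline):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* user-space stand-in for the kernel's kstrtoul(): 0 on a clean
 * parse, -EINVAL on trailing junk, -ERANGE on overflow */
static int kstrtoul_like(const char *s, unsigned int base, unsigned long *res)
{
	char *end;

	errno = 0;
	*res = strtoul(s, &end, base);
	if (errno == ERANGE)
		return -ERANGE;
	if (end == s || *end != '\0')
		return -EINVAL;
	return 0;
}

int main(void)
{
	unsigned long val;

	printf("%d\n", kstrtoul_like("42", 0, &val));	/* 0, val == 42 */
	printf("%d\n", kstrtoul_like("42x", 0, &val));	/* -EINVAL */
	return 0;
}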
@@ -5882,11 +5890,10 @@ static int qib_late_7322_initreg(struct qib_devdata *dd)
 	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
 	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
 	if (val != dd->pioavailregs_phys) {
-		qib_dev_err(dd, "Catastrophic software error, "
-			    "SendPIOAvailAddr written as %lx, "
-			    "read back as %llx\n",
-			    (unsigned long) dd->pioavailregs_phys,
-			    (unsigned long long) val);
+		qib_dev_err(dd,
+			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
+			(unsigned long) dd->pioavailregs_phys,
+			(unsigned long long) val);
 		ret = -EINVAL;
 	}
@@ -6098,8 +6105,8 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 	dd->revision = readq(&dd->kregbase[kr_revision]);
 	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
-		qib_dev_err(dd, "Revision register read failure, "
-			    "giving up initialization\n");
+		qib_dev_err(dd,
+			"Revision register read failure, giving up initialization\n");
 		ret = -ENODEV;
 		goto bail;
 	}
@@ -6265,9 +6272,9 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 		 */
 		if (!(dd->flags & QIB_HAS_QSFP)) {
 			if (!IS_QMH(dd) && !IS_QME(dd))
-				qib_devinfo(dd->pcidev, "IB%u:%u: "
-					    "Unknown mezzanine card type\n",
-					    dd->unit, ppd->port);
+				qib_devinfo(dd->pcidev,
+					"IB%u:%u: Unknown mezzanine card type\n",
+					dd->unit, ppd->port);
 			cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
 			/*
 			 * Choose center value as default tx serdes setting
@@ -6922,8 +6929,8 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
 		dd->cspec->msix_entries[i].msix.entry = i;
 
 	if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
-		qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
-			    "continuing anyway\n");
+		qib_dev_err(dd,
+			"Failed to setup PCIe or interrupts; continuing anyway\n");
 	/* may be less than we wanted, if not enough available */
 	dd->cspec->num_msix_entries = tabsize;
@@ -7276,8 +7283,7 @@ static void find_best_ent(struct qib_pportdata *ppd,
 		 ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
 					TXDDS_MFG_SZ)) {
 		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
-		printk(KERN_INFO QIB_DRV_NAME
-			" IB%u:%u use idx %u into txdds_mfg\n",
+		pr_info("IB%u:%u use idx %u into txdds_mfg\n",
 			ppd->dd->unit, ppd->port, idx);
 		*sdr_dds = &txdds_extra_mfg[idx];
 		*ddr_dds = &txdds_extra_mfg[idx];
@@ -7432,11 +7438,11 @@ static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
 	u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
 
 	if (enable && !state) {
-		printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n",
+		pr_info("IB%u:%u Turning LOS on\n",
 			ppd->dd->unit, ppd->port);
 		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
 	} else if (!enable && state) {
-		printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n",
+		pr_info("IB%u:%u Turning LOS off\n",
 			ppd->dd->unit, ppd->port);
 		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
 	}
@@ -7672,8 +7678,7 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
 		}
 	}
 	if (chan_done) {
-		printk(KERN_INFO QIB_DRV_NAME
-			" Serdes %d calibration not done after .5 sec: 0x%x\n",
+		pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
 			IBSD(ppd->hw_pidx), chan_done);
 	} else {
 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
@@ -7681,9 +7686,8 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
 				      (chan + (chan >> 1)),
 				      25, 0, 0);
 			if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
-				printk(KERN_INFO QIB_DRV_NAME
-					" Serdes %d chan %d calibration "
-					"failed\n", IBSD(ppd->hw_pidx), chan);
+				pr_info("Serdes %d chan %d calibration failed\n",
+					IBSD(ppd->hw_pidx), chan);
 		}
 	}

View File

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
+ * Copyright (c) 2012 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -38,9 +38,14 @@
 #include <linux/delay.h>
 #include <linux/idr.h>
 #include <linux/module.h>
+#include <linux/printk.h>
 
 #include "qib.h"
 #include "qib_common.h"
+#include "qib_mad.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
 
 /*
  * min buffers we want to have per context, after driver
@@ -71,6 +76,9 @@ unsigned qib_n_krcv_queues;
 module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
 MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
 
+unsigned qib_cc_table_size;
+module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
+MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");
+
 /*
  * qib_wc_pat parameter:
  * 0 is WC via MTRR
@@ -120,8 +128,8 @@ int qib_create_ctxts(struct qib_devdata *dd)
 	 */
 	dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
 	if (!dd->rcd) {
-		qib_dev_err(dd, "Unable to allocate ctxtdata array, "
-			    "failing\n");
+		qib_dev_err(dd,
+			"Unable to allocate ctxtdata array, failing\n");
 		ret = -ENOMEM;
 		goto done;
 	}
@@ -137,8 +145,8 @@ int qib_create_ctxts(struct qib_devdata *dd)
 		ppd = dd->pport + (i % dd->num_pports);
 		rcd = qib_create_ctxtdata(ppd, i);
 		if (!rcd) {
-			qib_dev_err(dd, "Unable to allocate ctxtdata"
-				    " for Kernel ctxt, failing\n");
+			qib_dev_err(dd,
+				"Unable to allocate ctxtdata for Kernel ctxt, failing\n");
 			ret = -ENOMEM;
 			goto done;
 		}
@@ -199,6 +207,7 @@ struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt)
 void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
 			u8 hw_pidx, u8 port)
 {
+	int size;
+
 	ppd->dd = dd;
 	ppd->hw_pidx = hw_pidx;
 	ppd->port = port; /* IB port number, not index */
@@ -210,6 +219,83 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
 	init_timer(&ppd->symerr_clear_timer);
 	ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup;
 	ppd->symerr_clear_timer.data = (unsigned long)ppd;
+
+	ppd->qib_wq = NULL;
+
+	spin_lock_init(&ppd->cc_shadow_lock);
+
+	if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
+		goto bail;
+
+	ppd->cc_supported_table_entries = min(max_t(int, qib_cc_table_size,
+		IB_CCT_MIN_ENTRIES), IB_CCT_ENTRIES*IB_CC_TABLE_CAP_DEFAULT);
+
+	ppd->cc_max_table_entries =
+		ppd->cc_supported_table_entries/IB_CCT_ENTRIES;
+
+	size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry)
+		* IB_CCT_ENTRIES;
+	ppd->ccti_entries = kzalloc(size, GFP_KERNEL);
+	if (!ppd->ccti_entries) {
+		qib_dev_err(dd,
+			"failed to allocate congestion control table for port %d!\n",
+			port);
+		goto bail;
+	}
+
+	size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry);
+	ppd->congestion_entries = kzalloc(size, GFP_KERNEL);
+	if (!ppd->congestion_entries) {
+		qib_dev_err(dd,
+			"failed to allocate congestion setting list for port %d!\n",
+			port);
+		goto bail_1;
+	}
+
+	size = sizeof(struct cc_table_shadow);
+	ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL);
+	if (!ppd->ccti_entries_shadow) {
+		qib_dev_err(dd,
+			"failed to allocate shadow ccti list for port %d!\n",
+			port);
+		goto bail_2;
+	}
+
+	size = sizeof(struct ib_cc_congestion_setting_attr);
+	ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL);
+	if (!ppd->congestion_entries_shadow) {
+		qib_dev_err(dd,
+			"failed to allocate shadow congestion setting list for port %d!\n",
+			port);
+		goto bail_3;
+	}
+
+	return;
+
+bail_3:
+	kfree(ppd->ccti_entries_shadow);
+	ppd->ccti_entries_shadow = NULL;
+bail_2:
+	kfree(ppd->congestion_entries);
+	ppd->congestion_entries = NULL;
+bail_1:
+	kfree(ppd->ccti_entries);
+	ppd->ccti_entries = NULL;
+bail:
+	/* User is intentionally disabling the congestion control agent */
+	if (!qib_cc_table_size)
+		return;
+
+	if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) {
+		qib_cc_table_size = 0;
+		qib_dev_err(dd,
+			"Congestion Control table size %d less than minimum %d for port %d\n",
+			qib_cc_table_size, IB_CCT_MIN_ENTRIES, port);
+	}
+
+	qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n",
+		port);
+	return;
 }
 
 static int init_pioavailregs(struct qib_devdata *dd)
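The clamp above rounds any cc_table_size request into [IB_CCT_MIN_ENTRIES, IB_CCT_ENTRIES * IB_CC_TABLE_CAP_DEFAULT] and then divides by IB_CCT_ENTRIES to get the block count advertised to the SM. A standalone sketch of that arithmetic, using the constants the module-parameter description implies (128 minimum, 1984 maximum, 64 entries per block; treat the exact values as assumptions):

#include <stdio.h>

#define IB_CCT_ENTRIES		64
#define IB_CCT_MIN_ENTRIES	(IB_CCT_ENTRIES * 2)	/* 128 */
#define IB_CC_TABLE_CAP_DEFAULT	31			/* 31 * 64 = 1984 */

int main(void)
{
	unsigned qib_cc_table_size = 500;	/* example request */
	int supported, max_blocks;

	/* same clamp as qib_init_pportdata() */
	supported = qib_cc_table_size;
	if (supported < IB_CCT_MIN_ENTRIES)
		supported = IB_CCT_MIN_ENTRIES;
	if (supported > IB_CCT_ENTRIES * IB_CC_TABLE_CAP_DEFAULT)
		supported = IB_CCT_ENTRIES * IB_CC_TABLE_CAP_DEFAULT;
	max_blocks = supported / IB_CCT_ENTRIES;

	/* 500 -> 500 supported entries, 7 blocks advertised */
	printf("supported=%d blocks=%d\n", supported, max_blocks);
	return 0;
}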
@@ -221,8 +307,8 @@ static int init_pioavailregs(struct qib_devdata *dd)
 		&dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
 		GFP_KERNEL);
 	if (!dd->pioavailregs_dma) {
-		qib_dev_err(dd, "failed to allocate PIOavail reg area "
-			    "in memory\n");
+		qib_dev_err(dd,
+			"failed to allocate PIOavail reg area in memory\n");
 		ret = -ENOMEM;
 		goto done;
 	}
@@ -277,15 +363,15 @@ static void init_shadow_tids(struct qib_devdata *dd)
 	pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
 	if (!pages) {
-		qib_dev_err(dd, "failed to allocate shadow page * "
-			    "array, no expected sends!\n");
+		qib_dev_err(dd,
+			"failed to allocate shadow page * array, no expected sends!\n");
 		goto bail;
 	}
 
 	addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
 	if (!addrs) {
-		qib_dev_err(dd, "failed to allocate shadow dma handle "
-			    "array, no expected sends!\n");
+		qib_dev_err(dd,
+			"failed to allocate shadow dma handle array, no expected sends!\n");
 		goto bail_free;
 	}
@@ -309,13 +395,13 @@ static int loadtime_init(struct qib_devdata *dd)
 	if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
 	     QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
-		qib_dev_err(dd, "Driver only handles version %d, "
-			    "chip swversion is %d (%llx), failng\n",
+		qib_dev_err(dd,
+			"Driver only handles version %d, chip swversion is %d (%llx), failng\n",
 			    QIB_CHIP_SWVERSION,
 			    (int)(dd->revision >>
 				QLOGIC_IB_R_SOFTWARE_SHIFT) &
 			    QLOGIC_IB_R_SOFTWARE_MASK,
 			    (unsigned long long) dd->revision);
 		ret = -ENOSYS;
 		goto done;
 	}
@@ -419,8 +505,8 @@ static void verify_interrupt(unsigned long opaque)
 	 */
 	if (dd->int_counter == 0) {
 		if (!dd->f_intr_fallback(dd))
-			dev_err(&dd->pcidev->dev, "No interrupts detected, "
-				"not usable.\n");
+			dev_err(&dd->pcidev->dev,
+				"No interrupts detected, not usable.\n");
 		else /* re-arm the timer to see if fallback works */
 			mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
 	}
@@ -482,6 +568,41 @@ static void init_piobuf_state(struct qib_devdata *dd)
 	dd->f_initvl15_bufs(dd);
 }
 
+/**
+ * qib_create_workqueues - create per port workqueues
+ * @dd: the qlogic_ib device
+ */
+static int qib_create_workqueues(struct qib_devdata *dd)
+{
+	int pidx;
+	struct qib_pportdata *ppd;
+
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		ppd = dd->pport + pidx;
+		if (!ppd->qib_wq) {
+			char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */
+
+			snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
+				dd->unit, pidx);
+			ppd->qib_wq =
+				create_singlethread_workqueue(wq_name);
+			if (!ppd->qib_wq)
+				goto wq_error;
+		}
+	}
+	return 0;
+wq_error:
+	pr_err("create_singlethread_workqueue failed for port %d\n",
+		pidx + 1);
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		ppd = dd->pport + pidx;
+		if (ppd->qib_wq) {
+			destroy_workqueue(ppd->qib_wq);
+			ppd->qib_wq = NULL;
+		}
+	}
+	return -ENOMEM;
+}
+
 /**
  * qib_init - do the actual initialization sequence on the chip
  * @dd: the qlogic_ib device
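Each port therefore owns a single-threaded workqueue named qib<unit>_<port index> (the 8-byte wq_name budget in the comment is 3 for "qib", up to 2 digits, '_', 1 digit, and the NUL). A hedged sketch of how such a per-port queue would typically be used; the work item and handler below are invented for illustration and are not part of this driver:

#include <linux/workqueue.h>
#include "qib.h"	/* struct qib_pportdata; driver context assumed */

/* hypothetical per-port work item -- illustration only */
struct port_work {
	struct work_struct work;
	struct qib_pportdata *ppd;
};

static void port_work_fn(struct work_struct *work)
{
	struct port_work *pw = container_of(work, struct port_work, work);

	/* runs on this port's dedicated thread, so per-port work is serialized */
	(void)pw->ppd;
}

static void kick_port(struct qib_pportdata *ppd, struct port_work *pw)
{
	pw->ppd = ppd;
	INIT_WORK(&pw->work, port_work_fn);
	queue_work(ppd->qib_wq, &pw->work);	/* lands on the qib%d_%d queue */
}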
@@ -547,8 +668,8 @@ int qib_init(struct qib_devdata *dd, int reinit)
 			if (!lastfail)
 				lastfail = qib_setup_eagerbufs(rcd);
 			if (lastfail) {
-				qib_dev_err(dd, "failed to allocate kernel ctxt's "
-					    "rcvhdrq and/or egr bufs\n");
+				qib_dev_err(dd,
+					"failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
 				continue;
 			}
 		}
@@ -764,6 +885,11 @@ static void qib_shutdown_device(struct qib_devdata *dd)
 		 * We can't count on interrupts since we are stopping.
 		 */
 		dd->f_quiet_serdes(ppd);
+
+		if (ppd->qib_wq) {
+			destroy_workqueue(ppd->qib_wq);
+			ppd->qib_wq = NULL;
+		}
 	}
 
 	qib_update_eeprom_log(dd);
@@ -893,8 +1019,7 @@ static void qib_verify_pioperf(struct qib_devdata *dd)
 	/* 1 GiB/sec, slightly over IB SDR line rate */
 	if (lcnt < (emsecs * 1024U))
 		qib_dev_err(dd,
-			    "Performance problem: bandwidth to PIO buffers is "
-			    "only %u MiB/sec\n",
+			"Performance problem: bandwidth to PIO buffers is only %u MiB/sec\n",
 			    lcnt / (u32) emsecs);
 
 	preempt_enable();
@@ -967,8 +1092,8 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
 		if (qib_cpulist)
 			qib_cpulist_count = count;
 		else
-			qib_early_err(&pdev->dev, "Could not alloc cpulist "
-				      "info, cpu affinity might be wrong\n");
+			qib_early_err(&pdev->dev,
+				"Could not alloc cpulist info, cpu affinity might be wrong\n");
 	}
 
 bail:
@@ -1057,21 +1182,20 @@ static int __init qlogic_ib_init(void)
 	 */
 	idr_init(&qib_unit_table);
 	if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
-		printk(KERN_ERR QIB_DRV_NAME ": idr_pre_get() failed\n");
+		pr_err("idr_pre_get() failed\n");
 		ret = -ENOMEM;
 		goto bail_cq_wq;
 	}
 
 	ret = pci_register_driver(&qib_driver);
 	if (ret < 0) {
-		printk(KERN_ERR QIB_DRV_NAME
-		       ": Unable to register driver: error %d\n", -ret);
+		pr_err("Unable to register driver: error %d\n", -ret);
 		goto bail_unit;
 	}
 
 	/* not fatal if it doesn't work */
 	if (qib_init_qibfs())
-		printk(KERN_ERR QIB_DRV_NAME ": Unable to register ipathfs\n");
+		pr_err("Unable to register ipathfs\n");
 
 	goto bail; /* all OK */
 
 bail_unit:
@@ -1095,9 +1219,9 @@ static void __exit qlogic_ib_cleanup(void)
 	ret = qib_exit_qibfs();
 	if (ret)
-		printk(KERN_ERR QIB_DRV_NAME ": "
-			"Unable to cleanup counter filesystem: "
-			"error %d\n", -ret);
+		pr_err(
+			"Unable to cleanup counter filesystem: error %d\n",
+			-ret);
 
 	pci_unregister_driver(&qib_driver);
@@ -1121,10 +1245,24 @@ static void cleanup_device_data(struct qib_devdata *dd)
 	unsigned long flags;
 
 	/* users can't do anything more with chip */
-	for (pidx = 0; pidx < dd->num_pports; ++pidx)
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
 		if (dd->pport[pidx].statusp)
 			*dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;
 
+		spin_lock(&dd->pport[pidx].cc_shadow_lock);
+
+		kfree(dd->pport[pidx].congestion_entries);
+		dd->pport[pidx].congestion_entries = NULL;
+		kfree(dd->pport[pidx].ccti_entries);
+		dd->pport[pidx].ccti_entries = NULL;
+		kfree(dd->pport[pidx].ccti_entries_shadow);
+		dd->pport[pidx].ccti_entries_shadow = NULL;
+		kfree(dd->pport[pidx].congestion_entries_shadow);
+		dd->pport[pidx].congestion_entries_shadow = NULL;
+
+		spin_unlock(&dd->pport[pidx].cc_shadow_lock);
+	}
+
 	if (!qib_wc_pat)
 		qib_disable_wc(dd);
@@ -1223,9 +1361,9 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
 #ifdef CONFIG_PCI_MSI
 		dd = qib_init_iba6120_funcs(pdev, ent);
 #else
-		qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot "
-		      "work if CONFIG_PCI_MSI is not enabled\n",
-		      ent->device);
+		qib_early_err(&pdev->dev,
+			"QLogic PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
+			ent->device);
 		dd = ERR_PTR(-ENODEV);
 #endif
 		break;
@@ -1239,8 +1377,9 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
 		break;
 
 	default:
-		qib_early_err(&pdev->dev, "Failing on unknown QLogic "
-			      "deviceid 0x%x\n", ent->device);
+		qib_early_err(&pdev->dev,
+			"Failing on unknown QLogic deviceid 0x%x\n",
+			ent->device);
 		ret = -ENODEV;
 	}
@@ -1249,6 +1388,10 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
 	if (ret)
 		goto bail; /* error already printed */
 
+	ret = qib_create_workqueues(dd);
+	if (ret)
+		goto bail;
+
 	/* do the generic initialization */
 	initfail = qib_init(dd, 0);
@@ -1293,9 +1436,9 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
 	if (!qib_wc_pat) {
 		ret = qib_enable_wc(dd);
 		if (ret) {
-			qib_dev_err(dd, "Write combining not enabled "
-				    "(err %d): performance may be poor\n",
-				    -ret);
+			qib_dev_err(dd,
+				"Write combining not enabled (err %d): performance may be poor\n",
+				-ret);
 			ret = 0;
 		}
 	}
@@ -1361,9 +1504,9 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
 			gfp_flags | __GFP_COMP);
 
 		if (!rcd->rcvhdrq) {
-			qib_dev_err(dd, "attempt to allocate %d bytes "
-				    "for ctxt %u rcvhdrq failed\n",
-				    amt, rcd->ctxt);
+			qib_dev_err(dd,
+				"attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
+				amt, rcd->ctxt);
 			goto bail;
 		}
@@ -1392,8 +1535,9 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
 	return 0;
 
 bail_free:
-	qib_dev_err(dd, "attempt to allocate 1 page for ctxt %u "
-		    "rcvhdrqtailaddr failed\n", rcd->ctxt);
+	qib_dev_err(dd,
+		"attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
+		rcd->ctxt);
 	vfree(rcd->user_event_mask);
 	rcd->user_event_mask = NULL;
bail_free_hdrq:

View File

@@ -224,15 +224,15 @@ void qib_bad_intrstatus(struct qib_devdata *dd)
 	 * We print the message and disable interrupts, in hope of
 	 * having a better chance of debugging the problem.
 	 */
-	qib_dev_err(dd, "Read of chip interrupt status failed"
-		    " disabling interrupts\n");
+	qib_dev_err(dd,
+		"Read of chip interrupt status failed disabling interrupts\n");
 	if (allbits++) {
 		/* disable interrupt delivery, something is very wrong */
 		if (allbits == 2)
 			dd->f_set_intr_state(dd, 0);
 		if (allbits == 3) {
-			qib_dev_err(dd, "2nd bad interrupt status, "
-				    "unregistering interrupts\n");
+			qib_dev_err(dd,
+				"2nd bad interrupt status, unregistering interrupts\n");
 			dd->flags |= QIB_BADINTR;
 			dd->flags &= ~QIB_INITTED;
 			dd->f_free_irq(dd);

View File

@@ -35,21 +35,41 @@
 /**
  * qib_alloc_lkey - allocate an lkey
- * @rkt: lkey table in which to allocate the lkey
  * @mr: memory region that this lkey protects
+ * @dma_region: 0->normal key, 1->restricted DMA key
+ *
+ * Returns 0 if successful, otherwise returns -errno.
+ *
+ * Increments mr reference count as required.
+ *
+ * Sets the lkey field mr for non-dma regions.
  *
- * Returns 1 if successful, otherwise returns 0.
 */
-int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
+int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
 {
 	unsigned long flags;
 	u32 r;
 	u32 n;
-	int ret;
+	int ret = 0;
+	struct qib_ibdev *dev = to_idev(mr->pd->device);
+	struct qib_lkey_table *rkt = &dev->lk_table;
 
 	spin_lock_irqsave(&rkt->lock, flags);
+	/* special case for dma_mr lkey == 0 */
+	if (dma_region) {
+		struct qib_mregion *tmr;
+
+		tmr = rcu_dereference(dev->dma_mr);
+		if (!tmr) {
+			qib_get_mr(mr);
+			rcu_assign_pointer(dev->dma_mr, mr);
+			mr->lkey_published = 1;
+		}
+		goto success;
+	}
 
 	/* Find the next available LKEY */
 	r = rkt->next;
 	n = r;
@@ -57,11 +77,8 @@ int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
 		if (rkt->table[r] == NULL)
 			break;
 		r = (r + 1) & (rkt->max - 1);
-		if (r == n) {
-			spin_unlock_irqrestore(&rkt->lock, flags);
-			ret = 0;
+		if (r == n)
 			goto bail;
-		}
 	}
 	rkt->next = (r + 1) & (rkt->max - 1);
 	/*
@@ -76,57 +93,58 @@ int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
 		mr->lkey |= 1 << 8;
 		rkt->gen++;
 	}
-	rkt->table[r] = mr;
+	qib_get_mr(mr);
+	rcu_assign_pointer(rkt->table[r], mr);
+	mr->lkey_published = 1;
+success:
 	spin_unlock_irqrestore(&rkt->lock, flags);
-
-	ret = 1;
-bail:
+out:
 	return ret;
+bail:
+	spin_unlock_irqrestore(&rkt->lock, flags);
+	ret = -ENOMEM;
+	goto out;
 }
 
 /**
  * qib_free_lkey - free an lkey
- * @rkt: table from which to free the lkey
- * @lkey: lkey id to free
+ * @mr: mr to free from tables
 */
-int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr)
+void qib_free_lkey(struct qib_mregion *mr)
 {
 	unsigned long flags;
 	u32 lkey = mr->lkey;
 	u32 r;
-	int ret;
+	struct qib_ibdev *dev = to_idev(mr->pd->device);
+	struct qib_lkey_table *rkt = &dev->lk_table;
 
-	spin_lock_irqsave(&dev->lk_table.lock, flags);
-	if (lkey == 0) {
-		if (dev->dma_mr && dev->dma_mr == mr) {
-			ret = atomic_read(&dev->dma_mr->refcount);
-			if (!ret)
-				dev->dma_mr = NULL;
-		} else
-			ret = 0;
-	} else {
+	spin_lock_irqsave(&rkt->lock, flags);
+	if (!mr->lkey_published)
+		goto out;
+	if (lkey == 0)
+		rcu_assign_pointer(dev->dma_mr, NULL);
+	else {
 		r = lkey >> (32 - ib_qib_lkey_table_size);
-		ret = atomic_read(&dev->lk_table.table[r]->refcount);
-		if (!ret)
-			dev->lk_table.table[r] = NULL;
+		rcu_assign_pointer(rkt->table[r], NULL);
 	}
-	spin_unlock_irqrestore(&dev->lk_table.lock, flags);
-
-	if (ret)
-		ret = -EBUSY;
-	return ret;
+	qib_put_mr(mr);
+	mr->lkey_published = 0;
+out:
+	spin_unlock_irqrestore(&rkt->lock, flags);
 }
 
 /**
  * qib_lkey_ok - check IB SGE for validity and initialize
  * @rkt: table containing lkey to check SGE against
+ * @pd: protection domain
  * @isge: outgoing internal SGE
  * @sge: SGE to check
  * @acc: access flags
 *
  * Return 1 if valid and successful, otherwise returns 0.
 *
+ * increments the reference count upon success
+ *
  * Check the IB SGE for validity and initialize our internal version
  * of it.
 */
@@ -136,24 +154,25 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
 	struct qib_mregion *mr;
 	unsigned n, m;
 	size_t off;
-	unsigned long flags;
 
 	/*
 	 * We use LKEY == zero for kernel virtual addresses
 	 * (see qib_get_dma_mr and qib_dma.c).
 	 */
-	spin_lock_irqsave(&rkt->lock, flags);
+	rcu_read_lock();
 	if (sge->lkey == 0) {
 		struct qib_ibdev *dev = to_idev(pd->ibpd.device);
 
 		if (pd->user)
 			goto bail;
-		if (!dev->dma_mr)
+		mr = rcu_dereference(dev->dma_mr);
+		if (!mr)
 			goto bail;
-		atomic_inc(&dev->dma_mr->refcount);
-		spin_unlock_irqrestore(&rkt->lock, flags);
+		if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
+			goto bail;
+		rcu_read_unlock();
 
-		isge->mr = dev->dma_mr;
+		isge->mr = mr;
 		isge->vaddr = (void *) sge->addr;
 		isge->length = sge->length;
 		isge->sge_length = sge->length;
@@ -161,18 +180,18 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
 		isge->n = 0;
 		goto ok;
 	}
-	mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))];
-	if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
-		     mr->pd != &pd->ibpd))
+	mr = rcu_dereference(
+		rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]);
+	if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
 		goto bail;
 
 	off = sge->addr - mr->user_base;
-	if (unlikely(sge->addr < mr->user_base ||
-		     off + sge->length > mr->length ||
-		     (mr->access_flags & acc) != acc))
+	if (unlikely(sge->addr < mr->iova || off + sge->length > mr->length ||
+		     (mr->access_flags & acc) == 0))
 		goto bail;
-	atomic_inc(&mr->refcount);
-	spin_unlock_irqrestore(&rkt->lock, flags);
+	if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
+		goto bail;
+	rcu_read_unlock();
 
 	off += mr->offset;
 	if (mr->page_shift) {
@@ -208,20 +227,22 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
 ok:
 	return 1;
 bail:
-	spin_unlock_irqrestore(&rkt->lock, flags);
+	rcu_read_unlock();
 	return 0;
 }
 
 /**
  * qib_rkey_ok - check the IB virtual address, length, and RKEY
- * @dev: infiniband device
- * @ss: SGE state
+ * @qp: qp for validation
+ * @sge: SGE state
  * @len: length of data
  * @vaddr: virtual address to place data
  * @rkey: rkey to check
  * @acc: access flags
 *
 * Return 1 if successful, otherwise 0.
+ *
+ * increments the reference count upon success
 */
 int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 		u32 len, u64 vaddr, u32 rkey, int acc)
@@ -230,25 +251,26 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 	struct qib_mregion *mr;
 	unsigned n, m;
 	size_t off;
-	unsigned long flags;
 
 	/*
 	 * We use RKEY == zero for kernel virtual addresses
 	 * (see qib_get_dma_mr and qib_dma.c).
 	 */
-	spin_lock_irqsave(&rkt->lock, flags);
+	rcu_read_lock();
 	if (rkey == 0) {
 		struct qib_pd *pd = to_ipd(qp->ibqp.pd);
 		struct qib_ibdev *dev = to_idev(pd->ibpd.device);
 
 		if (pd->user)
 			goto bail;
-		if (!dev->dma_mr)
+		mr = rcu_dereference(dev->dma_mr);
+		if (!mr)
 			goto bail;
-		atomic_inc(&dev->dma_mr->refcount);
-		spin_unlock_irqrestore(&rkt->lock, flags);
+		if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
+			goto bail;
+		rcu_read_unlock();
 
-		sge->mr = dev->dma_mr;
+		sge->mr = mr;
 		sge->vaddr = (void *) vaddr;
 		sge->length = len;
 		sge->sge_length = len;
@@ -257,16 +279,18 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 		goto ok;
 	}
 
-	mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
-	if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
+	mr = rcu_dereference(
+		rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]);
+	if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
 		goto bail;
 
 	off = vaddr - mr->iova;
 	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
 		     (mr->access_flags & acc) == 0))
 		goto bail;
-	atomic_inc(&mr->refcount);
-	spin_unlock_irqrestore(&rkt->lock, flags);
+	if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
+		goto bail;
+	rcu_read_unlock();
 
 	off += mr->offset;
 	if (mr->page_shift) {
@@ -302,7 +326,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 ok:
 	return 1;
 bail:
-	spin_unlock_irqrestore(&rkt->lock, flags);
+	rcu_read_unlock();
 	return 0;
 }
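The rewritten lookups are the classic RCU read-side pattern: readers take rcu_read_lock() instead of the table spinlock, and atomic_inc_not_zero() refuses to resurrect an MR whose refcount has already dropped to zero on the free path. A condensed sketch of just that pattern, compiled in the driver's context (lookup_mr() is invented for illustration; rkt->table, refcount, and ib_qib_lkey_table_size are the names used above, and qib_put_mr() is assumed to pair with the refcount):

#include <linux/rcupdate.h>
#include "qib_verbs.h"	/* struct qib_mregion / qib_lkey_table; driver context assumed */

/* reader side: lock-free lookup that returns a guaranteed-live reference */
static struct qib_mregion *lookup_mr(struct qib_lkey_table *rkt, u32 lkey)
{
	struct qib_mregion *mr;

	rcu_read_lock();
	mr = rcu_dereference(rkt->table[lkey >> (32 - ib_qib_lkey_table_size)]);
	if (mr && !atomic_inc_not_zero(&mr->refcount))
		mr = NULL;	/* lost the race with qib_free_lkey(); treat as absent */
	rcu_read_unlock();
	return mr;		/* a non-NULL result must be released with qib_put_mr() */
}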
@@ -325,7 +349,9 @@ int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
 	if (pd->user || rkey == 0)
 		goto bail;
 
-	mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
+	mr = rcu_dereference_protected(
+		rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))],
+		lockdep_is_held(&rkt->lock));
 	if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
 		goto bail;

View File

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
+ * Copyright (c) 2012 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
@@ -49,6 +49,18 @@ static int reply(struct ib_smp *smp)
 	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 }
 
+static int reply_failure(struct ib_smp *smp)
+{
+	/*
+	 * The verbs framework will handle the directed/LID route
+	 * packet changes.
+	 */
+	smp->method = IB_MGMT_METHOD_GET_RESP;
+	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+		smp->status |= IB_SMP_DIRECTION;
+	return IB_MAD_RESULT_FAILURE | IB_MAD_RESULT_REPLY;
+}
+
 static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
 {
 	struct ib_mad_send_buf *send_buf;
@@ -90,14 +102,10 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
 	if (!ibp->sm_ah) {
 		if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
 			struct ib_ah *ah;
-			struct ib_ah_attr attr;
 
-			memset(&attr, 0, sizeof attr);
-			attr.dlid = ibp->sm_lid;
-			attr.port_num = ppd_from_ibp(ibp)->port;
-			ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
+			ah = qib_create_qp0_ah(ibp, ibp->sm_lid);
 			if (IS_ERR(ah))
-				ret = -EINVAL;
+				ret = PTR_ERR(ah);
 			else {
 				send_buf->ah = ah;
 				ibp->sm_ah = to_iah(ah);
@@ -2051,6 +2059,298 @@ bail:
 	return ret;
 }
 
+static int cc_get_classportinfo(struct ib_cc_mad *ccp,
+				struct ib_device *ibdev)
+{
+	struct ib_cc_classportinfo_attr *p =
+		(struct ib_cc_classportinfo_attr *)ccp->mgmt_data;
+
+	memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
+
+	p->base_version = 1;
+	p->class_version = 1;
+	p->cap_mask = 0;
+
+	/*
+	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
+	 */
+	p->resp_time_value = 18;
+
+	return reply((struct ib_smp *) ccp);
+}
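The encoded value follows the IB convention that response time = 4.096 µs × 2^resp_time_value, so 18 yields 4.096e-6 × 262144 ≈ 1.0737 s, matching the comment. As a quick check:

#include <stdio.h>

int main(void)
{
	double base_us = 4.096;		/* IB time unit in microseconds */
	int v = 18;			/* resp_time_value from the MAD */

	/* 4.096e-6 * 2^18 = 1.073741824 seconds */
	printf("%.9f s\n", base_us * 1e-6 * (double)(1 << v));
	return 0;
}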
static int cc_get_congestion_info(struct ib_cc_mad *ccp,
struct ib_device *ibdev, u8 port)
{
struct ib_cc_info_attr *p =
(struct ib_cc_info_attr *)ccp->mgmt_data;
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
p->congestion_info = 0;
p->control_table_cap = ppd->cc_max_table_entries;
return reply((struct ib_smp *) ccp);
}
static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
struct ib_device *ibdev, u8 port)
{
int i;
struct ib_cc_congestion_setting_attr *p =
(struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct ib_cc_congestion_entry_shadow *entries;
memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
spin_lock(&ppd->cc_shadow_lock);
entries = ppd->congestion_entries_shadow->entries;
p->port_control = cpu_to_be16(
ppd->congestion_entries_shadow->port_control);
p->control_map = cpu_to_be16(
ppd->congestion_entries_shadow->control_map);
for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
p->entries[i].ccti_increase = entries[i].ccti_increase;
p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
p->entries[i].trigger_threshold = entries[i].trigger_threshold;
p->entries[i].ccti_min = entries[i].ccti_min;
}
spin_unlock(&ppd->cc_shadow_lock);
return reply((struct ib_smp *) ccp);
}
static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
struct ib_device *ibdev, u8 port)
{
struct ib_cc_table_attr *p =
(struct ib_cc_table_attr *)ccp->mgmt_data;
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
u32 max_cct_block;
u32 cct_entry;
struct ib_cc_table_entry_shadow *entries;
int i;
/* Is the table index more than what is supported? */
if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
goto bail;
memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
spin_lock(&ppd->cc_shadow_lock);
max_cct_block =
(ppd->ccti_entries_shadow->ccti_last_entry + 1)/IB_CCT_ENTRIES;
max_cct_block = max_cct_block ? max_cct_block - 1 : 0;
if (cct_block_index > max_cct_block) {
spin_unlock(&ppd->cc_shadow_lock);
goto bail;
}
ccp->attr_mod = cpu_to_be32(cct_block_index);
cct_entry = IB_CCT_ENTRIES * (cct_block_index + 1);
cct_entry--;
p->ccti_limit = cpu_to_be16(cct_entry);
entries = &ppd->ccti_entries_shadow->
entries[IB_CCT_ENTRIES * cct_block_index];
cct_entry %= IB_CCT_ENTRIES;
for (i = 0; i <= cct_entry; i++)
p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry);
spin_unlock(&ppd->cc_shadow_lock);
return reply((struct ib_smp *) ccp);
bail:
return reply_failure((struct ib_smp *) ccp);
}
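
The Get side transfers the congestion control table in 64-entry blocks: attr_mod selects the block (after the bounds checks above) and the reply's ccti_limit names the last table index the block covers. The same arithmetic in isolation (a sketch; only IB_CCT_ENTRIES is taken from the header further down):

#include <stdio.h>

#define IB_CCT_ENTRIES 64	/* CC table entries carried per MAD */

int main(void)
{
	/* attr_mod b covers table indices [64*b, 64*(b+1)-1], and the
	 * Get reply reports the last of them as ccti_limit, exactly as
	 * computed in the function above. */
	for (unsigned block = 0; block < 3; block++) {
		unsigned first = IB_CCT_ENTRIES * block;
		unsigned ccti_limit = IB_CCT_ENTRIES * (block + 1) - 1;

		printf("attr_mod=%u -> entries %u..%u, ccti_limit=%u\n",
		       block, first, ccti_limit, ccti_limit);
	}
	return 0;
}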
static int cc_set_congestion_setting(struct ib_cc_mad *ccp,
struct ib_device *ibdev, u8 port)
{
struct ib_cc_congestion_setting_attr *p =
(struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
int i;
ppd->cc_sl_control_map = be16_to_cpu(p->control_map);
for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
ppd->congestion_entries[i].ccti_increase =
p->entries[i].ccti_increase;
ppd->congestion_entries[i].ccti_timer =
be16_to_cpu(p->entries[i].ccti_timer);
ppd->congestion_entries[i].trigger_threshold =
p->entries[i].trigger_threshold;
ppd->congestion_entries[i].ccti_min =
p->entries[i].ccti_min;
}
return reply((struct ib_smp *) ccp);
}
static int cc_set_congestion_control_table(struct ib_cc_mad *ccp,
struct ib_device *ibdev, u8 port)
{
struct ib_cc_table_attr *p =
(struct ib_cc_table_attr *)ccp->mgmt_data;
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
u32 cct_entry;
struct ib_cc_table_entry_shadow *entries;
int i;
/* Is the table index more than what is supported? */
if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
goto bail;
/* If this packet is the first in the sequence then
* zero the total table entry count.
*/
if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES)
ppd->total_cct_entry = 0;
cct_entry = (be16_to_cpu(p->ccti_limit))%IB_CCT_ENTRIES;
/* ccti_limit is 0 to 63 */
ppd->total_cct_entry += (cct_entry + 1);
if (ppd->total_cct_entry > ppd->cc_supported_table_entries)
goto bail;
ppd->ccti_limit = be16_to_cpu(p->ccti_limit);
entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index);
for (i = 0; i <= cct_entry; i++)
entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry);
spin_lock(&ppd->cc_shadow_lock);
ppd->ccti_entries_shadow->ccti_last_entry = ppd->total_cct_entry - 1;
memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries,
(ppd->total_cct_entry * sizeof(struct ib_cc_table_entry)));
ppd->congestion_entries_shadow->port_control = IB_CC_CCS_PC_SL_BASED;
ppd->congestion_entries_shadow->control_map = ppd->cc_sl_control_map;
memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries,
IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry));
spin_unlock(&ppd->cc_shadow_lock);
return reply((struct ib_smp *) ccp);
bail:
return reply_failure((struct ib_smp *) ccp);
}
static int check_cc_key(struct qib_ibport *ibp,
struct ib_cc_mad *ccp, int mad_flags)
{
return 0;
}
static int process_cc(struct ib_device *ibdev, int mad_flags,
u8 port, struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
struct qib_ibport *ibp = to_iport(ibdev, port);
int ret;
*out_mad = *in_mad;
if (ccp->class_version != 2) {
ccp->status |= IB_SMP_UNSUP_VERSION;
ret = reply((struct ib_smp *)ccp);
goto bail;
}
ret = check_cc_key(ibp, ccp, mad_flags);
if (ret)
goto bail;
switch (ccp->method) {
case IB_MGMT_METHOD_GET:
switch (ccp->attr_id) {
case IB_CC_ATTR_CLASSPORTINFO:
ret = cc_get_classportinfo(ccp, ibdev);
goto bail;
case IB_CC_ATTR_CONGESTION_INFO:
ret = cc_get_congestion_info(ccp, ibdev, port);
goto bail;
case IB_CC_ATTR_CA_CONGESTION_SETTING:
ret = cc_get_congestion_setting(ccp, ibdev, port);
goto bail;
case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
ret = cc_get_congestion_control_table(ccp, ibdev, port);
goto bail;
default:
ccp->status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_smp *) ccp);
goto bail;
}
case IB_MGMT_METHOD_SET:
switch (ccp->attr_id) {
case IB_CC_ATTR_CA_CONGESTION_SETTING:
ret = cc_set_congestion_setting(ccp, ibdev, port);
goto bail;
case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
ret = cc_set_congestion_control_table(ccp, ibdev, port);
goto bail;
default:
ccp->status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_smp *) ccp);
goto bail;
}
case IB_MGMT_METHOD_GET_RESP:
/*
* The ib_mad module will call us to process responses
* before checking for other consumers.
* Just tell the caller to process it normally.
*/
ret = IB_MAD_RESULT_SUCCESS;
goto bail;
case IB_MGMT_METHOD_TRAP:
default:
ccp->status |= IB_SMP_UNSUP_METHOD;
ret = reply((struct ib_smp *) ccp);
}
bail:
return ret;
}
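
In sum, process_cc() serves Get for four attributes and Set for two, and answers every other method/attribute pair with an unsupported status. A throwaway sketch enumerating that surface (attribute values as defined in qib_mad.h below, printed in host order for brevity):

#include <stdio.h>

#define CLASSPORTINFO            0x0001
#define CONGESTION_INFO          0x0011
#define CA_CONGESTION_SETTING    0x0016
#define CONGESTION_CONTROL_TABLE 0x0017

struct cc_op { unsigned attr; const char *get; const char *set; };

int main(void)
{
	/* The dispatch surface of process_cc() above. */
	static const struct cc_op ops[] = {
		{ CLASSPORTINFO,            "yes", "no"  },
		{ CONGESTION_INFO,          "yes", "no"  },
		{ CA_CONGESTION_SETTING,    "yes", "yes" },
		{ CONGESTION_CONTROL_TABLE, "yes", "yes" },
	};

	for (unsigned i = 0; i < sizeof(ops) / sizeof(ops[0]); i++)
		printf("attr 0x%04x: Get=%s Set=%s\n",
		       ops[i].attr, ops[i].get, ops[i].set);
	return 0;
}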
/** /**
* qib_process_mad - process an incoming MAD packet * qib_process_mad - process an incoming MAD packet
* @ibdev: the infiniband device this packet came in on * @ibdev: the infiniband device this packet came in on
@ -2075,6 +2375,8 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
struct ib_mad *in_mad, struct ib_mad *out_mad) struct ib_mad *in_mad, struct ib_mad *out_mad)
{ {
int ret; int ret;
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
switch (in_mad->mad_hdr.mgmt_class) { switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
@ -2086,6 +2388,15 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
ret = process_perf(ibdev, port, in_mad, out_mad); ret = process_perf(ibdev, port, in_mad, out_mad);
goto bail; goto bail;
case IB_MGMT_CLASS_CONG_MGMT:
if (!ppd->congestion_entries_shadow ||
!qib_cc_table_size) {
ret = IB_MAD_RESULT_SUCCESS;
goto bail;
}
ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad);
goto bail;
default: default:
ret = IB_MAD_RESULT_SUCCESS; ret = IB_MAD_RESULT_SUCCESS;
} }

View File

@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. * Copyright (c) 2012 Intel Corporation. All rights reserved.
* All rights reserved. * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
* *
* This software is available to you under a choice of one of two * This software is available to you under a choice of one of two
@ -31,6 +31,8 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE. * SOFTWARE.
*/ */
#ifndef _QIB_MAD_H
#define _QIB_MAD_H
#include <rdma/ib_pma.h> #include <rdma/ib_pma.h>
@ -222,6 +224,198 @@ struct ib_pma_portcounters_cong {
#define IB_PMA_SEL_CONG_XMIT 0x04 #define IB_PMA_SEL_CONG_XMIT 0x04
#define IB_PMA_SEL_CONG_ROUTING 0x08 #define IB_PMA_SEL_CONG_ROUTING 0x08
/*
* Congestion control class attributes
*/
#define IB_CC_ATTR_CLASSPORTINFO cpu_to_be16(0x0001)
#define IB_CC_ATTR_NOTICE cpu_to_be16(0x0002)
#define IB_CC_ATTR_CONGESTION_INFO cpu_to_be16(0x0011)
#define IB_CC_ATTR_CONGESTION_KEY_INFO cpu_to_be16(0x0012)
#define IB_CC_ATTR_CONGESTION_LOG cpu_to_be16(0x0013)
#define IB_CC_ATTR_SWITCH_CONGESTION_SETTING cpu_to_be16(0x0014)
#define IB_CC_ATTR_SWITCH_PORT_CONGESTION_SETTING cpu_to_be16(0x0015)
#define IB_CC_ATTR_CA_CONGESTION_SETTING cpu_to_be16(0x0016)
#define IB_CC_ATTR_CONGESTION_CONTROL_TABLE cpu_to_be16(0x0017)
#define IB_CC_ATTR_TIME_STAMP cpu_to_be16(0x0018)
/* generalizations for threshold values */
#define IB_CC_THRESHOLD_NONE 0x0
#define IB_CC_THRESHOLD_MIN 0x1
#define IB_CC_THRESHOLD_MAX 0xf
/* CCA MAD header constants */
#define IB_CC_MAD_LOGDATA_LEN 32
#define IB_CC_MAD_MGMTDATA_LEN 192
struct ib_cc_mad {
u8 base_version;
u8 mgmt_class;
u8 class_version;
u8 method;
__be16 status;
__be16 class_specific;
__be64 tid;
__be16 attr_id;
__be16 resv;
__be32 attr_mod;
__be64 cckey;
/* For CongestionLog attribute only */
u8 log_data[IB_CC_MAD_LOGDATA_LEN];
u8 mgmt_data[IB_CC_MAD_MGMTDATA_LEN];
} __packed;
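
The packed layout should add up to the fixed 256-byte MAD: a 24-byte common header, the 8-byte CC_Key, 32 bytes of log data and 192 bytes of management data. A userspace compile-time check of that assumption (uint*_t types stand in for the kernel's __be* fields; only the sizes matter here):

#include <stdint.h>

#define IB_CC_MAD_LOGDATA_LEN  32
#define IB_CC_MAD_MGMTDATA_LEN 192

struct ib_cc_mad_sketch {
	uint8_t  base_version;
	uint8_t  mgmt_class;
	uint8_t  class_version;
	uint8_t  method;
	uint16_t status;
	uint16_t class_specific;
	uint64_t tid;
	uint16_t attr_id;
	uint16_t resv;
	uint32_t attr_mod;
	uint64_t cckey;
	uint8_t  log_data[IB_CC_MAD_LOGDATA_LEN];
	uint8_t  mgmt_data[IB_CC_MAD_MGMTDATA_LEN];
} __attribute__((packed));

/* 24-byte MAD header + 8 + 32 + 192 == 256, the fixed IB MAD size */
_Static_assert(sizeof(struct ib_cc_mad_sketch) == 256,
	       "CC MAD must be exactly 256 bytes");

int main(void) { return 0; }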
/*
* Congestion Control class portinfo capability mask bits
*/
#define IB_CC_CPI_CM_TRAP_GEN cpu_to_be16(1 << 0)
#define IB_CC_CPI_CM_GET_SET_NOTICE cpu_to_be16(1 << 1)
#define IB_CC_CPI_CM_CAP2 cpu_to_be16(1 << 2)
#define IB_CC_CPI_CM_ENHANCEDPORT0_CC cpu_to_be16(1 << 8)
struct ib_cc_classportinfo_attr {
u8 base_version;
u8 class_version;
__be16 cap_mask;
u8 reserved[3];
u8 resp_time_value; /* only lower 5 bits */
union ib_gid redirect_gid;
__be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */
__be16 redirect_lid;
__be16 redirect_pkey;
__be32 redirect_qp; /* only lower 24 bits */
__be32 redirect_qkey;
union ib_gid trap_gid;
__be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */
__be16 trap_lid;
__be16 trap_pkey;
__be32 trap_hl_qp; /* 8, 24 bits respectively */
__be32 trap_qkey;
} __packed;
/* Congestion control traps */
#define IB_CC_TRAP_KEY_VIOLATION 0x0000
struct ib_cc_trap_key_violation_attr {
__be16 source_lid;
u8 method;
u8 reserved1;
__be16 attrib_id;
__be32 attrib_mod;
__be32 qp;
__be64 cckey;
u8 sgid[16];
u8 padding[24];
} __packed;
/* Congestion info flags */
#define IB_CC_CI_FLAGS_CREDIT_STARVATION 0x1
#define IB_CC_TABLE_CAP_DEFAULT 31
struct ib_cc_info_attr {
__be16 congestion_info;
u8 control_table_cap; /* Multiple of 64 entry unit CCTs */
} __packed;
struct ib_cc_key_info_attr {
__be64 cckey;
u8 protect;
__be16 lease_period;
__be16 violations;
} __packed;
#define IB_CC_CL_CA_LOGEVENTS_LEN 208
struct ib_cc_log_attr {
u8 log_type;
u8 congestion_flags;
__be16 threshold_event_counter;
__be16 threshold_congestion_event_map;
__be16 current_time_stamp;
u8 log_events[IB_CC_CL_CA_LOGEVENTS_LEN];
} __packed;
#define IB_CC_CLEC_SERVICETYPE_RC 0x0
#define IB_CC_CLEC_SERVICETYPE_UC 0x1
#define IB_CC_CLEC_SERVICETYPE_RD 0x2
#define IB_CC_CLEC_SERVICETYPE_UD 0x3
struct ib_cc_log_event {
u8 local_qp_cn_entry;
u8 remote_qp_number_cn_entry[3];
u8 sl_cn_entry:4;
u8 service_type_cn_entry:4;
__be32 remote_lid_cn_entry;
__be32 timestamp_cn_entry;
} __packed;
/* Sixteen congestion entries */
#define IB_CC_CCS_ENTRIES 16
/* Port control flags */
#define IB_CC_CCS_PC_SL_BASED 0x01
struct ib_cc_congestion_entry {
u8 ccti_increase;
__be16 ccti_timer;
u8 trigger_threshold;
u8 ccti_min; /* min CCTI for cc table */
} __packed;
struct ib_cc_congestion_entry_shadow {
u8 ccti_increase;
u16 ccti_timer;
u8 trigger_threshold;
u8 ccti_min; /* min CCTI for cc table */
} __packed;
struct ib_cc_congestion_setting_attr {
__be16 port_control;
__be16 control_map;
struct ib_cc_congestion_entry entries[IB_CC_CCS_ENTRIES];
} __packed;
struct ib_cc_congestion_setting_attr_shadow {
u16 port_control;
u16 control_map;
struct ib_cc_congestion_entry_shadow entries[IB_CC_CCS_ENTRIES];
} __packed;
#define IB_CC_TABLE_ENTRY_INCREASE_DEFAULT 1
#define IB_CC_TABLE_ENTRY_TIMER_DEFAULT 1
/* 64 Congestion Control table entries in a single MAD */
#define IB_CCT_ENTRIES 64
#define IB_CCT_MIN_ENTRIES (IB_CCT_ENTRIES * 2)
struct ib_cc_table_entry {
__be16 entry; /* shift:2, multiplier:14 */
};
struct ib_cc_table_entry_shadow {
u16 entry; /* shift:2, multiplier:14 */
};
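
Per the comments above, each congestion control table entry packs a 2-bit shift and a 14-bit multiplier into one 16-bit word. A decode sketch, assuming the shift occupies the top two bits (the usual reading of "shift:2, multiplier:14"; the helper names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: bits 15:14 = shift, bits 13:0 = multiplier. */
static unsigned cct_shift(uint16_t entry)      { return entry >> 14; }
static unsigned cct_multiplier(uint16_t entry) { return entry & 0x3fff; }

int main(void)
{
	uint16_t entry = (2u << 14) | 300u;	/* shift=2, multiplier=300 */

	printf("shift=%u multiplier=%u\n",
	       cct_shift(entry), cct_multiplier(entry));
	return 0;
}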
struct ib_cc_table_attr {
__be16 ccti_limit; /* max CCTI for cc table */
struct ib_cc_table_entry ccti_entries[IB_CCT_ENTRIES];
} __packed;
struct ib_cc_table_attr_shadow {
u16 ccti_limit; /* max CCTI for cc table */
struct ib_cc_table_entry_shadow ccti_entries[IB_CCT_ENTRIES];
} __packed;
#define CC_TABLE_SHADOW_MAX \
(IB_CC_TABLE_CAP_DEFAULT * IB_CCT_ENTRIES)
struct cc_table_shadow {
u16 ccti_last_entry;
struct ib_cc_table_entry_shadow entries[CC_TABLE_SHADOW_MAX];
} __packed;
#endif /* _QIB_MAD_H */
/* /*
* The PortSamplesControl.CounterMasks field is an array of 3 bit fields * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
* which specify the N'th counter's capabilities. See ch. 16.1.3.2. * which specify the N'th counter's capabilities. See ch. 16.1.3.2.

View File

@ -47,6 +47,43 @@ static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
return container_of(ibfmr, struct qib_fmr, ibfmr); return container_of(ibfmr, struct qib_fmr, ibfmr);
} }
static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
int count)
{
int m, i = 0;
int rval = 0;
m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
for (; i < m; i++) {
mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL);
if (!mr->map[i])
goto bail;
}
mr->mapsz = m;
init_completion(&mr->comp);
/* count returning the ptr to user */
atomic_set(&mr->refcount, 1);
mr->pd = pd;
mr->max_segs = count;
out:
return rval;
bail:
while (i)
kfree(mr->map[--i]);
rval = -ENOMEM;
goto out;
}
static void deinit_qib_mregion(struct qib_mregion *mr)
{
int i = mr->mapsz;
mr->mapsz = 0;
while (i)
kfree(mr->map[--i]);
}
/** /**
* qib_get_dma_mr - get a DMA memory region * qib_get_dma_mr - get a DMA memory region
* @pd: protection domain for this memory region * @pd: protection domain for this memory region
@ -58,10 +95,9 @@ static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
*/ */
struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc) struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
{ {
struct qib_ibdev *dev = to_idev(pd->device); struct qib_mr *mr = NULL;
struct qib_mr *mr;
struct ib_mr *ret; struct ib_mr *ret;
unsigned long flags; int rval;
if (to_ipd(pd)->user) { if (to_ipd(pd)->user) {
ret = ERR_PTR(-EPERM); ret = ERR_PTR(-EPERM);
@ -74,61 +110,64 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
goto bail; goto bail;
} }
rval = init_qib_mregion(&mr->mr, pd, 0);
if (rval) {
ret = ERR_PTR(rval);
goto bail;
}
rval = qib_alloc_lkey(&mr->mr, 1);
if (rval) {
ret = ERR_PTR(rval);
goto bail_mregion;
}
mr->mr.access_flags = acc; mr->mr.access_flags = acc;
atomic_set(&mr->mr.refcount, 0);
spin_lock_irqsave(&dev->lk_table.lock, flags);
if (!dev->dma_mr)
dev->dma_mr = &mr->mr;
spin_unlock_irqrestore(&dev->lk_table.lock, flags);
ret = &mr->ibmr; ret = &mr->ibmr;
done:
bail:
return ret; return ret;
bail_mregion:
deinit_qib_mregion(&mr->mr);
bail:
kfree(mr);
goto done;
} }
static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table) static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
{ {
struct qib_mr *mr; struct qib_mr *mr;
int m, i = 0; int rval = -ENOMEM;
int m;
/* Allocate struct plus pointers to first level page tables. */ /* Allocate struct plus pointers to first level page tables. */
m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
if (!mr) if (!mr)
goto done; goto bail;
/* Allocate first level page tables. */
for (; i < m; i++) {
mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
if (!mr->mr.map[i])
goto bail;
}
mr->mr.mapsz = m;
mr->mr.page_shift = 0;
mr->mr.max_segs = count;
rval = init_qib_mregion(&mr->mr, pd, count);
if (rval)
goto bail;
/* /*
* ib_reg_phys_mr() will initialize mr->ibmr except for * ib_reg_phys_mr() will initialize mr->ibmr except for
* lkey and rkey. * lkey and rkey.
*/ */
if (!qib_alloc_lkey(lk_table, &mr->mr)) rval = qib_alloc_lkey(&mr->mr, 0);
goto bail; if (rval)
goto bail_mregion;
mr->ibmr.lkey = mr->mr.lkey; mr->ibmr.lkey = mr->mr.lkey;
mr->ibmr.rkey = mr->mr.lkey; mr->ibmr.rkey = mr->mr.lkey;
atomic_set(&mr->mr.refcount, 0);
goto done;
bail:
while (i)
kfree(mr->mr.map[--i]);
kfree(mr);
mr = NULL;
done: done:
return mr; return mr;
bail_mregion:
deinit_qib_mregion(&mr->mr);
bail:
kfree(mr);
mr = ERR_PTR(rval);
goto done;
} }
/** /**
@ -148,19 +187,15 @@ struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
int n, m, i; int n, m, i;
struct ib_mr *ret; struct ib_mr *ret;
mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table); mr = alloc_mr(num_phys_buf, pd);
if (mr == NULL) { if (IS_ERR(mr)) {
ret = ERR_PTR(-ENOMEM); ret = (struct ib_mr *)mr;
goto bail; goto bail;
} }
mr->mr.pd = pd;
mr->mr.user_base = *iova_start; mr->mr.user_base = *iova_start;
mr->mr.iova = *iova_start; mr->mr.iova = *iova_start;
mr->mr.length = 0;
mr->mr.offset = 0;
mr->mr.access_flags = acc; mr->mr.access_flags = acc;
mr->umem = NULL;
m = 0; m = 0;
n = 0; n = 0;
@ -186,7 +221,6 @@ bail:
* @pd: protection domain for this memory region * @pd: protection domain for this memory region
* @start: starting userspace address * @start: starting userspace address
* @length: length of region to register * @length: length of region to register
* @virt_addr: virtual address to use (from HCA's point of view)
* @mr_access_flags: access flags for this memory region * @mr_access_flags: access flags for this memory region
* @udata: unused by the QLogic_IB driver * @udata: unused by the QLogic_IB driver
* *
@ -216,14 +250,13 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
list_for_each_entry(chunk, &umem->chunk_list, list) list_for_each_entry(chunk, &umem->chunk_list, list)
n += chunk->nents; n += chunk->nents;
mr = alloc_mr(n, &to_idev(pd->device)->lk_table); mr = alloc_mr(n, pd);
if (!mr) { if (IS_ERR(mr)) {
ret = ERR_PTR(-ENOMEM); ret = (struct ib_mr *)mr;
ib_umem_release(umem); ib_umem_release(umem);
goto bail; goto bail;
} }
mr->mr.pd = pd;
mr->mr.user_base = start; mr->mr.user_base = start;
mr->mr.iova = virt_addr; mr->mr.iova = virt_addr;
mr->mr.length = length; mr->mr.length = length;
@ -271,21 +304,25 @@ bail:
int qib_dereg_mr(struct ib_mr *ibmr) int qib_dereg_mr(struct ib_mr *ibmr)
{ {
struct qib_mr *mr = to_imr(ibmr); struct qib_mr *mr = to_imr(ibmr);
struct qib_ibdev *dev = to_idev(ibmr->device); int ret = 0;
int ret; unsigned long timeout;
int i;
ret = qib_free_lkey(dev, &mr->mr); qib_free_lkey(&mr->mr);
if (ret)
return ret;
i = mr->mr.mapsz; qib_put_mr(&mr->mr); /* will set completion if last */
while (i) timeout = wait_for_completion_timeout(&mr->mr.comp,
kfree(mr->mr.map[--i]); 5 * HZ);
if (!timeout) {
qib_get_mr(&mr->mr);
ret = -EBUSY;
goto out;
}
deinit_qib_mregion(&mr->mr);
if (mr->umem) if (mr->umem)
ib_umem_release(mr->umem); ib_umem_release(mr->umem);
kfree(mr); kfree(mr);
return 0; out:
return ret;
} }
/* /*
@ -298,17 +335,9 @@ struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{ {
struct qib_mr *mr; struct qib_mr *mr;
mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table); mr = alloc_mr(max_page_list_len, pd);
if (mr == NULL) if (IS_ERR(mr))
return ERR_PTR(-ENOMEM); return (struct ib_mr *)mr;
mr->mr.pd = pd;
mr->mr.user_base = 0;
mr->mr.iova = 0;
mr->mr.length = 0;
mr->mr.offset = 0;
mr->mr.access_flags = 0;
mr->umem = NULL;
return &mr->ibmr; return &mr->ibmr;
} }
@ -322,11 +351,11 @@ qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
if (size > PAGE_SIZE) if (size > PAGE_SIZE)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
pl = kmalloc(sizeof *pl, GFP_KERNEL); pl = kzalloc(sizeof *pl, GFP_KERNEL);
if (!pl) if (!pl)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
pl->page_list = kmalloc(size, GFP_KERNEL); pl->page_list = kzalloc(size, GFP_KERNEL);
if (!pl->page_list) if (!pl->page_list)
goto err_free; goto err_free;
@ -355,57 +384,47 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
struct ib_fmr_attr *fmr_attr) struct ib_fmr_attr *fmr_attr)
{ {
struct qib_fmr *fmr; struct qib_fmr *fmr;
int m, i = 0; int m;
struct ib_fmr *ret; struct ib_fmr *ret;
int rval = -ENOMEM;
/* Allocate struct plus pointers to first level page tables. */ /* Allocate struct plus pointers to first level page tables. */
m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ; m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
if (!fmr) if (!fmr)
goto bail; goto bail;
/* Allocate first level page tables. */ rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages);
for (; i < m; i++) { if (rval)
fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0], goto bail;
GFP_KERNEL);
if (!fmr->mr.map[i])
goto bail;
}
fmr->mr.mapsz = m;
/* /*
* ib_alloc_fmr() will initialize fmr->ibfmr except for lkey & * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
* rkey. * rkey.
*/ */
if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr)) rval = qib_alloc_lkey(&fmr->mr, 0);
goto bail; if (rval)
goto bail_mregion;
fmr->ibfmr.rkey = fmr->mr.lkey; fmr->ibfmr.rkey = fmr->mr.lkey;
fmr->ibfmr.lkey = fmr->mr.lkey; fmr->ibfmr.lkey = fmr->mr.lkey;
/* /*
* Resources are allocated but no valid mapping (RKEY can't be * Resources are allocated but no valid mapping (RKEY can't be
* used). * used).
*/ */
fmr->mr.pd = pd;
fmr->mr.user_base = 0;
fmr->mr.iova = 0;
fmr->mr.length = 0;
fmr->mr.offset = 0;
fmr->mr.access_flags = mr_access_flags; fmr->mr.access_flags = mr_access_flags;
fmr->mr.max_segs = fmr_attr->max_pages; fmr->mr.max_segs = fmr_attr->max_pages;
fmr->mr.page_shift = fmr_attr->page_shift; fmr->mr.page_shift = fmr_attr->page_shift;
atomic_set(&fmr->mr.refcount, 0);
ret = &fmr->ibfmr; ret = &fmr->ibfmr;
goto done;
bail:
while (i)
kfree(fmr->mr.map[--i]);
kfree(fmr);
ret = ERR_PTR(-ENOMEM);
done: done:
return ret; return ret;
bail_mregion:
deinit_qib_mregion(&fmr->mr);
bail:
kfree(fmr);
ret = ERR_PTR(rval);
goto done;
} }
/** /**
@ -428,7 +447,8 @@ int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
u32 ps; u32 ps;
int ret; int ret;
if (atomic_read(&fmr->mr.refcount)) i = atomic_read(&fmr->mr.refcount);
if (i > 2)
return -EBUSY; return -EBUSY;
if (list_len > fmr->mr.max_segs) { if (list_len > fmr->mr.max_segs) {
@ -490,16 +510,27 @@ int qib_unmap_fmr(struct list_head *fmr_list)
int qib_dealloc_fmr(struct ib_fmr *ibfmr) int qib_dealloc_fmr(struct ib_fmr *ibfmr)
{ {
struct qib_fmr *fmr = to_ifmr(ibfmr); struct qib_fmr *fmr = to_ifmr(ibfmr);
int ret; int ret = 0;
int i; unsigned long timeout;
ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr); qib_free_lkey(&fmr->mr);
if (ret) qib_put_mr(&fmr->mr); /* will set completion if last */
return ret; timeout = wait_for_completion_timeout(&fmr->mr.comp,
5 * HZ);
i = fmr->mr.mapsz; if (!timeout) {
while (i) qib_get_mr(&fmr->mr);
kfree(fmr->mr.map[--i]); ret = -EBUSY;
goto out;
}
deinit_qib_mregion(&fmr->mr);
kfree(fmr); kfree(fmr);
return 0; out:
return ret;
}
void mr_rcu_callback(struct rcu_head *list)
{
struct qib_mregion *mr = container_of(list, struct qib_mregion, list);
complete(&mr->comp);
} }
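
The new teardown scheme replaces "fail if the MR is busy" with reference counting: qib_put_mr() completes mr->comp when the last reference drops, and qib_dereg_mr()/qib_dealloc_fmr() wait on it with a 5-second timeout before freeing. A reduced userspace model of that pattern, assuming POSIX threads (build with -pthread; every name here is local to the sketch):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;
	pthread_mutex_t lock;
	pthread_cond_t cv;	/* stands in for struct completion */
	int done;
};

static void obj_put(struct obj *o)
{
	/* Last put signals the "completion" the freeing thread waits on. */
	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
		pthread_mutex_lock(&o->lock);
		o->done = 1;
		pthread_cond_signal(&o->cv);
		pthread_mutex_unlock(&o->lock);
	}
}

static void *user(void *arg)
{
	obj_put(arg);		/* a concurrent user drops its reference */
	return NULL;
}

int main(void)
{
	struct obj o = { .refcount = 2,	/* one for us, one for the user */
			 .lock = PTHREAD_MUTEX_INITIALIZER,
			 .cv = PTHREAD_COND_INITIALIZER, .done = 0 };
	pthread_t t;

	pthread_create(&t, NULL, user, &o);
	obj_put(&o);		/* drop our own reference */

	pthread_mutex_lock(&o.lock);	/* "wait_for_completion" */
	while (!o.done)
		pthread_cond_wait(&o.cv, &o.lock);
	pthread_mutex_unlock(&o.lock);
	pthread_join(t, NULL);
	puts("all references gone; safe to free");
	return 0;
}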

View File

@ -224,8 +224,9 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
} }
do_intx: do_intx:
if (ret) { if (ret) {
qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, " qib_dev_err(dd,
"falling back to INTx\n", tabsize, ret); "pci_enable_msix %d vectors failed: %d, falling back to INTx\n",
tabsize, ret);
tabsize = 0; tabsize = 0;
} }
for (i = 0; i < tabsize; i++) for (i = 0; i < tabsize; i++)
@ -251,8 +252,9 @@ static int qib_msi_setup(struct qib_devdata *dd, int pos)
ret = pci_enable_msi(pdev); ret = pci_enable_msi(pdev);
if (ret) if (ret)
qib_dev_err(dd, "pci_enable_msi failed: %d, " qib_dev_err(dd,
"interrupts may not work\n", ret); "pci_enable_msi failed: %d, interrupts may not work\n",
ret);
/* continue even if it fails, we may still be OK... */ /* continue even if it fails, we may still be OK... */
pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO, pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
@ -358,8 +360,8 @@ int qib_reinit_intr(struct qib_devdata *dd)
pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI); pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
if (!pos) { if (!pos) {
qib_dev_err(dd, "Can't find MSI capability, " qib_dev_err(dd,
"can't restore MSI settings\n"); "Can't find MSI capability, can't restore MSI settings\n");
ret = 0; ret = 0;
/* nothing special for MSIx, just MSI */ /* nothing special for MSIx, just MSI */
goto bail; goto bail;
@ -471,8 +473,8 @@ void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline); pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
r = pci_enable_device(dd->pcidev); r = pci_enable_device(dd->pcidev);
if (r) if (r)
qib_dev_err(dd, "pci_enable_device failed after " qib_dev_err(dd,
"reset: %d\n", r); "pci_enable_device failed after reset: %d\n", r);
} }
/* code to adjust PCIe capabilities. */ /* code to adjust PCIe capabilities. */
@ -717,15 +719,16 @@ qib_pci_mmio_enabled(struct pci_dev *pdev)
if (words == ~0ULL) if (words == ~0ULL)
ret = PCI_ERS_RESULT_NEED_RESET; ret = PCI_ERS_RESULT_NEED_RESET;
} }
qib_devinfo(pdev, "QIB mmio_enabled function called, " qib_devinfo(pdev,
"read wordscntr %Lx, returning %d\n", words, ret); "QIB mmio_enabled function called, read wordscntr %Lx, returning %d\n",
words, ret);
return ret; return ret;
} }
static pci_ers_result_t static pci_ers_result_t
qib_pci_slot_reset(struct pci_dev *pdev) qib_pci_slot_reset(struct pci_dev *pdev)
{ {
qib_devinfo(pdev, "QIB link_reset function called, ignored\n"); qib_devinfo(pdev, "QIB slot_reset function called, ignored\n");
return PCI_ERS_RESULT_CAN_RECOVER; return PCI_ERS_RESULT_CAN_RECOVER;
} }

View File

@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. * Copyright (c) 2012 Intel Corporation. All rights reserved.
* All rights reserved. * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.

* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
* *
* This software is available to you under a choice of one of two * This software is available to you under a choice of one of two
@ -250,23 +250,33 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
spin_lock_irqsave(&dev->qpt_lock, flags); spin_lock_irqsave(&dev->qpt_lock, flags);
if (ibp->qp0 == qp) { if (rcu_dereference_protected(ibp->qp0,
lockdep_is_held(&dev->qpt_lock)) == qp) {
atomic_dec(&qp->refcount); atomic_dec(&qp->refcount);
rcu_assign_pointer(ibp->qp0, NULL); rcu_assign_pointer(ibp->qp0, NULL);
} else if (ibp->qp1 == qp) { } else if (rcu_dereference_protected(ibp->qp1,
lockdep_is_held(&dev->qpt_lock)) == qp) {
atomic_dec(&qp->refcount); atomic_dec(&qp->refcount);
rcu_assign_pointer(ibp->qp1, NULL); rcu_assign_pointer(ibp->qp1, NULL);
} else { } else {
struct qib_qp *q, **qpp; struct qib_qp *q;
struct qib_qp __rcu **qpp;
qpp = &dev->qp_table[n]; qpp = &dev->qp_table[n];
for (; (q = *qpp) != NULL; qpp = &q->next) q = rcu_dereference_protected(*qpp,
lockdep_is_held(&dev->qpt_lock));
for (; q; qpp = &q->next) {
if (q == qp) { if (q == qp) {
atomic_dec(&qp->refcount); atomic_dec(&qp->refcount);
rcu_assign_pointer(*qpp, qp->next); *qpp = qp->next;
qp->next = NULL; rcu_assign_pointer(qp->next, NULL);
q = rcu_dereference_protected(*qpp,
lockdep_is_held(&dev->qpt_lock));
break; break;
} }
q = rcu_dereference_protected(*qpp,
lockdep_is_held(&dev->qpt_lock));
}
} }
spin_unlock_irqrestore(&dev->qpt_lock, flags); spin_unlock_irqrestore(&dev->qpt_lock, flags);
@ -302,10 +312,12 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
spin_lock_irqsave(&dev->qpt_lock, flags); spin_lock_irqsave(&dev->qpt_lock, flags);
for (n = 0; n < dev->qp_table_size; n++) { for (n = 0; n < dev->qp_table_size; n++) {
qp = dev->qp_table[n]; qp = rcu_dereference_protected(dev->qp_table[n],
lockdep_is_held(&dev->qpt_lock));
rcu_assign_pointer(dev->qp_table[n], NULL); rcu_assign_pointer(dev->qp_table[n], NULL);
for (; qp; qp = qp->next) for (; qp; qp = rcu_dereference_protected(qp->next,
lockdep_is_held(&dev->qpt_lock)))
qp_inuse++; qp_inuse++;
} }
spin_unlock_irqrestore(&dev->qpt_lock, flags); spin_unlock_irqrestore(&dev->qpt_lock, flags);
@ -337,7 +349,8 @@ struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
unsigned n = qpn_hash(dev, qpn); unsigned n = qpn_hash(dev, qpn);
rcu_read_lock(); rcu_read_lock();
for (qp = dev->qp_table[n]; rcu_dereference(qp); qp = qp->next) for (qp = rcu_dereference(dev->qp_table[n]); qp;
qp = rcu_dereference(qp->next))
if (qp->ibqp.qp_num == qpn) if (qp->ibqp.qp_num == qpn)
break; break;
} }
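
The qib_qp.c hunks above all follow one discipline: readers walk qp_table under rcu_read_lock() with rcu_dereference(), while the writer, holding qpt_lock, annotates its accesses with rcu_dereference_protected(..., lockdep_is_held(...)) so lockdep and sparse can verify them. A schematic kernel-style fragment of the same split (not qib code; 'head' and 'lock' are illustrative, and freeing an unlinked node would still need a grace period via call_rcu() or synchronize_rcu()):

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct node {
	int key;
	struct node __rcu *next;
};

static struct node __rcu *head;
static DEFINE_SPINLOCK(lock);

/* Reader: may run concurrently with the writer below. */
static bool lookup(int key)
{
	struct node *n;
	bool found = false;

	rcu_read_lock();
	for (n = rcu_dereference(head); n; n = rcu_dereference(n->next))
		if (n->key == key) {
			found = true;
			break;
		}
	rcu_read_unlock();
	return found;
}

/* Writer: every dereference is annotated with the lock that makes it
 * safe, mirroring the qpt_lock pattern above. */
static void unlink_first(void)
{
	struct node *n;

	spin_lock(&lock);
	n = rcu_dereference_protected(head, lockdep_is_held(&lock));
	if (n)
		rcu_assign_pointer(head,
			rcu_dereference_protected(n->next,
						  lockdep_is_held(&lock)));
	spin_unlock(&lock);
}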
@ -406,18 +419,9 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
unsigned n; unsigned n;
if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
while (qp->s_rdma_read_sge.num_sge) { qib_put_ss(&qp->s_rdma_read_sge);
atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
if (--qp->s_rdma_read_sge.num_sge)
qp->s_rdma_read_sge.sge =
*qp->s_rdma_read_sge.sg_list++;
}
while (qp->r_sge.num_sge) { qib_put_ss(&qp->r_sge);
atomic_dec(&qp->r_sge.sge.mr->refcount);
if (--qp->r_sge.num_sge)
qp->r_sge.sge = *qp->r_sge.sg_list++;
}
if (clr_sends) { if (clr_sends) {
while (qp->s_last != qp->s_head) { while (qp->s_last != qp->s_head) {
@ -427,7 +431,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
for (i = 0; i < wqe->wr.num_sge; i++) { for (i = 0; i < wqe->wr.num_sge; i++) {
struct qib_sge *sge = &wqe->sg_list[i]; struct qib_sge *sge = &wqe->sg_list[i];
atomic_dec(&sge->mr->refcount); qib_put_mr(sge->mr);
} }
if (qp->ibqp.qp_type == IB_QPT_UD || if (qp->ibqp.qp_type == IB_QPT_UD ||
qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_SMI ||
@ -437,7 +441,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
qp->s_last = 0; qp->s_last = 0;
} }
if (qp->s_rdma_mr) { if (qp->s_rdma_mr) {
atomic_dec(&qp->s_rdma_mr->refcount); qib_put_mr(qp->s_rdma_mr);
qp->s_rdma_mr = NULL; qp->s_rdma_mr = NULL;
} }
} }
@ -450,7 +454,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST && if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
e->rdma_sge.mr) { e->rdma_sge.mr) {
atomic_dec(&e->rdma_sge.mr->refcount); qib_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL; e->rdma_sge.mr = NULL;
} }
} }
@ -495,7 +499,7 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
if (!(qp->s_flags & QIB_S_BUSY)) { if (!(qp->s_flags & QIB_S_BUSY)) {
qp->s_hdrwords = 0; qp->s_hdrwords = 0;
if (qp->s_rdma_mr) { if (qp->s_rdma_mr) {
atomic_dec(&qp->s_rdma_mr->refcount); qib_put_mr(qp->s_rdma_mr);
qp->s_rdma_mr = NULL; qp->s_rdma_mr = NULL;
} }
if (qp->s_tx) { if (qp->s_tx) {

View File

@ -95,7 +95,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
case OP(RDMA_READ_RESPONSE_ONLY): case OP(RDMA_READ_RESPONSE_ONLY):
e = &qp->s_ack_queue[qp->s_tail_ack_queue]; e = &qp->s_ack_queue[qp->s_tail_ack_queue];
if (e->rdma_sge.mr) { if (e->rdma_sge.mr) {
atomic_dec(&e->rdma_sge.mr->refcount); qib_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL; e->rdma_sge.mr = NULL;
} }
/* FALLTHROUGH */ /* FALLTHROUGH */
@ -133,7 +133,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
/* Copy SGE state in case we need to resend */ /* Copy SGE state in case we need to resend */
qp->s_rdma_mr = e->rdma_sge.mr; qp->s_rdma_mr = e->rdma_sge.mr;
if (qp->s_rdma_mr) if (qp->s_rdma_mr)
atomic_inc(&qp->s_rdma_mr->refcount); qib_get_mr(qp->s_rdma_mr);
qp->s_ack_rdma_sge.sge = e->rdma_sge; qp->s_ack_rdma_sge.sge = e->rdma_sge;
qp->s_ack_rdma_sge.num_sge = 1; qp->s_ack_rdma_sge.num_sge = 1;
qp->s_cur_sge = &qp->s_ack_rdma_sge; qp->s_cur_sge = &qp->s_ack_rdma_sge;
@ -172,7 +172,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
qp->s_cur_sge = &qp->s_ack_rdma_sge; qp->s_cur_sge = &qp->s_ack_rdma_sge;
qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
if (qp->s_rdma_mr) if (qp->s_rdma_mr)
atomic_inc(&qp->s_rdma_mr->refcount); qib_get_mr(qp->s_rdma_mr);
len = qp->s_ack_rdma_sge.sge.sge_length; len = qp->s_ack_rdma_sge.sge.sge_length;
if (len > pmtu) if (len > pmtu)
len = pmtu; len = pmtu;
@ -1012,7 +1012,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
for (i = 0; i < wqe->wr.num_sge; i++) { for (i = 0; i < wqe->wr.num_sge; i++) {
struct qib_sge *sge = &wqe->sg_list[i]; struct qib_sge *sge = &wqe->sg_list[i];
atomic_dec(&sge->mr->refcount); qib_put_mr(sge->mr);
} }
/* Post a send completion queue entry if requested. */ /* Post a send completion queue entry if requested. */
if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
@ -1068,7 +1068,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
for (i = 0; i < wqe->wr.num_sge; i++) { for (i = 0; i < wqe->wr.num_sge; i++) {
struct qib_sge *sge = &wqe->sg_list[i]; struct qib_sge *sge = &wqe->sg_list[i];
atomic_dec(&sge->mr->refcount); qib_put_mr(sge->mr);
} }
/* Post a send completion queue entry if requested. */ /* Post a send completion queue entry if requested. */
if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
@ -1730,7 +1730,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
if (unlikely(offset + len != e->rdma_sge.sge_length)) if (unlikely(offset + len != e->rdma_sge.sge_length))
goto unlock_done; goto unlock_done;
if (e->rdma_sge.mr) { if (e->rdma_sge.mr) {
atomic_dec(&e->rdma_sge.mr->refcount); qib_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL; e->rdma_sge.mr = NULL;
} }
if (len != 0) { if (len != 0) {
@ -2024,11 +2024,7 @@ send_last:
if (unlikely(wc.byte_len > qp->r_len)) if (unlikely(wc.byte_len > qp->r_len))
goto nack_inv; goto nack_inv;
qib_copy_sge(&qp->r_sge, data, tlen, 1); qib_copy_sge(&qp->r_sge, data, tlen, 1);
while (qp->r_sge.num_sge) { qib_put_ss(&qp->r_sge);
atomic_dec(&qp->r_sge.sge.mr->refcount);
if (--qp->r_sge.num_sge)
qp->r_sge.sge = *qp->r_sge.sg_list++;
}
qp->r_msn++; qp->r_msn++;
if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
break; break;
@ -2116,7 +2112,7 @@ send_last:
} }
e = &qp->s_ack_queue[qp->r_head_ack_queue]; e = &qp->s_ack_queue[qp->r_head_ack_queue];
if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
atomic_dec(&e->rdma_sge.mr->refcount); qib_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL; e->rdma_sge.mr = NULL;
} }
reth = &ohdr->u.rc.reth; reth = &ohdr->u.rc.reth;
@ -2188,7 +2184,7 @@ send_last:
} }
e = &qp->s_ack_queue[qp->r_head_ack_queue]; e = &qp->s_ack_queue[qp->r_head_ack_queue];
if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
atomic_dec(&e->rdma_sge.mr->refcount); qib_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL; e->rdma_sge.mr = NULL;
} }
ateth = &ohdr->u.atomic_eth; ateth = &ohdr->u.atomic_eth;
@ -2210,7 +2206,7 @@ send_last:
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
be64_to_cpu(ateth->compare_data), be64_to_cpu(ateth->compare_data),
sdata); sdata);
atomic_dec(&qp->r_sge.sge.mr->refcount); qib_put_mr(qp->r_sge.sge.mr);
qp->r_sge.num_sge = 0; qp->r_sge.num_sge = 0;
e->opcode = opcode; e->opcode = opcode;
e->sent = 0; e->sent = 0;

View File

@ -110,7 +110,7 @@ bad_lkey:
while (j) { while (j) {
struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge; struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
atomic_dec(&sge->mr->refcount); qib_put_mr(sge->mr);
} }
ss->num_sge = 0; ss->num_sge = 0;
memset(&wc, 0, sizeof(wc)); memset(&wc, 0, sizeof(wc));
@ -501,7 +501,7 @@ again:
(u64) atomic64_add_return(sdata, maddr) - sdata : (u64) atomic64_add_return(sdata, maddr) - sdata :
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
sdata, wqe->wr.wr.atomic.swap); sdata, wqe->wr.wr.atomic.swap);
atomic_dec(&qp->r_sge.sge.mr->refcount); qib_put_mr(qp->r_sge.sge.mr);
qp->r_sge.num_sge = 0; qp->r_sge.num_sge = 0;
goto send_comp; goto send_comp;
@ -525,7 +525,7 @@ again:
sge->sge_length -= len; sge->sge_length -= len;
if (sge->sge_length == 0) { if (sge->sge_length == 0) {
if (!release) if (!release)
atomic_dec(&sge->mr->refcount); qib_put_mr(sge->mr);
if (--sqp->s_sge.num_sge) if (--sqp->s_sge.num_sge)
*sge = *sqp->s_sge.sg_list++; *sge = *sqp->s_sge.sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) { } else if (sge->length == 0 && sge->mr->lkey) {
@ -542,11 +542,7 @@ again:
sqp->s_len -= len; sqp->s_len -= len;
} }
if (release) if (release)
while (qp->r_sge.num_sge) { qib_put_ss(&qp->r_sge);
atomic_dec(&qp->r_sge.sge.mr->refcount);
if (--qp->r_sge.num_sge)
qp->r_sge.sge = *qp->r_sge.sg_list++;
}
if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
goto send_comp; goto send_comp;
@ -782,7 +778,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
for (i = 0; i < wqe->wr.num_sge; i++) { for (i = 0; i < wqe->wr.num_sge; i++) {
struct qib_sge *sge = &wqe->sg_list[i]; struct qib_sge *sge = &wqe->sg_list[i];
atomic_dec(&sge->mr->refcount); qib_put_mr(sge->mr);
} }
if (qp->ibqp.qp_type == IB_QPT_UD || if (qp->ibqp.qp_type == IB_QPT_UD ||
qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_SMI ||

View File

@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. * Copyright (c) 2012 Intel Corporation. All rights reserved.
* All rights reserved. * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
* *
* This software is available to you under a choice of one of two * This software is available to you under a choice of one of two
@ -342,15 +342,17 @@ static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0, 0); IB_CTRL2(chn), 0, 0);
if (ret < 0) if (ret < 0)
qib_dev_err(dd, "Failed checking TRIMDONE, chn %d" qib_dev_err(dd,
" (%s)\n", chn, where); "Failed checking TRIMDONE, chn %d (%s)\n",
chn, where);
if (!(ret & 0x10)) { if (!(ret & 0x10)) {
int probe; int probe;
baduns |= (1 << chn); baduns |= (1 << chn);
qib_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)." qib_dev_err(dd,
" (%s)\n", chn, ret, where); "TRIMDONE cleared on chn %d (%02X). (%s)\n",
chn, ret, where);
probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES, probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_PGUDP(0), 0, 0); IB_PGUDP(0), 0, 0);
qib_dev_err(dd, "probe is %d (%02X)\n", qib_dev_err(dd, "probe is %d (%02X)\n",
@ -375,8 +377,8 @@ static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0x10, 0x10); IB_CTRL2(chn), 0x10, 0x10);
if (ret < 0) if (ret < 0)
qib_dev_err(dd, "Failed re-setting " qib_dev_err(dd,
"TRIMDONE, chn %d (%s)\n", "Failed re-setting TRIMDONE, chn %d (%s)\n",
chn, where); chn, where);
} }
} }
@ -1144,10 +1146,10 @@ static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
if (ret < 0) { if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF; int sloc = loc >> EPB_ADDR_SHF;
qib_dev_err(dd, "pre-read failed: elt %d," qib_dev_err(dd,
" addr 0x%X, chnl %d\n", "pre-read failed: elt %d, addr 0x%X, chnl %d\n",
(sloc & 0xF), (sloc & 0xF),
(sloc >> 9) & 0x3f, chnl); (sloc >> 9) & 0x3f, chnl);
return ret; return ret;
} }
val = (ret & ~mask) | (val & mask); val = (ret & ~mask) | (val & mask);
@ -1157,9 +1159,9 @@ static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
if (ret < 0) { if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF; int sloc = loc >> EPB_ADDR_SHF;
qib_dev_err(dd, "Global WR failed: elt %d," qib_dev_err(dd,
" addr 0x%X, val %02X\n", "Global WR failed: elt %d, addr 0x%X, val %02X\n",
(sloc & 0xF), (sloc >> 9) & 0x3f, val); (sloc & 0xF), (sloc >> 9) & 0x3f, val);
} }
return ret; return ret;
} }
@ -1173,11 +1175,10 @@ static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
if (ret < 0) { if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF; int sloc = loc >> EPB_ADDR_SHF;
qib_dev_err(dd, "Write failed: elt %d," qib_dev_err(dd,
" addr 0x%X, chnl %d, val 0x%02X," "Write failed: elt %d, addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n",
" mask 0x%02X\n", (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
(sloc & 0xF), (sloc >> 9) & 0x3f, chnl, val & 0xFF, mask & 0xFF);
val & 0xFF, mask & 0xFF);
break; break;
} }
} }

View File

@ -1,5 +1,6 @@
/* /*
* Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved. * Copyright (c) 2012 Intel Corporation. All rights reserved.
* Copyright (c) 2007 - 2012 QLogic Corporation. All rights reserved.
* *
* This software is available to you under a choice of one of two * This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU * licenses. You may choose to be licensed under the terms of the GNU
@ -276,8 +277,8 @@ static int alloc_sdma(struct qib_pportdata *ppd)
GFP_KERNEL); GFP_KERNEL);
if (!ppd->sdma_descq) { if (!ppd->sdma_descq) {
qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor " qib_dev_err(ppd->dd,
"FIFO memory\n"); "failed to allocate SendDMA descriptor FIFO memory\n");
goto bail; goto bail;
} }
@ -285,8 +286,8 @@ static int alloc_sdma(struct qib_pportdata *ppd)
ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev, ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL); PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
if (!ppd->sdma_head_dma) { if (!ppd->sdma_head_dma) {
qib_dev_err(ppd->dd, "failed to allocate SendDMA " qib_dev_err(ppd->dd,
"head memory\n"); "failed to allocate SendDMA head memory\n");
goto cleanup_descq; goto cleanup_descq;
} }
ppd->sdma_head_dma[0] = 0; ppd->sdma_head_dma[0] = 0;

View File

@ -1,5 +1,6 @@
/* /*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. * Copyright (c) 2012 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2006 PathScale, Inc. All rights reserved. * Copyright (c) 2006 PathScale, Inc. All rights reserved.
* *
* This software is available to you under a choice of one of two * This software is available to you under a choice of one of two
@ -33,41 +34,7 @@
#include <linux/ctype.h> #include <linux/ctype.h>
#include "qib.h" #include "qib.h"
#include "qib_mad.h"
/**
* qib_parse_ushort - parse an unsigned short value in an arbitrary base
* @str: the string containing the number
* @valp: where to put the result
*
* Returns the number of bytes consumed, or negative value on error.
*/
static int qib_parse_ushort(const char *str, unsigned short *valp)
{
unsigned long val;
char *end;
int ret;
if (!isdigit(str[0])) {
ret = -EINVAL;
goto bail;
}
val = simple_strtoul(str, &end, 0);
if (val > 0xffff) {
ret = -EINVAL;
goto bail;
}
*valp = val;
ret = end + 1 - str;
if (ret == 0)
ret = -EINVAL;
bail:
return ret;
}
/* start of per-port functions */ /* start of per-port functions */
/* /*
@ -90,7 +57,11 @@ static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
int ret; int ret;
u16 val; u16 val;
ret = qib_parse_ushort(buf, &val); ret = kstrtou16(buf, 0, &val);
if (ret) {
qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
return ret;
}
/* /*
* Set the "intentional" heartbeat enable per either of * Set the "intentional" heartbeat enable per either of
@ -99,10 +70,7 @@ static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
* because entering loopback mode overrides it and automatically * because entering loopback mode overrides it and automatically
* disables heartbeat. * disables heartbeat.
*/ */
if (ret >= 0) ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
if (ret < 0)
qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
return ret < 0 ? ret : count; return ret < 0 ? ret : count;
} }
@ -126,12 +94,14 @@ static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
int ret; int ret;
u16 val; u16 val;
ret = qib_parse_ushort(buf, &val); ret = kstrtou16(buf, 0, &val);
if (ret > 0) if (ret) {
qib_set_led_override(ppd, val);
else
qib_dev_err(dd, "attempt to set invalid LED override\n"); qib_dev_err(dd, "attempt to set invalid LED override\n");
return ret < 0 ? ret : count; return ret;
}
qib_set_led_override(ppd, val);
return count;
} }
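
Both stores now rely on kstrtou16(), which is stricter than the removed qib_parse_ushort(): it returns 0 or a negative errno rather than bytes consumed, and it rejects trailing junk and values that do not fit in 16 bits. A userspace approximation of that contract (the function name is illustrative; the kernel version also tolerates a single trailing newline, as modeled here):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_u16(const char *s, unsigned short *out)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(s, &end, 0);	/* base 0: accepts 0x.., 0.., decimal */
	if (errno || end == s)
		return -EINVAL;
	if (*end == '\n')		/* tolerate one trailing newline */
		end++;
	if (*end != '\0' || v > 0xffff)	/* trailing junk or overflow */
		return -EINVAL;
	*out = (unsigned short)v;
	return 0;
}

int main(void)
{
	unsigned short val;

	printf("\"0x12\"  -> %d\n", parse_u16("0x12", &val));	/* 0 */
	printf("\"12abc\" -> %d\n", parse_u16("12abc", &val));	/* -EINVAL */
	printf("\"70000\" -> %d\n", parse_u16("70000", &val));	/* -EINVAL */
	return 0;
}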
static ssize_t show_status(struct qib_pportdata *ppd, char *buf) static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
@ -231,6 +201,98 @@ static struct attribute *port_default_attributes[] = {
NULL NULL
}; };
/*
* Start of per-port congestion control structures and support code
*/
/*
* Congestion control table size followed by table entries
*/
static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
int ret;
struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, pport_cc_kobj);
if (!qib_cc_table_size || !ppd->ccti_entries_shadow)
return -EINVAL;
ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
+ sizeof(__be16);
if (pos > ret)
return -EINVAL;
if (count > ret - pos)
count = ret - pos;
if (!count)
return count;
spin_lock(&ppd->cc_shadow_lock);
memcpy(buf, ppd->ccti_entries_shadow, count);
spin_unlock(&ppd->cc_shadow_lock);
return count;
}
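
Reads of these binary attributes are clamped to the live size of the shadow structure, so userspace sees a short read at end-of-file. A sketch of consuming cc_table_bin, assuming the conventional IB sysfs layout plus the CCMgtA kobject created below (the exact path is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path =
	    "/sys/class/infiniband/qib0/ports/1/CCMgtA/cc_table_bin";
	unsigned char buf[4096];
	unsigned short last;
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));	/* driver clamps to the live size */
	close(fd);
	if (n < (ssize_t)sizeof(last))
		return 1;
	memcpy(&last, buf, sizeof(last)); /* leading u16: last entry index */
	printf("read %zd bytes, ccti_last_entry=%hu\n", n, last);
	return 0;
}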
static void qib_port_release(struct kobject *kobj)
{
/* nothing to do since memory is freed by qib_free_devdata() */
}
static struct kobj_type qib_port_cc_ktype = {
.release = qib_port_release,
};
static struct bin_attribute cc_table_bin_attr = {
.attr = {.name = "cc_table_bin", .mode = 0444},
.read = read_cc_table_bin,
.size = PAGE_SIZE,
};
/*
* Congestion settings: port control, control map and an array of 16
* entries for the congestion entries - increase, timer, event log
* trigger threshold and the minimum injection rate delay.
*/
static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
int ret;
struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, pport_cc_kobj);
if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
return -EINVAL;
ret = sizeof(struct ib_cc_congestion_setting_attr_shadow);
if (pos > ret)
return -EINVAL;
if (count > ret - pos)
count = ret - pos;
if (!count)
return count;
spin_lock(&ppd->cc_shadow_lock);
memcpy(buf, ppd->congestion_entries_shadow, count);
spin_unlock(&ppd->cc_shadow_lock);
return count;
}
static struct bin_attribute cc_setting_bin_attr = {
.attr = {.name = "cc_settings_bin", .mode = 0444},
.read = read_cc_setting_bin,
.size = PAGE_SIZE,
};
static ssize_t qib_portattr_show(struct kobject *kobj, static ssize_t qib_portattr_show(struct kobject *kobj,
struct attribute *attr, char *buf) struct attribute *attr, char *buf)
{ {
@ -253,10 +315,6 @@ static ssize_t qib_portattr_store(struct kobject *kobj,
return pattr->store(ppd, buf, len); return pattr->store(ppd, buf, len);
} }
static void qib_port_release(struct kobject *kobj)
{
/* nothing to do since memory is freed by qib_free_devdata() */
}
static const struct sysfs_ops qib_port_ops = { static const struct sysfs_ops qib_port_ops = {
.show = qib_portattr_show, .show = qib_portattr_show,
@ -411,12 +469,12 @@ static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
struct qib_pportdata *ppd = struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, diagc_kobj); container_of(kobj, struct qib_pportdata, diagc_kobj);
struct qib_ibport *qibp = &ppd->ibport_data; struct qib_ibport *qibp = &ppd->ibport_data;
char *endp; u32 val;
long val = simple_strtol(buf, &endp, 0); int ret;
if (val < 0 || endp == buf)
return -EINVAL;
ret = kstrtou32(buf, 0, &val);
if (ret)
return ret;
*(u32 *)((char *) qibp + dattr->counter) = val; *(u32 *)((char *) qibp + dattr->counter) = val;
return size; return size;
} }
@ -649,8 +707,9 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
int ret; int ret;
if (!port_num || port_num > dd->num_pports) { if (!port_num || port_num > dd->num_pports) {
qib_dev_err(dd, "Skipping infiniband class with " qib_dev_err(dd,
"invalid port %u\n", port_num); "Skipping infiniband class with invalid port %u\n",
port_num);
ret = -ENODEV; ret = -ENODEV;
goto bail; goto bail;
} }
@ -659,8 +718,9 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj, ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
"linkcontrol"); "linkcontrol");
if (ret) { if (ret) {
qib_dev_err(dd, "Skipping linkcontrol sysfs info, " qib_dev_err(dd,
"(err %d) port %u\n", ret, port_num); "Skipping linkcontrol sysfs info, (err %d) port %u\n",
ret, port_num);
goto bail; goto bail;
} }
kobject_uevent(&ppd->pport_kobj, KOBJ_ADD); kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
@ -668,26 +728,70 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj, ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
"sl2vl"); "sl2vl");
if (ret) { if (ret) {
qib_dev_err(dd, "Skipping sl2vl sysfs info, " qib_dev_err(dd,
"(err %d) port %u\n", ret, port_num); "Skipping sl2vl sysfs info, (err %d) port %u\n",
goto bail_sl; ret, port_num);
goto bail_link;
} }
kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD); kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj, ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
"diag_counters"); "diag_counters");
if (ret) { if (ret) {
qib_dev_err(dd, "Skipping diag_counters sysfs info, " qib_dev_err(dd,
"(err %d) port %u\n", ret, port_num); "Skipping diag_counters sysfs info, (err %d) port %u\n",
goto bail_diagc; ret, port_num);
goto bail_sl;
} }
kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD); kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
return 0;
ret = kobject_init_and_add(&ppd->pport_cc_kobj, &qib_port_cc_ktype,
kobj, "CCMgtA");
if (ret) {
qib_dev_err(dd,
"Skipping Congestion Control sysfs info, (err %d) port %u\n",
ret, port_num);
goto bail_diagc;
}
kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
&cc_setting_bin_attr);
if (ret) {
qib_dev_err(dd,
"Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
ret, port_num);
goto bail_cc;
}
ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
&cc_table_bin_attr);
if (ret) {
qib_dev_err(dd,
"Skipping Congestion Control table sysfs info, (err %d) port %u\n",
ret, port_num);
goto bail_cc_entry_bin;
}
qib_devinfo(dd->pcidev,
"IB%u: Congestion Control Agent enabled for port %d\n",
dd->unit, port_num);
return 0; return 0;
bail_cc_entry_bin:
sysfs_remove_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
bail_cc:
kobject_put(&ppd->pport_cc_kobj);
bail_diagc: bail_diagc:
kobject_put(&ppd->sl2vl_kobj); kobject_put(&ppd->diagc_kobj);
bail_sl: bail_sl:
kobject_put(&ppd->sl2vl_kobj);
bail_link:
kobject_put(&ppd->pport_kobj); kobject_put(&ppd->pport_kobj);
bail: bail:
return ret; return ret;
@ -720,7 +824,15 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
for (i = 0; i < dd->num_pports; i++) { for (i = 0; i < dd->num_pports; i++) {
ppd = &dd->pport[i]; ppd = &dd->pport[i];
kobject_put(&ppd->pport_kobj); if (qib_cc_table_size &&
ppd->congestion_entries_shadow) {
sysfs_remove_bin_file(&ppd->pport_cc_kobj,
&cc_setting_bin_attr);
sysfs_remove_bin_file(&ppd->pport_cc_kobj,
&cc_table_bin_attr);
kobject_put(&ppd->pport_cc_kobj);
}
kobject_put(&ppd->sl2vl_kobj); kobject_put(&ppd->sl2vl_kobj);
kobject_put(&ppd->pport_kobj);
} }
} }

View File

@ -1,5 +1,6 @@
/* /*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. * Copyright (c) 2012 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
* *
* This software is available to you under a choice of one of two * This software is available to you under a choice of one of two
@ -449,8 +450,9 @@ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
goto failed_write; goto failed_write;
ret = qib_twsi_wr(dd, addr, 0); ret = qib_twsi_wr(dd, addr, 0);
if (ret) { if (ret) {
qib_dev_err(dd, "Failed to write interface" qib_dev_err(dd,
" write addr %02X\n", addr); "Failed to write interface write addr %02X\n",
addr);
goto failed_write; goto failed_write;
} }
} }

View File

@ -281,11 +281,7 @@ inv:
set_bit(QIB_R_REWIND_SGE, &qp->r_aflags); set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
qp->r_sge.num_sge = 0; qp->r_sge.num_sge = 0;
} else } else
while (qp->r_sge.num_sge) { qib_put_ss(&qp->r_sge);
atomic_dec(&qp->r_sge.sge.mr->refcount);
if (--qp->r_sge.num_sge)
qp->r_sge.sge = *qp->r_sge.sg_list++;
}
qp->r_state = OP(SEND_LAST); qp->r_state = OP(SEND_LAST);
switch (opcode) { switch (opcode) {
case OP(SEND_FIRST): case OP(SEND_FIRST):
@ -403,14 +399,9 @@ send_last:
if (unlikely(wc.byte_len > qp->r_len)) if (unlikely(wc.byte_len > qp->r_len))
goto rewind; goto rewind;
wc.opcode = IB_WC_RECV; wc.opcode = IB_WC_RECV;
last_imm:
qib_copy_sge(&qp->r_sge, data, tlen, 0); qib_copy_sge(&qp->r_sge, data, tlen, 0);
while (qp->s_rdma_read_sge.num_sge) { qib_put_ss(&qp->s_rdma_read_sge);
atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount); last_imm:
if (--qp->s_rdma_read_sge.num_sge)
qp->s_rdma_read_sge.sge =
*qp->s_rdma_read_sge.sg_list++;
}
wc.wr_id = qp->r_wr_id; wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS; wc.status = IB_WC_SUCCESS;
wc.qp = &qp->ibqp; wc.qp = &qp->ibqp;
@ -493,13 +484,7 @@ rdma_last_imm:
if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop; goto drop;
if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
while (qp->s_rdma_read_sge.num_sge) { qib_put_ss(&qp->s_rdma_read_sge);
atomic_dec(&qp->s_rdma_read_sge.sge.mr->
refcount);
if (--qp->s_rdma_read_sge.num_sge)
qp->s_rdma_read_sge.sge =
*qp->s_rdma_read_sge.sg_list++;
}
else { else {
ret = qib_get_rwqe(qp, 1); ret = qib_get_rwqe(qp, 1);
if (ret < 0) if (ret < 0)
@ -509,6 +494,8 @@ rdma_last_imm:
} }
wc.byte_len = qp->r_len; wc.byte_len = qp->r_len;
wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
qib_copy_sge(&qp->r_sge, data, tlen, 1);
qib_put_ss(&qp->r_sge);
goto last_imm; goto last_imm;
case OP(RDMA_WRITE_LAST): case OP(RDMA_WRITE_LAST):
@ -524,11 +511,7 @@ rdma_last:
if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop; goto drop;
qib_copy_sge(&qp->r_sge, data, tlen, 1); qib_copy_sge(&qp->r_sge, data, tlen, 1);
while (qp->r_sge.num_sge) { qib_put_ss(&qp->r_sge);
atomic_dec(&qp->r_sge.sge.mr->refcount);
if (--qp->r_sge.num_sge)
qp->r_sge.sge = *qp->r_sge.sg_list++;
}
break; break;
default: default:

View File

@ -194,11 +194,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
} }
length -= len; length -= len;
} }
while (qp->r_sge.num_sge) { qib_put_ss(&qp->r_sge);
atomic_dec(&qp->r_sge.sge.mr->refcount);
if (--qp->r_sge.num_sge)
qp->r_sge.sge = *qp->r_sge.sg_list++;
}
if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
goto bail_unlock; goto bail_unlock;
wc.wr_id = qp->r_wr_id; wc.wr_id = qp->r_wr_id;
@ -556,11 +552,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
} else } else
qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1); qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
while (qp->r_sge.num_sge) { qib_put_ss(&qp->r_sge);
atomic_dec(&qp->r_sge.sge.mr->refcount);
if (--qp->r_sge.num_sge)
qp->r_sge.sge = *qp->r_sge.sg_list++;
}
if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
return; return;
wc.wr_id = qp->r_wr_id; wc.wr_id = qp->r_wr_id;

View File

@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. * Copyright (c) 2012 Intel Corporation. All rights reserved.
* All rights reserved. * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
* *
* This software is available to you under a choice of one of two * This software is available to you under a choice of one of two
@ -183,7 +183,7 @@ void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
sge->sge_length -= len; sge->sge_length -= len;
if (sge->sge_length == 0) { if (sge->sge_length == 0) {
if (release) if (release)
atomic_dec(&sge->mr->refcount); qib_put_mr(sge->mr);
if (--ss->num_sge) if (--ss->num_sge)
*sge = *ss->sg_list++; *sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) { } else if (sge->length == 0 && sge->mr->lkey) {
@ -224,7 +224,7 @@ void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
sge->sge_length -= len; sge->sge_length -= len;
if (sge->sge_length == 0) { if (sge->sge_length == 0) {
if (release) if (release)
atomic_dec(&sge->mr->refcount); qib_put_mr(sge->mr);
if (--ss->num_sge) if (--ss->num_sge)
*sge = *ss->sg_list++; *sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) { } else if (sge->length == 0 && sge->mr->lkey) {
@ -333,7 +333,8 @@ static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
* @qp: the QP to post on * @qp: the QP to post on
* @wr: the work request to send * @wr: the work request to send
*/ */
static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr) static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
int *scheduled)
{ {
struct qib_swqe *wqe; struct qib_swqe *wqe;
u32 next; u32 next;
@ -435,11 +436,17 @@ bail_inval_free:
while (j) { while (j) {
struct qib_sge *sge = &wqe->sg_list[--j]; struct qib_sge *sge = &wqe->sg_list[--j];
atomic_dec(&sge->mr->refcount); qib_put_mr(sge->mr);
} }
bail_inval: bail_inval:
ret = -EINVAL; ret = -EINVAL;
bail: bail:
if (!ret && !wr->next &&
!qib_sdma_empty(
dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {
qib_schedule_send(qp);
*scheduled = 1;
}
spin_unlock_irqrestore(&qp->s_lock, flags); spin_unlock_irqrestore(&qp->s_lock, flags);
return ret; return ret;
} }
@ -457,9 +464,10 @@ static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
{ {
struct qib_qp *qp = to_iqp(ibqp); struct qib_qp *qp = to_iqp(ibqp);
int err = 0; int err = 0;
int scheduled = 0;
for (; wr; wr = wr->next) { for (; wr; wr = wr->next) {
err = qib_post_one_send(qp, wr); err = qib_post_one_send(qp, wr, &scheduled);
if (err) { if (err) {
*bad_wr = wr; *bad_wr = wr;
goto bail; goto bail;
@ -467,7 +475,8 @@ static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
} }
/* Try to do the send work in the caller's context. */ /* Try to do the send work in the caller's context. */
qib_do_send(&qp->s_work); if (!scheduled)
qib_do_send(&qp->s_work);
bail: bail:
return err; return err;
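A condensed restatement of the two hunks above, as a descriptive comment (no new behavior is being proposed here):

/* When the last WR has been queued and the port's SDMA engine is not
 * yet empty, qib_post_one_send() schedules qp->s_work and reports it
 * through *scheduled, so qib_post_send() skips the inline
 * qib_do_send() call and lets the workqueue process the send instead.
 */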
@ -978,7 +987,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
if (atomic_dec_and_test(&qp->refcount)) if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait); wake_up(&qp->wait);
if (tx->mr) { if (tx->mr) {
atomic_dec(&tx->mr->refcount); qib_put_mr(tx->mr);
tx->mr = NULL; tx->mr = NULL;
} }
if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) { if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
@ -1336,7 +1345,7 @@ done:
} }
qib_sendbuf_done(dd, pbufn); qib_sendbuf_done(dd, pbufn);
if (qp->s_rdma_mr) { if (qp->s_rdma_mr) {
atomic_dec(&qp->s_rdma_mr->refcount); qib_put_mr(qp->s_rdma_mr);
qp->s_rdma_mr = NULL; qp->s_rdma_mr = NULL;
} }
if (qp->s_wqe) { if (qp->s_wqe) {
@ -1845,6 +1854,23 @@ bail:
return ret; return ret;
} }
struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
{
struct ib_ah_attr attr;
struct ib_ah *ah = ERR_PTR(-EINVAL);
struct qib_qp *qp0;
memset(&attr, 0, sizeof attr);
attr.dlid = dlid;
attr.port_num = ppd_from_ibp(ibp)->port;
rcu_read_lock();
qp0 = rcu_dereference(ibp->qp0);
if (qp0)
ah = ib_create_ah(qp0->ibqp.pd, &attr);
rcu_read_unlock();
return ah;
}
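A hedged usage sketch for the new helper; the assumption that ibp->sm_lid holds the subnet manager's LID and the trap-send step are illustrative, not part of this patch:

	/* Address a management packet to the SM using QP0's PD: */
	struct ib_ah *ah = qib_create_qp0_ah(ibp, ibp->sm_lid);

	if (!IS_ERR(ah)) {
		/* post the trap/SMP using ah, then release it */
		ib_destroy_ah(ah);
	}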
/** /**
* qib_destroy_ah - destroy an address handle * qib_destroy_ah - destroy an address handle
* @ibah: the AH to destroy * @ibah: the AH to destroy
@ -2060,13 +2086,15 @@ int qib_register_ib_device(struct qib_devdata *dd)
spin_lock_init(&dev->lk_table.lock); spin_lock_init(&dev->lk_table.lock);
dev->lk_table.max = 1 << ib_qib_lkey_table_size; dev->lk_table.max = 1 << ib_qib_lkey_table_size;
lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
dev->lk_table.table = (struct qib_mregion **) dev->lk_table.table = (struct qib_mregion __rcu **)
__get_free_pages(GFP_KERNEL, get_order(lk_tab_size)); __get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
if (dev->lk_table.table == NULL) { if (dev->lk_table.table == NULL) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_lk; goto err_lk;
} }
memset(dev->lk_table.table, 0, lk_tab_size); RCU_INIT_POINTER(dev->dma_mr, NULL);
for (i = 0; i < dev->lk_table.max; i++)
RCU_INIT_POINTER(dev->lk_table.table[i], NULL);
INIT_LIST_HEAD(&dev->pending_mmaps); INIT_LIST_HEAD(&dev->pending_mmaps);
spin_lock_init(&dev->pending_lock); spin_lock_init(&dev->pending_lock);
dev->mmap_offset = PAGE_SIZE; dev->mmap_offset = PAGE_SIZE;
@ -2289,3 +2317,17 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
get_order(lk_tab_size)); get_order(lk_tab_size));
kfree(dev->qp_table); kfree(dev->qp_table);
} }
/*
* This must be called with s_lock held.
*/
void qib_schedule_send(struct qib_qp *qp)
{
if (qib_send_ok(qp)) {
struct qib_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
queue_work(ppd->qib_wq, &qp->s_work);
}
}
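The helper moved out of line because it now resolves the per-port workqueue (ppd->qib_wq) instead of queueing on the global ib_wq, but the locking rule is unchanged. A sketch of the caller discipline, assuming qp is in scope:

	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	qib_schedule_send(qp);	/* queues qp->s_work on ppd->qib_wq */
	spin_unlock_irqrestore(&qp->s_lock, flags);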


@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. * Copyright (c) 2012 Intel Corporation. All rights reserved.
* All rights reserved. * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
* *
* This software is available to you under a choice of one of two * This software is available to you under a choice of one of two
@ -41,6 +41,7 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/kref.h> #include <linux/kref.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/completion.h>
#include <rdma/ib_pack.h> #include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h> #include <rdma/ib_user_verbs.h>
@ -302,6 +303,9 @@ struct qib_mregion {
u32 max_segs; /* number of qib_segs in all the arrays */ u32 max_segs; /* number of qib_segs in all the arrays */
u32 mapsz; /* size of the map array */ u32 mapsz; /* size of the map array */
u8 page_shift; /* 0 - non uniform/non powerof2 sizes */ u8 page_shift; /* 0 - non uniform/non powerof2 sizes */
u8 lkey_published; /* in global table */
struct completion comp; /* complete when refcount goes to zero */
struct rcu_head list;
atomic_t refcount; atomic_t refcount;
struct qib_segarray *map[0]; /* the segments */ struct qib_segarray *map[0]; /* the segments */
}; };
@ -416,7 +420,7 @@ struct qib_qp {
/* read mostly fields above and below */ /* read mostly fields above and below */
struct ib_ah_attr remote_ah_attr; struct ib_ah_attr remote_ah_attr;
struct ib_ah_attr alt_ah_attr; struct ib_ah_attr alt_ah_attr;
struct qib_qp *next; /* link list for QPN hash table */ struct qib_qp __rcu *next; /* link list for QPN hash table */
struct qib_swqe *s_wq; /* send work queue */ struct qib_swqe *s_wq; /* send work queue */
struct qib_mmap_info *ip; struct qib_mmap_info *ip;
struct qib_ib_header *s_hdr; /* next packet header to send */ struct qib_ib_header *s_hdr; /* next packet header to send */
@ -646,7 +650,7 @@ struct qib_lkey_table {
u32 next; /* next unused index (speeds search) */ u32 next; /* next unused index (speeds search) */
u32 gen; /* generation count */ u32 gen; /* generation count */
u32 max; /* size of the table */ u32 max; /* size of the table */
struct qib_mregion **table; struct qib_mregion __rcu **table;
}; };
struct qib_opcode_stats { struct qib_opcode_stats {
@ -655,8 +659,8 @@ struct qib_opcode_stats {
}; };
struct qib_ibport { struct qib_ibport {
struct qib_qp *qp0; struct qib_qp __rcu *qp0;
struct qib_qp *qp1; struct qib_qp __rcu *qp1;
struct ib_mad_agent *send_agent; /* agent for SMI (traps) */ struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
struct qib_ah *sm_ah; struct qib_ah *sm_ah;
struct qib_ah *smi_ah; struct qib_ah *smi_ah;
@ -723,12 +727,13 @@ struct qib_ibport {
struct qib_opcode_stats opstats[128]; struct qib_opcode_stats opstats[128];
}; };
struct qib_ibdev { struct qib_ibdev {
struct ib_device ibdev; struct ib_device ibdev;
struct list_head pending_mmaps; struct list_head pending_mmaps;
spinlock_t mmap_offset_lock; /* protect mmap_offset */ spinlock_t mmap_offset_lock; /* protect mmap_offset */
u32 mmap_offset; u32 mmap_offset;
struct qib_mregion *dma_mr; struct qib_mregion __rcu *dma_mr;
/* QP numbers are shared by all IB ports */ /* QP numbers are shared by all IB ports */
struct qib_qpn_table qpn_table; struct qib_qpn_table qpn_table;
@ -739,7 +744,7 @@ struct qib_ibdev {
struct list_head memwait; /* list for wait kernel memory */ struct list_head memwait; /* list for wait kernel memory */
struct list_head txreq_free; struct list_head txreq_free;
struct timer_list mem_timer; struct timer_list mem_timer;
struct qib_qp **qp_table; struct qib_qp __rcu **qp_table;
struct qib_pio_header *pio_hdrs; struct qib_pio_header *pio_hdrs;
dma_addr_t pio_hdrs_phys; dma_addr_t pio_hdrs_phys;
/* list of QPs waiting for RNR timer */ /* list of QPs waiting for RNR timer */
@ -832,11 +837,7 @@ extern struct workqueue_struct *qib_cq_wq;
/* /*
* This must be called with s_lock held. * This must be called with s_lock held.
*/ */
static inline void qib_schedule_send(struct qib_qp *qp) void qib_schedule_send(struct qib_qp *qp);
{
if (qib_send_ok(qp))
queue_work(ib_wq, &qp->s_work);
}
static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{ {
@ -933,6 +934,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);
void qib_rc_rnr_retry(unsigned long arg); void qib_rc_rnr_retry(unsigned long arg);
void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr); void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);
@ -944,9 +947,9 @@ int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);
void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
int has_grh, void *data, u32 tlen, struct qib_qp *qp); int has_grh, void *data, u32 tlen, struct qib_qp *qp);
int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr); int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);
int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr); void qib_free_lkey(struct qib_mregion *mr);
int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
struct qib_sge *isge, struct ib_sge *sge, int acc); struct qib_sge *isge, struct ib_sge *sge, int acc);
@ -1014,6 +1017,29 @@ int qib_unmap_fmr(struct list_head *fmr_list);
int qib_dealloc_fmr(struct ib_fmr *ibfmr); int qib_dealloc_fmr(struct ib_fmr *ibfmr);
static inline void qib_get_mr(struct qib_mregion *mr)
{
atomic_inc(&mr->refcount);
}
void mr_rcu_callback(struct rcu_head *list);
static inline void qib_put_mr(struct qib_mregion *mr)
{
if (unlikely(atomic_dec_and_test(&mr->refcount)))
call_rcu(&mr->list, mr_rcu_callback);
}
static inline void qib_put_ss(struct qib_sge_state *ss)
{
while (ss->num_sge) {
qib_put_mr(ss->sge.mr);
if (--ss->num_sge)
ss->sge = *ss->sg_list++;
}
}
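To make the new reference scheme concrete, a hedged sketch of the lookup side; the function itself is illustrative, only qib_get_mr()/qib_put_mr() and the __rcu lkey table come from this series. A reader pins the region under rcu_read_lock(), and the final qib_put_mr() defers the actual free through mr_rcu_callback():

	static struct qib_mregion *sketch_lkey_lookup(struct qib_lkey_table *rkt,
						      u32 idx)
	{
		struct qib_mregion *mr;

		rcu_read_lock();
		mr = rcu_dereference(rkt->table[idx]);
		if (mr)
			qib_get_mr(mr);	/* pin across use */
		rcu_read_unlock();
		return mr;	/* caller drops it later with qib_put_mr() */
	}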
void qib_release_mmap_info(struct kref *ref); void qib_release_mmap_info(struct kref *ref);
struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size, struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,


@ -1,5 +1,6 @@
/* /*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. * Copyright (c) 2012 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
* *
* This software is available to you under a choice of one of two * This software is available to you under a choice of one of two
@ -102,10 +103,10 @@ int qib_enable_wc(struct qib_devdata *dd)
u64 atmp; u64 atmp;
atmp = pioaddr & ~(piolen - 1); atmp = pioaddr & ~(piolen - 1);
if (atmp < addr || (atmp + piolen) > (addr + len)) { if (atmp < addr || (atmp + piolen) > (addr + len)) {
qib_dev_err(dd, "No way to align address/size " qib_dev_err(dd,
"(%llx/%llx), no WC mtrr\n", "No way to align address/size (%llx/%llx), no WC mtrr\n",
(unsigned long long) atmp, (unsigned long long) atmp,
(unsigned long long) piolen << 1); (unsigned long long) piolen << 1);
ret = -ENODEV; ret = -ENODEV;
} else { } else {
pioaddr = atmp; pioaddr = atmp;
@ -120,8 +121,7 @@ int qib_enable_wc(struct qib_devdata *dd)
if (cookie < 0) { if (cookie < 0) {
{ {
qib_devinfo(dd->pcidev, qib_devinfo(dd->pcidev,
"mtrr_add() WC for PIO bufs " "mtrr_add() WC for PIO bufs failed (%d)\n",
"failed (%d)\n",
cookie); cookie);
ret = -EINVAL; ret = -EINVAL;
} }


@ -1376,7 +1376,7 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
if (skb->protocol == htons(ETH_P_IP)) if (skb->protocol == htons(ETH_P_IP))
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) #if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6)) else if (skb->protocol == htons(ETH_P_IPV6))
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
#endif #endif


@ -131,7 +131,7 @@ static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
} }
static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
enum mlx4_dev_event event, int port) enum mlx4_dev_event event, unsigned long port)
{ {
struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr; struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
struct mlx4_en_priv *priv; struct mlx4_en_priv *priv;
@ -156,7 +156,8 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
if (port < 1 || port > dev->caps.num_ports || if (port < 1 || port > dev->caps.num_ports ||
!mdev->pndev[port]) !mdev->pndev[port])
return; return;
mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port); mlx4_warn(mdev, "Unhandled event %d for port %d\n", event,
(int) port);
} }
} }


@ -82,6 +82,15 @@ enum {
(1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \ (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \
(1ull << MLX4_EVENT_TYPE_FATAL_WARNING)) (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))
static u64 get_async_ev_mask(struct mlx4_dev *dev)
{
u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
return async_ev_mask;
}
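For reference, the conditional bit works out as follows (the event type value is taken from the enum added later in this series):

/* MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT == 0x1d, so when the firmware
 * advertises MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV the helper returns
 * MLX4_ASYNC_EVENT_MASK | (1ull << 0x1d), i.e. bit 0x20000000ull is
 * added to the async EQ mask. */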
static void eq_set_ci(struct mlx4_eq *eq, int req_not) static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{ {
__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) | __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
@ -473,6 +482,11 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
break; break;
case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
(unsigned long) eqe);
break;
case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
case MLX4_EVENT_TYPE_ECC_DETECT: case MLX4_EVENT_TYPE_ECC_DETECT:
default: default:
@ -956,7 +970,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
priv->eq_table.have_irq = 1; priv->eq_table.have_irq = 1;
} }
err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
if (err) if (err)
mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
@ -996,7 +1010,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_priv *priv = mlx4_priv(dev);
int i; int i;
mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1, mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
mlx4_free_irqs(dev); mlx4_free_irqs(dev);
@ -1040,7 +1054,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
mlx4_cmd_use_polling(dev); mlx4_cmd_use_polling(dev);
/* Map the new eq to handle all asynchronous events */ /* Map the new eq to handle all asynchronous events */
err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[i].eqn); priv->eq_table.eq[i].eqn);
if (err) { if (err) {
mlx4_warn(dev, "Failed mapping eq for interrupt test\n"); mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
@ -1054,7 +1068,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
} }
/* Return to default */ /* Return to default */
mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
return err; return err;
} }


@ -109,6 +109,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
[41] = "Unicast VEP steering support", [41] = "Unicast VEP steering support",
[42] = "Multicast VEP steering support", [42] = "Multicast VEP steering support",
[48] = "Counters support", [48] = "Counters support",
[59] = "Port management change event support",
}; };
int i; int i;
@ -173,6 +174,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
#define QUERY_FUNC_CAP_FMR_OFFSET 0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18
@ -182,25 +184,44 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c #define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0X30 #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0X30
#define QUERY_FUNC_CAP_FMR_FLAG 0x80
#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
#define QUERY_FUNC_CAP_FLAG_ETH 0x80
/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
#define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET 0x8
#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc #define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc
#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC 0x40
#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN 0x80
#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80
if (vhcr->op_modifier == 1) { if (vhcr->op_modifier == 1) {
field = vhcr->in_modifier; field = vhcr->in_modifier;
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
field = 0; /* ensure fvl bit is not set */ field = 0;
/* ensure force vlan and force mac bits are not set */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
/* ensure that phy_wqe_gid bit is not set */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
} else if (vhcr->op_modifier == 0) { } else if (vhcr->op_modifier == 0) {
field = 1 << 7; /* enable only ethernet interface */ /* enable rdma and ethernet interfaces */
field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
field = dev->caps.num_ports; field = dev->caps.num_ports;
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
size = 0; /* no PF behavious is set for now */ size = 0; /* no PF behaviour is set for now */
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET); MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
field = 0; /* protected FMR support not available as yet */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);
size = dev->caps.num_qps; size = dev->caps.num_qps;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
@ -253,11 +274,12 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
outbox = mailbox->buf; outbox = mailbox->buf;
MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET); MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
if (!(field & (1 << 7))) { if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
mlx4_err(dev, "The host doesn't support eth interface\n"); mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
err = -EPROTONOSUPPORT; err = -EPROTONOSUPPORT;
goto out; goto out;
} }
func_cap->flags = field;
MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
func_cap->num_ports = field; func_cap->num_ports = field;
@ -296,17 +318,27 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
if (err) if (err)
goto out; goto out;
MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
if (field & (1 << 7)) { MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
mlx4_err(dev, "VLAN is enforced on this port\n"); if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
err = -EPROTONOSUPPORT; mlx4_err(dev, "VLAN is enforced on this port\n");
goto out; err = -EPROTONOSUPPORT;
} goto out;
}
if (field & (1 << 6)) { if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
mlx4_err(dev, "Force mac is enabled on this port\n"); mlx4_err(dev, "Force mac is enabled on this port\n");
err = -EPROTONOSUPPORT; err = -EPROTONOSUPPORT;
goto out; goto out;
}
} else if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB) {
MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
mlx4_err(dev, "phy_wqe_gid is "
"enforced on this ib port\n");
err = -EPROTONOSUPPORT;
goto out;
}
} }
MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
@ -698,14 +730,12 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
{ {
u64 def_mac; u64 def_mac;
u8 port_type; u8 port_type;
u16 short_field;
int err; int err;
#define MLX4_PORT_SUPPORT_IB (1 << 0) #define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
#define MLX4_PORT_SUGGEST_TYPE (1 << 3) #define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
#define MLX4_PORT_DEFAULT_SENSE (1 << 4) #define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e
#define MLX4_VF_PORT_ETH_ONLY_MASK (0xff & ~MLX4_PORT_SUPPORT_IB & \
~MLX4_PORT_SUGGEST_TYPE & \
~MLX4_PORT_DEFAULT_SENSE)
err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
@ -721,20 +751,58 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
MLX4_GET(port_type, outbox->buf, MLX4_GET(port_type, outbox->buf,
QUERY_PORT_SUPPORTED_TYPE_OFFSET); QUERY_PORT_SUPPORTED_TYPE_OFFSET);
/* Allow only Eth port, no link sensing allowed */ /* No link sensing allowed */
port_type &= MLX4_VF_PORT_ETH_ONLY_MASK; port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
/* set port type to currently operating port type */
/* check eth is enabled for this port */ port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);
if (!(port_type & 2))
mlx4_dbg(dev, "QUERY PORT: eth not supported by host");
MLX4_PUT(outbox->buf, port_type, MLX4_PUT(outbox->buf, port_type,
QUERY_PORT_SUPPORTED_TYPE_OFFSET); QUERY_PORT_SUPPORTED_TYPE_OFFSET);
short_field = 1; /* slave max gids */
MLX4_PUT(outbox->buf, short_field,
QUERY_PORT_CUR_MAX_GID_OFFSET);
short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
MLX4_PUT(outbox->buf, short_field,
QUERY_PORT_CUR_MAX_PKEY_OFFSET);
} }
return err; return err;
} }
int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
int *gid_tbl_len, int *pkey_tbl_len)
{
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
u16 field;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
if (err)
goto out;
outbox = mailbox->buf;
MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
*gid_tbl_len = field;
MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
*pkey_tbl_len = field;
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
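The consumer appears later in this same series; as a pointer, a condensed copy of the slave-side call from the mlx4_slave_cap() hunk below:

	/* From mlx4_slave_cap(), per port i: */
	if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
					    &dev->caps.gid_table_len[i],
					    &dev->caps.pkey_table_len[i]))
		return -ENODEV;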
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{ {
struct mlx4_cmd_mailbox *mailbox; struct mlx4_cmd_mailbox *mailbox;
@ -881,11 +949,12 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
((fw_ver & 0xffff0000ull) >> 16) | ((fw_ver & 0xffff0000ull) >> 16) |
((fw_ver & 0x0000ffffull) << 16); ((fw_ver & 0x0000ffffull) << 16);
MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
dev->caps.function = lg;
if (mlx4_is_slave(dev)) if (mlx4_is_slave(dev))
goto out; goto out;
MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
dev->caps.function = lg;
MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
@ -966,9 +1035,12 @@ int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
if (err) if (err)
return err; return err;
/* for slaves, zero out everything except FW version */ /* for slaves, set pci PPF ID to invalid and zero out everything
* else except FW version */
outbuf[0] = outbuf[1] = 0; outbuf[0] = outbuf[1] = 0;
memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8); memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;
return 0; return 0;
} }


@ -413,6 +413,8 @@ err:
mlx4_free_icm(dev, table->icm[i], use_coherent); mlx4_free_icm(dev, table->icm[i], use_coherent);
} }
kfree(table->icm);
return -ENOMEM; return -ENOMEM;
} }

View File

@ -81,13 +81,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u64 virt, int obj_size, int nobj, int reserved, u64 virt, int obj_size, int nobj, int reserved,
int use_lowmem, int use_coherent); int use_lowmem, int use_coherent);
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table); void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle); void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle);
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
int start, int end);
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
int start, int end);
static inline void mlx4_icm_first(struct mlx4_icm *icm, static inline void mlx4_icm_first(struct mlx4_icm *icm,
struct mlx4_icm_iter *iter) struct mlx4_icm_iter *iter)


@ -115,7 +115,8 @@ void mlx4_unregister_interface(struct mlx4_interface *intf)
} }
EXPORT_SYMBOL_GPL(mlx4_unregister_interface); EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port) void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
unsigned long param)
{ {
struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_device_context *dev_ctx; struct mlx4_device_context *dev_ctx;
@ -125,7 +126,7 @@ void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int por
list_for_each_entry(dev_ctx, &priv->ctx_list, list) list_for_each_entry(dev_ctx, &priv->ctx_list, list)
if (dev_ctx->intf->event) if (dev_ctx->intf->event)
dev_ctx->intf->event(dev, dev_ctx->context, type, port); dev_ctx->intf->event(dev, dev_ctx->context, type, param);
spin_unlock_irqrestore(&priv->ctx_lock, flags); spin_unlock_irqrestore(&priv->ctx_lock, flags);
} }


@ -215,6 +215,10 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
for (i = 1; i <= dev->caps.num_ports; ++i) { for (i = 1; i <= dev->caps.num_ports; ++i) {
dev->caps.vl_cap[i] = dev_cap->max_vl[i]; dev->caps.vl_cap[i] = dev_cap->max_vl[i];
dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
dev->phys_caps.gid_phys_table_len[i] = dev_cap->max_gids[i];
dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i];
/* set gid and pkey table operating lengths by default
* to non-sriov values */
dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
@ -288,29 +292,19 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
/* if only ETH is supported - assign ETH */ /* if only ETH is supported - assign ETH */
if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
/* if only IB is supported, /* if only IB is supported, assign IB */
* assign IB only if SRIOV is off*/
else if (dev->caps.supported_type[i] == else if (dev->caps.supported_type[i] ==
MLX4_PORT_TYPE_IB) { MLX4_PORT_TYPE_IB)
if (dev->flags & MLX4_FLAG_SRIOV) dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
dev->caps.port_type[i] =
MLX4_PORT_TYPE_NONE;
else
dev->caps.port_type[i] =
MLX4_PORT_TYPE_IB;
/* if IB and ETH are supported,
* first of all check if SRIOV is on */
} else if (dev->flags & MLX4_FLAG_SRIOV)
dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
else { else {
/* In non-SRIOV mode, we set the port type /* if IB and ETH are supported, we set the port
* according to user selection of port type, * type according to user selection of port type;
* if usere selected none, take the FW hint */ * if user selected none, take the FW hint */
if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE) if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
dev->caps.port_type[i] = dev->caps.suggested_type[i] ? dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
else else
dev->caps.port_type[i] = port_type_array[i-1]; dev->caps.port_type[i] = port_type_array[i - 1];
} }
} }
/* /*
@ -391,6 +385,23 @@ static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
return ret; return ret;
} }
int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
u32 qk = MLX4_RESERVED_QKEY_BASE;
if (qpn >= dev->caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
qpn < dev->caps.sqp_start)
return -EINVAL;
if (qpn >= dev->caps.base_tunnel_sqpn)
/* tunnel qp */
qk += qpn - dev->caps.base_tunnel_sqpn;
else
qk += qpn - dev->caps.sqp_start;
*qkey = qk;
return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);
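A worked example of the mapping; the QPN values below are hypothetical, only the formula comes from the function above. With sqp_start == 0x40 and base_tunnel_sqpn == 0x80, a proxy QP gets a qkey offset from its proxy index and a tunnel QP from its tunnel index:

	u32 qkey;

	if (!mlx4_get_parav_qkey(dev, 0x43, &qkey))	/* proxy QP */
		pr_debug("qkey 0x%x\n", qkey);		/* 0xFFFF0003 */
	if (!mlx4_get_parav_qkey(dev, 0x85, &qkey))	/* tunnel QP */
		pr_debug("qkey 0x%x\n", qkey);		/* 0xFFFF0005 */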
int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{ {
struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_priv *priv = mlx4_priv(dev);
@ -491,8 +502,13 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
return -ENODEV; return -ENODEV;
} }
for (i = 1; i <= dev->caps.num_ports; ++i) for (i = 1; i <= dev->caps.num_ports; ++i) {
dev->caps.port_mask[i] = dev->caps.port_type[i]; dev->caps.port_mask[i] = dev->caps.port_type[i];
if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
&dev->caps.gid_table_len[i],
&dev->caps.pkey_table_len[i]))
return -ENODEV;
}
if (dev->caps.uar_page_size * (dev->caps.num_uars - if (dev->caps.uar_page_size * (dev->caps.num_uars -
dev->caps.reserved_uars) > dev->caps.reserved_uars) >
@ -529,7 +545,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
for (port = 1; port <= dev->caps.num_ports; port++) { for (port = 1; port <= dev->caps.num_ports; port++) {
mlx4_CLOSE_PORT(dev, port); mlx4_CLOSE_PORT(dev, port);
dev->caps.port_type[port] = port_types[port - 1]; dev->caps.port_type[port] = port_types[port - 1];
err = mlx4_SET_PORT(dev, port); err = mlx4_SET_PORT(dev, port, -1);
if (err) { if (err) {
mlx4_err(dev, "Failed to set port %d, " mlx4_err(dev, "Failed to set port %d, "
"aborting\n", port); "aborting\n", port);
@ -715,7 +731,7 @@ static ssize_t set_port_ib_mtu(struct device *dev,
mlx4_unregister_device(mdev); mlx4_unregister_device(mdev);
for (port = 1; port <= mdev->caps.num_ports; port++) { for (port = 1; port <= mdev->caps.num_ports; port++) {
mlx4_CLOSE_PORT(mdev, port); mlx4_CLOSE_PORT(mdev, port);
err = mlx4_SET_PORT(mdev, port); err = mlx4_SET_PORT(mdev, port, -1);
if (err) { if (err) {
mlx4_err(mdev, "Failed to set port %d, " mlx4_err(mdev, "Failed to set port %d, "
"aborting\n", port); "aborting\n", port);
@ -1166,6 +1182,17 @@ err:
return -EIO; return -EIO;
} }
static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
{
int i;
for (i = 1; i <= dev->caps.num_ports; i++) {
dev->caps.gid_table_len[i] = 1;
dev->caps.pkey_table_len[i] =
dev->phys_caps.pkey_phys_table_len[i] - 1;
}
}
static int mlx4_init_hca(struct mlx4_dev *dev) static int mlx4_init_hca(struct mlx4_dev *dev)
{ {
struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_priv *priv = mlx4_priv(dev);
@ -1205,6 +1232,9 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
goto err_stop_fw; goto err_stop_fw;
} }
if (mlx4_is_master(dev))
mlx4_parav_master_pf_caps(dev);
profile = default_profile; profile = default_profile;
icm_size = mlx4_make_profile(dev, &profile, &dev_cap, icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
@ -1477,12 +1507,24 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
"with caps = 0\n", port, err); "with caps = 0\n", port, err);
dev->caps.ib_port_def_cap[port] = ib_port_default_caps; dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
/* initialize per-slave default ib port capabilities */
if (mlx4_is_master(dev)) {
int i;
for (i = 0; i < dev->num_slaves; i++) {
if (i == mlx4_master_func_num(dev))
continue;
priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
ib_port_default_caps;
}
}
if (mlx4_is_mfunc(dev)) if (mlx4_is_mfunc(dev))
dev->caps.port_ib_mtu[port] = IB_MTU_2048; dev->caps.port_ib_mtu[port] = IB_MTU_2048;
else else
dev->caps.port_ib_mtu[port] = IB_MTU_4096; dev->caps.port_ib_mtu[port] = IB_MTU_4096;
err = mlx4_SET_PORT(dev, port); err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
dev->caps.pkey_table_len[port] : -1);
if (err) { if (err) {
mlx4_err(dev, "Failed to set port %d, aborting\n", mlx4_err(dev, "Failed to set port %d, aborting\n",
port); port);


@ -338,66 +338,6 @@ struct mlx4_srq_context {
__be64 db_rec_addr; __be64 db_rec_addr;
}; };
struct mlx4_eqe {
u8 reserved1;
u8 type;
u8 reserved2;
u8 subtype;
union {
u32 raw[6];
struct {
__be32 cqn;
} __packed comp;
struct {
u16 reserved1;
__be16 token;
u32 reserved2;
u8 reserved3[3];
u8 status;
__be64 out_param;
} __packed cmd;
struct {
__be32 qpn;
} __packed qp;
struct {
__be32 srqn;
} __packed srq;
struct {
__be32 cqn;
u32 reserved1;
u8 reserved2[3];
u8 syndrome;
} __packed cq_err;
struct {
u32 reserved1[2];
__be32 port;
} __packed port_change;
struct {
#define COMM_CHANNEL_BIT_ARRAY_SIZE 4
u32 reserved;
u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
} __packed comm_channel_arm;
struct {
u8 port;
u8 reserved[3];
__be64 mac;
} __packed mac_update;
struct {
u8 port;
} __packed sw_event;
struct {
__be32 slave_id;
} __packed flr_event;
struct {
__be16 current_temperature;
__be16 warning_threshold;
} __packed warming;
} event;
u8 slave_id;
u8 reserved3[2];
u8 owner;
} __packed;
struct mlx4_eq { struct mlx4_eq {
struct mlx4_dev *dev; struct mlx4_dev *dev;
void __iomem *doorbell; void __iomem *doorbell;
@ -887,7 +827,8 @@ void mlx4_catas_init(void);
int mlx4_restart_one(struct pci_dev *pdev); int mlx4_restart_one(struct pci_dev *pdev);
int mlx4_register_device(struct mlx4_dev *dev); int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev); void mlx4_unregister_device(struct mlx4_dev *dev);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port); void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
unsigned long param);
struct mlx4_dev_cap; struct mlx4_dev_cap;
struct mlx4_init_hca_param; struct mlx4_init_hca_param;
@ -1028,7 +969,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
/* resource tracker functions*/ /* resource tracker functions*/
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
enum mlx4_resource resource_type, enum mlx4_resource resource_type,
@ -1071,6 +1012,8 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd); struct mlx4_cmd_info *cmd);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
int *gid_tbl_len, int *pkey_tbl_len);
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr, struct mlx4_vhcr *vhcr,


@ -726,14 +726,15 @@ int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
enum { enum {
MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */ MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */
MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */ MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */
MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
MLX4_CHANGE_PORT_VL_CAP = 21, MLX4_CHANGE_PORT_VL_CAP = 21,
MLX4_CHANGE_PORT_MTU_CAP = 22, MLX4_CHANGE_PORT_MTU_CAP = 22,
}; };
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
{ {
struct mlx4_cmd_mailbox *mailbox; struct mlx4_cmd_mailbox *mailbox;
int err, vl_cap; int err, vl_cap, pkey_tbl_flag = 0;
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
return 0; return 0;
@ -746,11 +747,17 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
pkey_tbl_flag = 1;
((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
}
/* IB VL CAP enum isn't used by the firmware, just numerical values */ /* IB VL CAP enum isn't used by the firmware, just numerical values */
for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) { for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
((__be32 *) mailbox->buf)[0] = cpu_to_be32( ((__be32 *) mailbox->buf)[0] = cpu_to_be32(
(1 << MLX4_CHANGE_PORT_MTU_CAP) | (1 << MLX4_CHANGE_PORT_MTU_CAP) |
(1 << MLX4_CHANGE_PORT_VL_CAP) | (1 << MLX4_CHANGE_PORT_VL_CAP) |
(pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) | (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
(vl_cap << MLX4_SET_PORT_VL_CAP)); (vl_cap << MLX4_SET_PORT_VL_CAP));
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,


@ -56,6 +56,13 @@ enum {
MLX4_MAX_PORTS = 2 MLX4_MAX_PORTS = 2
}; };
/* base qkey for use in sriov tunnel-qp/proxy-qp communication.
* These qkeys must not be allowed for general use. This is a 64k range,
* and to test for violation, we use the mask (protect against future chg).
*/
#define MLX4_RESERVED_QKEY_BASE (0xFFFF0000)
#define MLX4_RESERVED_QKEY_MASK (0xFFFF0000)
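The violation test the comment alludes to would look like this; a sketch only, this inline is not part of the patch:

/* Reject qkeys that fall in the reserved 64k range: */
static inline int mlx4_qkey_is_reserved(u32 qkey)
{
	return (qkey & MLX4_RESERVED_QKEY_MASK) == MLX4_RESERVED_QKEY_BASE;
}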
enum { enum {
MLX4_BOARD_ID_LEN = 64 MLX4_BOARD_ID_LEN = 64
}; };
@ -96,7 +103,8 @@ enum {
MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41,
MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42,
MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48, MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48,
MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55 MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55,
MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
}; };
enum { enum {
@ -138,6 +146,7 @@ enum mlx4_event {
MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18, MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18,
MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b, MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b,
MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, MLX4_EVENT_TYPE_FLR_EVENT = 0x1c,
MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
MLX4_EVENT_TYPE_NONE = 0xff, MLX4_EVENT_TYPE_NONE = 0xff,
}; };
@ -235,12 +244,32 @@ enum {
MLX4_MAX_FAST_REG_PAGES = 511, MLX4_MAX_FAST_REG_PAGES = 511,
}; };
enum {
MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14,
MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15,
MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16,
};
/* Port mgmt change event handling */
enum {
MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK = 1 << 0,
MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK = 1 << 1,
MLX4_EQ_PORT_INFO_LID_CHANGE_MASK = 1 << 2,
MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK = 1 << 3,
MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4,
};
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
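A hedged sketch of how a consumer tests these bits against the changed_attr field of the port_mgmt_change EQE defined further down; the handler shown is illustrative, not code from this series:

static void sketch_port_info_change(struct mlx4_eqe *eqe)
{
	u8 port = eqe->event.port_mgmt_change.port;
	u32 changed = be32_to_cpu(
		eqe->event.port_mgmt_change.params.port_info.changed_attr);

	if (changed & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
		pr_debug("port %d: LID changed\n", port);
	if (changed & MSTR_SM_CHANGE_MASK)
		pr_debug("port %d: master SM LID/SL changed\n", port);
}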
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{ {
return (major << 32) | (minor << 16) | subminor; return (major << 32) | (minor << 16) | subminor;
} }
struct mlx4_phys_caps { struct mlx4_phys_caps {
u32 gid_phys_table_len[MLX4_MAX_PORTS + 1];
u32 pkey_phys_table_len[MLX4_MAX_PORTS + 1];
u32 num_phys_eqs; u32 num_phys_eqs;
}; };
@ -273,6 +302,8 @@ struct mlx4_caps {
int max_qp_init_rdma; int max_qp_init_rdma;
int max_qp_dest_rdma; int max_qp_dest_rdma;
int sqp_start; int sqp_start;
u32 base_sqpn;
u32 base_tunnel_sqpn;
int num_srqs; int num_srqs;
int max_srq_wqes; int max_srq_wqes;
int max_srq_sge; int max_srq_sge;
@ -511,6 +542,81 @@ struct mlx4_dev {
int num_vfs; int num_vfs;
}; };
struct mlx4_eqe {
u8 reserved1;
u8 type;
u8 reserved2;
u8 subtype;
union {
u32 raw[6];
struct {
__be32 cqn;
} __packed comp;
struct {
u16 reserved1;
__be16 token;
u32 reserved2;
u8 reserved3[3];
u8 status;
__be64 out_param;
} __packed cmd;
struct {
__be32 qpn;
} __packed qp;
struct {
__be32 srqn;
} __packed srq;
struct {
__be32 cqn;
u32 reserved1;
u8 reserved2[3];
u8 syndrome;
} __packed cq_err;
struct {
u32 reserved1[2];
__be32 port;
} __packed port_change;
struct {
#define COMM_CHANNEL_BIT_ARRAY_SIZE 4
u32 reserved;
u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
} __packed comm_channel_arm;
struct {
u8 port;
u8 reserved[3];
__be64 mac;
} __packed mac_update;
struct {
__be32 slave_id;
} __packed flr_event;
struct {
__be16 current_temperature;
__be16 warning_threshold;
} __packed warming;
struct {
u8 reserved[3];
u8 port;
union {
struct {
__be16 mstr_sm_lid;
__be16 port_lid;
__be32 changed_attr;
u8 reserved[3];
u8 mstr_sm_sl;
__be64 gid_prefix;
} __packed port_info;
struct {
__be32 block_ptr;
__be32 tbl_entries_mask;
} __packed tbl_change_info;
} params;
} __packed port_mgmt_change;
} event;
u8 slave_id;
u8 reserved3[2];
u8 owner;
} __packed;
struct mlx4_init_port_param { struct mlx4_init_port_param {
int set_guid0; int set_guid0;
int set_node_guid; int set_node_guid;
@ -534,6 +640,15 @@ struct mlx4_init_port_param {
if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \ if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
#define MLX4_INVALID_SLAVE_ID 0xFF
void handle_port_mgmt_change_event(struct work_struct *work);
static inline int mlx4_master_func_num(struct mlx4_dev *dev)
{
return dev->caps.function;
}
static inline int mlx4_is_master(struct mlx4_dev *dev) static inline int mlx4_is_master(struct mlx4_dev *dev)
{ {
return dev->flags & MLX4_FLAG_MASTER; return dev->flags & MLX4_FLAG_MASTER;
@ -668,4 +783,6 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx); void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey);
#endif /* MLX4_DEVICE_H */ #endif /* MLX4_DEVICE_H */


@ -42,13 +42,14 @@ enum mlx4_dev_event {
MLX4_DEV_EVENT_PORT_UP, MLX4_DEV_EVENT_PORT_UP,
MLX4_DEV_EVENT_PORT_DOWN, MLX4_DEV_EVENT_PORT_DOWN,
MLX4_DEV_EVENT_PORT_REINIT, MLX4_DEV_EVENT_PORT_REINIT,
MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
}; };
struct mlx4_interface { struct mlx4_interface {
void * (*add) (struct mlx4_dev *dev); void * (*add) (struct mlx4_dev *dev);
void (*remove)(struct mlx4_dev *dev, void *context); void (*remove)(struct mlx4_dev *dev, void *context);
void (*event) (struct mlx4_dev *dev, void *context, void (*event) (struct mlx4_dev *dev, void *context,
enum mlx4_dev_event event, int port); enum mlx4_dev_event event, unsigned long param);
void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port); void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
struct list_head list; struct list_head list;
enum mlx4_protocol protocol; enum mlx4_protocol protocol;


@ -262,6 +262,18 @@ struct ib_cm_event {
void *private_data; void *private_data;
}; };
#define CM_REQ_ATTR_ID cpu_to_be16(0x0010)
#define CM_MRA_ATTR_ID cpu_to_be16(0x0011)
#define CM_REJ_ATTR_ID cpu_to_be16(0x0012)
#define CM_REP_ATTR_ID cpu_to_be16(0x0013)
#define CM_RTU_ATTR_ID cpu_to_be16(0x0014)
#define CM_DREQ_ATTR_ID cpu_to_be16(0x0015)
#define CM_DREP_ATTR_ID cpu_to_be16(0x0016)
#define CM_SIDR_REQ_ATTR_ID cpu_to_be16(0x0017)
#define CM_SIDR_REP_ATTR_ID cpu_to_be16(0x0018)
#define CM_LAP_ATTR_ID cpu_to_be16(0x0019)
#define CM_APR_ATTR_ID cpu_to_be16(0x001A)
/** /**
* ib_cm_handler - User-defined callback to process communication events. * ib_cm_handler - User-defined callback to process communication events.
* @cm_id: Communication identifier associated with the reported event. * @cm_id: Communication identifier associated with the reported event.


@ -251,6 +251,28 @@ struct ib_sa_service_rec {
u64 data64[2]; u64 data64[2];
}; };
#define IB_SA_GUIDINFO_REC_LID IB_SA_COMP_MASK(0)
#define IB_SA_GUIDINFO_REC_BLOCK_NUM IB_SA_COMP_MASK(1)
#define IB_SA_GUIDINFO_REC_RES1 IB_SA_COMP_MASK(2)
#define IB_SA_GUIDINFO_REC_RES2 IB_SA_COMP_MASK(3)
#define IB_SA_GUIDINFO_REC_GID0 IB_SA_COMP_MASK(4)
#define IB_SA_GUIDINFO_REC_GID1 IB_SA_COMP_MASK(5)
#define IB_SA_GUIDINFO_REC_GID2 IB_SA_COMP_MASK(6)
#define IB_SA_GUIDINFO_REC_GID3 IB_SA_COMP_MASK(7)
#define IB_SA_GUIDINFO_REC_GID4 IB_SA_COMP_MASK(8)
#define IB_SA_GUIDINFO_REC_GID5 IB_SA_COMP_MASK(9)
#define IB_SA_GUIDINFO_REC_GID6 IB_SA_COMP_MASK(10)
#define IB_SA_GUIDINFO_REC_GID7 IB_SA_COMP_MASK(11)
struct ib_sa_guidinfo_rec {
__be16 lid;
u8 block_num;
/* reserved */
u8 res1;
__be32 res2;
u8 guid_info_list[64];
};
struct ib_sa_client { struct ib_sa_client {
atomic_t users; atomic_t users;
struct completion comp; struct completion comp;
@ -385,4 +407,15 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
*/ */
void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec); void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec);
/* Support GuidInfoRecord */
int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
struct ib_device *device, u8 port_num,
struct ib_sa_guidinfo_rec *rec,
ib_sa_comp_mask comp_mask, u8 method,
int timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_guidinfo_rec *resp,
void *context),
void *context,
struct ib_sa_query **sa_query);
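A hedged usage sketch of the new query; sa_client registration, rec setup (lid and block_num pre-filled), and the surrounding variables are assumed to be in scope, and IB_MGMT_METHOD_GET is assumed from <rdma/ib_mad.h>, mirroring the other SA query helpers:

	static u8 guids[64];

	static void guidinfo_cb(int status, struct ib_sa_guidinfo_rec *resp,
				void *context)
	{
		if (!status && resp)	/* one block = 8 GUIDs, 64 bytes */
			memcpy(context, resp->guid_info_list, sizeof(guids));
	}

	/* caller: */
	ret = ib_sa_guid_info_rec_query(&sa_client, device, port_num, &rec,
					IB_SA_GUIDINFO_REC_LID |
					IB_SA_GUIDINFO_REC_BLOCK_NUM,
					IB_MGMT_METHOD_GET, 1000, GFP_KERNEL,
					guidinfo_cb, guids, &sa_query);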
#endif /* IB_SA_H */ #endif /* IB_SA_H */