Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This series consists of the usual driver updates (qla2xxx, ufs, zfcp,
  target, scsi_debug, lpfc, qedi, qedf, hisi_sas, mpt3sas) plus a host
  of other minor updates.

  There are no major core changes in this series apart from a
  refactoring in scsi_lib.c"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (207 commits)
  scsi: ufs: ti-j721e-ufs: Fix unwinding of pm_runtime changes
  scsi: cxgb3i: Fix some leaks in init_act_open()
  scsi: ibmvscsi: Make some functions static
  scsi: iscsi: Fix deadlock on recovery path during GFP_IO reclaim
  scsi: ufs: Fix WriteBooster flush during runtime suspend
  scsi: ufs: Fix index of attributes query for WriteBooster feature
  scsi: ufs: Allow WriteBooster on UFS 2.2 devices
  scsi: ufs: Remove unnecessary memset for dev_info
  scsi: ufs-qcom: Fix scheduling while atomic issue
  scsi: mpt3sas: Fix reply queue count in non RDPQ mode
  scsi: lpfc: Fix lpfc_nodelist leak when processing unsolicited event
  scsi: target: tcmu: Fix a use after free in tcmu_check_expired_queue_cmd()
  scsi: vhost: Notify TCM about the maximum sg entries supported per command
  scsi: qla2xxx: Remove return value from qla_nvme_ls()
  scsi: qla2xxx: Remove an unused function
  scsi: iscsi: Register sysfs for iscsi workqueue
  scsi: scsi_debug: Parser tables and code interaction
  scsi: core: Refactor scsi_mq_setup_tags function
  scsi: core: Fix incorrect usage of shost_for_each_device
  scsi: qla2xxx: Fix endianness annotations in source files
  ...
Linus Torvalds
2020-06-05 15:11:50 -07:00
149 changed files with 6642 additions and 3957 deletions


@@ -10051,7 +10051,7 @@ F: drivers/hid/hid-lg-g15.c
 LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
 M:	Sathya Prakash <sathya.prakash@broadcom.com>
-M:	Chaitra P B <chaitra.basappa@broadcom.com>
+M:	Sreekanth Reddy <sreekanth.reddy@broadcom.com>
 M:	Suganath Prabu Subramani <suganath-prabu.subramani@broadcom.com>
 L:	MPT-FusionLinux.pdl@broadcom.com
 L:	linux-scsi@vger.kernel.org


@@ -5052,9 +5052,11 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
  * @ioc: Pointer to MPT_ADAPTER structure
  * @persist_opcode: see below
  *
- * MPI_SAS_OP_CLEAR_NOT_PRESENT - Free all persist TargetID mappings for
- *	devices not currently present.
- * MPI_SAS_OP_CLEAR_ALL_PERSISTENT - Clear al persist TargetID mappings
+ * =============================== ======================================
+ * MPI_SAS_OP_CLEAR_NOT_PRESENT    Free all persist TargetID mappings for
+ *                                 devices not currently present.
+ * MPI_SAS_OP_CLEAR_ALL_PERSISTENT Clear al persist TargetID mappings
+ * =============================== ======================================
  *
  * NOTE: Don't use not this function during interrupt time.
  *


@@ -1014,6 +1014,7 @@ int qed_device_num_ports(struct qed_dev *cdev);
 int qed_fill_dev_info(struct qed_dev *cdev,
 		      struct qed_dev_info *dev_info);
 void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
+void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
 u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
 		   u32 input_len, u8 *input_buf,
 		   u32 max_size, u8 *unzip_buf);


@@ -1949,6 +1949,15 @@ void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
 		op->link_update(cookie, &if_link);
 }
 
+void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
+{
+	void *cookie = hwfn->cdev->ops_cookie;
+	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
+
+	if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
+		op->bw_update(cookie);
+}
+
 static int qed_drain(struct qed_dev *cdev)
 {
 	struct qed_hwfn *hwfn;


@@ -4,7 +4,7 @@
  *
  * Module interface and handling of zfcp data structures.
  *
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
  */
 
 /*
@@ -415,8 +415,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 	adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
 
-	if (!zfcp_scsi_adapter_register(adapter))
-		return adapter;
+	return adapter;
 
 failed:
 	zfcp_adapter_unregister(adapter);


@@ -4,7 +4,7 @@
  *
  * Definitions for handling diagnostics in the the zfcp device driver.
  *
- * Copyright IBM Corp. 2018
+ * Copyright IBM Corp. 2018, 2020
  */
 
 #ifndef ZFCP_DIAG_H
@@ -56,11 +56,11 @@ struct zfcp_diag_adapter {
 	unsigned long	max_age;
 
-	struct {
+	struct zfcp_diag_adapter_port_data {
 		struct zfcp_diag_header	header;
 		struct fsf_qtcb_bottom_port data;
 	} port_data;
-	struct {
+	struct zfcp_diag_adapter_config_data {
 		struct zfcp_diag_header	header;
 		struct fsf_qtcb_bottom_config data;
 	} config_data;


@@ -4,7 +4,7 @@
  *
  * Error Recovery Procedures (ERP).
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2020
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -14,6 +14,7 @@
 #include <linux/bug.h>
 #include "zfcp_ext.h"
 #include "zfcp_reqlist.h"
+#include "zfcp_diag.h"
 
 #define ZFCP_MAX_ERPS                   3
@@ -768,10 +769,14 @@ static enum zfcp_erp_act_result zfcp_erp_adapter_strat_fsf_xconf(
 	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
 		return ZFCP_ERP_FAILED;
 
+	return ZFCP_ERP_SUCCEEDED;
+}
+
+static void
+zfcp_erp_adapter_strategy_open_ptp_port(struct zfcp_adapter *const adapter)
+{
 	if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
 		zfcp_erp_enqueue_ptp_port(adapter);
-
-	return ZFCP_ERP_SUCCEEDED;
 }
 
 static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf_xport(
@@ -800,6 +805,59 @@ static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf_xport(
 	return ZFCP_ERP_SUCCEEDED;
 }
 
+static enum zfcp_erp_act_result
+zfcp_erp_adapter_strategy_alloc_shost(struct zfcp_adapter *const adapter)
+{
+	struct zfcp_diag_adapter_config_data *const config_data =
+		&adapter->diagnostics->config_data;
+	struct zfcp_diag_adapter_port_data *const port_data =
+		&adapter->diagnostics->port_data;
+	unsigned long flags;
+	int rc;
+
+	rc = zfcp_scsi_adapter_register(adapter);
+	if (rc == -EEXIST)
+		return ZFCP_ERP_SUCCEEDED;
+	else if (rc)
+		return ZFCP_ERP_FAILED;
+
+	/*
+	 * We allocated the shost for the first time. Before it was NULL,
+	 * and so we deferred all updates in the xconf- and xport-data
+	 * handlers. We need to make up for that now, and make all the updates
+	 * that would have been done before.
+	 *
+	 * We can be sure that xconf- and xport-data succeeded, because
+	 * otherwise this function is not called. But they might have been
+	 * incomplete.
+	 */
+	spin_lock_irqsave(&config_data->header.access_lock, flags);
+	zfcp_scsi_shost_update_config_data(adapter, &config_data->data,
+					   !!config_data->header.incomplete);
+	spin_unlock_irqrestore(&config_data->header.access_lock, flags);
+
+	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+		spin_lock_irqsave(&port_data->header.access_lock, flags);
+		zfcp_scsi_shost_update_port_data(adapter, &port_data->data);
+		spin_unlock_irqrestore(&port_data->header.access_lock, flags);
+	}
+
+	/*
+	 * There is a remote possibility that the 'Exchange Port Data' request
+	 * reports a different connectivity status than 'Exchange Config Data'.
+	 * But any change to the connectivity status of the local optic that
+	 * happens after the initial xconf request is expected to be reported
+	 * to us, as soon as we post Status Read Buffers to the FCP channel
+	 * firmware after this function. So any resulting inconsistency will
+	 * only be momentary.
+	 */
+	if (config_data->header.incomplete)
+		zfcp_fsf_fc_host_link_down(adapter);
+
+	return ZFCP_ERP_SUCCEEDED;
+}
+
 static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf(
 	struct zfcp_erp_action *act)
 {
@@ -809,6 +867,12 @@ static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf(
 	if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
 		return ZFCP_ERP_FAILED;
 
+	if (zfcp_erp_adapter_strategy_alloc_shost(act->adapter) ==
+	    ZFCP_ERP_FAILED)
+		return ZFCP_ERP_FAILED;
+
+	zfcp_erp_adapter_strategy_open_ptp_port(act->adapter);
+
 	if (mempool_resize(act->adapter->pool.sr_data,
 			   act->adapter->stat_read_buf_num))
 		return ZFCP_ERP_FAILED;
@@ -1636,6 +1700,13 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
 			atomic_or(common_mask, &port->status);
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
+	/*
+	 * if `scsi_host` is missing, xconfig/xport data has never completed
+	 * yet, so we can't access it, but there are also no SDEVs yet
+	 */
+	if (adapter->scsi_host == NULL)
+		return;
+
 	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, adapter->scsi_host)
 		atomic_or(common_mask, &sdev_to_zfcp(sdev)->status);
@@ -1673,6 +1744,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
 	}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
+	/*
+	 * if `scsi_host` is missing, xconfig/xport data has never completed
+	 * yet, so we can't access it, but there are also no SDEVs yet
+	 */
+	if (adapter->scsi_host == NULL)
+		return;
+
 	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, adapter->scsi_host) {
 		atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status);
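All of the zfcp hunks above share one pattern: the Scsi_Host can be absent when exchange-config/exchange-port data arrives, so the handlers cache adapter state unconditionally, skip the fc_host/shost updates while adapter->scsi_host is NULL, and zfcp_erp_adapter_strategy_alloc_shost() replays the cached (possibly incomplete) data once registration finally succeeds. Below is a minimal, self-contained sketch of that defer-and-replay idea; the types and names (view, adapter_like) are invented for illustration, not zfcp's own.

#include <stddef.h>	/* NULL */

/* "view" stands in for the sysfs-visible Scsi_Host/fc_host object. */
struct view {
	int shown_value;
};

struct adapter_like {
	struct view *view;	/* NULL until registration succeeds */
	int cached_value;	/* adapter-side state, always kept current */
};

/* Data handlers record state first and touch the view only if it exists. */
static void handle_exchange_data(struct adapter_like *a, int value)
{
	a->cached_value = value;
	if (a->view == NULL)
		return;		/* deferred: replayed at registration time */
	a->view->shown_value = value;
}

/* Registration replays the last cached state, making up for the updates
 * that were skipped while the view did not exist yet. */
static void register_view(struct adapter_like *a, struct view *v)
{
	a->view = v;
	v->shown_value = a->cached_value;
}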


@@ -125,6 +125,7 @@ extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
 extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
 extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *,
 					    struct fsf_qtcb_bottom_port *);
+extern u32 zfcp_fsf_convert_portspeed(u32 fsf_speed);
 extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
 extern int zfcp_fsf_status_read(struct zfcp_qdio *);
 extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
@@ -134,6 +135,7 @@ extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
 			     struct zfcp_fsf_ct_els *, unsigned int);
 extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
 extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
+extern void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter);
 extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
 						   u8 tm_flags);
 extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
@@ -153,6 +155,8 @@ extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
 extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
 extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
 				   struct scatterlist *);
+extern void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
+				   const struct zfcp_qdio *const qdio);
 extern int zfcp_qdio_open(struct zfcp_qdio *);
 extern void zfcp_qdio_close(struct zfcp_qdio *);
 extern void zfcp_qdio_siosl(struct zfcp_adapter *);
@@ -169,6 +173,13 @@ extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
 extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
 extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
 extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
+extern void zfcp_scsi_shost_update_config_data(
+	struct zfcp_adapter *const adapter,
+	const struct fsf_qtcb_bottom_config *const bottom,
+	const bool bottom_incomplete);
+extern void zfcp_scsi_shost_update_port_data(
+	struct zfcp_adapter *const adapter,
+	const struct fsf_qtcb_bottom_port *const bottom);
 
 /* zfcp_sysfs.c */
 extern const struct attribute_group *zfcp_unit_attr_groups[];


@@ -120,21 +120,25 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
 
-static void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
+void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
 {
 	struct Scsi_Host *shost = adapter->scsi_host;
 
+	adapter->hydra_version = 0;
+	adapter->peer_wwpn = 0;
+	adapter->peer_wwnn = 0;
+	adapter->peer_d_id = 0;
+
+	/* if there is no shost yet, we have nothing to zero-out */
+	if (shost == NULL)
+		return;
+
 	fc_host_port_id(shost) = 0;
 	fc_host_fabric_name(shost) = 0;
 	fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 	fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
-	adapter->hydra_version = 0;
 	snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x", 0);
 	memset(fc_host_active_fc4s(shost), 0, FC_FC4_LIST_SIZE);
-
-	adapter->peer_wwpn = 0;
-	adapter->peer_wwnn = 0;
-	adapter->peer_d_id = 0;
 }
 
 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
@@ -479,7 +483,7 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
 #define ZFCP_FSF_PORTSPEED_128GBIT	(1 << 8)
 #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
 
-static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
+u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
 {
 	u32 fdmi_speed = 0;
 	if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
@@ -509,64 +513,36 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 {
 	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
 	struct zfcp_adapter *adapter = req->adapter;
-	struct Scsi_Host *shost = adapter->scsi_host;
-	struct fc_els_flogi *nsp, *plogi;
+	struct fc_els_flogi *plogi;
 
 	/* adjust pointers for missing command code */
-	nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
-					- sizeof(u32));
 	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
 					- sizeof(u32));
 	if (req->data)
 		memcpy(req->data, bottom, sizeof(*bottom));
 
-	snprintf(fc_host_manufacturer(shost), FC_SERIAL_NUMBER_SIZE, "%s",
-		 "IBM");
-	fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
-	fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
-	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
-
 	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
 	adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
 					 (u16)FSF_STATUS_READS_RECOM);
 
-	zfcp_scsi_set_prot(adapter);
-
 	/* no error return above here, otherwise must fix call chains */
 	/* do not evaluate invalid fields */
 	if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
 		return 0;
 
-	fc_host_port_id(shost) = ntoh24(bottom->s_id);
-	fc_host_speed(shost) =
-		zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
-
 	adapter->hydra_version = bottom->adapter_type;
-	snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x",
-		 bottom->adapter_type);
 
 	switch (bottom->fc_topology) {
 	case FSF_TOPO_P2P:
 		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
 		adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
 		adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
-		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
-		fc_host_fabric_name(shost) = 0;
 		break;
 	case FSF_TOPO_FABRIC:
-		fc_host_fabric_name(shost) = be64_to_cpu(plogi->fl_wwnn);
-		if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
-			fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
-		else
-			fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
 		break;
 	case FSF_TOPO_AL:
-		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
-		fc_host_fabric_name(shost) = 0;
-		fallthrough;
 	default:
-		fc_host_fabric_name(shost) = 0;
 		dev_err(&adapter->ccw_device->dev,
 			"Unknown or unsupported arbitrated loop "
 			"fibre channel topology detected\n");
@@ -584,13 +560,10 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 		&adapter->diagnostics->config_data.header;
 	struct fsf_qtcb *qtcb = req->qtcb;
 	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
-	struct Scsi_Host *shost = adapter->scsi_host;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		return;
 
-	snprintf(fc_host_firmware_version(shost), FC_VERSION_STRING_SIZE,
-		 "0x%08x", bottom->lic_version);
 	adapter->fsf_lic_version = bottom->lic_version;
 	adapter->adapter_features = bottom->adapter_features;
 	adapter->connection_features = bottom->connection_features;
@@ -606,6 +579,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 	 */
 	zfcp_diag_update_xdata(diag_hdr, bottom, false);
 
+	zfcp_scsi_shost_update_config_data(adapter, bottom, false);
 	if (zfcp_fsf_exchange_config_evaluate(req))
 		return;
@@ -630,6 +604,8 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 			  &adapter->status);
 		zfcp_fsf_link_down_info_eval(req,
 			&qtcb->header.fsf_status_qual.link_down_info);
 
+		zfcp_scsi_shost_update_config_data(adapter, bottom, true);
 		if (zfcp_fsf_exchange_config_evaluate(req))
 			return;
 		break;
@@ -638,16 +614,8 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 		return;
 	}
 
-	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)
 		adapter->hardware_version = bottom->hardware_version;
-		snprintf(fc_host_hardware_version(shost),
-			 FC_VERSION_STRING_SIZE,
-			 "0x%08x", bottom->hardware_version);
-		memcpy(fc_host_serial_number(shost), bottom->serial_number,
-		       min(FC_SERIAL_NUMBER_SIZE, 17));
-		EBCASC(fc_host_serial_number(shost),
-		       min(FC_SERIAL_NUMBER_SIZE, 17));
-	}
 
 	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
 		dev_err(&adapter->ccw_device->dev,
@@ -761,19 +729,10 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
 {
 	struct zfcp_adapter *adapter = req->adapter;
 	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
-	struct Scsi_Host *shost = adapter->scsi_host;
 
 	if (req->data)
 		memcpy(req->data, bottom, sizeof(*bottom));
 
-	fc_host_permanent_port_name(shost) = bottom->wwpn;
-	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
-	fc_host_supported_speeds(shost) =
-		zfcp_fsf_convert_portspeed(bottom->supported_speed);
-	memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
-	       FC_FC4_LIST_SIZE);
-	memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
-	       FC_FC4_LIST_SIZE);
 	if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
 		adapter->fc_security_algorithms =
 			bottom->fc_security_algorithms;
@@ -800,6 +759,7 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
 	 */
 	zfcp_diag_update_xdata(diag_hdr, bottom, false);
 
+	zfcp_scsi_shost_update_port_data(req->adapter, bottom);
 	zfcp_fsf_exchange_port_evaluate(req);
 	break;
 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
@@ -808,6 +768,8 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
 		zfcp_fsf_link_down_info_eval(req,
 			&qtcb->header.fsf_status_qual.link_down_info);
 
+		zfcp_scsi_shost_update_port_data(req->adapter, bottom);
 		zfcp_fsf_exchange_port_evaluate(req);
 		break;
 	}


@@ -4,7 +4,7 @@
  *
  * Setup and helper functions to access QDIO.
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2020
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -342,6 +342,18 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
 	atomic_set(&qdio->req_q_free, 0);
 }
 
+void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
+			    const struct zfcp_qdio *const qdio)
+{
+	struct Scsi_Host *const shost = adapter->scsi_host;
+
+	if (shost == NULL)
+		return;
+
+	shost->sg_tablesize = qdio->max_sbale_per_req;
+	shost->max_sectors = qdio->max_sbale_per_req * 8;
+}
+
 /**
  * zfcp_qdio_open - prepare and initialize response queue
  * @qdio: pointer to struct zfcp_qdio
@@ -420,10 +432,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
 	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
 	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
 
-	if (adapter->scsi_host) {
-		adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
-		adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
-	}
+	zfcp_qdio_shost_update(adapter, qdio);
 
 	return 0;


@@ -4,7 +4,7 @@
  *
  * Interface to Linux SCSI midlayer.
  *
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -451,26 +451,39 @@ static struct scsi_host_template zfcp_scsi_host_template = {
 };
 
 /**
- * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
+ * zfcp_scsi_adapter_register() - Allocate and register SCSI and FC host with
+ *				  SCSI midlayer
  * @adapter: The zfcp adapter to register with the SCSI midlayer
+ *
+ * Allocates the SCSI host object for the given adapter, sets basic properties
+ * (such as the transport template, QDIO limits, ...), and registers it with
+ * the midlayer.
+ *
+ * During registration with the midlayer the corresponding FC host object for
+ * the referenced transport class is also implicitely allocated.
+ *
+ * Upon success adapter->scsi_host is set, and upon failure it remains NULL. If
+ * adapter->scsi_host is already set, nothing is done.
+ *
+ * Return:
+ * * 0	     - Allocation and registration was successful
+ * * -EEXIST - SCSI and FC host did already exist, nothing was done, nothing
+ *	       was changed
+ * * -EIO    - Allocation or registration failed
  */
 int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
 {
 	struct ccw_dev_id dev_id;
 
 	if (adapter->scsi_host)
-		return 0;
+		return -EEXIST;
 
 	ccw_device_get_id(adapter->ccw_device, &dev_id);
 	/* register adapter as SCSI host with mid layer of SCSI stack */
 	adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
 					     sizeof (struct zfcp_adapter *));
-	if (!adapter->scsi_host) {
-		dev_err(&adapter->ccw_device->dev,
-			"Registering the FCP device with the "
-			"SCSI stack failed\n");
-		return -EIO;
-	}
+	if (!adapter->scsi_host)
+		goto err_out;
 
 	/* tell the SCSI stack some characteristics of this adapter */
 	adapter->scsi_host->max_id = 511;
@@ -480,14 +493,23 @@ int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
 	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
 	adapter->scsi_host->transportt = zfcp_scsi_transport_template;
 
+	/* make all basic properties known at registration time */
+	zfcp_qdio_shost_update(adapter, adapter->qdio);
+	zfcp_scsi_set_prot(adapter);
+
 	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
 
 	if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
 		scsi_host_put(adapter->scsi_host);
-		return -EIO;
+		goto err_out;
 	}
 
 	return 0;
+
+err_out:
+	adapter->scsi_host = NULL;
+	dev_err(&adapter->ccw_device->dev,
+		"Registering the FCP device with the SCSI stack failed\n");
+	return -EIO;
 }
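The reworked zfcp_scsi_adapter_register() above is deliberately idempotent: a repeat call changes nothing and reports -EEXIST, which the ERP strategy earlier in this series maps back to success. A small sketch of that contract, assuming nothing beyond the diff (host_like and both helper names are invented for illustration):

#include <errno.h>	/* EEXIST */
#include <stdbool.h>

struct host_like {
	bool registered;
};

static int register_host(struct host_like *h)
{
	if (h->registered)
		return -EEXIST;		/* nothing done, nothing changed */
	/* allocation and midlayer registration would happen here */
	h->registered = true;
	return 0;
}

/* Callers may treat "already registered" exactly like first-time success. */
static bool ensure_host_registered(struct host_like *h)
{
	int rc = register_host(h);

	return rc == 0 || rc == -EEXIST;
}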
/** /**
@@ -841,6 +863,95 @@ void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
 	set_host_byte(scmd, DID_SOFT_ERROR);
 }
 
+void zfcp_scsi_shost_update_config_data(
+	struct zfcp_adapter *const adapter,
+	const struct fsf_qtcb_bottom_config *const bottom,
+	const bool bottom_incomplete)
+{
+	struct Scsi_Host *const shost = adapter->scsi_host;
+	const struct fc_els_flogi *nsp, *plogi;
+
+	if (shost == NULL)
+		return;
+
+	snprintf(fc_host_firmware_version(shost), FC_VERSION_STRING_SIZE,
+		 "0x%08x", bottom->lic_version);
+
+	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+		snprintf(fc_host_hardware_version(shost),
+			 FC_VERSION_STRING_SIZE,
+			 "0x%08x", bottom->hardware_version);
+		memcpy(fc_host_serial_number(shost), bottom->serial_number,
+		       min(FC_SERIAL_NUMBER_SIZE, 17));
+		EBCASC(fc_host_serial_number(shost),
+		       min(FC_SERIAL_NUMBER_SIZE, 17));
+	}
+
+	/* adjust pointers for missing command code */
+	nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
+					- sizeof(u32));
+	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
+					- sizeof(u32));
+
+	snprintf(fc_host_manufacturer(shost), FC_SERIAL_NUMBER_SIZE, "%s",
+		 "IBM");
+	fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
+	fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
+	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
+
+	zfcp_scsi_set_prot(adapter);
+
+	/* do not evaluate invalid fields */
+	if (bottom_incomplete)
+		return;
+
+	fc_host_port_id(shost) = ntoh24(bottom->s_id);
+	fc_host_speed(shost) =
+		zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
+
+	snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x",
+		 bottom->adapter_type);
+
+	switch (bottom->fc_topology) {
+	case FSF_TOPO_P2P:
+		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+		fc_host_fabric_name(shost) = 0;
+		break;
+	case FSF_TOPO_FABRIC:
+		fc_host_fabric_name(shost) = be64_to_cpu(plogi->fl_wwnn);
+		if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
+			fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+		else
+			fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+		break;
+	case FSF_TOPO_AL:
+		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+		fallthrough;
+	default:
+		fc_host_fabric_name(shost) = 0;
+		break;
+	}
+}
+
+void zfcp_scsi_shost_update_port_data(
+	struct zfcp_adapter *const adapter,
+	const struct fsf_qtcb_bottom_port *const bottom)
+{
+	struct Scsi_Host *const shost = adapter->scsi_host;
+
+	if (shost == NULL)
+		return;
+
+	fc_host_permanent_port_name(shost) = bottom->wwpn;
+	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
+	fc_host_supported_speeds(shost) =
+		zfcp_fsf_convert_portspeed(bottom->supported_speed);
+	memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
+	       FC_FC4_LIST_SIZE);
+	memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
+	       FC_FC4_LIST_SIZE);
+}
+
 struct fc_function_template zfcp_transport_functions = {
 	.show_starget_port_id = 1,
 	.show_starget_port_name = 1,


@@ -216,10 +216,22 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
{ {
struct ccw_device *cdev = to_ccwdev(dev); struct ccw_device *cdev = to_ccwdev(dev);
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
int retval = 0;
if (!adapter) if (!adapter)
return -ENODEV; return -ENODEV;
/*
* If `scsi_host` is missing, we can't schedule `scan_work`, as it
* makes use of the corresponding fc_host object. But this state is
* only possible if xconfig/xport data has never completed yet,
* and we couldn't successfully scan for ports anyway.
*/
if (adapter->scsi_host == NULL) {
retval = -ENODEV;
goto out;
}
/* /*
* Users wish is our command: immediately schedule and flush a * Users wish is our command: immediately schedule and flush a
* worker to conduct a synchronous port scan, that is, neither * worker to conduct a synchronous port scan, that is, neither
@@ -227,9 +239,9 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
 	 */
 	queue_delayed_work(adapter->work_queue, &adapter->scan_work, 0);
 	flush_delayed_work(&adapter->scan_work);
 
+out:
 	zfcp_ccw_adapter_put(adapter);
-	return (ssize_t) count;
+	return retval ? retval : (ssize_t) count;
 }
 
 static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
 		     zfcp_sysfs_port_rescan_store);


@@ -2237,7 +2237,7 @@ static bool __init blogic_inquiry(struct blogic_adapter *adapter)
"INQUIRE INSTALLED DEVICES ID 0 TO 7"); "INQUIRE INSTALLED DEVICES ID 0 TO 7");
for (tgt_id = 0; tgt_id < 8; tgt_id++) for (tgt_id = 0; tgt_id < 8; tgt_id++)
adapter->tgt_flags[tgt_id].tgt_exists = adapter->tgt_flags[tgt_id].tgt_exists =
(installed_devs0to7[tgt_id] != 0 ? true : false); installed_devs0to7[tgt_id] != 0;
} }
/* /*
Issue the Inquire Setup Information command. Issue the Inquire Setup Information command.


@@ -814,7 +814,6 @@ int aac_probe_container(struct aac_dev *dev, int cid)
 		kfree(scsidev);
 		return -ENOMEM;
 	}
-	scsicmd->list.next = NULL;
 	scsicmd->scsi_done = aac_probe_container_scsi_done;
 
 	scsicmd->device = scsidev;


@@ -513,15 +513,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 		goto cleanup;
 	}
 
-	user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
-	if (!user_srbcmd) {
-		dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n"));
-		rcode = -ENOMEM;
-		goto cleanup;
-	}
-	if(copy_from_user(user_srbcmd, user_srb,fibsize)){
-		dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
-		rcode = -EFAULT;
+	user_srbcmd = memdup_user(user_srb, fibsize);
+	if (IS_ERR(user_srbcmd)) {
+		rcode = PTR_ERR(user_srbcmd);
+		user_srbcmd = NULL;
 		goto cleanup;
 	}
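The hunk above collapses a kmalloc() plus copy_from_user() pair into memdup_user(), which does both steps and reports failure through the ERR_PTR convention (-ENOMEM or -EFAULT). A hedged sketch of the resulting shape; copy_request() and its parameters are placeholders, not aacraid names:

#include <linux/err.h>		/* IS_ERR(), PTR_ERR() */
#include <linux/slab.h>		/* kfree() */
#include <linux/string.h>	/* memdup_user() */

/* Placeholder helper: duplicate a user buffer, use it, free it. */
static int copy_request(const void __user *usrc, size_t len)
{
	void *buf = memdup_user(usrc, len);	/* kmalloc + copy in one call */

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT; nothing to free */

	/* ... validate and process buf here ... */

	kfree(buf);
	return 0;
}

Note the diff also resets user_srbcmd to NULL on failure so the shared cleanup label can call kfree() unconditionally; kfree(NULL) is a no-op.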


@@ -2351,7 +2351,7 @@ fib_free_out:
 	goto out;
 }
 
-int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
+static int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
 {
 	struct tm cur_tm;
 	char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
@@ -2380,7 +2380,7 @@ out:
 	return ret;
 }
 
-int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
+static int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
 {
 	int ret = -ENOMEM;
 	struct fib *fibptr;


@@ -864,7 +864,7 @@ static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info,
 	return HBA_IU_TYPE_SATA_REQ;
 }
 
-void aac_tmf_callback(void *context, struct fib *fibptr)
+static void aac_tmf_callback(void *context, struct fib *fibptr)
 {
 	struct aac_hba_resp *err =
 		&((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
@@ -1078,7 +1078,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
  * @scsi_cmd: SCSI command block causing the reset
  *
  */
-int aac_eh_host_reset(struct scsi_cmnd *cmd)
+static int aac_eh_host_reset(struct scsi_cmnd *cmd)
 {
 	struct scsi_device * dev = cmd->device;
 	struct Scsi_Host * host = dev->host;
@@ -1632,7 +1632,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	struct Scsi_Host *shost;
 	struct aac_dev *aac;
 	struct list_head *insert = &aac_devices;
-	int error = -ENODEV;
+	int error;
 	int unique_id = 0;
 	u64 dmamask;
 	int mask_bits = 0;
@@ -1657,7 +1657,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	error = pci_enable_device(pdev);
 	if (error)
 		goto out;
-	error = -ENODEV;
 
 	if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
 		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
@@ -1689,8 +1688,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	pci_set_master(pdev);
 
 	shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
-	if (!shost)
+	if (!shost) {
+		error = -ENOMEM;
 		goto out_disable_pdev;
+	}
 
 	shost->irq = pdev->irq;
 	shost->unique_id = unique_id;
@@ -1714,8 +1715,11 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB,
 			    sizeof(struct fib),
 			    GFP_KERNEL);
-	if (!aac->fibs)
+	if (!aac->fibs) {
+		error = -ENOMEM;
 		goto out_free_host;
+	}
 
 	spin_lock_init(&aac->fib_lock);
 	mutex_init(&aac->ioctl_mutex);


@@ -3662,8 +3662,7 @@ ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force)
 		return;
 
 	tstate = ahd->enabled_targets[scsi_id];
-	if (tstate != NULL)
-		kfree(tstate);
+	kfree(tstate);
 	ahd->enabled_targets[scsi_id] = NULL;
 }
 #endif
@@ -6054,14 +6053,13 @@ ahd_alloc(void *platform_arg, char *name)
 {
 	struct ahd_softc *ahd;
 
-	ahd = kmalloc(sizeof(*ahd), GFP_ATOMIC);
+	ahd = kzalloc(sizeof(*ahd), GFP_ATOMIC);
 	if (!ahd) {
 		printk("aic7xxx: cannot malloc softc!\n");
 		kfree(name);
 		return NULL;
 	}
-	memset(ahd, 0, sizeof(*ahd));
 	ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC);
 	if (ahd->seep_config == NULL) {
 		kfree(ahd);
@@ -6120,8 +6118,7 @@ ahd_set_unit(struct ahd_softc *ahd, int unit)
 void
 ahd_set_name(struct ahd_softc *ahd, char *name)
 {
-	if (ahd->name != NULL)
-		kfree(ahd->name);
+	kfree(ahd->name);
 	ahd->name = name;
 }
@@ -6182,12 +6179,9 @@ ahd_free(struct ahd_softc *ahd)
 		kfree(ahd->black_hole);
 	}
 #endif
-	if (ahd->name != NULL)
-		kfree(ahd->name);
-	if (ahd->seep_config != NULL)
-		kfree(ahd->seep_config);
-	if (ahd->saved_stack != NULL)
-		kfree(ahd->saved_stack);
+	kfree(ahd->name);
+	kfree(ahd->seep_config);
+	kfree(ahd->saved_stack);
 	kfree(ahd);
 	return;
 }


@@ -2178,8 +2178,7 @@ ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
 	if (channel == 'B')
 		scsi_id += 8;
 	tstate = ahc->enabled_targets[scsi_id];
-	if (tstate != NULL)
-		kfree(tstate);
+	kfree(tstate);
 	ahc->enabled_targets[scsi_id] = NULL;
 }
 #endif
@@ -4384,13 +4383,13 @@ ahc_alloc(void *platform_arg, char *name)
 	struct ahc_softc *ahc;
 	int	i;
 
-	ahc = kmalloc(sizeof(*ahc), GFP_ATOMIC);
+	ahc = kzalloc(sizeof(*ahc), GFP_ATOMIC);
 	if (!ahc) {
 		printk("aic7xxx: cannot malloc softc!\n");
 		kfree(name);
 		return NULL;
 	}
-	memset(ahc, 0, sizeof(*ahc));
 	ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
 	if (ahc->seep_config == NULL) {
 		kfree(ahc);
@@ -4453,8 +4452,7 @@ ahc_set_unit(struct ahc_softc *ahc, int unit)
 void
 ahc_set_name(struct ahc_softc *ahc, char *name)
 {
-	if (ahc->name != NULL)
-		kfree(ahc->name);
+	kfree(ahc->name);
 	ahc->name = name;
 }
@@ -4515,10 +4513,8 @@ ahc_free(struct ahc_softc *ahc)
 		kfree(ahc->black_hole);
 	}
 #endif
-	if (ahc->name != NULL)
-		kfree(ahc->name);
-	if (ahc->seep_config != NULL)
-		kfree(ahc->seep_config);
+	kfree(ahc->name);
+	kfree(ahc->seep_config);
 	kfree(ahc);
 	return;
 }
@@ -4927,8 +4923,7 @@ ahc_fini_scbdata(struct ahc_softc *ahc)
 	case 0:
 		break;
 	}
-	if (scb_data->scbarray != NULL)
-		kfree(scb_data->scbarray);
+	kfree(scb_data->scbarray);
 }
 
 static void
static void static void


@@ -406,7 +406,7 @@ struct asd_manuf_sec {
 	u8	sas_addr[SAS_ADDR_SIZE];
 	u8	pcba_sn[ASD_PCBA_SN_SIZE];
 	/* Here start the other segments */
-	u8	linked_list[0];
+	u8	linked_list[];
 } __attribute__ ((packed));
 
 struct asd_manuf_phy_desc {
@@ -449,7 +449,7 @@ struct asd_ms_sb_desc {
 	u8	type;
 	u8	node_desc_index;
 	u8	conn_desc_index;
-	u8	_recvd[0];
+	u8	_recvd[];
 } __attribute__ ((packed));
 
 #if 0
@@ -478,12 +478,12 @@ struct asd_ms_conn_desc {
 	u8	size_sideband_desc;
 	u32	_resvd;
 	u8	name[16];
-	struct asd_ms_sb_desc sb_desc[0];
+	struct asd_ms_sb_desc sb_desc[];
 } __attribute__ ((packed));
 
 struct asd_nd_phy_desc {
 	u8	vp_attch_type;
-	u8	attch_specific[0];
+	u8	attch_specific[];
 } __attribute__ ((packed));
 
 #if 0
@@ -503,7 +503,7 @@ struct asd_ms_node_desc {
 	u8	size_phy_desc;
 	u8	_resvd;
 	u8	name[16];
-	struct asd_nd_phy_desc phy_desc[0];
+	struct asd_nd_phy_desc phy_desc[];
 } __attribute__ ((packed));
 
 struct asd_ms_conn_map {
@@ -518,7 +518,7 @@ struct asd_ms_conn_map {
 	u8	usage_model_id;
 	u32	_resvd;
 	struct asd_ms_conn_desc conn_desc[0];
-	struct asd_ms_node_desc node_desc[0];
+	struct asd_ms_node_desc node_desc[];
 } __attribute__ ((packed));
 
 struct asd_ctrla_phy_entry {
@@ -542,7 +542,7 @@ struct asd_ll_el {
 	u8	id0;
 	u8	id1;
 	__le16	next;
-	u8	something_here[0];
+	u8	something_here[];
 } __attribute__ ((packed));
 
 static int asd_poll_flash(struct asd_ha_struct *asd_ha)
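The [0]-to-[] conversions above replace the GNU zero-length-array idiom with a C99 flexible array member. Such a member must be the last one in the struct, which is why conn_desc[0] in asd_ms_conn_map stays as-is while the trailing node_desc becomes node_desc[]. Allocation still sizes the fixed header plus the trailing bytes; a sketch with an invented descriptor type:

#include <linux/slab.h>
#include <linux/stddef.h>	/* offsetof() */
#include <linux/types.h>

struct desc_like {
	u8 type;
	u8 count;
	u8 payload[];	/* C99 flexible array member; was payload[0] */
};

static struct desc_like *desc_alloc(u8 n)
{
	/* header size plus n trailing bytes; payload[] itself adds no size */
	struct desc_like *d =
		kzalloc(offsetof(struct desc_like, payload) + n, GFP_KERNEL);

	if (d)
		d->count = n;
	return d;
}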


@@ -708,7 +708,7 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
 	}
 }
 
-bfa_boolean_t
+static bfa_boolean_t
 bfa_isr_rspq(struct bfa_s *bfa, int qid)
 {
 	struct bfi_msg_s *m;


@@ -436,7 +436,7 @@ bfa_fcpim_port_iostats(struct bfa_s *bfa,
 	return BFA_STATUS_OK;
 }
 
-void
+static void
 bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
 {
 	struct bfa_itnim_latency_s *io_lat =
@@ -453,7 +453,7 @@ bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
 	io_lat->avg[idx] += val;
 }
 
-void
+static void
 bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
 {
 	ioim->start_time = jiffies;


@@ -1283,7 +1283,7 @@ bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port)
 	n2n_port->reply_oxid = 0;
 }
 
-void
+static void
 bfa_fcport_get_loop_attr(struct bfa_fcs_lport_s *port)
 {
 	int i = 0, j = 0, bit = 0, alpa_bit = 0;
@@ -4358,7 +4358,7 @@ bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
 			bfa_sm_set_state(ns,
 				bfa_fcs_lport_ns_sm_sending_gid_ft);
 			bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
-		};
+		}
 		break;
 
 	default:


@@ -1575,7 +1575,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
 			bfa_timer_start(rport->fcs->bfa, &rport->timer,
 					bfa_fcs_rport_timeout, rport,
 					bfa_fcs_rport_del_timeout);
-		};
+		}
 		break;
 
 	case RPSM_EVENT_DELETE:
@@ -2449,7 +2449,7 @@ bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport)
 		bfa_fcs_itnim_brp_online(rport->itnim);
 		if (!BFA_FCS_PID_IS_WKA(rport->pid))
 			bfa_fcs_rpf_rport_online(rport);
-	};
+	}
 
 	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
 	wwn2str(rpwwn_buf, rport->pwwn);


@@ -364,7 +364,7 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
 	writel(r32, rb + FNC_PERS_REG);
 }
 
-bfa_boolean_t
+static bfa_boolean_t
 bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
 {
 	u32	r32;
@@ -744,7 +744,7 @@ bfa_ioc_ct2_mem_init(void __iomem *rb)
 	writel(0, (rb + CT2_MBIST_CTL_REG));
 }
 
-void
+static void
 bfa_ioc_ct2_mac_reset(void __iomem *rb)
 {
 	/* put port0, port1 MAC & AHB in reset */


@@ -4284,7 +4284,7 @@ bfa_fcport_dportdisable(struct bfa_s *bfa)
 	bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
 }
 
-void
+static void
 bfa_fcport_ddportenable(struct bfa_s *bfa)
 {
 	/*
@@ -4293,7 +4293,7 @@ bfa_fcport_ddportenable(struct bfa_s *bfa)
 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
 }
 
-void
+static void
 bfa_fcport_ddportdisable(struct bfa_s *bfa)
 {
 	/*
@@ -5517,7 +5517,6 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
 	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
 	struct bfa_uf_buf_s *uf_buf;
 	uint8_t *buf;
-	struct fchs_s *fchs;
 
 	uf_buf = (struct bfa_uf_buf_s *)
 			bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
@@ -5526,8 +5525,6 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
 	m->frm_len = be16_to_cpu(m->frm_len);
 	m->xfr_len = be16_to_cpu(m->xfr_len);
 
-	fchs = (struct fchs_s *)uf_buf;
-
 	list_del(&uf->qe);	/* dequeue from posted queue */
 
 	uf->data_ptr = buf;


@@ -50,7 +50,7 @@ int pcie_max_read_reqsz;
 int		bfa_debugfs_enable = 1;
 int		msix_disable_cb = 0, msix_disable_ct = 0;
 int		max_xfer_size = BFAD_MAX_SECTORS >> 1;
-int		max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
+static int	max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
 
 /* Firmware releated */
 u32	bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;


@@ -437,7 +437,7 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
 	return status;
 }
 
-int
+static int
 bfad_im_issue_fc_host_lip(struct Scsi_Host *shost)
 {
 	struct bfad_im_port_s *im_port =
@@ -562,7 +562,7 @@ bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
 	return 0;
 }
 
-void
+static void
 bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport)
 {
 	struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;


@@ -136,7 +136,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
 	return 0;
 }
 
-int
+static int
 bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
 {
 	struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;


@@ -945,7 +945,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
 			 */
 			if (interface->enabled)
 				fcoe_ctlr_link_up(ctlr);
-		};
+		}
 	} else if (fcoe_ctlr_link_down(ctlr)) {
 		switch (cdev->enabled) {
 		case FCOE_CTLR_DISABLED:
@@ -965,7 +965,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
 			put_cpu();
 			fcoe_clean_pending_queue(lport);
 			wait_for_upload = 1;
-		};
+		}
 		}
 	}
 	mutex_unlock(&bnx2fc_dev_lock);


@@ -1081,6 +1081,7 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
 }
 
 static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
+	__must_hold(&tgt->tgt_lock)
 {
 	struct bnx2fc_rport *tgt = io_req->tgt;
 	unsigned int time_left;


@@ -959,6 +959,7 @@ static int init_act_open(struct cxgbi_sock *csk)
 	struct net_device *ndev = cdev->ports[csk->port_id];
 	struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
 	struct sk_buff *skb = NULL;
+	int ret;
 
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		  "csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);
@@ -979,16 +980,16 @@ static int init_act_open(struct cxgbi_sock *csk)
 	csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
 	if (csk->atid < 0) {
 		pr_err("NO atid available.\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto put_sock;
 	}
 	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
 	cxgbi_sock_get(csk);
 
 	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
 	if (!skb) {
-		cxgb3_free_atid(t3dev, csk->atid);
-		cxgbi_sock_put(csk);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto free_atid;
 	}
 	skb->sk = (struct sock *)csk;
 	set_arp_failure_handler(skb, act_open_arp_failure);
@@ -1010,6 +1011,15 @@ static int init_act_open(struct cxgbi_sock *csk)
 	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
 	send_act_open_req(csk, skb, csk->l2t);
 	return 0;
+
+free_atid:
+	cxgb3_free_atid(t3dev, csk->atid);
+put_sock:
+	cxgbi_sock_put(csk);
+	l2t_release(t3dev, csk->l2t);
+	csk->l2t = NULL;
+
+	return ret;
 }
 
 cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
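The cxgb3i fix above turns early returns that leaked the l2t entry and socket reference into the kernel's usual layered goto unwind: each failure jumps to the label that releases everything acquired so far, in reverse order. A generic, self-contained sketch of the pattern (the acquire/release helpers are stubs, not driver calls):

/* Stub resource helpers, purely illustrative. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -1; }	/* pretend the last step fails */
static void release_a(void) { }
static void release_b(void) { }

static int setup(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = acquire_b();
	if (ret)
		goto undo_a;

	ret = acquire_c();
	if (ret)
		goto undo_b;

	return 0;

undo_b:
	release_b();			/* reverse order of acquisition */
undo_a:
	release_a();
	return ret;
}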


@@ -1127,10 +1127,9 @@ static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
 	if (!csk)
 		goto rel_skb;
 
-	if (csk)
-		pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
-			       (&csk->saddr), (&csk->daddr), csk,
-			       csk->state, csk->flags, csk->tid, rpl->status);
+	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
+		       (&csk->saddr), (&csk->daddr), csk,
+		       csk->state, csk->flags, csk->tid, rpl->status);
 
 	if (rpl->status == CPL_ERR_ABORT_FAILED)
 		goto rel_skb;


@@ -3744,6 +3744,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
 	cfg->afu_cookie = cfg->ops->create_afu(pdev);
 	if (unlikely(!cfg->afu_cookie)) {
 		dev_err(dev, "%s: create_afu failed\n", __func__);
+		rc = -ENOMEM;
 		goto out_remove;
 	}


@@ -1120,7 +1120,7 @@ static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u6
 {
 	struct adpt_device* d;
 
-	if(chan < 0 || chan >= MAX_CHANNEL)
+	if (chan >= MAX_CHANNEL)
 		return NULL;
 
 	d = pHba->channel[chan].device[id];
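The dpt_i2o hunk above drops the "chan < 0" half of the range test: per the hunk header, chan is a u32, so that comparison can never be true and only the upper bound matters. A one-function sketch of the same idea:

#include <linux/types.h>

/* With an unsigned index, "chan < 0" is always false; the upper-bound
 * comparison alone is the complete range check. */
static bool chan_valid(u32 chan, u32 max_channel)
{
	return chan < max_channel;
}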


@@ -1915,7 +1915,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 		case FCOE_CTLR_ENABLED:
 		case FCOE_CTLR_UNUSED:
 			fcoe_ctlr_link_up(ctlr);
-		};
+		}
 	} else if (fcoe_ctlr_link_down(ctlr)) {
 		switch (cdev->enabled) {
 		case FCOE_CTLR_DISABLED:
@@ -1927,7 +1927,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 			stats->LinkFailureCount++;
 			put_cpu();
 			fcoe_clean_pending_queue(lport);
-		};
+		}
 	}
 out:
 	return rc;


@@ -49,8 +49,8 @@
 static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
 static struct kmem_cache *fnic_io_req_cache;
-LIST_HEAD(fnic_list);
-DEFINE_SPINLOCK(fnic_list_lock);
+static LIST_HEAD(fnic_list);
+static DEFINE_SPINLOCK(fnic_list_lock);
 
 /* Supported devices by fnic module */
 static struct pci_device_id fnic_id_table[] = {


@@ -2624,8 +2624,8 @@ int fnic_host_reset(struct scsi_cmnd *sc)
 	unsigned long flags;
 
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
-	if (fnic->internal_reset_inprogress == 0) {
-		fnic->internal_reset_inprogress = 1;
+	if (!fnic->internal_reset_inprogress) {
+		fnic->internal_reset_inprogress = true;
 	} else {
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2654,7 +2654,7 @@ int fnic_host_reset(struct scsi_cmnd *sc)
 	}
 
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
-	fnic->internal_reset_inprogress = 0;
+	fnic->internal_reset_inprogress = false;
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 	return ret;
 }


@@ -254,7 +254,7 @@ void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
 	}
 }
 
-int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
+static int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
 {
 	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
 	int delay;
@@ -316,7 +316,7 @@ int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
return -ETIMEDOUT; return -ETIMEDOUT;
} }
int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, static int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
int wait) int wait)
{ {
struct devcmd2_controller *dc2c = vdev->devcmd2; struct devcmd2_controller *dc2c = vdev->devcmd2;
@@ -411,7 +411,7 @@ int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
} }
int vnic_dev_init_devcmd1(struct vnic_dev *vdev) static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{ {
vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
if (!vdev->devcmd) if (!vdev->devcmd)
@@ -422,7 +422,7 @@ int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
} }
int vnic_dev_init_devcmd2(struct vnic_dev *vdev) static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
{ {
int err; int err;
unsigned int fetch_index; unsigned int fetch_index;
@@ -492,7 +492,7 @@ err_free_devcmd2:
} }
void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev) static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{ {
vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
vnic_wq_disable(&vdev->devcmd2->wq); vnic_wq_disable(&vdev->devcmd2->wq);
@@ -503,7 +503,7 @@ void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
} }
int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev, static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait) enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{ {
int err; int err;
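All of these helpers are called only from within this file, so giving them internal linkage documents that fact, removes them from the global symbol table, and silences "missing prototype" warnings from sparse and -Wmissing-prototypes. The general rule, sketched in plain C (names hypothetical):

	/* used only inside this translation unit: static, no header declaration */
	static int add_one(int x)
	{
		return x + 1;
	}

	/* declared in a header and callable from other files */
	int public_entry_point(int x)
	{
		return add_one(x);
	}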

View File

@@ -25,7 +25,7 @@
#include "vnic_wq.h" #include "vnic_wq.h"
int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq, static int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
unsigned int index, enum vnic_res_type res_type) unsigned int index, enum vnic_res_type res_type)
{ {
wq->ctrl = vnic_dev_get_res(vdev, res_type, index); wq->ctrl = vnic_dev_get_res(vdev, res_type, index);
@@ -37,7 +37,7 @@ int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
} }
int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq, static int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
unsigned int desc_count, unsigned int desc_size) unsigned int desc_count, unsigned int desc_size)
{ {
return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);

View File

@@ -329,8 +329,8 @@ static void gdth_scsi_done(struct scsi_cmnd *scp)
 	scp->scsi_done(scp);
 }

-int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
-		   int timeout, u32 *info)
+static int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd,
+			  char *cmnd, int timeout, u32 *info)
 {
 	gdth_ha_str *ha = shost_priv(sdev->host);
 	struct scsi_cmnd *scp;

View File

@@ -898,8 +898,11 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 	struct device *dev = hisi_hba->dev;

+	dev_dbg(dev, "phy%d OOB ready\n", phy_no);
+	if (phy->phy_attached)
+		return;
+
 	if (!timer_pending(&phy->timer)) {
-		dev_dbg(dev, "phy%d OOB ready\n", phy_no);
 		phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
 		add_timer(&phy->timer);
 	}

View File

@@ -1175,15 +1175,14 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
 	}
 }

-static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
-			       struct hisi_sas_slot *slot)
+static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
+				struct hisi_sas_slot *slot)
 {
 	struct sas_task *task = slot->task;
 	struct hisi_sas_device *sas_dev;
 	struct device *dev = hisi_hba->dev;
 	struct task_status_struct *ts;
 	struct domain_device *device;
-	enum exec_status sts;
 	struct hisi_sas_complete_v1_hdr *complete_queue =
 			hisi_hba->complete_hdr[slot->cmplt_queue];
 	struct hisi_sas_complete_v1_hdr *complete_hdr;
@@ -1194,7 +1193,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
 	cmplt_hdr_data = le32_to_cpu(complete_hdr->data);

 	if (unlikely(!task || !task->lldd_task || !task->dev))
-		return -EINVAL;
+		return;

 	ts = &task->task_status;
 	device = task->dev;
@@ -1260,7 +1259,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
 		slot_err_v1_hw(hisi_hba, task, slot);

 		if (unlikely(slot->abort))
-			return ts->stat;
+			return;
 		goto out;
 	}
@@ -1309,12 +1308,9 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
 out:
 	hisi_sas_slot_task_free(hisi_hba, task, slot);
-	sts = ts->stat;

 	if (task->task_done)
 		task->task_done(task);
-
-	return sts;
 }

 /* Interrupts */
@@ -1757,6 +1753,7 @@ static struct device_attribute *host_attrs_v1_hw[] = {
 static struct scsi_host_template sht_v1_hw = {
 	.name			= DRV_NAME,
+	.proc_name		= DRV_NAME,
 	.module			= THIS_MODULE,
 	.queuecommand		= sas_queuecommand,
 	.target_alloc		= sas_target_alloc,

View File

@@ -2318,8 +2318,8 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
 	}
 }

-static int
-slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
+static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
+				struct hisi_sas_slot *slot)
 {
 	struct sas_task *task = slot->task;
 	struct hisi_sas_device *sas_dev;
@@ -2327,7 +2327,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 	struct task_status_struct *ts;
 	struct domain_device *device;
 	struct sas_ha_struct *ha;
-	enum exec_status sts;
 	struct hisi_sas_complete_v2_hdr *complete_queue =
 			hisi_hba->complete_hdr[slot->cmplt_queue];
 	struct hisi_sas_complete_v2_hdr *complete_hdr =
@@ -2337,7 +2336,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 	u32 dw0;

 	if (unlikely(!task || !task->lldd_task || !task->dev))
-		return -EINVAL;
+		return;

 	ts = &task->task_status;
 	device = task->dev;
@@ -2406,7 +2405,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 			 error_info[2], error_info[3]);

 		if (unlikely(slot->abort))
-			return ts->stat;
+			return;
 		goto out;
 	}
@@ -2456,12 +2455,11 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 	}

 out:
-	sts = ts->stat;
 	spin_lock_irqsave(&task->task_state_lock, flags);
 	if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
 		dev_info(dev, "slot complete: task(%pK) aborted\n", task);
-		return SAS_ABORTED_TASK;
+		return;
 	}
 	task->task_state_flags |= SAS_TASK_STATE_DONE;
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -2473,15 +2471,13 @@ out:
 			spin_unlock_irqrestore(&device->done_lock, flags);
 			dev_info(dev, "slot complete: task(%pK) ignored\n",
 				 task);
-			return sts;
+			return;
 		}
 		spin_unlock_irqrestore(&device->done_lock, flags);
 	}

 	if (task->task_done)
 		task->task_done(task);
-
-	return sts;
 }

 static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
@@ -3533,6 +3529,7 @@ static struct device_attribute *host_attrs_v2_hw[] = {
 static struct scsi_host_template sht_v2_hw = {
 	.name			= DRV_NAME,
+	.proc_name		= DRV_NAME,
 	.module			= THIS_MODULE,
 	.queuecommand		= sas_queuecommand,
 	.target_alloc		= sas_target_alloc,

View File

@@ -912,11 +912,15 @@ static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
 		return -EINVAL;
 	}

-	/* Switch over to MSI handling , from PCI AER default */
+	/*
+	 * This DSM handles some hardware-related configurations:
+	 * 1. Switch over to MSI error handling in kernel
+	 * 2. BIOS *may* reset some register values through this method
+	 */
 	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0,
 				DSM_FUNC_ERR_HANDLE_MSI, NULL);
 	if (!obj)
-		dev_warn(dev, "Switch over to MSI handling failed\n");
+		dev_warn(dev, "can not find DSM method, ignore\n");
 	else
 		ACPI_FREE(obj);
@@ -2152,8 +2156,8 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
 	}
 }

-static int
-slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
+static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
+				struct hisi_sas_slot *slot)
 {
 	struct sas_task *task = slot->task;
 	struct hisi_sas_device *sas_dev;
@@ -2161,7 +2165,6 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 	struct task_status_struct *ts;
 	struct domain_device *device;
 	struct sas_ha_struct *ha;
-	enum exec_status sts;
 	struct hisi_sas_complete_v3_hdr *complete_queue =
 			hisi_hba->complete_hdr[slot->cmplt_queue];
 	struct hisi_sas_complete_v3_hdr *complete_hdr =
@@ -2171,7 +2174,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 	u32 dw0, dw1, dw3;

 	if (unlikely(!task || !task->lldd_task || !task->dev))
-		return -EINVAL;
+		return;

 	ts = &task->task_status;
 	device = task->dev;
@@ -2233,7 +2236,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 			error_info[0], error_info[1],
 			error_info[2], error_info[3]);

 		if (unlikely(slot->abort))
-			return ts->stat;
+			return;
 		goto out;
 	}
@@ -2278,12 +2281,11 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 	}

 out:
-	sts = ts->stat;
 	spin_lock_irqsave(&task->task_state_lock, flags);
 	if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
 		dev_info(dev, "slot complete: task(%pK) aborted\n", task);
-		return SAS_ABORTED_TASK;
+		return;
 	}
 	task->task_state_flags |= SAS_TASK_STATE_DONE;
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -2295,15 +2297,13 @@ out:
 			spin_unlock_irqrestore(&device->done_lock, flags);
 			dev_info(dev, "slot complete: task(%pK) ignored\n ",
 				 task);
-			return sts;
+			return;
 		}
 		spin_unlock_irqrestore(&device->done_lock, flags);
 	}

 	if (task->task_done)
 		task->task_done(task);
-
-	return sts;
 }

 static irqreturn_t cq_thread_v3_hw(int irq_no, void *p)
@@ -2897,6 +2897,7 @@ static const struct hisi_sas_debugfs_reg debugfs_axi_reg = {
 };

 static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = {
+	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0),
 	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1),
 	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0_MASK),
 	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1_MASK),
@@ -3071,6 +3072,7 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
 static struct scsi_host_template sht_v3_hw = {
 	.name			= DRV_NAME,
+	.proc_name		= DRV_NAME,
 	.module			= THIS_MODULE,
 	.queuecommand		= sas_queuecommand,
 	.target_alloc		= sas_target_alloc,

View File

@@ -2384,7 +2384,7 @@ static struct vio_driver ibmvscsi_driver = {
 static struct srp_function_template ibmvscsi_transport_functions = {
 };

-int __init ibmvscsi_module_init(void)
+static int __init ibmvscsi_module_init(void)
 {
 	int ret;
@@ -2406,7 +2406,7 @@ int __init ibmvscsi_module_init(void)
 	return ret;
 }

-void __exit ibmvscsi_module_exit(void)
+static void __exit ibmvscsi_module_exit(void)
 {
 	vio_unregister_driver(&ibmvscsi_driver);
 	srp_release_transport(ibmvscsi_transport_template);

View File

@@ -1164,7 +1164,7 @@ static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int pr
 	default:
 		res->ata_class = ATA_DEV_UNKNOWN;
 		break;
-	};
+	}
 }

 /**
@@ -9529,8 +9529,7 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
 		}
 	}

-	if (ioa_cfg->ipr_cmd_pool)
-		dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
+	dma_pool_destroy(ioa_cfg->ipr_cmd_pool);

 	kfree(ioa_cfg->ipr_cmnd_list);
 	kfree(ioa_cfg->ipr_cmnd_list_dma);
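The NULL check can go because dma_pool_destroy(), like kfree(), is a no-op when passed NULL. Most kernel release functions follow this convention, which keeps cleanup paths unconditional; a sketch of the resulting shape (context struct hypothetical):

	static void release_resources(struct my_ctx *ctx)
	{
		dma_pool_destroy(ctx->pool);	/* safe even if ctx->pool == NULL */
		kfree(ctx->list);		/* kfree(NULL) is also a no-op */
	}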

View File

@@ -500,19 +500,19 @@ struct sci_timer {
 static inline
 void sci_init_timer(struct sci_timer *tmr, void (*fn)(struct timer_list *t))
 {
-	tmr->cancel = 0;
+	tmr->cancel = false;
 	timer_setup(&tmr->timer, fn, 0);
 }

 static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
 {
-	tmr->cancel = 0;
+	tmr->cancel = false;
 	mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec));
 }

 static inline void sci_del_timer(struct sci_timer *tmr)
 {
-	tmr->cancel = 1;
+	tmr->cancel = true;
 	del_timer(&tmr->timer);
 }

View File

@@ -2627,7 +2627,9 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
 	if (xmit_can_sleep) {
 		snprintf(ihost->workq_name, sizeof(ihost->workq_name),
 			"iscsi_q_%d", shost->host_no);
-		ihost->workq = create_singlethread_workqueue(ihost->workq_name);
+		ihost->workq = alloc_workqueue("%s",
+			WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
+			2, ihost->workq_name);
 		if (!ihost->workq)
 			goto free_host;
 	}
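Switching from the legacy create_singlethread_workqueue() wrapper to alloc_workqueue() lets the driver pass WQ_SYSFS, which exposes the queue's attributes (cpumask, nice value) under /sys/devices/virtual/workqueue/ for tuning, while WQ_MEM_RECLAIM keeps a rescuer thread so the xmit path cannot stall under memory pressure. A minimal sketch of the call (queue name hypothetical):

	struct workqueue_struct *wq;

	/* printf-style first argument; max_active bounds concurrent work items */
	wq = alloc_workqueue("%s",
			     WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
			     2, "iscsi_q_0");
	if (!wq)
		return -ENOMEM;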

View File

@@ -160,6 +160,7 @@ qc_already_gone:
 }

 static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
+	__must_hold(ap->lock)
 {
 	struct sas_task *task;
 	struct scatterlist *sg;
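__must_hold() is a sparse context annotation: it declares that the named lock is held on entry and still held on exit, so a `make C=2` build can warn about unbalanced acquire/release paths inside the function. A generic sketch of the pattern (types hypothetical):

	struct ctx {
		spinlock_t lock;
		int count;
	};

	/* caller acquires ctx->lock; sparse checks the function keeps it balanced */
	static int consume_one(struct ctx *ctx)
		__must_hold(&ctx->lock)
	{
		if (!ctx->count)
			return -EAGAIN;
		ctx->count--;
		return 0;
	}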

View File

@@ -627,6 +627,19 @@ struct lpfc_ras_fwlog {
 	enum ras_state state; /* RAS logging running state */
 };

+enum lpfc_irq_chann_mode {
+	/* Assign IRQs to all possible cpus that have hardware queues */
+	NORMAL_MODE,
+
+	/* Assign IRQs only to cpus on the same numa node as HBA */
+	NUMA_MODE,
+
+	/* Assign IRQs only on non-hyperthreaded CPUs. This is the
+	 * same as normal_mode, but assign IRQS only on physical CPUs.
+	 */
+	NHT_MODE,
+};
+
 struct lpfc_hba {
 	/* SCSI interface function jump table entries */
 	struct lpfc_io_buf * (*lpfc_get_scsi_buf)
@@ -835,7 +848,6 @@ struct lpfc_hba {
 	uint32_t cfg_fcp_mq_threshold;
 	uint32_t cfg_hdw_queue;
 	uint32_t cfg_irq_chann;
-	uint32_t cfg_irq_numa;
 	uint32_t cfg_suppress_rsp;
 	uint32_t cfg_nvme_oas;
 	uint32_t cfg_nvme_embed_cmd;
@@ -1003,6 +1015,7 @@ struct lpfc_hba {
 	mempool_t *active_rrq_pool;

 	struct fc_host_statistics link_stats;
+	enum lpfc_irq_chann_mode irq_chann_mode;
 	enum intr_type_t intr_type;
 	uint32_t intr_mode;
 #define LPFC_INTR_ERROR 0xFFFFFFFF
@@ -1314,19 +1327,19 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
 }

 /**
- * lpfc_next_online_numa_cpu - Finds next online CPU on NUMA node
- * @numa_mask: Pointer to phba's numa_mask member.
+ * lpfc_next_online_cpu - Finds next online CPU on cpumask
+ * @mask: Pointer to phba's cpumask member.
  * @start: starting cpu index
  *
  * Note: If no valid cpu found, then nr_cpu_ids is returned.
  *
  **/
 static inline unsigned int
-lpfc_next_online_numa_cpu(const struct cpumask *numa_mask, unsigned int start)
+lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
 {
 	unsigned int cpu_it;

-	for_each_cpu_wrap(cpu_it, numa_mask, start) {
+	for_each_cpu_wrap(cpu_it, mask, start) {
 		if (cpu_online(cpu_it))
 			break;
 	}
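The rename reflects that the helper now walks an arbitrary affinity mask rather than only a NUMA mask. A hypothetical caller migrating an IRQ away from a CPU going offline might use it like this:

	/* start just past the departing CPU and wrap around the mask */
	unsigned int next = cpumask_next_wrap(cpu, aff_mask, cpu, true);
	unsigned int target = lpfc_next_online_cpu(aff_mask, next);

	if (target < nr_cpu_ids && target != cpu)	/* nr_cpu_ids == none found */
		irq_set_affinity_hint(irq, cpumask_of(target));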

View File

@@ -4874,7 +4874,7 @@ lpfc_request_firmware_upgrade_store(struct device *dev,
 	struct Scsi_Host  *shost = class_to_shost(dev);
 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
-	int val = 0, rc = -EINVAL;
+	int val = 0, rc;

 	/* Sanity check on user data */
 	if (!isdigit(buf[0]))
@@ -5701,17 +5701,69 @@ LPFC_ATTR_R(hdw_queue,
 	    LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
 	    "Set the number of I/O Hardware Queues");

-static inline void
-lpfc_assign_default_irq_numa(struct lpfc_hba *phba)
+#if IS_ENABLED(CONFIG_X86)
+/**
+ * lpfc_cpumask_irq_mode_init - initalizes cpumask of phba based on
+ *				irq_chann_mode
+ * @phba: Pointer to HBA context object.
+ **/
+static void
+lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba)
+{
+	unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE;
+	const struct cpumask *sibling_mask;
+	struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask;
+
+	cpumask_clear(aff_mask);
+
+	if (phba->irq_chann_mode == NUMA_MODE) {
+		/* Check if we're a NUMA architecture */
+		numa_node = dev_to_node(&phba->pcidev->dev);
+		if (numa_node == NUMA_NO_NODE) {
+			phba->irq_chann_mode = NORMAL_MODE;
+			return;
+		}
+	}
+
+	for_each_possible_cpu(cpu) {
+		switch (phba->irq_chann_mode) {
+		case NUMA_MODE:
+			if (cpu_to_node(cpu) == numa_node)
+				cpumask_set_cpu(cpu, aff_mask);
+			break;
+		case NHT_MODE:
+			sibling_mask = topology_sibling_cpumask(cpu);
+			first_cpu = cpumask_first(sibling_mask);
+			if (first_cpu < nr_cpu_ids)
+				cpumask_set_cpu(first_cpu, aff_mask);
+			break;
+		default:
+			break;
+		}
+	}
+}
+#endif
+
+static void
+lpfc_assign_default_irq_chann(struct lpfc_hba *phba)
 {
 #if IS_ENABLED(CONFIG_X86)
-	/* If AMD architecture, then default is LPFC_IRQ_CHANN_NUMA */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-		phba->cfg_irq_numa = 1;
-	else
-		phba->cfg_irq_numa = 0;
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_AMD:
+		/* If AMD architecture, then default is NUMA_MODE */
+		phba->irq_chann_mode = NUMA_MODE;
+		break;
+	case X86_VENDOR_INTEL:
+		/* If Intel architecture, then default is no hyperthread mode */
+		phba->irq_chann_mode = NHT_MODE;
+		break;
+	default:
+		phba->irq_chann_mode = NORMAL_MODE;
+		break;
+	}
+	lpfc_cpumask_irq_mode_init(phba);
 #else
-	phba->cfg_irq_numa = 0;
+	phba->irq_chann_mode = NORMAL_MODE;
 #endif
 }
@@ -5723,6 +5775,7 @@ lpfc_assign_default_irq_numa(struct lpfc_hba *phba)
  *
  * 0 = Configure number of IRQ Channels to:
  *     if AMD architecture, number of CPUs on HBA's NUMA node
+ *     if Intel architecture, number of physical CPUs.
  *     otherwise, number of active CPUs.
  * [1,256] = Manually specify how many IRQ Channels to use.
  *
@@ -5748,35 +5801,44 @@ MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
 static int
 lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
 {
-	const struct cpumask *numa_mask;
+	const struct cpumask *aff_mask;

 	if (phba->cfg_use_msi != 2) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"8532 use_msi = %u ignoring cfg_irq_numa\n",
 				phba->cfg_use_msi);
-		phba->cfg_irq_numa = 0;
-		phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+		phba->irq_chann_mode = NORMAL_MODE;
+		phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
 		return 0;
 	}

 	/* Check if default setting was passed */
 	if (val == LPFC_IRQ_CHANN_DEF)
-		lpfc_assign_default_irq_numa(phba);
+		lpfc_assign_default_irq_chann(phba);

-	if (phba->cfg_irq_numa) {
-		numa_mask = &phba->sli4_hba.numa_mask;
+	if (phba->irq_chann_mode != NORMAL_MODE) {
+		aff_mask = &phba->sli4_hba.irq_aff_mask;

-		if (cpumask_empty(numa_mask)) {
+		if (cpumask_empty(aff_mask)) {
 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-					"8533 Could not identify NUMA node, "
-					"ignoring cfg_irq_numa\n");
-			phba->cfg_irq_numa = 0;
-			phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+					"8533 Could not identify CPUS for "
+					"mode %d, ignoring\n",
+					phba->irq_chann_mode);
+			phba->irq_chann_mode = NORMAL_MODE;
+			phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
 		} else {
-			phba->cfg_irq_chann = cpumask_weight(numa_mask);
+			phba->cfg_irq_chann = cpumask_weight(aff_mask);
+
+			/* If no hyperthread mode, then set hdwq count to
+			 * aff_mask weight as well
+			 */
+			if (phba->irq_chann_mode == NHT_MODE)
+				phba->cfg_hdw_queue = phba->cfg_irq_chann;
+
 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 					"8543 lpfc_irq_chann set to %u "
-					"(numa)\n", phba->cfg_irq_chann);
+					"(mode: %d)\n", phba->cfg_irq_chann,
+					phba->irq_chann_mode);
 		}
 	} else {
 		if (val > LPFC_IRQ_CHANN_MAX) {
@@ -5787,7 +5849,7 @@ lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
 					val,
 					LPFC_IRQ_CHANN_MIN,
 					LPFC_IRQ_CHANN_MAX);
-			phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+			phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
 			return -EINVAL;
 		}
 		phba->cfg_irq_chann = val;
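The NHT_MODE branch above relies on topology_sibling_cpumask() returning, for any CPU, the set of hardware threads sharing its physical core; cpumask_first() of that set yields the same representative for every sibling, so each core lands in the mask exactly once. Reduced to its essence (mask name hypothetical):

	struct cpumask phys_mask;
	unsigned int cpu, first;

	cpumask_clear(&phys_mask);
	for_each_possible_cpu(cpu) {
		first = cpumask_first(topology_sibling_cpumask(cpu));
		if (first < nr_cpu_ids)
			cpumask_set_cpu(first, &phys_mask);	/* one thread per core */
	}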

View File

@@ -461,7 +461,6 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
 	struct lpfc_nodelist *ndlp;

 	if ((vport->port_type != LPFC_NPIV_PORT) ||
-	    (fc4_type == FC_TYPE_FCP) ||
 	    !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
 		ndlp = lpfc_setup_disc_node(vport, Did);

View File

@@ -2429,7 +2429,8 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
 		return 0;

 	if (dent == phba->debug_InjErrLBA) {
-		if ((buf[0] == 'o') && (buf[1] == 'f') && (buf[2] == 'f'))
+		if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') &&
+		    (dstbuf[2] == 'f'))
 			tmp = (uint64_t)(-1);
 	}

View File

@@ -7936,19 +7936,13 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
 	if (unlikely(!pring))
 		return;

-	if ((phba->pport->load_flag & FC_UNLOADING))
+	if (phba->pport->load_flag & FC_UNLOADING)
 		return;

 	spin_lock_irq(&phba->hbalock);
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		spin_lock(&pring->ring_lock);

-	if ((phba->pport->load_flag & FC_UNLOADING)) {
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			spin_unlock(&pring->ring_lock);
-		spin_unlock_irq(&phba->hbalock);
-		return;
-	}
-
 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
 		cmd = &piocb->iocb;
@@ -8514,6 +8508,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	spin_lock_irq(shost->host_lock);
 	if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
 		spin_unlock_irq(shost->host_lock);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
 		goto dropit;
 	}
 	spin_unlock_irq(shost->host_lock);
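The added lpfc_nlp_put() balances the reference taken when the node was created for this unsolicited event; without it, the early "dropit" exit leaked a nodelist entry. The general shape of the bug, sketched with hypothetical refcounted helpers:

	node = node_create(vport);		/* returns with a reference held */
	if (node_in_dev_loss(node)) {
		if (newnode)
			node_put(node);		/* the put this fix adds */
		goto dropit;			/* early exit no longer leaks */
	}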

View File

@@ -1360,14 +1360,14 @@ lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
 }

 /**
- * lpfc_update_fcf_record - Update driver fcf record
  * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
  * @phba: pointer to lpfc hba data structure.
  * @fcf_index: Index for the lpfc_fcf_record.
  * @new_fcf_record: pointer to hba fcf record.
  *
  * This routine updates the driver FCF priority record from the new HBA FCF
- * record. This routine is called with the host lock held.
+ * record. The hbalock is asserted held in the code path calling this
+ * routine.
  **/
 static void
 __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
@@ -1376,8 +1376,6 @@ __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
 {
 	struct lpfc_fcf_pri *fcf_pri;

-	lockdep_assert_held(&phba->hbalock);
-
 	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
 	fcf_pri->fcf_rec.fcf_index = fcf_index;
 	/* FCF record priority */
@@ -1455,7 +1453,7 @@ lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
  *
  * This routine updates the driver FCF record from the new HBA FCF record
  * together with the address mode, vlan_id, and other informations. This
- * routine is called with the host lock held.
+ * routine is called with the hbalock held.
  **/
 static void
 __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,

View File

@@ -3541,7 +3541,7 @@ struct lpfc_mbx_set_feature {
 #define lpfc_mbx_set_feature_UER_SHIFT		0
 #define lpfc_mbx_set_feature_UER_MASK		0x00000001
 #define lpfc_mbx_set_feature_UER_WORD		word6
-#define lpfc_mbx_set_feature_mds_SHIFT		0
+#define lpfc_mbx_set_feature_mds_SHIFT		2
 #define lpfc_mbx_set_feature_mds_MASK		0x00000001
 #define lpfc_mbx_set_feature_mds_WORD		word6
 #define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT	1
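word6 packs one-bit fields accessed through SHIFT/MASK/WORD macro triples. UER already occupies bit 0 and mds_deep_loopbk bit 1, so a mds_SHIFT of 0 made mds alias the UER bit; the fix moves it to bit 2. A stand-alone illustration of the accessor pattern (macro names simplified):

	#include <stdio.h>
	#include <stdint.h>

	#define UER_SHIFT		0
	#define MDS_DEEP_LOOPBK_SHIFT	1
	#define MDS_SHIFT		2	/* was 0: collided with UER */
	#define FIELD_MASK		0x00000001

	static uint32_t set_bit_field(uint32_t word, unsigned int shift, uint32_t val)
	{
		word &= ~(FIELD_MASK << shift);		/* clear the field */
		return word | ((val & FIELD_MASK) << shift);
	}

	int main(void)
	{
		uint32_t word6 = 0;

		word6 = set_bit_field(word6, UER_SHIFT, 1);
		word6 = set_bit_field(word6, MDS_SHIFT, 1);
		printf("word6 = 0x%08x\n", word6);	/* 0x00000005: bits 0 and 2 */
		return 0;
	}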

View File

@@ -6019,29 +6019,6 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
 	return;
 }

-/**
- * lpfc_cpumask_of_node_init - initalizes cpumask of phba's NUMA node
- * @phba: Pointer to HBA context object.
- *
- **/
-static void
-lpfc_cpumask_of_node_init(struct lpfc_hba *phba)
-{
-	unsigned int cpu, numa_node;
-	struct cpumask *numa_mask = &phba->sli4_hba.numa_mask;
-
-	cpumask_clear(numa_mask);
-
-	/* Check if we're a NUMA architecture */
-	numa_node = dev_to_node(&phba->pcidev->dev);
-	if (numa_node == NUMA_NO_NODE)
-		return;
-
-	for_each_possible_cpu(cpu)
-		if (cpu_to_node(cpu) == numa_node)
-			cpumask_set_cpu(cpu, numa_mask);
-}
-
 /**
  * lpfc_enable_pci_dev - Enable a generic PCI device.
  * @phba: pointer to lpfc hba data structure.
@@ -6480,7 +6457,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
 	phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
 	phba->sli4_hba.curr_disp_cpu = 0;
-	lpfc_cpumask_of_node_init(phba);

 	/* Get all the module params for configuring this host */
 	lpfc_get_cfgparam(phba);
@@ -6688,6 +6664,13 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 #endif
 			/* Not supported for NVMET */
 			phba->cfg_xri_rebalancing = 0;
+			if (phba->irq_chann_mode == NHT_MODE) {
+				phba->cfg_irq_chann =
+					phba->sli4_hba.num_present_cpu;
+				phba->cfg_hdw_queue =
+					phba->sli4_hba.num_present_cpu;
+				phba->irq_chann_mode = NORMAL_MODE;
+			}
 			break;
 		}
 	}
@@ -7029,7 +7012,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 	phba->sli4_hba.num_possible_cpu = 0;
 	phba->sli4_hba.num_present_cpu = 0;
 	phba->sli4_hba.curr_disp_cpu = 0;
-	cpumask_clear(&phba->sli4_hba.numa_mask);
+	cpumask_clear(&phba->sli4_hba.irq_aff_mask);

 	/* Free memory allocated for fast-path work queue handles */
 	kfree(phba->sli4_hba.hba_eq_hdl);
@@ -11284,11 +11267,12 @@ lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
  * @offline: true, cpu is going offline. false, cpu is coming online.
  *
  * If cpu is going offline, we'll try our best effort to find the next
- * online cpu on the phba's NUMA node and migrate all offlining IRQ affinities.
+ * online cpu on the phba's original_mask and migrate all offlining IRQ
+ * affinities.
  *
- * If cpu is coming online, reaffinitize the IRQ back to the onlineng cpu.
+ * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
  *
- * Note: Call only if cfg_irq_numa is enabled, otherwise rely on
+ * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
  * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
  *
  **/
@@ -11298,14 +11282,14 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
 	struct lpfc_vector_map_info *cpup;
 	struct cpumask *aff_mask;
 	unsigned int cpu_select, cpu_next, idx;
-	const struct cpumask *numa_mask;
+	const struct cpumask *orig_mask;

-	if (!phba->cfg_irq_numa)
+	if (phba->irq_chann_mode == NORMAL_MODE)
 		return;

-	numa_mask = &phba->sli4_hba.numa_mask;
+	orig_mask = &phba->sli4_hba.irq_aff_mask;

-	if (!cpumask_test_cpu(cpu, numa_mask))
+	if (!cpumask_test_cpu(cpu, orig_mask))
 		return;

 	cpup = &phba->sli4_hba.cpu_map[cpu];
@@ -11314,9 +11298,9 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
 		return;

 	if (offline) {
-		/* Find next online CPU on NUMA node */
-		cpu_next = cpumask_next_wrap(cpu, numa_mask, cpu, true);
-		cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu_next);
+		/* Find next online CPU on original mask */
+		cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
+		cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);

 		/* Found a valid CPU */
 		if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
@@ -11431,7 +11415,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 {
 	int vectors, rc, index;
 	char *name;
-	const struct cpumask *numa_mask = NULL;
+	const struct cpumask *aff_mask = NULL;
 	unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
 	struct lpfc_hba_eq_hdl *eqhdl;
 	const struct cpumask *maskp;
@@ -11441,16 +11425,18 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 	/* Set up MSI-X multi-message vectors */
 	vectors = phba->cfg_irq_chann;

-	if (phba->cfg_irq_numa) {
-		numa_mask = &phba->sli4_hba.numa_mask;
-		cpu_cnt = cpumask_weight(numa_mask);
+	if (phba->irq_chann_mode != NORMAL_MODE)
+		aff_mask = &phba->sli4_hba.irq_aff_mask;
+
+	if (aff_mask) {
+		cpu_cnt = cpumask_weight(aff_mask);
 		vectors = min(phba->cfg_irq_chann, cpu_cnt);

-		/* cpu: iterates over numa_mask including offline or online
-		 * cpu_select: iterates over online numa_mask to set affinity
+		/* cpu: iterates over aff_mask including offline or online
+		 * cpu_select: iterates over online aff_mask to set affinity
 		 */
-		cpu = cpumask_first(numa_mask);
-		cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+		cpu = cpumask_first(aff_mask);
+		cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
 	} else {
 		flags |= PCI_IRQ_AFFINITY;
 	}
@@ -11484,7 +11470,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 		eqhdl->irq = pci_irq_vector(phba->pcidev, index);

-		if (phba->cfg_irq_numa) {
+		if (aff_mask) {
 			/* If found a neighboring online cpu, set affinity */
 			if (cpu_select < nr_cpu_ids)
 				lpfc_irq_set_aff(eqhdl, cpu_select);
@@ -11494,11 +11480,11 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 						LPFC_CPU_FIRST_IRQ,
 						cpu);

-			/* Iterate to next offline or online cpu in numa_mask */
-			cpu = cpumask_next(cpu, numa_mask);
+			/* Iterate to next offline or online cpu in aff_mask */
+			cpu = cpumask_next(cpu, aff_mask);

-			/* Find next online cpu in numa_mask to set affinity */
-			cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+			/* Find next online cpu in aff_mask to set affinity */
+			cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
 		} else if (vectors == 1) {
 			cpu = cpumask_first(cpu_present_mask);
 			lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,

View File

@@ -1378,7 +1378,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	 */
 	if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
-		phba->host_gp = &phba->mbox->us.s2.host[0];
+		phba->host_gp = (struct lpfc_hgp __iomem *)
+				 &phba->mbox->us.s2.host[0];
 		phba->hbq_put = NULL;
 		offset = (uint8_t *)&phba->mbox->us.s2.host -
 			(uint8_t *)phba->slim2p.virt;
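The cast adds the __iomem address-space tag that sparse uses to keep pointers into device memory separate from ordinary memory; such pointers should only be dereferenced through the MMIO accessors. A sketch of the convention (names and offsets hypothetical):

	void __iomem *regs = ioremap(bar_phys, bar_len);
	u32 v;

	v = readl(regs + 0x10);		/* hypothetical status register */
	writel(v | 0x1, regs + 0x14);	/* hypothetical control register */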

View File

@@ -1654,11 +1654,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 	phba = vport->phba;

-	if (vport->load_flag & FC_UNLOADING) {
-		ret = -ENODEV;
-		goto out_fail;
-	}
-
 	if (unlikely(vport->load_flag & FC_UNLOADING)) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
 				 "6124 Fail IO, Driver unload\n");
@@ -2491,38 +2486,6 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	spin_unlock_irq(&vport->phba->hbalock);
 	rport = remote_port->private;
 	if (oldrport) {
-		/* New remoteport record does not guarantee valid
-		 * host private memory area.
-		 */
-		if (oldrport == remote_port->private) {
-			/* Same remoteport - ndlp should match.
-			 * Just reuse.
-			 */
-			lpfc_printf_vlog(ndlp->vport, KERN_INFO,
-					 LOG_NVME_DISC,
-					 "6014 Rebind lport to current "
-					 "remoteport x%px wwpn 0x%llx, "
-					 "Data: x%x x%x x%px x%px x%x "
-					 " x%06x\n",
-					 remote_port,
-					 remote_port->port_name,
-					 remote_port->port_id,
-					 remote_port->port_role,
-					 oldrport->ndlp,
-					 ndlp,
-					 ndlp->nlp_type,
-					 ndlp->nlp_DID);
-
-			/* It's a complete rebind only if the driver
-			 * is registering with the same ndlp. Otherwise
-			 * the driver likely executed a node swap
-			 * prior to this registration and the ndlp to
-			 * remoteport binding needs to be redone.
-			 */
-			if (prev_ndlp == ndlp)
-				return 0;
-
-		}

 		/* Sever the ndlp<->rport association
 		 * before dropping the ndlp ref from

View File

@@ -1030,11 +1030,6 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 		goto aerr;
 	}

-	if (phba->pport->load_flag & FC_UNLOADING) {
-		rc = -ENODEV;
-		goto aerr;
-	}
-
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (ctxp->ts_cmd_nvme) {
 		if (rsp->op == NVMET_FCOP_RSP)
@@ -1154,9 +1149,6 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
 	struct lpfc_queue *wq;
 	unsigned long flags;

-	if (phba->pport->load_flag & FC_UNLOADING)
-		return;
-
 	if (phba->pport->load_flag & FC_UNLOADING)
 		return;

View File

@@ -535,7 +535,7 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
 	if (count > eq->EQ_max_eqe)
 		eq->EQ_max_eqe = count;

-	eq->queue_claimed = 0;
+	xchg(&eq->queue_claimed, 0);

 rearm_and_exit:
 	/* Always clear the EQ. */
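Replacing the plain store with xchg() gives the release of queue_claimed full memory-barrier semantics, so all event-queue processing done while the queue was claimed is ordered before the flag becomes visible as clear to the next claimant. The claim/release pair, sketched (processing helper hypothetical):

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		return;				/* another CPU is servicing this EQ */

	process_eqes(eq);			/* hypothetical: drain entries */

	xchg(&eq->queue_claimed, 0);		/* release with implied full barrier */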
@@ -1245,8 +1245,8 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
  * @phba: Pointer to HBA context object.
  * @iocbq: Pointer to driver iocb object.
  *
- * This function is called with hbalock held to release driver
- * iocb object to the iocb pool. The iotag in the iocb object
+ * This function is called to release the driver iocb object
+ * to the iocb pool. The iotag in the iocb object
  * does not change for each use of the iocb object. This function
  * clears all other fields of the iocb object when it is freed.
  * The sqlq structure that holds the xritag and phys and virtual
@@ -1256,7 +1256,8 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
  * this IO was aborted then the sglq entry it put on the
  * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
  * IO has good status or fails for any other reason then the sglq
- * entry is added to the free list (lpfc_els_sgl_list).
+ * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
+ * asserted held in the code path calling this routine.
  **/
 static void
 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
@@ -1266,8 +1267,6 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 	unsigned long iflag = 0;
 	struct lpfc_sli_ring *pring;

-	lockdep_assert_held(&phba->hbalock);
-
 	if (iocbq->sli4_xritag == NO_XRI)
 		sglq = NULL;
 	else
@@ -1330,18 +1329,17 @@ out:
  * @phba: Pointer to HBA context object.
  * @iocbq: Pointer to driver iocb object.
  *
- * This function is called with hbalock held to release driver
- * iocb object to the iocb pool. The iotag in the iocb object
- * does not change for each use of the iocb object. This function
- * clears all other fields of the iocb object when it is freed.
+ * This function is called to release the driver iocb object to the
+ * iocb pool. The iotag in the iocb object does not change for each
+ * use of the iocb object. This function clears all other fields of
+ * the iocb object when it is freed. The hbalock is asserted held in
+ * the code path calling this routine.
  **/
 static void
 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 {
 	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

-	lockdep_assert_held(&phba->hbalock);
-
 	/*
 	 * Clean all volatile data fields, preserve iotag and node struct.
 	 */
@@ -1786,17 +1784,17 @@ lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
  * @nextiocb: Pointer to driver iocb object which need to be
  *            posted to firmware.
  *
- * This function is called with hbalock held to post a new iocb to
- * the firmware. This function copies the new iocb to ring iocb slot and
- * updates the ring pointers. It adds the new iocb to txcmplq if there is
+ * This function is called to post a new iocb to the firmware. This
+ * function copies the new iocb to ring iocb slot and updates the
+ * ring pointers. It adds the new iocb to txcmplq if there is
  * a completion call back for this iocb else the function will free the
- * iocb object.
+ * iocb object. The hbalock is asserted held in the code path calling
+ * this routine.
  **/
 static void
 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
 {
-	lockdep_assert_held(&phba->hbalock);
-
 	/*
 	 * Set up an iotag
 	 */
@@ -11284,6 +11282,7 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  * request, this function issues abort out unconditionally. This function is
  * called with hbalock held. The function returns 0 when it fails due to
  * memory allocation failure or when the command iocb is an abort request.
+ * The hbalock is asserted held in the code path calling this routine.
  **/
 static int
 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
@@ -11297,8 +11296,6 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	unsigned long iflags;
 	struct lpfc_nodelist *ndlp;

-	lockdep_assert_held(&phba->hbalock);
-
 	/*
 	 * There are certain command types we don't want to abort. And we
 	 * don't want to abort commands that are already in the process of
@@ -13808,7 +13805,7 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
 				"0369 No entry from completion queue "
 				"qid=%d\n", cq->queue_id);

-	cq->queue_claimed = 0;
+	xchg(&cq->queue_claimed, 0);

 rearm_and_exit:
 	phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
@@ -14389,7 +14386,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
 	int ecount = 0;
 	int hba_eqidx;
 	struct lpfc_eq_intr_info *eqi;
-	uint32_t icnt;

 	/* Get the driver's phba structure from the dev_id */
 	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
@@ -14417,11 +14413,12 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
 		return IRQ_NONE;
 	}

-	eqi = phba->sli4_hba.eq_info;
-	icnt = this_cpu_inc_return(eqi->icnt);
+	eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
+	eqi->icnt++;
+
 	fpeq->last_cpu = raw_smp_processor_id();

-	if (icnt > LPFC_EQD_ISR_TRIGGER &&
+	if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
 	    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
 	    phba->cfg_auto_imax &&
 	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
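Since eq_info is a __percpu allocation, the interrupt handler can resolve its own CPU's instance once with this_cpu_ptr() and then update fields directly; running in hardirq context guarantees it cannot migrate mid-update. A minimal sketch of the pattern (names hypothetical):

	struct eq_intr_info {
		uint32_t icnt;
	};

	static DEFINE_PER_CPU(struct eq_intr_info, eq_stats);

	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		struct eq_intr_info *eqi = this_cpu_ptr(&eq_stats);

		eqi->icnt++;	/* plain increment: no migration in hardirq */
		return IRQ_HANDLED;
	}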

View File

@@ -920,7 +920,7 @@ struct lpfc_sli4_hba {
 	struct lpfc_vector_map_info *cpu_map;
 	uint16_t num_possible_cpu;
 	uint16_t num_present_cpu;
-	struct cpumask numa_mask;
+	struct cpumask irq_aff_mask;
 	uint16_t curr_disp_cpu;
 	struct lpfc_eq_intr_info __percpu *eq_info;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS

View File

@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/

-#define LPFC_DRIVER_VERSION "12.8.0.0"
+#define LPFC_DRIVER_VERSION "12.8.0.1"
 #define LPFC_DRIVER_NAME		"lpfc"

 /* Used for SLI 2/3 */

View File

@@ -302,8 +302,8 @@ static struct pci_driver megaraid_pci_driver = {
 // definitions for the device attributes for exporting logical drive number
 // for a scsi address (Host, Channel, Id, Lun)

-DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
-	    NULL);
+static DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
+		   NULL);

 // Host template initializer for megaraid mbox sysfs device attributes
 static struct device_attribute *megaraid_shost_attrs[] = {
@@ -312,7 +312,7 @@ static struct device_attribute *megaraid_shost_attrs[] = {
 };

-DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);
+static DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);

 // Host template initializer for megaraid mbox sysfs device attributes
 static struct device_attribute *megaraid_sdev_attrs[] = {

View File

@@ -21,8 +21,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"07.713.01.00-rc1"
-#define MEGASAS_RELDATE				"Dec 27, 2019"
+#define MEGASAS_VERSION				"07.714.04.00-rc1"
+#define MEGASAS_RELDATE				"Apr 14, 2020"

 #define MEGASAS_MSIX_NAME_LEN			32
@@ -511,7 +511,7 @@ union MR_PROGRESS {
  */
 struct MR_PD_PROGRESS {
 	struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
 		u32     rbld:1;
 		u32     patrol:1;
 		u32     clear:1;
@@ -537,7 +537,7 @@ struct MR_PD_PROGRESS {
 	};

 	struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
 		u32     rbld:1;
 		u32     patrol:1;
 		u32     clear:1;
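Bitfield order within a storage unit follows the target ABI, so structures shared with firmware need one layout per bit order. The kernel's standard switch is __BIG_ENDIAN_BITFIELD/__LITTLE_ENDIAN_BITFIELD from <asm/byteorder.h>; keying on a driver-private MFI_BIG_ENDIAN macro risked big-endian builds silently picking the little-endian layout. A userspace illustration of the idea:

	#include <stdio.h>

	struct pd_progress {
	#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
		unsigned int reserved:29, clear:1, patrol:1, rbld:1;
	#else
		unsigned int rbld:1, patrol:1, clear:1, reserved:29;
	#endif
	};

	int main(void)
	{
		union { struct pd_progress p; unsigned int v; } u = { .p = { .rbld = 1 } };

		/* either layout puts rbld in bit 0 of the host-order 32-bit value */
		printf("value = 0x%08x\n", u.v);	/* -> 0x00000001 */
		return 0;
	}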

View File

@@ -81,7 +81,7 @@ int smp_affinity_enable = 1;
 module_param(smp_affinity_enable, int, 0444);
 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

-int rdpq_enable = 1;
+static int rdpq_enable = 1;
 module_param(rdpq_enable, int, 0444);
 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
@@ -89,7 +89,7 @@ unsigned int dual_qdepth_disable;
 module_param(dual_qdepth_disable, int, 0444);
 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");

-unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
+static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
 module_param(scmd_timeout, int, 0444);
 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
@@ -1982,9 +1982,9 @@ static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
 	if (is_target_prop) {
 		tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
-		if (tgt_device_qd &&
-		    (tgt_device_qd <= instance->host->can_queue))
-			device_qd = tgt_device_qd;
+		if (tgt_device_qd)
+			device_qd = min(instance->host->can_queue,
+					(int)tgt_device_qd);
 	}

 	if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
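The kernel's min() macro type-checks its arguments, so comparing the int can_queue against the u32 firmware value needs the explicit (int) cast. The rewrite also clamps an oversized firmware depth to can_queue, where the old code simply ignored it. Plain-C sketch of the clamp:

	#include <stdio.h>

	static int clamp_queue_depth(int can_queue, unsigned int tgt_qd)
	{
		int device_qd = can_queue;

		if (tgt_qd)	/* 0: firmware reported no per-device limit */
			device_qd = can_queue < (int)tgt_qd ? can_queue : (int)tgt_qd;

		return device_qd;
	}

	int main(void)
	{
		printf("%d\n", clamp_queue_depth(1024, 32));	/* -> 32 */
		printf("%d\n", clamp_queue_depth(64, 256));	/* -> 64 (clamped) */
		return 0;
	}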

View File

@@ -85,7 +85,7 @@ u32 mega_mod64(u64 dividend, u32 divisor)
  *
  * @return quotient
  **/
-u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
+static u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
 {
 	u32 remainder;
 	u64 d;
@@ -367,7 +367,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
 	return 1;
 }

-u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
+static u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
 		    struct MR_DRV_RAID_MAP_ALL *map)
 {
 	struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
@@ -417,7 +417,7 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
  *    div_error	- Devide error code.
  */

-u32 mr_spanset_get_span_block(struct megasas_instance *instance,
+static u32 mr_spanset_get_span_block(struct megasas_instance *instance,
 		u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
 {
 	struct fusion_context *fusion = instance->ctrl_context;
@@ -642,7 +642,7 @@ static u32 get_arm_from_strip(struct megasas_instance *instance,
 }

 /* This Function will return Phys arm */
-u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
+static u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
 	   struct MR_DRV_RAID_MAP_ALL *map)
 {
 	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
@@ -785,7 +785,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
  *    span          - Span number
  *    block         - Absolute Block number in the physical disk
  */
-u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
+static u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
 		u16 stripRef, struct IO_REQUEST_INFO *io_info,
 		struct RAID_CONTEXT *pRAID_Context,
 		struct MR_DRV_RAID_MAP_ALL *map)
@@ -1342,7 +1342,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
 	}
 }

-u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
+static u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
 			   struct LD_LOAD_BALANCE_INFO *lbInfo,
 			   struct IO_REQUEST_INFO *io_info,
 			   struct MR_DRV_RAID_MAP_ALL *drv_map)

View File

@@ -176,7 +176,7 @@ static inline bool megasas_check_same_4gb_region
* megasas_enable_intr_fusion - Enables interrupts * megasas_enable_intr_fusion - Enables interrupts
* @regs: MFI register set * @regs: MFI register set
*/ */
void static void
megasas_enable_intr_fusion(struct megasas_instance *instance) megasas_enable_intr_fusion(struct megasas_instance *instance)
{ {
struct megasas_register_set __iomem *regs; struct megasas_register_set __iomem *regs;
@@ -198,7 +198,7 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
* megasas_disable_intr_fusion - Disables interrupt * megasas_disable_intr_fusion - Disables interrupt
* @regs: MFI register set * @regs: MFI register set
*/ */
void static void
megasas_disable_intr_fusion(struct megasas_instance *instance) megasas_disable_intr_fusion(struct megasas_instance *instance)
{ {
u32 mask = 0xFFFFFFFF; u32 mask = 0xFFFFFFFF;
@@ -2070,7 +2070,6 @@ static bool
megasas_is_prp_possible(struct megasas_instance *instance, megasas_is_prp_possible(struct megasas_instance *instance,
struct scsi_cmnd *scmd, int sge_count) struct scsi_cmnd *scmd, int sge_count)
{ {
int i;
u32 data_length = 0; u32 data_length = 0;
struct scatterlist *sg_scmd; struct scatterlist *sg_scmd;
bool build_prp = false; bool build_prp = false;
@@ -2099,63 +2098,6 @@ megasas_is_prp_possible(struct megasas_instance *instance,
build_prp = true; build_prp = true;
} }
/*
* Below code detects gaps/holes in IO data buffers.
* What does holes/gaps mean?
* Any SGE except first one in a SGL starts at non NVME page size
* aligned address OR Any SGE except last one in a SGL ends at
* non NVME page size boundary.
*
* Driver has already informed block layer by setting boundary rules for
* bio merging done at NVME page size boundary calling kernel API
* blk_queue_virt_boundary inside slave_config.
* Still there is possibility of IO coming with holes to driver because of
* IO merging done by IO scheduler.
*
* With SCSI BLK MQ enabled, there will be no IO with holes as there is no
* IO scheduling so no IO merging.
*
* With SCSI BLK MQ disabled, IO scheduler may attempt to merge IOs and
* then sending IOs with holes.
*
* Though driver can request block layer to disable IO merging by calling-
* blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue) but
* user may tune sysfs parameter- nomerges again to 0 or 1.
*
* If in future IO scheduling is enabled with SCSI BLK MQ,
* this algorithm to detect holes will be required in driver
* for SCSI BLK MQ enabled case as well.
*
*
*/
scsi_for_each_sg(scmd, sg_scmd, sge_count, i) {
if ((i != 0) && (i != (sge_count - 1))) {
if (mega_mod64(sg_dma_len(sg_scmd), mr_nvme_pg_size) ||
mega_mod64(sg_dma_address(sg_scmd),
mr_nvme_pg_size)) {
build_prp = false;
break;
}
}
if ((sge_count > 1) && (i == 0)) {
if ((mega_mod64((sg_dma_address(sg_scmd) +
sg_dma_len(sg_scmd)),
mr_nvme_pg_size))) {
build_prp = false;
break;
}
}
if ((sge_count > 1) && (i == (sge_count - 1))) {
if (mega_mod64(sg_dma_address(sg_scmd),
mr_nvme_pg_size)) {
build_prp = false;
break;
}
}
}
return build_prp; return build_prp;
} }
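The deleted loop above amounted to three alignment tests on the scatter list: interior entries must start and end on an NVMe page boundary, the first must end on one, and the last must start on one. A condensed standalone sketch of that logic (hypothetical helper name; assuming the page size is a power of two so the mega_mod64() calls reduce to masks):

/* Sketch of the removed hole-detection: an SG list is PRP-friendly only
 * if no element introduces a gap at NVMe page granularity. pg_size is
 * assumed to be a power of two (e.g. 4096).
 */
static bool sgl_has_no_holes(struct scsi_cmnd *scmd, int sge_count,
			     u64 pg_size)
{
	struct scatterlist *sg;
	int i;

	scsi_for_each_sg(scmd, sg, sge_count, i) {
		u64 addr = sg_dma_address(sg);
		u64 end  = addr + sg_dma_len(sg);

		if (i != 0 && (addr & (pg_size - 1)))
			return false;		/* unaligned start */
		if (i != sge_count - 1 && (end & (pg_size - 1)))
			return false;		/* unaligned end */
	}
	return true;
}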
@@ -4230,7 +4172,7 @@ void megasas_reset_reply_desc(struct megasas_instance *instance)
* megasas_refire_mgmt_cmd : Re-fire management commands * megasas_refire_mgmt_cmd : Re-fire management commands
* @instance: Controller's soft instance * @instance: Controller's soft instance
*/ */
void megasas_refire_mgmt_cmd(struct megasas_instance *instance, static void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
bool return_ioctl) bool return_ioctl)
{ {
int j; int j;
@@ -4238,8 +4180,9 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
struct fusion_context *fusion; struct fusion_context *fusion;
struct megasas_cmd *cmd_mfi; struct megasas_cmd *cmd_mfi;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
u16 smid; u16 smid;
bool refire_cmd = 0; bool refire_cmd = false;
u8 result; u8 result;
u32 opcode = 0; u32 opcode = 0;
@@ -4305,6 +4248,11 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
result = COMPLETE_CMD; result = COMPLETE_CMD;
} }
scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
cmd_fusion->io_request;
if (scsi_io_req->Function == MPI2_FUNCTION_SCSI_TASK_MGMT)
result = RETURN_CMD;
switch (result) { switch (result) {
case REFIRE_CMD: case REFIRE_CMD:
megasas_fire_cmd_fusion(instance, req_desc); megasas_fire_cmd_fusion(instance, req_desc);
@@ -4533,7 +4481,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
if (!timeleft) { if (!timeleft) {
dev_err(&instance->pdev->dev, dev_err(&instance->pdev->dev,
"task mgmt type 0x%x timed out\n", type); "task mgmt type 0x%x timed out\n", type);
cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
mutex_unlock(&instance->reset_mutex); mutex_unlock(&instance->reset_mutex);
rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
mutex_lock(&instance->reset_mutex); mutex_lock(&instance->reset_mutex);
@@ -4713,12 +4660,12 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
"attempting task abort! scmd(0x%p) tm_dev_handle 0x%x\n", "attempting task abort! scmd(0x%p) tm_dev_handle 0x%x\n",
scmd, devhandle); scmd, devhandle);
mr_device_priv_data->tm_busy = 1; mr_device_priv_data->tm_busy = true;
ret = megasas_issue_tm(instance, devhandle, ret = megasas_issue_tm(instance, devhandle,
scmd->device->channel, scmd->device->id, smid, scmd->device->channel, scmd->device->id, smid,
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
mr_device_priv_data); mr_device_priv_data);
mr_device_priv_data->tm_busy = 0; mr_device_priv_data->tm_busy = false;
mutex_unlock(&instance->reset_mutex); mutex_unlock(&instance->reset_mutex);
scmd_printk(KERN_INFO, scmd, "task abort %s!! scmd(0x%p)\n", scmd_printk(KERN_INFO, scmd, "task abort %s!! scmd(0x%p)\n",
@@ -4783,12 +4730,12 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
sdev_printk(KERN_INFO, scmd->device, sdev_printk(KERN_INFO, scmd->device,
"attempting target reset! scmd(0x%p) tm_dev_handle: 0x%x\n", "attempting target reset! scmd(0x%p) tm_dev_handle: 0x%x\n",
scmd, devhandle); scmd, devhandle);
mr_device_priv_data->tm_busy = 1; mr_device_priv_data->tm_busy = true;
ret = megasas_issue_tm(instance, devhandle, ret = megasas_issue_tm(instance, devhandle,
scmd->device->channel, scmd->device->id, 0, scmd->device->channel, scmd->device->id, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
mr_device_priv_data); mr_device_priv_data);
mr_device_priv_data->tm_busy = 0; mr_device_priv_data->tm_busy = false;
mutex_unlock(&instance->reset_mutex); mutex_unlock(&instance->reset_mutex);
scmd_printk(KERN_NOTICE, scmd, "target reset %s!!\n", scmd_printk(KERN_NOTICE, scmd, "target reset %s!!\n",
(ret == SUCCESS) ? "SUCCESS" : "FAILED"); (ret == SUCCESS) ? "SUCCESS" : "FAILED");

View File

@@ -774,7 +774,7 @@ struct MR_SPAN_BLOCK_INFO {
struct MR_CPU_AFFINITY_MASK { struct MR_CPU_AFFINITY_MASK {
union { union {
struct { struct {
#ifndef MFI_BIG_ENDIAN #ifndef __BIG_ENDIAN_BITFIELD
u8 hw_path:1; u8 hw_path:1;
u8 cpu0:1; u8 cpu0:1;
u8 cpu1:1; u8 cpu1:1;
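Replacing the driver-private MFI_BIG_ENDIAN macro with the kernel's __BIG_ENDIAN_BITFIELD marker ties these firmware-shared layouts to the compiler's actual bitfield order. A minimal sketch of the idiom (hypothetical struct, not taken from the driver):

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_mask {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 hw_path:1;		/* bit 0 on little-endian */
	u8 cpu0:1;
	u8 reserved:6;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 reserved:6;
	u8 cpu0:1;
	u8 hw_path:1;		/* bit 0 on big-endian */
#endif
};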
@@ -866,7 +866,7 @@ struct MR_LD_RAID {
__le16 seqNum; __le16 seqNum;
struct { struct {
#ifndef MFI_BIG_ENDIAN #ifndef __BIG_ENDIAN_BITFIELD
u32 ldSyncRequired:1; u32 ldSyncRequired:1;
u32 regTypeReqOnReadIsValid:1; u32 regTypeReqOnReadIsValid:1;
u32 isEPD:1; u32 isEPD:1;
@@ -889,7 +889,7 @@ struct {
/* 0x30 - 0x33, Logical block size for the LD */ /* 0x30 - 0x33, Logical block size for the LD */
u32 logical_block_length; u32 logical_block_length;
struct { struct {
#ifndef MFI_BIG_ENDIAN #ifndef __BIG_ENDIAN_BITFIELD
/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */ /* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
u32 ld_pi_exp:4; u32 ld_pi_exp:4;
/* 0x34, LOGICAL BLOCKS PER PHYSICAL /* 0x34, LOGICAL BLOCKS PER PHYSICAL

View File

@@ -7,4 +7,5 @@ mpt3sas-y += mpt3sas_base.o \
mpt3sas_transport.o \ mpt3sas_transport.o \
mpt3sas_ctl.o \ mpt3sas_ctl.o \
mpt3sas_trigger_diag.o \ mpt3sas_trigger_diag.o \
mpt3sas_warpdrive.o mpt3sas_warpdrive.o \
mpt3sas_debugfs.o \

View File

@@ -413,7 +413,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
{ {
Mpi2SGESimple32_t *sgel, *sgel_next; Mpi2SGESimple32_t *sgel, *sgel_next;
u32 sgl_flags, sge_chain_count = 0; u32 sgl_flags, sge_chain_count = 0;
bool is_write = 0; bool is_write = false;
u16 i = 0; u16 i = 0;
void __iomem *buffer_iomem; void __iomem *buffer_iomem;
phys_addr_t buffer_iomem_phys; phys_addr_t buffer_iomem_phys;
@@ -482,7 +482,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
if (le32_to_cpu(sgel->FlagsLength) & if (le32_to_cpu(sgel->FlagsLength) &
(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT)) (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
is_write = 1; is_write = true;
for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) { for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
@@ -2806,58 +2806,38 @@ _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
static int static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{ {
u64 required_mask, coherent_mask;
struct sysinfo s; struct sysinfo s;
int dma_mask;
if (ioc->is_mcpu_endpoint ||
sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
dma_get_required_mask(&pdev->dev) <= 32)
dma_mask = 32;
/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64; else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
dma_mask = 63;
if (ioc->is_mcpu_endpoint)
goto try_32bit;
required_mask = dma_get_required_mask(&pdev->dev);
if (sizeof(dma_addr_t) == 4 || required_mask == 32)
goto try_32bit;
if (ioc->dma_mask)
coherent_mask = DMA_BIT_MASK(dma_mask);
else else
coherent_mask = DMA_BIT_MASK(32); dma_mask = 64;
if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) || if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
dma_set_coherent_mask(&pdev->dev, coherent_mask)) dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)))
goto try_32bit;
ioc->base_add_sg_single = &_base_add_sg_single_64;
ioc->sge_size = sizeof(Mpi2SGESimple64_t);
ioc->dma_mask = dma_mask;
goto out;
try_32bit:
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
return -ENODEV; return -ENODEV;
ioc->base_add_sg_single = &_base_add_sg_single_32; if (dma_mask > 32) {
ioc->sge_size = sizeof(Mpi2SGESimple32_t); ioc->base_add_sg_single = &_base_add_sg_single_64;
ioc->dma_mask = 32; ioc->sge_size = sizeof(Mpi2SGESimple64_t);
out: } else {
ioc->base_add_sg_single = &_base_add_sg_single_32;
ioc->sge_size = sizeof(Mpi2SGESimple32_t);
}
si_meminfo(&s); si_meminfo(&s);
ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
ioc->dma_mask, convert_to_kb(s.totalram)); dma_mask, convert_to_kb(s.totalram));
return 0; return 0;
} }
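The rewritten function reduces to picking one mask width and applying it to both the streaming and coherent masks. A condensed sketch of the same decision, using the combined dma_set_mask_and_coherent() helper instead of the two separate calls above:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Sketch: 32-bit for mCPU endpoints, 32-bit builds or forced 32-bit DMA;
 * 63-bit for SAS3 and newer parts; 64-bit otherwise.
 */
static int example_config_dma(struct pci_dev *pdev, bool force_32,
			      bool newer_than_mpi2)
{
	int bits = force_32 ? 32 : (newer_than_mpi2 ? 63 : 64);

	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bits));
}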
static int
_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
struct pci_dev *pdev)
{
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
return -ENODEV;
}
return 0;
}
/** /**
 * _base_check_enable_msix - checks MSIX capable. * _base_check_enable_msix - checks MSIX capable.
* @ioc: per adapter object * @ioc: per adapter object
@@ -4827,8 +4807,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{ {
int i = 0; int i = 0;
int j = 0; int j = 0;
int dma_alloc_count = 0;
struct chain_tracker *ct; struct chain_tracker *ct;
struct reply_post_struct *rps; int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -4870,29 +4851,34 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
} }
if (ioc->reply_post) { if (ioc->reply_post) {
do { dma_alloc_count = DIV_ROUND_UP(count,
rps = &ioc->reply_post[i]; RDPQ_MAX_INDEX_IN_ONE_CHUNK);
if (rps->reply_post_free) { for (i = 0; i < count; i++) {
dma_pool_free( if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
ioc->reply_post_free_dma_pool, && dma_alloc_count) {
rps->reply_post_free, if (ioc->reply_post[i].reply_post_free) {
rps->reply_post_free_dma); dma_pool_free(
dexitprintk(ioc, ioc->reply_post_free_dma_pool,
ioc_info(ioc, "reply_post_free_pool(0x%p): free\n", ioc->reply_post[i].reply_post_free,
rps->reply_post_free)); ioc->reply_post[i].reply_post_free_dma);
rps->reply_post_free = NULL; dexitprintk(ioc, ioc_info(ioc,
"reply_post_free_pool(0x%p): free\n",
ioc->reply_post[i].reply_post_free));
ioc->reply_post[i].reply_post_free =
NULL;
}
--dma_alloc_count;
} }
} while (ioc->rdpq_array_enable && }
(++i < ioc->reply_queue_count)); dma_pool_destroy(ioc->reply_post_free_dma_pool);
if (ioc->reply_post_free_array && if (ioc->reply_post_free_array &&
ioc->rdpq_array_enable) { ioc->rdpq_array_enable) {
dma_pool_free(ioc->reply_post_free_array_dma_pool, dma_pool_free(ioc->reply_post_free_array_dma_pool,
ioc->reply_post_free_array, ioc->reply_post_free_array,
ioc->reply_post_free_array_dma); ioc->reply_post_free_array_dma);
ioc->reply_post_free_array = NULL; ioc->reply_post_free_array = NULL;
} }
dma_pool_destroy(ioc->reply_post_free_array_dma_pool); dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
dma_pool_destroy(ioc->reply_post_free_dma_pool);
kfree(ioc->reply_post); kfree(ioc->reply_post);
} }
@@ -4902,8 +4888,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->pcie_sg_lookup[i].pcie_sgl, ioc->pcie_sg_lookup[i].pcie_sgl,
ioc->pcie_sg_lookup[i].pcie_sgl_dma); ioc->pcie_sg_lookup[i].pcie_sgl_dma);
} }
if (ioc->pcie_sgl_dma_pool) dma_pool_destroy(ioc->pcie_sgl_dma_pool);
dma_pool_destroy(ioc->pcie_sgl_dma_pool);
} }
if (ioc->config_page) { if (ioc->config_page) {
@@ -4915,7 +4900,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
} }
kfree(ioc->hpr_lookup); kfree(ioc->hpr_lookup);
ioc->hpr_lookup = NULL;
kfree(ioc->internal_lookup); kfree(ioc->internal_lookup);
ioc->internal_lookup = NULL;
if (ioc->chain_lookup) { if (ioc->chain_lookup) {
for (i = 0; i < ioc->scsiio_depth; i++) { for (i = 0; i < ioc->scsiio_depth; i++) {
for (j = ioc->chains_per_prp_buffer; for (j = ioc->chains_per_prp_buffer;
@@ -4935,7 +4922,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
} }
/** /**
* is_MSB_are_same - checks whether all reply queues in a set are * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are
* having same upper 32bits in their base memory address. * having same upper 32bits in their base memory address.
* @reply_pool_start_address: Base address of a reply queue set * @reply_pool_start_address: Base address of a reply queue set
* @pool_sz: Size of single Reply Descriptor Post Queues pool size * @pool_sz: Size of single Reply Descriptor Post Queues pool size
@@ -4945,7 +4932,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
*/ */
static int static int
is_MSB_are_same(long reply_pool_start_address, u32 pool_sz) mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
{ {
long reply_pool_end_address; long reply_pool_end_address;
@@ -4958,6 +4945,88 @@ is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
return 0; return 0;
} }
/**
* base_alloc_rdpq_dma_pool - Allocating DMA'able memory
* for reply queues.
* @ioc: per adapter object
* @sz: DMA Pool size
* Return: 0 for success, non-zero for failure.
*/
static int
base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
{
int i = 0;
u32 dma_alloc_count = 0;
int reply_post_free_sz = ioc->reply_post_queue_depth *
sizeof(Mpi2DefaultReplyDescriptor_t);
int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
GFP_KERNEL);
if (!ioc->reply_post)
return -ENOMEM;
/*
* For INVADER_SERIES each set of 8 reply queues(0-7, 8-15, ..) and
* VENTURA_SERIES each set of 16 reply queues(0-15, 16-31, ..) should
	 * be within a 4GB boundary, i.e. reply queues in a set must have the
	 * same upper 32 bits in their memory address, so the driver allocates
	 * the DMA'able memory for the reply queues accordingly.
* Driver uses limitation of
* VENTURA_SERIES to manage INVADER_SERIES as well.
*/
dma_alloc_count = DIV_ROUND_UP(count,
RDPQ_MAX_INDEX_IN_ONE_CHUNK);
ioc->reply_post_free_dma_pool =
dma_pool_create("reply_post_free pool",
&ioc->pdev->dev, sz, 16, 0);
if (!ioc->reply_post_free_dma_pool)
return -ENOMEM;
for (i = 0; i < count; i++) {
if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
ioc->reply_post[i].reply_post_free =
dma_pool_alloc(ioc->reply_post_free_dma_pool,
GFP_KERNEL,
&ioc->reply_post[i].reply_post_free_dma);
if (!ioc->reply_post[i].reply_post_free)
return -ENOMEM;
/*
* Each set of RDPQ pool must satisfy 4gb boundary
* restriction.
* 1) Check if allocated resources for RDPQ pool are in
* the same 4GB range.
* 2) If #1 is true, continue with 64 bit DMA.
* 3) If #1 is false, return 1. which means free all the
* resources and set DMA mask to 32 and allocate.
*/
if (!mpt3sas_check_same_4gb_region(
(long)ioc->reply_post[i].reply_post_free, sz)) {
dinitprintk(ioc,
				ioc_err(ioc, "bad Replypost free pool(0x%p) "
"reply_post_free_dma = (0x%llx)\n",
ioc->reply_post[i].reply_post_free,
(unsigned long long)
ioc->reply_post[i].reply_post_free_dma));
return -EAGAIN;
}
memset(ioc->reply_post[i].reply_post_free, 0,
RDPQ_MAX_INDEX_IN_ONE_CHUNK *
reply_post_free_sz);
dma_alloc_count--;
} else {
ioc->reply_post[i].reply_post_free =
(Mpi2ReplyDescriptorsUnion_t *)
((long)ioc->reply_post[i-1].reply_post_free
+ reply_post_free_sz);
ioc->reply_post[i].reply_post_free_dma =
(dma_addr_t)
(ioc->reply_post[i-1].reply_post_free_dma +
reply_post_free_sz);
}
}
return 0;
}
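Note that the allocation scheme above only calls dma_pool_alloc() on the first index of every 16-queue chunk; the remaining queues in the chunk are fixed offsets from that anchor, which keeps a whole chunk inside one 4GB region by construction. A sketch of the derivation for a non-anchor index (field names taken from the hunk; offsetting from the chunk anchor is equivalent to the i-1 stepping used above):

static void rdpq_derive_from_anchor(struct reply_post_struct *rp, int i,
				    u32 reply_post_free_sz)
{
	int r = i % RDPQ_MAX_INDEX_IN_ONE_CHUNK;	/* 0 => anchor */

	if (r) {
		rp[i].reply_post_free = (void *)rp[i - r].reply_post_free +
					r * reply_post_free_sz;
		rp[i].reply_post_free_dma = rp[i - r].reply_post_free_dma +
					    r * reply_post_free_sz;
	}
}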
/** /**
* _base_allocate_memory_pools - allocate start of day memory pools * _base_allocate_memory_pools - allocate start of day memory pools
* @ioc: per adapter object * @ioc: per adapter object
@@ -4972,10 +5041,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
u16 chains_needed_per_io; u16 chains_needed_per_io;
u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz; u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
u32 retry_sz; u32 retry_sz;
u32 rdpq_sz = 0;
u16 max_request_credit, nvme_blocks_needed; u16 max_request_credit, nvme_blocks_needed;
unsigned short sg_tablesize; unsigned short sg_tablesize;
u16 sge_size; u16 sge_size;
int i, j; int i, j;
int ret = 0;
struct chain_tracker *ct; struct chain_tracker *ct;
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -5129,54 +5200,28 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
/* reply post queue, 16 byte align */ /* reply post queue, 16 byte align */
reply_post_free_sz = ioc->reply_post_queue_depth * reply_post_free_sz = ioc->reply_post_queue_depth *
sizeof(Mpi2DefaultReplyDescriptor_t); sizeof(Mpi2DefaultReplyDescriptor_t);
rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
sz = reply_post_free_sz;
if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable) if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
sz *= ioc->reply_queue_count; rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ? if (ret == -EAGAIN) {
(ioc->reply_queue_count):1, /*
sizeof(struct reply_post_struct), GFP_KERNEL); * Free allocated bad RDPQ memory pools.
* Change dma coherent mask to 32 bit and reallocate RDPQ
if (!ioc->reply_post) { */
ioc_err(ioc, "reply_post_free pool: kcalloc failed\n"); _base_release_memory_pools(ioc);
goto out; ioc->use_32bit_dma = true;
} if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool", ioc_err(ioc,
&ioc->pdev->dev, sz, 16, 0); "32 DMA mask failed %s\n", pci_name(ioc->pdev));
if (!ioc->reply_post_free_dma_pool) { return -ENODEV;
ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
goto out;
}
i = 0;
do {
ioc->reply_post[i].reply_post_free =
dma_pool_zalloc(ioc->reply_post_free_dma_pool,
GFP_KERNEL,
&ioc->reply_post[i].reply_post_free_dma);
if (!ioc->reply_post[i].reply_post_free) {
ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
goto out;
} }
dinitprintk(ioc, if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n", return -ENOMEM;
ioc->reply_post[i].reply_post_free, } else if (ret == -ENOMEM)
ioc->reply_post_queue_depth, return -ENOMEM;
8, sz / 1024)); total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
dinitprintk(ioc, DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
(u64)ioc->reply_post[i].reply_post_free_dma));
total_sz += sz;
} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
if (ioc->dma_mask > 32) {
if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
pci_name(ioc->pdev));
goto out;
}
}
ioc->scsiio_depth = ioc->hba_queue_depth - ioc->scsiio_depth = ioc->hba_queue_depth -
ioc->hi_priority_depth - ioc->internal_depth; ioc->hi_priority_depth - ioc->internal_depth;
@@ -5188,7 +5233,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "scsi host: can_queue depth (%d)\n", ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
ioc->shost->can_queue)); ioc->shost->can_queue));
/* contiguous pool for request and chains, 16 byte align, one extra " /* contiguous pool for request and chains, 16 byte align, one extra "
* "frame for smid=0 * "frame for smid=0
*/ */
@@ -5405,7 +5449,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
* Actual requirement is not alignment, but we need start and end of * Actual requirement is not alignment, but we need start and end of
* DMA address must have same upper 32 bit address. * DMA address must have same upper 32 bit address.
*/ */
if (!is_MSB_are_same((long)ioc->sense, sz)) { if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
//Release Sense pool & Reallocate //Release Sense pool & Reallocate
dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma); dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
dma_pool_destroy(ioc->sense_dma_pool); dma_pool_destroy(ioc->sense_dma_pool);
@@ -7158,7 +7202,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->smp_affinity_enable = smp_affinity_enable; ioc->smp_affinity_enable = smp_affinity_enable;
ioc->rdpq_array_enable_assigned = 0; ioc->rdpq_array_enable_assigned = 0;
ioc->dma_mask = 0; ioc->use_32bit_dma = false;
if (ioc->is_aero_ioc) if (ioc->is_aero_ioc)
ioc->base_readl = &_base_readl_aero; ioc->base_readl = &_base_readl_aero;
else else

View File

@@ -76,8 +76,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas" #define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
#define MPT3SAS_DRIVER_VERSION "33.100.00.00" #define MPT3SAS_DRIVER_VERSION "34.100.00.00"
#define MPT3SAS_MAJOR_VERSION 33 #define MPT3SAS_MAJOR_VERSION 34
#define MPT3SAS_MINOR_VERSION 100 #define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0 #define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00 #define MPT3SAS_RELEASE_VERSION 00
@@ -367,6 +367,7 @@ struct mpt3sas_nvme_cmd {
#define MPT3SAS_HIGH_IOPS_REPLY_QUEUES 8 #define MPT3SAS_HIGH_IOPS_REPLY_QUEUES 8
#define MPT3SAS_HIGH_IOPS_BATCH_COUNT 16 #define MPT3SAS_HIGH_IOPS_BATCH_COUNT 16
#define MPT3SAS_GEN35_MAX_MSIX_QUEUES 128 #define MPT3SAS_GEN35_MAX_MSIX_QUEUES 128
#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16
/* OEM Specific Flags will come from OEM specific header files */ /* OEM Specific Flags will come from OEM specific header files */
struct Mpi2ManufacturingPage10_t { struct Mpi2ManufacturingPage10_t {
@@ -1026,7 +1027,6 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @ir_firmware: IR firmware present * @ir_firmware: IR firmware present
* @bars: bitmask of BAR's that must be configured * @bars: bitmask of BAR's that must be configured
* @mask_interrupts: ignore interrupt * @mask_interrupts: ignore interrupt
* @dma_mask: used to set the consistent dma mask
* @pci_access_mutex: Mutex to synchronize ioctl, sysfs show path and * @pci_access_mutex: Mutex to synchronize ioctl, sysfs show path and
* pci resource handling * pci resource handling
* @fault_reset_work_q_name: fw fault work queue * @fault_reset_work_q_name: fw fault work queue
@@ -1064,6 +1064,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @thresh_hold: Max number of reply descriptors processed * @thresh_hold: Max number of reply descriptors processed
* before updating Host Index * before updating Host Index
* @drv_support_bitmap: driver's supported feature bit map * @drv_support_bitmap: driver's supported feature bit map
* @use_32bit_dma: Flag to use 32 bit consistent dma mask
* @scsi_io_cb_idx: shost generated commands * @scsi_io_cb_idx: shost generated commands
* @tm_cb_idx: task management commands * @tm_cb_idx: task management commands
* @scsih_cb_idx: scsih internal commands * @scsih_cb_idx: scsih internal commands
@@ -1205,7 +1206,6 @@ struct MPT3SAS_ADAPTER {
u8 ir_firmware; u8 ir_firmware;
int bars; int bars;
u8 mask_interrupts; u8 mask_interrupts;
int dma_mask;
/* fw fault handler */ /* fw fault handler */
char fault_reset_work_q_name[20]; char fault_reset_work_q_name[20];
@@ -1254,6 +1254,7 @@ struct MPT3SAS_ADAPTER {
u8 high_iops_queues; u8 high_iops_queues;
u32 drv_support_bitmap; u32 drv_support_bitmap;
bool enable_sdev_max_qd; bool enable_sdev_max_qd;
bool use_32bit_dma;
/* internal commands, callback index */ /* internal commands, callback index */
u8 scsi_io_cb_idx; u8 scsi_io_cb_idx;
@@ -1471,6 +1472,8 @@ struct MPT3SAS_ADAPTER {
u16 device_remove_in_progress_sz; u16 device_remove_in_progress_sz;
u8 is_gen35_ioc; u8 is_gen35_ioc;
u8 is_aero_ioc; u8 is_aero_ioc;
struct dentry *debugfs_root;
struct dentry *ioc_dump;
PUT_SMID_IO_FP_HIP put_smid_scsi_io; PUT_SMID_IO_FP_HIP put_smid_scsi_io;
PUT_SMID_IO_FP_HIP put_smid_fast_path; PUT_SMID_IO_FP_HIP put_smid_fast_path;
PUT_SMID_IO_FP_HIP put_smid_hi_priority; PUT_SMID_IO_FP_HIP put_smid_hi_priority;
@@ -1478,6 +1481,11 @@ struct MPT3SAS_ADAPTER {
GET_MSIX_INDEX get_msix_index_for_smlio; GET_MSIX_INDEX get_msix_index_for_smlio;
}; };
struct mpt3sas_debugfs_buffer {
void *buf;
u32 len;
};
#define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001 #define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001
typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1781,6 +1789,11 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
/* NCQ Prio Handling Check */ /* NCQ Prio Handling Check */
bool scsih_ncq_prio_supp(struct scsi_device *sdev); bool scsih_ncq_prio_supp(struct scsi_device *sdev);
void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_init_debugfs(void);
void mpt3sas_exit_debugfs(void);
/** /**
* _scsih_is_pcie_scsi_device - determines if device is an pcie scsi device * _scsih_is_pcie_scsi_device - determines if device is an pcie scsi device
* @device_info: bitfield providing information about the device. * @device_info: bitfield providing information about the device.

View File

@@ -0,0 +1,157 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Debugfs interface Support for MPT (Message Passing Technology) based
* controllers.
*
* Copyright (C) 2020 Broadcom Inc.
*
* Authors: Broadcom Inc.
* Sreekanth Reddy <sreekanth.reddy@broadcom.com>
* Suganath Prabu <suganath-prabu.subramani@broadcom.com>
*
 * Send feedback to: MPT-FusionLinux.pdl@broadcom.com
*
**/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/uio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include "mpt3sas_base.h"
#include <linux/debugfs.h>
static struct dentry *mpt3sas_debugfs_root;
/*
* _debugfs_iocdump_read - copy ioc dump from debugfs buffer
 * @filp: File pointer
* @ubuf: Buffer to fill data
* @cnt: Length of the buffer
* @ppos: Offset in the file
*/
static ssize_t
_debugfs_iocdump_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
struct mpt3sas_debugfs_buffer *debug = filp->private_data;
if (!debug || !debug->buf)
return 0;
return simple_read_from_buffer(ubuf, cnt, ppos, debug->buf, debug->len);
}
/*
* _debugfs_iocdump_open : open the ioc_dump debugfs attribute file
*/
static int
_debugfs_iocdump_open(struct inode *inode, struct file *file)
{
struct MPT3SAS_ADAPTER *ioc = inode->i_private;
struct mpt3sas_debugfs_buffer *debug;
debug = kzalloc(sizeof(struct mpt3sas_debugfs_buffer), GFP_KERNEL);
if (!debug)
return -ENOMEM;
debug->buf = (void *)ioc;
debug->len = sizeof(struct MPT3SAS_ADAPTER);
file->private_data = debug;
return 0;
}
/*
* _debugfs_iocdump_release : release the ioc_dump debugfs attribute
* @inode: inode structure to the corresponds device
* @file: File pointer
*/
static int
_debugfs_iocdump_release(struct inode *inode, struct file *file)
{
struct mpt3sas_debugfs_buffer *debug = file->private_data;
if (!debug)
return 0;
file->private_data = NULL;
kfree(debug);
return 0;
}
static const struct file_operations mpt3sas_debugfs_iocdump_fops = {
.owner = THIS_MODULE,
.open = _debugfs_iocdump_open,
.read = _debugfs_iocdump_read,
.release = _debugfs_iocdump_release,
};
/*
* mpt3sas_init_debugfs : Create debugfs root for mpt3sas driver
*/
void mpt3sas_init_debugfs(void)
{
mpt3sas_debugfs_root = debugfs_create_dir("mpt3sas", NULL);
if (!mpt3sas_debugfs_root)
pr_info("mpt3sas: Cannot create debugfs root\n");
}
/*
* mpt3sas_exit_debugfs : Remove debugfs root for mpt3sas driver
*/
void mpt3sas_exit_debugfs(void)
{
debugfs_remove_recursive(mpt3sas_debugfs_root);
}
/*
* mpt3sas_setup_debugfs : Setup debugfs per HBA adapter
 * @ioc: MPT3SAS_ADAPTER object
*/
void
mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc)
{
char name[64];
snprintf(name, sizeof(name), "scsi_host%d", ioc->shost->host_no);
if (!ioc->debugfs_root) {
ioc->debugfs_root =
debugfs_create_dir(name, mpt3sas_debugfs_root);
if (!ioc->debugfs_root) {
dev_err(&ioc->pdev->dev,
"Cannot create per adapter debugfs directory\n");
return;
}
}
snprintf(name, sizeof(name), "ioc_dump");
ioc->ioc_dump = debugfs_create_file(name, 0444,
ioc->debugfs_root, ioc, &mpt3sas_debugfs_iocdump_fops);
if (!ioc->ioc_dump) {
dev_err(&ioc->pdev->dev,
"Cannot create ioc_dump debugfs file\n");
debugfs_remove(ioc->debugfs_root);
return;
}
snprintf(name, sizeof(name), "host_recovery");
debugfs_create_u8(name, 0444, ioc->debugfs_root, &ioc->shost_recovery);
}
/*
* mpt3sas_destroy_debugfs : Destroy debugfs per HBA adapter
* @ioc: MPT3SAS_ADAPTER object
*/
void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc)
{
debugfs_remove_recursive(ioc->debugfs_root);
}
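Assuming debugfs is mounted in the usual place, the attributes created above surface as /sys/kernel/debug/mpt3sas/scsi_host<N>/ioc_dump (a raw read-only snapshot of struct MPT3SAS_ADAPTER) and .../host_recovery (the shost_recovery byte), where <N> is the SCSI host number assigned at probe.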

View File

@@ -9928,6 +9928,7 @@ static void scsih_remove(struct pci_dev *pdev)
&ioc->ioc_pg1_copy); &ioc->ioc_pg1_copy);
/* release all the volumes */ /* release all the volumes */
_scsih_ir_shutdown(ioc); _scsih_ir_shutdown(ioc);
mpt3sas_destroy_debugfs(ioc);
sas_remove_host(shost); sas_remove_host(shost);
list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
list) { list) {
@@ -10763,8 +10764,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
} }
} }
/* register EEDP capabilities with SCSI layer */ /* register EEDP capabilities with SCSI layer */
if (prot_mask > 0) if (prot_mask >= 0)
scsi_host_set_prot(shost, prot_mask); scsi_host_set_prot(shost, (prot_mask & 0x07));
else else
scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE2_PROTECTION
@@ -10814,6 +10815,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
} }
scsi_scan_host(shost); scsi_scan_host(shost);
mpt3sas_setup_debugfs(ioc);
return 0; return 0;
out_add_shost_fail: out_add_shost_fail:
mpt3sas_base_detach(ioc); mpt3sas_base_detach(ioc);
@@ -11220,6 +11222,7 @@ scsih_init(void)
tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler( tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
_scsih_sas_control_complete); _scsih_sas_control_complete);
mpt3sas_init_debugfs();
return 0; return 0;
} }
@@ -11251,6 +11254,7 @@ scsih_exit(void)
if (hbas_to_enumerate != 2) if (hbas_to_enumerate != 2)
raid_class_release(mpt2sas_raid_template); raid_class_release(mpt2sas_raid_template);
sas_release_transport(mpt3sas_transport_template); sas_release_transport(mpt3sas_transport_template);
mpt3sas_exit_debugfs();
} }
/** /**

View File

@@ -25,7 +25,7 @@ static const struct mvs_chip_info mvs_chips[] = {
[chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, }, [chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
}; };
struct device_attribute *mvst_host_attrs[]; static struct device_attribute *mvst_host_attrs[];
#define SOC_SAS_NUM 2 #define SOC_SAS_NUM 2
@@ -759,8 +759,6 @@ static DEVICE_ATTR(interrupt_coalescing,
mvs_show_interrupt_coalescing, mvs_show_interrupt_coalescing,
mvs_store_interrupt_coalescing); mvs_store_interrupt_coalescing);
/* task handler */
struct task_struct *mvs_th;
static int __init mvs_init(void) static int __init mvs_init(void)
{ {
int rc; int rc;
@@ -785,7 +783,7 @@ static void __exit mvs_exit(void)
sas_release_transport(mvs_stt); sas_release_transport(mvs_stt);
} }
struct device_attribute *mvst_host_attrs[] = { static struct device_attribute *mvst_host_attrs[] = {
&dev_attr_driver_version, &dev_attr_driver_version,
&dev_attr_interrupt_coalescing, &dev_attr_interrupt_coalescing,
NULL, NULL,

View File

@@ -4652,7 +4652,7 @@ static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
for (i = 0; i < PMCRAID_MAX_CMD; i++) { for (i = 0; i < PMCRAID_MAX_CMD; i++) {
pinstance->cmd_list[i]->ioa_cb = pinstance->cmd_list[i]->ioa_cb =
dma_pool_alloc( dma_pool_zalloc(
pinstance->control_pool, pinstance->control_pool,
GFP_KERNEL, GFP_KERNEL,
&(pinstance->cmd_list[i]->ioa_cb_bus_addr)); &(pinstance->cmd_list[i]->ioa_cb_bus_addr));
@@ -4661,8 +4661,6 @@ static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
pmcraid_release_control_blocks(pinstance, i); pmcraid_release_control_blocks(pinstance, i);
return -ENOMEM; return -ENOMEM;
} }
memset(pinstance->cmd_list[i]->ioa_cb, 0,
sizeof(struct pmcraid_control_block));
} }
return 0; return 0;
} }

View File

@@ -355,6 +355,7 @@ struct qedf_ctx {
#define QEDF_GRCDUMP_CAPTURE 4 #define QEDF_GRCDUMP_CAPTURE 4
#define QEDF_IN_RECOVERY 5 #define QEDF_IN_RECOVERY 5
#define QEDF_DBG_STOP_IO 6 #define QEDF_DBG_STOP_IO 6
#define QEDF_PROBING 8
unsigned long flags; /* Miscellaneous state flags */ unsigned long flags; /* Miscellaneous state flags */
int fipvlan_retries; int fipvlan_retries;
u8 num_queues; u8 num_queues;
@@ -387,7 +388,9 @@ struct qedf_ctx {
#define QEDF_IO_WORK_MIN 64 #define QEDF_IO_WORK_MIN 64
mempool_t *io_mempool; mempool_t *io_mempool;
struct workqueue_struct *dpc_wq; struct workqueue_struct *dpc_wq;
struct delayed_work recovery_work;
struct delayed_work grcdump_work; struct delayed_work grcdump_work;
struct delayed_work stag_work;
u32 slow_sge_ios; u32 slow_sge_ios;
u32 fast_sge_ios; u32 fast_sge_ios;
@@ -403,6 +406,7 @@ struct qedf_ctx {
u32 flogi_cnt; u32 flogi_cnt;
u32 flogi_failed; u32 flogi_failed;
u32 flogi_pending;
/* Used for fc statistics */ /* Used for fc statistics */
struct mutex stats_mutex; struct mutex stats_mutex;
@@ -468,7 +472,7 @@ extern uint qedf_dump_frames;
extern uint qedf_io_tracing; extern uint qedf_io_tracing;
extern uint qedf_stop_io_on_error; extern uint qedf_stop_io_on_error;
extern uint qedf_link_down_tmo; extern uint qedf_link_down_tmo;
#define QEDF_RETRY_DELAY_MAX 20 /* 2 seconds */ #define QEDF_RETRY_DELAY_MAX 600 /* 60 seconds */
extern bool qedf_retry_delay; extern bool qedf_retry_delay;
extern uint qedf_debug; extern uint qedf_debug;

View File

@@ -388,14 +388,10 @@ void qedf_restart_rport(struct qedf_rport *fcport)
mutex_lock(&lport->disc.disc_mutex); mutex_lock(&lport->disc.disc_mutex);
/* Recreate the rport and log back in */ /* Recreate the rport and log back in */
rdata = fc_rport_create(lport, port_id); rdata = fc_rport_create(lport, port_id);
if (rdata) { mutex_unlock(&lport->disc.disc_mutex);
mutex_unlock(&lport->disc.disc_mutex); if (rdata)
fc_rport_login(rdata); fc_rport_login(rdata);
fcport->rdata = rdata; fcport->rdata = rdata;
} else {
mutex_unlock(&lport->disc.disc_mutex);
fcport->rdata = NULL;
}
} }
clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags); clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
} }

View File

@@ -1021,14 +1021,18 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
atomic_inc(&fcport->ios_to_queue); atomic_inc(&fcport->ios_to_queue);
if (fcport->retry_delay_timestamp) { if (fcport->retry_delay_timestamp) {
/* Take fcport->rport_lock for resetting the delay_timestamp */
spin_lock_irqsave(&fcport->rport_lock, flags);
if (time_after(jiffies, fcport->retry_delay_timestamp)) { if (time_after(jiffies, fcport->retry_delay_timestamp)) {
fcport->retry_delay_timestamp = 0; fcport->retry_delay_timestamp = 0;
} else { } else {
spin_unlock_irqrestore(&fcport->rport_lock, flags);
/* If retry_delay timer is active, flow off the ML */ /* If retry_delay timer is active, flow off the ML */
rc = SCSI_MLQUEUE_TARGET_BUSY; rc = SCSI_MLQUEUE_TARGET_BUSY;
atomic_dec(&fcport->ios_to_queue); atomic_dec(&fcport->ios_to_queue);
goto exit_qcmd; goto exit_qcmd;
} }
spin_unlock_irqrestore(&fcport->rport_lock, flags);
} }
io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD); io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
@@ -1134,6 +1138,8 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
int refcount; int refcount;
u16 scope, qualifier = 0; u16 scope, qualifier = 0;
u8 fw_residual_flag = 0; u8 fw_residual_flag = 0;
unsigned long flags = 0;
u16 chk_scope = 0;
if (!io_req) if (!io_req)
return; return;
@@ -1267,16 +1273,8 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
/* Lower 14 bits */ /* Lower 14 bits */
qualifier = fcp_rsp->retry_delay_timer & 0x3FFF; qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
if (qedf_retry_delay && if (qedf_retry_delay)
scope > 0 && qualifier > 0 && chk_scope = 1;
qualifier <= 0x3FEF) {
/* Check we don't go over the max */
if (qualifier > QEDF_RETRY_DELAY_MAX)
qualifier =
QEDF_RETRY_DELAY_MAX;
fcport->retry_delay_timestamp =
jiffies + (qualifier * HZ / 10);
}
/* Record stats */ /* Record stats */
if (io_req->cdb_status == if (io_req->cdb_status ==
SAM_STAT_TASK_SET_FULL) SAM_STAT_TASK_SET_FULL)
@@ -1287,6 +1285,36 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
} }
if (io_req->fcp_resid) if (io_req->fcp_resid)
scsi_set_resid(sc_cmd, io_req->fcp_resid); scsi_set_resid(sc_cmd, io_req->fcp_resid);
if (chk_scope == 1) {
if ((scope == 1 || scope == 2) &&
(qualifier > 0 && qualifier <= 0x3FEF)) {
/* Check we don't go over the max */
if (qualifier > QEDF_RETRY_DELAY_MAX) {
qualifier = QEDF_RETRY_DELAY_MAX;
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
"qualifier = %d\n",
(fcp_rsp->retry_delay_timer &
0x3FFF));
}
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
"Scope = %d and qualifier = %d",
scope, qualifier);
/* Take fcport->rport_lock to
* update the retry_delay_timestamp
*/
spin_lock_irqsave(&fcport->rport_lock, flags);
fcport->retry_delay_timestamp =
jiffies + (qualifier * HZ / 10);
spin_unlock_irqrestore(&fcport->rport_lock,
flags);
} else {
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
"combination of scope = %d and qualifier = %d is not handled in qedf.\n",
scope, qualifier);
}
}
break; break;
default: default:
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n", QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
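For reference, the decoding performed in the SAM_STAT_BUSY path above: the FCP_RSP retry-delay word carries the scope in its top two bits and the qualifier, in 100 ms units, in the low 14 bits (the top-bit placement of scope is an assumption from FCP-4; only the low-14 mask appears in the hunk). A condensed sketch of the resulting timestamp update:

	u16 rd    = fcp_rsp->retry_delay_timer;
	u16 scope = rd >> 14;		/* assumption: scope in bits 15:14 */
	u16 qual  = rd & 0x3FFF;	/* qualifier, 100 ms units */

	if ((scope == 1 || scope == 2) && qual > 0 && qual <= 0x3FEF) {
		if (qual > QEDF_RETRY_DELAY_MAX)	/* clamp at 60 s */
			qual = QEDF_RETRY_DELAY_MAX;
		fcport->retry_delay_timestamp = jiffies + qual * HZ / 10;
	}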

View File

@@ -28,6 +28,8 @@ const struct qed_fcoe_ops *qed_ops;
static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id); static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qedf_remove(struct pci_dev *pdev); static void qedf_remove(struct pci_dev *pdev);
static void qedf_shutdown(struct pci_dev *pdev); static void qedf_shutdown(struct pci_dev *pdev);
static void qedf_schedule_recovery_handler(void *dev);
static void qedf_recovery_handler(struct work_struct *work);
/* /*
* Driver module parameters. * Driver module parameters.
@@ -282,6 +284,7 @@ static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
else if (fc_frame_payload_op(fp) == ELS_LS_ACC) { else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
/* Set the source MAC we will use for FCoE traffic */ /* Set the source MAC we will use for FCoE traffic */
qedf_set_data_src_addr(qedf, fp); qedf_set_data_src_addr(qedf, fp);
qedf->flogi_pending = 0;
} }
/* Complete flogi_compl so we can proceed to sending ADISCs */ /* Complete flogi_compl so we can proceed to sending ADISCs */
@@ -307,6 +310,11 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
*/ */
if (resp == fc_lport_flogi_resp) { if (resp == fc_lport_flogi_resp) {
qedf->flogi_cnt++; qedf->flogi_cnt++;
if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
schedule_delayed_work(&qedf->stag_work, 2);
return NULL;
}
qedf->flogi_pending++;
return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp, return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
arg, timeout); arg, timeout);
} }
@@ -503,6 +511,32 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
fc_host_supported_speeds(lport->host) = lport->link_supported_speeds; fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
} }
static void qedf_bw_update(void *dev)
{
struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
struct qed_link_output link;
/* Get the latest status of the link */
qed_ops->common->get_link(qedf->cdev, &link);
if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
QEDF_ERR(&qedf->dbg_ctx,
"Ignore link update, driver getting unload.\n");
return;
}
if (link.link_up) {
if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
qedf_update_link_speed(qedf, &link);
else
QEDF_ERR(&qedf->dbg_ctx,
"Ignore bw update, link is down.\n");
} else {
QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
}
}
static void qedf_link_update(void *dev, struct qed_link_output *link) static void qedf_link_update(void *dev, struct qed_link_output *link)
{ {
struct qedf_ctx *qedf = (struct qedf_ctx *)dev; struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
@@ -629,6 +663,8 @@ static u32 qedf_get_login_failures(void *cookie)
static struct qed_fcoe_cb_ops qedf_cb_ops = { static struct qed_fcoe_cb_ops qedf_cb_ops = {
{ {
.link_update = qedf_link_update, .link_update = qedf_link_update,
.bw_update = qedf_bw_update,
.schedule_recovery_handler = qedf_schedule_recovery_handler,
.dcbx_aen = qedf_dcbx_handler, .dcbx_aen = qedf_dcbx_handler,
.get_generic_tlv_data = qedf_get_generic_tlv_data, .get_generic_tlv_data = qedf_get_generic_tlv_data,
.get_protocol_tlv_data = qedf_get_protocol_tlv_data, .get_protocol_tlv_data = qedf_get_protocol_tlv_data,
@@ -850,6 +886,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
qedf = lport_priv(lport); qedf = lport_priv(lport);
qedf->flogi_pending = 0;
/* For host reset, essentially do a soft link up/down */ /* For host reset, essentially do a soft link up/down */
atomic_set(&qedf->link_state, QEDF_LINK_DOWN); atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
@@ -3153,7 +3190,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
{ {
int rc = -EINVAL; int rc = -EINVAL;
struct fc_lport *lport; struct fc_lport *lport;
struct qedf_ctx *qedf; struct qedf_ctx *qedf = NULL;
struct Scsi_Host *host; struct Scsi_Host *host;
bool is_vf = false; bool is_vf = false;
struct qed_ll2_params params; struct qed_ll2_params params;
@@ -3183,6 +3220,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
/* Initialize qedf_ctx */ /* Initialize qedf_ctx */
qedf = lport_priv(lport); qedf = lport_priv(lport);
set_bit(QEDF_PROBING, &qedf->flags);
qedf->lport = lport; qedf->lport = lport;
qedf->ctlr.lp = lport; qedf->ctlr.lp = lport;
qedf->pdev = pdev; qedf->pdev = pdev;
@@ -3197,6 +3235,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
init_completion(&qedf->fipvlan_compl); init_completion(&qedf->fipvlan_compl);
mutex_init(&qedf->stats_mutex); mutex_init(&qedf->stats_mutex);
mutex_init(&qedf->flush_mutex); mutex_init(&qedf->flush_mutex);
qedf->flogi_pending = 0;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
"QLogic FastLinQ FCoE Module qedf %s, " "QLogic FastLinQ FCoE Module qedf %s, "
@@ -3206,9 +3245,12 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
} else { } else {
/* Init pointers during recovery */ /* Init pointers during recovery */
qedf = pci_get_drvdata(pdev); qedf = pci_get_drvdata(pdev);
set_bit(QEDF_PROBING, &qedf->flags);
lport = qedf->lport; lport = qedf->lport;
} }
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");
host = lport->host; host = lport->host;
/* Allocate mempool for qedf_io_work structs */ /* Allocate mempool for qedf_io_work structs */
@@ -3227,6 +3269,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update); INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery); INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump); INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
qedf->fipvlan_retries = qedf_fipvlan_retries; qedf->fipvlan_retries = qedf_fipvlan_retries;
/* Set a default prio in case DCBX doesn't converge */ /* Set a default prio in case DCBX doesn't converge */
if (qedf_default_prio > -1) { if (qedf_default_prio > -1) {
@@ -3281,6 +3324,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
} }
qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
/* Learn information crucial for qedf to progress */
rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
if (rc) {
QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
goto err2;
}
/* Record BDQ producer doorbell addresses */ /* Record BDQ producer doorbell addresses */
qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr; qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr; qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
@@ -3466,6 +3516,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qedf->lport->host->host_no); qedf->lport->host->host_no);
qedf->dpc_wq = create_workqueue(host_buf); qedf->dpc_wq = create_workqueue(host_buf);
} }
INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
/* /*
* GRC dump and sysfs parameters are not reaped during the recovery * GRC dump and sysfs parameters are not reaped during the recovery
@@ -3513,6 +3564,10 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
else else
fc_fabric_login(lport); fc_fabric_login(lport);
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
clear_bit(QEDF_PROBING, &qedf->flags);
/* All good */ /* All good */
return 0; return 0;
@@ -3538,6 +3593,11 @@ err2:
err1: err1:
scsi_host_put(lport->host); scsi_host_put(lport->host);
err0: err0:
if (qedf) {
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
clear_bit(QEDF_PROBING, &qedf->flags);
}
return rc; return rc;
} }
@@ -3687,11 +3747,25 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
{ {
struct qedf_ctx *qedf = dev; struct qedf_ctx *qedf = dev;
struct qed_mfw_tlv_fcoe *fcoe = data; struct qed_mfw_tlv_fcoe *fcoe = data;
struct fc_lport *lport = qedf->lport; struct fc_lport *lport;
struct Scsi_Host *host = lport->host; struct Scsi_Host *host;
struct fc_host_attrs *fc_host = shost_to_fc_host(host); struct fc_host_attrs *fc_host;
struct fc_host_statistics *hst; struct fc_host_statistics *hst;
if (!qedf) {
QEDF_ERR(NULL, "qedf is null.\n");
return;
}
if (test_bit(QEDF_PROBING, &qedf->flags)) {
QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
return;
}
lport = qedf->lport;
host = lport->host;
fc_host = shost_to_fc_host(host);
/* Force a refresh of the fc_host stats including offload stats */ /* Force a refresh of the fc_host stats including offload stats */
hst = qedf_fc_get_host_stats(host); hst = qedf_fc_get_host_stats(host);
@@ -3762,11 +3836,64 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
fcoe->scsi_tsk_full = qedf->task_set_fulls; fcoe->scsi_tsk_full = qedf->task_set_fulls;
} }
/* Deferred work function to perform soft context reset on STAG change */
void qedf_stag_change_work(struct work_struct *work)
{
struct qedf_ctx *qedf =
container_of(work, struct qedf_ctx, stag_work.work);
if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
return;
}
QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
qedf_ctx_soft_reset(qedf->lport);
}
static void qedf_shutdown(struct pci_dev *pdev) static void qedf_shutdown(struct pci_dev *pdev)
{ {
__qedf_remove(pdev, QEDF_MODE_NORMAL); __qedf_remove(pdev, QEDF_MODE_NORMAL);
} }
/*
* Recovery handler code
*/
static void qedf_schedule_recovery_handler(void *dev)
{
struct qedf_ctx *qedf = dev;
QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
schedule_delayed_work(&qedf->recovery_work, 0);
}
static void qedf_recovery_handler(struct work_struct *work)
{
struct qedf_ctx *qedf =
container_of(work, struct qedf_ctx, recovery_work.work);
if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
return;
/*
* Call common_ops->recovery_prolog to allow the MFW to quiesce
* any PCI transactions.
*/
qed_ops->common->recovery_prolog(qedf->cdev);
QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
__qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
/*
* Reset link and dcbx to down state since we will not get a link down
* event from the MFW but calling __qedf_remove will essentially be a
* link down event.
*/
atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
__qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
}
/* Generic TLV data callback */ /* Generic TLV data callback */
void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data) void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{ {

View File

@@ -836,6 +836,11 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
return ERR_PTR(ret); return ERR_PTR(ret);
} }
if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
return ERR_PTR(-ENXIO);
}
ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint)); ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint));
if (!ep) { if (!ep) {
QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n"); QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n");
@@ -870,12 +875,6 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
QEDI_ERR(&qedi->dbg_ctx, "Invalid endpoint\n"); QEDI_ERR(&qedi->dbg_ctx, "Invalid endpoint\n");
} }
if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
ret = -ENXIO;
goto ep_conn_exit;
}
ret = qedi_alloc_sq(qedi, qedi_ep); ret = qedi_alloc_sq(qedi, qedi_ep);
if (ret) if (ret)
goto ep_conn_exit; goto ep_conn_exit;
@@ -1001,7 +1000,8 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
if (qedi_ep->state == EP_STATE_OFLDCONN_START) if (qedi_ep->state == EP_STATE_OFLDCONN_START)
goto ep_exit_recover; goto ep_exit_recover;
flush_work(&qedi_ep->offload_work); if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
flush_work(&qedi_ep->offload_work);
if (qedi_ep->conn) { if (qedi_ep->conn) {
qedi_conn = qedi_ep->conn; qedi_conn = qedi_ep->conn;
@@ -1065,6 +1065,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
break; break;
} }
if (!abrt_conn)
wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer;
qedi_ep->state = EP_STATE_DISCONN_START; qedi_ep->state = EP_STATE_DISCONN_START;
ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn); ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
if (ret) { if (ret) {
@@ -1218,6 +1221,10 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
} }
iscsi_cid = (u32)path_data->handle; iscsi_cid = (u32)path_data->handle;
if (iscsi_cid >= qedi->max_active_conns) {
ret = -EINVAL;
goto set_path_exit;
}
qedi_ep = qedi->ep_tbl[iscsi_cid]; qedi_ep = qedi->ep_tbl[iscsi_cid];
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep); "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);

View File

@@ -28,6 +28,10 @@
#include "qedi_gbl.h" #include "qedi_gbl.h"
#include "qedi_iscsi.h" #include "qedi_iscsi.h"
static uint qedi_qed_debug;
module_param(qedi_qed_debug, uint, 0644);
MODULE_PARM_DESC(qedi_qed_debug, " QED debug level 0 (default)");
static uint qedi_fw_debug; static uint qedi_fw_debug;
module_param(qedi_fw_debug, uint, 0644); module_param(qedi_fw_debug, uint, 0644);
MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0(default) to 3"); MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0(default) to 3");
@@ -41,7 +45,7 @@ module_param(qedi_io_tracing, uint, 0644);
MODULE_PARM_DESC(qedi_io_tracing, MODULE_PARM_DESC(qedi_io_tracing,
" Enable logging of SCSI requests/completions into trace buffer. (default off)."); " Enable logging of SCSI requests/completions into trace buffer. (default off).");
uint qedi_ll2_buf_size = 0x400; static uint qedi_ll2_buf_size = 0x400;
module_param(qedi_ll2_buf_size, uint, 0644); module_param(qedi_ll2_buf_size, uint, 0644);
MODULE_PARM_DESC(qedi_ll2_buf_size, MODULE_PARM_DESC(qedi_ll2_buf_size,
"parameter to set ping packet size, default - 0x400, Jumbo packets - 0x2400."); "parameter to set ping packet size, default - 0x400, Jumbo packets - 0x2400.");
@@ -658,8 +662,6 @@ exit_setup_shost:
static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2) static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
{ {
struct qedi_ctx *qedi = (struct qedi_ctx *)cookie; struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
struct qedi_uio_dev *udev;
struct qedi_uio_ctrl *uctrl;
struct skb_work_list *work; struct skb_work_list *work;
struct ethhdr *eh; struct ethhdr *eh;
@@ -698,9 +700,6 @@ static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
"Allowed frame ethertype [0x%x] len [0x%x].\n", "Allowed frame ethertype [0x%x] len [0x%x].\n",
eh->h_proto, skb->len); eh->h_proto, skb->len);
udev = qedi->udev;
uctrl = udev->uctrl;
work = kzalloc(sizeof(*work), GFP_ATOMIC); work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) { if (!work) {
QEDI_WARN(&qedi->dbg_ctx, QEDI_WARN(&qedi->dbg_ctx,
@@ -921,7 +920,7 @@ static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
ipv6_en = !!(block->generic.ctrl_flags & ipv6_en = !!(block->generic.ctrl_flags &
NVM_ISCSI_CFG_GEN_IPV6_ENABLED); NVM_ISCSI_CFG_GEN_IPV6_ENABLED);
snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s\n", snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s",
block->target[index].target_name.byte); block->target[index].target_name.byte);
tgt->ipv6_en = ipv6_en; tgt->ipv6_en = ipv6_en;
@@ -1302,13 +1301,13 @@ process_again:
"process already running\n"); "process already running\n");
} }
if (qedi_fp_has_work(fp) == 0) if (!qedi_fp_has_work(fp))
qed_sb_update_sb_idx(fp->sb_info); qed_sb_update_sb_idx(fp->sb_info);
/* Check for more work */ /* Check for more work */
rmb(); rmb();
if (qedi_fp_has_work(fp) == 0) if (!qedi_fp_has_work(fp))
qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
else else
goto process_again; goto process_again;
@@ -1360,7 +1359,7 @@ static int qedi_request_msix_irq(struct qedi_ctx *qedi)
u16 idx; u16 idx;
cpu = cpumask_first(cpu_online_mask); cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) { for (i = 0; i < qedi->int_info.msix_cnt; i++) {
idx = i * qedi->dev_info.common.num_hwfns + idx = i * qedi->dev_info.common.num_hwfns +
qedi_ops->common->get_affin_hwfn_idx(qedi->cdev); qedi_ops->common->get_affin_hwfn_idx(qedi->cdev);
@@ -2422,7 +2421,6 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
{ {
struct qedi_ctx *qedi; struct qedi_ctx *qedi;
struct qed_ll2_params params; struct qed_ll2_params params;
u32 dp_module = 0;
u8 dp_level = 0; u8 dp_level = 0;
bool is_vf = false; bool is_vf = false;
char host_buf[16]; char host_buf[16];
@@ -2445,7 +2443,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
memset(&qed_params, 0, sizeof(qed_params)); memset(&qed_params, 0, sizeof(qed_params));
qed_params.protocol = QED_PROTOCOL_ISCSI; qed_params.protocol = QED_PROTOCOL_ISCSI;
qed_params.dp_module = dp_module; qed_params.dp_module = qedi_qed_debug;
qed_params.dp_level = dp_level; qed_params.dp_level = dp_level;
qed_params.is_vf = is_vf; qed_params.is_vf = is_vf;
qedi->cdev = qedi_ops->common->probe(pdev, &qed_params); qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);

View File

@@ -526,7 +526,7 @@ static struct pci_device_id qla1280_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
-DEFINE_MUTEX(qla1280_firmware_mutex);
+static DEFINE_MUTEX(qla1280_firmware_mutex);
 struct qla_fw {
	char *fwname;
@@ -535,7 +535,7 @@ struct qla_fw {
 #define QL_NUM_FW_IMAGES 3
-struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
+static struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
	{"qlogic/1040.bin",  NULL}, /* image 0 */
	{"qlogic/1280.bin",  NULL}, /* image 1 */
	{"qlogic/12160.bin", NULL}, /* image 2 */

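The qla1280 hunk is a pure linkage cleanup. A hedged sketch of the idea: file-scope objects used by a single translation unit should carry internal linkage, otherwise they become global symbols that can clash with identically named symbols elsewhere at link time. Names below are hypothetical.

#include <stddef.h>

/* Internal linkage: visible only in this translation unit. */
static const char *fw_images[] = {
	"vendor/fw_a.bin",
	"vendor/fw_b.bin",
	"vendor/fw_c.bin",
};

const char *pick_firmware(unsigned int idx)
{
	return idx < 3 ? fw_images[idx] : NULL;
}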
@@ -26,7 +26,8 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
-	if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
+	if (!(ha->fw_dump_reading || ha->mctp_dump_reading ||
+	      ha->mpi_fw_dump_reading))
		return 0;
	mutex_lock(&ha->optrom_mutex);
@@ -42,6 +43,10 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
	} else if (ha->mctp_dumped && ha->mctp_dump_reading) {
		rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
		    MCTP_DUMP_SIZE);
+	} else if (ha->mpi_fw_dumped && ha->mpi_fw_dump_reading) {
+		rval = memory_read_from_buffer(buf, count, &off,
+		    ha->mpi_fw_dump,
+		    ha->mpi_fw_dump_len);
	} else if (ha->fw_dump_reading) {
		rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
		    ha->fw_dump_len);
@@ -79,7 +84,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
			qla82xx_md_prep(vha);
		}
		ha->fw_dump_reading = 0;
-		ha->fw_dumped = 0;
+		ha->fw_dumped = false;
		break;
	case 1:
		if (ha->fw_dumped && !ha->fw_dump_reading) {
@@ -103,7 +108,6 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
			qla82xx_set_reset_owner(vha);
			qla8044_idc_unlock(ha);
		} else {
-			ha->fw_dump_mpi = 1;
			qla2x00_system_error(vha);
		}
		break;
@@ -137,6 +141,22 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
			    vha->host_no);
		}
		break;
+	case 8:
+		if (!ha->mpi_fw_dump_reading)
+			break;
+		ql_log(ql_log_info, vha, 0x70e7,
+		       "MPI firmware dump cleared on (%ld).\n", vha->host_no);
+		ha->mpi_fw_dump_reading = 0;
+		ha->mpi_fw_dumped = 0;
+		break;
+	case 9:
+		if (ha->mpi_fw_dumped && !ha->mpi_fw_dump_reading) {
+			ha->mpi_fw_dump_reading = 1;
+			ql_log(ql_log_info, vha, 0x70e8,
+			       "Raw MPI firmware dump ready for read on (%ld).\n",
+			       vha->host_no);
+		}
+		break;
	}
	return count;
 }
@@ -207,10 +227,9 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
	/* Checksum NVRAM. */
	if (IS_FWI2_CAPABLE(ha)) {
-		uint32_t *iter;
+		__le32 *iter = (__force __le32 *)buf;
		uint32_t chksum;
-		iter = (uint32_t *)buf;
		chksum = 0;
		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
			chksum += le32_to_cpu(*iter);
@@ -706,7 +725,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
		scsi_unblock_requests(vha->host);
		break;
	case 0x2025d:
-		if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+		if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+		    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
			return -EPERM;
		ql_log(ql_log_info, vha, 0x706f,
@@ -724,6 +744,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
			qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
			qla83xx_idc_unlock(vha, 0);
			break;
+		} else if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+			qla27xx_reset_mpi(vha);
		} else {
			/* Make sure FC side is not in reset */
			WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
@@ -737,6 +759,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
			scsi_unblock_requests(vha->host);
			break;
		}
+		break;
	case 0x2025e:
		if (!IS_P3P_TYPE(ha) || vha != base_vha) {
			ql_log(ql_log_info, vha, 0x7071,
@@ -1898,9 +1921,8 @@ static char *mode_to_str[] = {
 };
 #define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
-static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
+static void qla_set_ini_mode(scsi_qla_host_t *vha, int op)
 {
-	int rc = 0;
	enum {
		NO_ACTION,
		MODE_CHANGE_ACCEPT,
@@ -2173,8 +2195,6 @@ static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
		    vha->ql2xexchoffld, vha->u_ql2xexchoffld);
		break;
	}
-	return rc;
 }
 static ssize_t

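The new MPI-dump branch follows the usual sysfs binary-attribute pattern: stage a kernel buffer, then let the read handler window it out to userspace with memory_read_from_buffer(), which honors the caller's offset and count. A hedged sketch under assumed names (struct example_hw and its fields are invented for illustration):

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

struct example_hw {
	void *dump_buf;		/* staged dump data */
	size_t dump_len;	/* bytes valid in dump_buf */
	bool dump_ready;	/* set once a dump has been captured */
};

static ssize_t example_dump_read(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct example_hw *hw = attr->private;	/* assumed owner pointer */

	if (!hw->dump_ready)
		return 0;	/* nothing staged for reading yet */

	/* Copies at most 'count' bytes starting at 'off' and advances off. */
	return memory_read_from_buffer(buf, count, &off,
				       hw->dump_buf, hw->dump_len);
}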
@@ -490,7 +490,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job)
		    >> 24;
	switch (loop_id) {
	case 0xFC:
-		loop_id = cpu_to_le16(NPH_SNS);
+		loop_id = NPH_SNS;
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
@@ -691,7 +691,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
		 * dump and reset the chip.
		 */
		if (ret) {
-			ha->isp_ops->fw_dump(vha, 0);
+			qla2xxx_dump_fw(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
@@ -896,7 +896,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
				 * doesn't work take FCoE dump and then
				 * reset the chip.
				 */
-				ha->isp_ops->fw_dump(vha, 0);
+				qla2xxx_dump_fw(vha);
				set_bit(ISP_ABORT_NEEDED,
				    &vha->dpc_flags);
			}
@@ -2042,7 +2042,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
	/* Initialize all required fields of fcport */
	fcport->vha = vha;
-	fcport->loop_id = piocb_rqst->dataword;
+	fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);
	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";

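Both loopback error paths above now funnel through qla2xxx_dump_fw() instead of calling the per-ISP method directly, so dump policy lives in one place. A hedged sketch of such a wrapper; this is illustrative of the shape only, not the driver's actual function body:

void qla2xxx_dump_fw(scsi_qla_host_t *vha)
{
	unsigned long flags;

	/* Serialize against other hardware access, then dispatch to the
	 * generation-specific dump routine selected at probe time. */
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	vha->hw->isp_ops->fw_dump(vha);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}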
(File diff suppressed because it is too large.)

@@ -12,205 +12,205 @@
  */
 struct qla2300_fw_dump {
-	uint16_t hccr;
-	uint16_t pbiu_reg[8];
-	uint16_t risc_host_reg[8];
-	uint16_t mailbox_reg[32];
-	uint16_t resp_dma_reg[32];
-	uint16_t dma_reg[48];
-	uint16_t risc_hdw_reg[16];
-	uint16_t risc_gp0_reg[16];
-	uint16_t risc_gp1_reg[16];
-	uint16_t risc_gp2_reg[16];
-	uint16_t risc_gp3_reg[16];
-	uint16_t risc_gp4_reg[16];
-	uint16_t risc_gp5_reg[16];
-	uint16_t risc_gp6_reg[16];
-	uint16_t risc_gp7_reg[16];
-	uint16_t frame_buf_hdw_reg[64];
-	uint16_t fpm_b0_reg[64];
-	uint16_t fpm_b1_reg[64];
-	uint16_t risc_ram[0xf800];
-	uint16_t stack_ram[0x1000];
-	uint16_t data_ram[1];
+	__be16 hccr;
+	__be16 pbiu_reg[8];
+	__be16 risc_host_reg[8];
+	__be16 mailbox_reg[32];
+	__be16 resp_dma_reg[32];
+	__be16 dma_reg[48];
+	__be16 risc_hdw_reg[16];
+	__be16 risc_gp0_reg[16];
+	__be16 risc_gp1_reg[16];
+	__be16 risc_gp2_reg[16];
+	__be16 risc_gp3_reg[16];
+	__be16 risc_gp4_reg[16];
+	__be16 risc_gp5_reg[16];
+	__be16 risc_gp6_reg[16];
+	__be16 risc_gp7_reg[16];
+	__be16 frame_buf_hdw_reg[64];
+	__be16 fpm_b0_reg[64];
+	__be16 fpm_b1_reg[64];
+	__be16 risc_ram[0xf800];
+	__be16 stack_ram[0x1000];
+	__be16 data_ram[1];
 };
 struct qla2100_fw_dump {
-	uint16_t hccr;
-	uint16_t pbiu_reg[8];
-	uint16_t mailbox_reg[32];
-	uint16_t dma_reg[48];
-	uint16_t risc_hdw_reg[16];
-	uint16_t risc_gp0_reg[16];
-	uint16_t risc_gp1_reg[16];
-	uint16_t risc_gp2_reg[16];
-	uint16_t risc_gp3_reg[16];
-	uint16_t risc_gp4_reg[16];
-	uint16_t risc_gp5_reg[16];
-	uint16_t risc_gp6_reg[16];
-	uint16_t risc_gp7_reg[16];
-	uint16_t frame_buf_hdw_reg[16];
-	uint16_t fpm_b0_reg[64];
-	uint16_t fpm_b1_reg[64];
-	uint16_t risc_ram[0xf000];
+	__be16 hccr;
+	__be16 pbiu_reg[8];
+	__be16 mailbox_reg[32];
+	__be16 dma_reg[48];
+	__be16 risc_hdw_reg[16];
+	__be16 risc_gp0_reg[16];
+	__be16 risc_gp1_reg[16];
+	__be16 risc_gp2_reg[16];
+	__be16 risc_gp3_reg[16];
+	__be16 risc_gp4_reg[16];
+	__be16 risc_gp5_reg[16];
+	__be16 risc_gp6_reg[16];
+	__be16 risc_gp7_reg[16];
+	__be16 frame_buf_hdw_reg[16];
+	__be16 fpm_b0_reg[64];
+	__be16 fpm_b1_reg[64];
+	__be16 risc_ram[0xf000];
 };
 struct qla24xx_fw_dump {
-	uint32_t host_status;
-	uint32_t host_reg[32];
-	uint32_t shadow_reg[7];
-	uint16_t mailbox_reg[32];
-	uint32_t xseq_gp_reg[128];
-	uint32_t xseq_0_reg[16];
-	uint32_t xseq_1_reg[16];
-	uint32_t rseq_gp_reg[128];
-	uint32_t rseq_0_reg[16];
-	uint32_t rseq_1_reg[16];
-	uint32_t rseq_2_reg[16];
-	uint32_t cmd_dma_reg[16];
-	uint32_t req0_dma_reg[15];
-	uint32_t resp0_dma_reg[15];
-	uint32_t req1_dma_reg[15];
-	uint32_t xmt0_dma_reg[32];
-	uint32_t xmt1_dma_reg[32];
-	uint32_t xmt2_dma_reg[32];
-	uint32_t xmt3_dma_reg[32];
-	uint32_t xmt4_dma_reg[32];
-	uint32_t xmt_data_dma_reg[16];
-	uint32_t rcvt0_data_dma_reg[32];
-	uint32_t rcvt1_data_dma_reg[32];
-	uint32_t risc_gp_reg[128];
-	uint32_t lmc_reg[112];
-	uint32_t fpm_hdw_reg[192];
-	uint32_t fb_hdw_reg[176];
-	uint32_t code_ram[0x2000];
-	uint32_t ext_mem[1];
+	__be32 host_status;
+	__be32 host_reg[32];
+	__be32 shadow_reg[7];
+	__be16 mailbox_reg[32];
+	__be32 xseq_gp_reg[128];
+	__be32 xseq_0_reg[16];
+	__be32 xseq_1_reg[16];
+	__be32 rseq_gp_reg[128];
+	__be32 rseq_0_reg[16];
+	__be32 rseq_1_reg[16];
+	__be32 rseq_2_reg[16];
+	__be32 cmd_dma_reg[16];
+	__be32 req0_dma_reg[15];
+	__be32 resp0_dma_reg[15];
+	__be32 req1_dma_reg[15];
+	__be32 xmt0_dma_reg[32];
+	__be32 xmt1_dma_reg[32];
+	__be32 xmt2_dma_reg[32];
+	__be32 xmt3_dma_reg[32];
+	__be32 xmt4_dma_reg[32];
+	__be32 xmt_data_dma_reg[16];
+	__be32 rcvt0_data_dma_reg[32];
+	__be32 rcvt1_data_dma_reg[32];
+	__be32 risc_gp_reg[128];
+	__be32 lmc_reg[112];
+	__be32 fpm_hdw_reg[192];
+	__be32 fb_hdw_reg[176];
+	__be32 code_ram[0x2000];
+	__be32 ext_mem[1];
 };
 struct qla25xx_fw_dump {
-	uint32_t host_status;
-	uint32_t host_risc_reg[32];
-	uint32_t pcie_regs[4];
-	uint32_t host_reg[32];
-	uint32_t shadow_reg[11];
-	uint32_t risc_io_reg;
-	uint16_t mailbox_reg[32];
-	uint32_t xseq_gp_reg[128];
-	uint32_t xseq_0_reg[48];
-	uint32_t xseq_1_reg[16];
-	uint32_t rseq_gp_reg[128];
-	uint32_t rseq_0_reg[32];
-	uint32_t rseq_1_reg[16];
-	uint32_t rseq_2_reg[16];
-	uint32_t aseq_gp_reg[128];
-	uint32_t aseq_0_reg[32];
-	uint32_t aseq_1_reg[16];
-	uint32_t aseq_2_reg[16];
-	uint32_t cmd_dma_reg[16];
-	uint32_t req0_dma_reg[15];
-	uint32_t resp0_dma_reg[15];
-	uint32_t req1_dma_reg[15];
-	uint32_t xmt0_dma_reg[32];
-	uint32_t xmt1_dma_reg[32];
-	uint32_t xmt2_dma_reg[32];
-	uint32_t xmt3_dma_reg[32];
-	uint32_t xmt4_dma_reg[32];
-	uint32_t xmt_data_dma_reg[16];
-	uint32_t rcvt0_data_dma_reg[32];
-	uint32_t rcvt1_data_dma_reg[32];
-	uint32_t risc_gp_reg[128];
-	uint32_t lmc_reg[128];
-	uint32_t fpm_hdw_reg[192];
-	uint32_t fb_hdw_reg[192];
-	uint32_t code_ram[0x2000];
-	uint32_t ext_mem[1];
+	__be32 host_status;
+	__be32 host_risc_reg[32];
+	__be32 pcie_regs[4];
+	__be32 host_reg[32];
+	__be32 shadow_reg[11];
+	__be32 risc_io_reg;
+	__be16 mailbox_reg[32];
+	__be32 xseq_gp_reg[128];
+	__be32 xseq_0_reg[48];
+	__be32 xseq_1_reg[16];
+	__be32 rseq_gp_reg[128];
+	__be32 rseq_0_reg[32];
+	__be32 rseq_1_reg[16];
+	__be32 rseq_2_reg[16];
+	__be32 aseq_gp_reg[128];
+	__be32 aseq_0_reg[32];
+	__be32 aseq_1_reg[16];
+	__be32 aseq_2_reg[16];
+	__be32 cmd_dma_reg[16];
+	__be32 req0_dma_reg[15];
+	__be32 resp0_dma_reg[15];
+	__be32 req1_dma_reg[15];
+	__be32 xmt0_dma_reg[32];
+	__be32 xmt1_dma_reg[32];
+	__be32 xmt2_dma_reg[32];
+	__be32 xmt3_dma_reg[32];
+	__be32 xmt4_dma_reg[32];
+	__be32 xmt_data_dma_reg[16];
+	__be32 rcvt0_data_dma_reg[32];
+	__be32 rcvt1_data_dma_reg[32];
+	__be32 risc_gp_reg[128];
+	__be32 lmc_reg[128];
+	__be32 fpm_hdw_reg[192];
+	__be32 fb_hdw_reg[192];
+	__be32 code_ram[0x2000];
+	__be32 ext_mem[1];
 };
 struct qla81xx_fw_dump {
-	uint32_t host_status;
-	uint32_t host_risc_reg[32];
-	uint32_t pcie_regs[4];
-	uint32_t host_reg[32];
-	uint32_t shadow_reg[11];
-	uint32_t risc_io_reg;
-	uint16_t mailbox_reg[32];
-	uint32_t xseq_gp_reg[128];
-	uint32_t xseq_0_reg[48];
-	uint32_t xseq_1_reg[16];
-	uint32_t rseq_gp_reg[128];
-	uint32_t rseq_0_reg[32];
-	uint32_t rseq_1_reg[16];
-	uint32_t rseq_2_reg[16];
-	uint32_t aseq_gp_reg[128];
-	uint32_t aseq_0_reg[32];
-	uint32_t aseq_1_reg[16];
-	uint32_t aseq_2_reg[16];
-	uint32_t cmd_dma_reg[16];
-	uint32_t req0_dma_reg[15];
-	uint32_t resp0_dma_reg[15];
-	uint32_t req1_dma_reg[15];
-	uint32_t xmt0_dma_reg[32];
-	uint32_t xmt1_dma_reg[32];
-	uint32_t xmt2_dma_reg[32];
-	uint32_t xmt3_dma_reg[32];
-	uint32_t xmt4_dma_reg[32];
-	uint32_t xmt_data_dma_reg[16];
-	uint32_t rcvt0_data_dma_reg[32];
-	uint32_t rcvt1_data_dma_reg[32];
-	uint32_t risc_gp_reg[128];
-	uint32_t lmc_reg[128];
-	uint32_t fpm_hdw_reg[224];
-	uint32_t fb_hdw_reg[208];
-	uint32_t code_ram[0x2000];
-	uint32_t ext_mem[1];
+	__be32 host_status;
+	__be32 host_risc_reg[32];
+	__be32 pcie_regs[4];
+	__be32 host_reg[32];
+	__be32 shadow_reg[11];
+	__be32 risc_io_reg;
+	__be16 mailbox_reg[32];
+	__be32 xseq_gp_reg[128];
+	__be32 xseq_0_reg[48];
+	__be32 xseq_1_reg[16];
+	__be32 rseq_gp_reg[128];
+	__be32 rseq_0_reg[32];
+	__be32 rseq_1_reg[16];
+	__be32 rseq_2_reg[16];
+	__be32 aseq_gp_reg[128];
+	__be32 aseq_0_reg[32];
+	__be32 aseq_1_reg[16];
+	__be32 aseq_2_reg[16];
+	__be32 cmd_dma_reg[16];
+	__be32 req0_dma_reg[15];
+	__be32 resp0_dma_reg[15];
+	__be32 req1_dma_reg[15];
+	__be32 xmt0_dma_reg[32];
+	__be32 xmt1_dma_reg[32];
+	__be32 xmt2_dma_reg[32];
+	__be32 xmt3_dma_reg[32];
+	__be32 xmt4_dma_reg[32];
+	__be32 xmt_data_dma_reg[16];
+	__be32 rcvt0_data_dma_reg[32];
+	__be32 rcvt1_data_dma_reg[32];
+	__be32 risc_gp_reg[128];
+	__be32 lmc_reg[128];
+	__be32 fpm_hdw_reg[224];
+	__be32 fb_hdw_reg[208];
+	__be32 code_ram[0x2000];
+	__be32 ext_mem[1];
 };
 struct qla83xx_fw_dump {
-	uint32_t host_status;
-	uint32_t host_risc_reg[48];
-	uint32_t pcie_regs[4];
-	uint32_t host_reg[32];
-	uint32_t shadow_reg[11];
-	uint32_t risc_io_reg;
-	uint16_t mailbox_reg[32];
-	uint32_t xseq_gp_reg[256];
-	uint32_t xseq_0_reg[48];
-	uint32_t xseq_1_reg[16];
-	uint32_t xseq_2_reg[16];
-	uint32_t rseq_gp_reg[256];
-	uint32_t rseq_0_reg[32];
-	uint32_t rseq_1_reg[16];
-	uint32_t rseq_2_reg[16];
-	uint32_t rseq_3_reg[16];
-	uint32_t aseq_gp_reg[256];
-	uint32_t aseq_0_reg[32];
-	uint32_t aseq_1_reg[16];
-	uint32_t aseq_2_reg[16];
-	uint32_t aseq_3_reg[16];
-	uint32_t cmd_dma_reg[64];
-	uint32_t req0_dma_reg[15];
-	uint32_t resp0_dma_reg[15];
-	uint32_t req1_dma_reg[15];
-	uint32_t xmt0_dma_reg[32];
-	uint32_t xmt1_dma_reg[32];
-	uint32_t xmt2_dma_reg[32];
-	uint32_t xmt3_dma_reg[32];
-	uint32_t xmt4_dma_reg[32];
-	uint32_t xmt_data_dma_reg[16];
-	uint32_t rcvt0_data_dma_reg[32];
-	uint32_t rcvt1_data_dma_reg[32];
-	uint32_t risc_gp_reg[128];
-	uint32_t lmc_reg[128];
-	uint32_t fpm_hdw_reg[256];
-	uint32_t rq0_array_reg[256];
-	uint32_t rq1_array_reg[256];
-	uint32_t rp0_array_reg[256];
-	uint32_t rp1_array_reg[256];
-	uint32_t queue_control_reg[16];
-	uint32_t fb_hdw_reg[432];
-	uint32_t at0_array_reg[128];
-	uint32_t code_ram[0x2400];
-	uint32_t ext_mem[1];
+	__be32 host_status;
+	__be32 host_risc_reg[48];
+	__be32 pcie_regs[4];
+	__be32 host_reg[32];
+	__be32 shadow_reg[11];
+	__be32 risc_io_reg;
+	__be16 mailbox_reg[32];
+	__be32 xseq_gp_reg[256];
+	__be32 xseq_0_reg[48];
+	__be32 xseq_1_reg[16];
+	__be32 xseq_2_reg[16];
+	__be32 rseq_gp_reg[256];
+	__be32 rseq_0_reg[32];
+	__be32 rseq_1_reg[16];
+	__be32 rseq_2_reg[16];
+	__be32 rseq_3_reg[16];
+	__be32 aseq_gp_reg[256];
+	__be32 aseq_0_reg[32];
+	__be32 aseq_1_reg[16];
+	__be32 aseq_2_reg[16];
+	__be32 aseq_3_reg[16];
+	__be32 cmd_dma_reg[64];
+	__be32 req0_dma_reg[15];
+	__be32 resp0_dma_reg[15];
+	__be32 req1_dma_reg[15];
+	__be32 xmt0_dma_reg[32];
+	__be32 xmt1_dma_reg[32];
+	__be32 xmt2_dma_reg[32];
+	__be32 xmt3_dma_reg[32];
+	__be32 xmt4_dma_reg[32];
+	__be32 xmt_data_dma_reg[16];
+	__be32 rcvt0_data_dma_reg[32];
+	__be32 rcvt1_data_dma_reg[32];
+	__be32 risc_gp_reg[128];
+	__be32 lmc_reg[128];
+	__be32 fpm_hdw_reg[256];
+	__be32 rq0_array_reg[256];
+	__be32 rq1_array_reg[256];
+	__be32 rp0_array_reg[256];
+	__be32 rp1_array_reg[256];
+	__be32 queue_control_reg[16];
+	__be32 fb_hdw_reg[432];
+	__be32 at0_array_reg[128];
+	__be32 code_ram[0x2400];
+	__be32 ext_mem[1];
 };
 #define EFT_NUM_BUFFERS		4
@@ -223,44 +223,45 @@ struct qla83xx_fw_dump {
 #define fce_calc_size(b) ((FCE_BYTES_PER_BUFFER) * (b))
 struct qla2xxx_fce_chain {
-	uint32_t type;
-	uint32_t chain_size;
-	uint32_t size;
-	uint32_t addr_l;
-	uint32_t addr_h;
-	uint32_t eregs[8];
+	__be32 type;
+	__be32 chain_size;
+	__be32 size;
+	__be32 addr_l;
+	__be32 addr_h;
+	__be32 eregs[8];
 };
 /* used by exchange off load and extended login offload */
 struct qla2xxx_offld_chain {
-	uint32_t type;
-	uint32_t chain_size;
-	uint32_t size;
-	u64	 addr;
+	__be32	type;
+	__be32	chain_size;
+	__be32	size;
+	__be32	reserved;
+	__be64	addr;
 };
 struct qla2xxx_mq_chain {
-	uint32_t type;
-	uint32_t chain_size;
-	uint32_t count;
-	uint32_t qregs[4 * QLA_MQ_SIZE];
+	__be32 type;
+	__be32 chain_size;
+	__be32 count;
+	__be32 qregs[4 * QLA_MQ_SIZE];
 };
 struct qla2xxx_mqueue_header {
-	uint32_t queue;
+	__be32 queue;
 #define TYPE_REQUEST_QUEUE	0x1
 #define TYPE_RESPONSE_QUEUE	0x2
 #define TYPE_ATIO_QUEUE		0x3
-	uint32_t number;
-	uint32_t size;
+	__be32 number;
+	__be32 size;
 };
 struct qla2xxx_mqueue_chain {
-	uint32_t type;
-	uint32_t chain_size;
+	__be32 type;
+	__be32 chain_size;
 };
 #define DUMP_CHAIN_VARIANT	0x80000000
@@ -273,28 +274,28 @@ struct qla2xxx_mqueue_chain {
 struct qla2xxx_fw_dump {
	uint8_t signature[4];
-	uint32_t version;
-	uint32_t fw_major_version;
-	uint32_t fw_minor_version;
-	uint32_t fw_subminor_version;
-	uint32_t fw_attributes;
-	uint32_t vendor;
-	uint32_t device;
-	uint32_t subsystem_vendor;
-	uint32_t subsystem_device;
-	uint32_t fixed_size;
-	uint32_t mem_size;
-	uint32_t req_q_size;
-	uint32_t rsp_q_size;
-	uint32_t eft_size;
-	uint32_t eft_addr_l;
-	uint32_t eft_addr_h;
-	uint32_t header_size;
+	__be32 version;
+	__be32 fw_major_version;
+	__be32 fw_minor_version;
+	__be32 fw_subminor_version;
+	__be32 fw_attributes;
+	__be32 vendor;
+	__be32 device;
+	__be32 subsystem_vendor;
+	__be32 subsystem_device;
+	__be32 fixed_size;
+	__be32 mem_size;
+	__be32 req_q_size;
+	__be32 rsp_q_size;
+	__be32 eft_size;
+	__be32 eft_addr_l;
+	__be32 eft_addr_h;
+	__be32 header_size;
	union {
		struct qla2100_fw_dump isp21;
@@ -369,7 +370,7 @@ ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
 extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
	uint32_t, void **);
-extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
+extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, __be32 *,
	uint32_t, void **);
 extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *,
	struct qla_hw_data *);

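The struct conversion above is the standard sparse endianness idiom: fields that hold big-endian data are declared __be16/__be32 ("bitwise" types), and every store goes through cpu_to_be*(), so a sparse build (make C=2) flags any unconverted access. A minimal sketch with hypothetical names:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_dump {
	__be32 host_status;	/* stored big-endian in the dump file */
	__be16 mailbox[32];
};

static void example_snapshot(struct example_dump *d,
			     u32 status, const u16 *mbox)
{
	int i;

	/* Explicit conversions at the boundary; sparse checks the rest. */
	d->host_status = cpu_to_be32(status);
	for (i = 0; i < 32; i++)
		d->mailbox[i] = cpu_to_be16(mbox[i]);
}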
(Two file diffs suppressed because they are too large.)

@@ -173,6 +173,7 @@ extern int ql2xenablemsix;
 extern int qla2xuseresexchforels;
 extern int ql2xexlogins;
 extern int ql2xdifbundlinginternalbuffers;
+extern int ql2xfulldump_on_mpifail;
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -636,15 +637,17 @@ extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
 /*
  * Global Function Prototypes in qla_dbg.c source file.
  */
-extern void qla2100_fw_dump(scsi_qla_host_t *, int);
-extern void qla2300_fw_dump(scsi_qla_host_t *, int);
-extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla82xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla8044_fw_dump(scsi_qla_host_t *, int);
-extern void qla27xx_fwdump(scsi_qla_host_t *, int);
+void qla2xxx_dump_fw(scsi_qla_host_t *vha);
+void qla2100_fw_dump(scsi_qla_host_t *vha);
+void qla2300_fw_dump(scsi_qla_host_t *vha);
+void qla24xx_fw_dump(scsi_qla_host_t *vha);
+void qla25xx_fw_dump(scsi_qla_host_t *vha);
+void qla81xx_fw_dump(scsi_qla_host_t *vha);
+void qla82xx_fw_dump(scsi_qla_host_t *vha);
+void qla8044_fw_dump(scsi_qla_host_t *vha);
+void qla27xx_fwdump(scsi_qla_host_t *vha);
+extern void qla27xx_mpi_fwdump(scsi_qla_host_t *, int);
 extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *, void *);
 extern int qla27xx_fwdt_template_valid(void *);
 extern ulong qla27xx_fwdt_template_size(void *);
@@ -769,7 +772,7 @@ extern int qlafx00_fw_ready(scsi_qla_host_t *);
 extern int qlafx00_configure_devices(scsi_qla_host_t *);
 extern int qlafx00_reset_initialize(scsi_qla_host_t *);
 extern int qlafx00_fx_disc(scsi_qla_host_t *, fc_port_t *, uint16_t);
-extern int qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
+extern void qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
 extern int qlafx00_post_aenfx_work(struct scsi_qla_host *, uint32_t,
	uint32_t *, int);
 extern uint32_t qlafx00_fw_state_show(struct device *,
@@ -871,7 +874,7 @@ extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
	uint16_t *, uint16_t *);
 /* 83xx related functions */
-extern void qla83xx_fw_dump(scsi_qla_host_t *, int);
+void qla83xx_fw_dump(scsi_qla_host_t *vha);
 /* Minidump related functions */
 extern int qla82xx_md_get_template_size(scsi_qla_host_t *);
@@ -933,5 +936,6 @@ extern void qla24xx_process_purex_list(struct purex_list *);
 /* nvme.c */
 void qla_nvme_unregister_remote_port(struct fc_port *fcport);
+void qla27xx_reset_mpi(scsi_qla_host_t *vha);
 void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
 #endif /* _QLA_GBL_H */

@@ -120,7 +120,7 @@ static void qla24xx_abort_iocb_timeout(void *data)
	if (sp->cmd_sp)
		sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
-	abt->u.abt.comp_status = CS_TIMEOUT;
+	abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
	sp->done(sp, QLA_OS_TIMER_EXPIRED);
 }
@@ -992,7 +992,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
		ql_dbg(ql_dbg_disc, vha, 0x20e8,
		    "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
-		    __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
+		    __func__, &wwn, e->port_id[2], e->port_id[1],
		    e->port_id[0], e->current_login_state, e->last_login_state,
		    (loop_id & 0x7fff));
	}
@@ -1343,7 +1343,7 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
	mb[9] = vha->vp_idx;
	mb[10] = opt;
-	mbx->u.mbx.in = (void *)pd;
+	mbx->u.mbx.in = pd;
	mbx->u.mbx.in_dma = pd_dma;
	sp->done = qla24xx_async_gpdb_sp_done;
@@ -1791,7 +1791,7 @@ qla2x00_tmf_iocb_timeout(void *data)
		}
	}
	spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
-	tmf->u.tmf.comp_status = CS_TIMEOUT;
+	tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT);
	tmf->u.tmf.data = QLA_FUNCTION_FAILED;
	complete(&tmf->u.tmf.comp);
 }
@@ -2219,7 +2219,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
	/* Check for secure flash support */
	if (IS_QLA28XX(ha)) {
-		if (RD_REG_DWORD(&reg->mailbox12) & BIT_0)
+		if (rd_reg_word(&reg->mailbox12) & BIT_0)
			ha->flags.secure_adapter = 1;
		ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
		    (ha->flags.secure_adapter) ? "Yes" : "No");
@@ -2357,7 +2357,7 @@ qla2100_pci_config(scsi_qla_host_t *vha)
	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
-	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
+	ha->pci_attr = rd_reg_word(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;
@@ -2399,17 +2399,17 @@ qla2300_pci_config(scsi_qla_host_t *vha)
		spin_lock_irqsave(&ha->hardware_lock, flags);
		/* Pause RISC. */
-		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
-			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
+			if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;
			udelay(10);
		}
		/* Select FPM registers. */
-		WRT_REG_WORD(&reg->ctrl_status, 0x20);
-		RD_REG_WORD(&reg->ctrl_status);
+		wrt_reg_word(&reg->ctrl_status, 0x20);
+		rd_reg_word(&reg->ctrl_status);
		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);
@@ -2418,13 +2418,13 @@ qla2300_pci_config(scsi_qla_host_t *vha)
			pci_clear_mwi(ha->pdev);
		/* Deselect FPM registers. */
-		WRT_REG_WORD(&reg->ctrl_status, 0x0);
-		RD_REG_WORD(&reg->ctrl_status);
+		wrt_reg_word(&reg->ctrl_status, 0x0);
+		rd_reg_word(&reg->ctrl_status);
		/* Release RISC module. */
-		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+		wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
-			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
+			if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;
			udelay(10);
@@ -2439,7 +2439,7 @@ qla2300_pci_config(scsi_qla_host_t *vha)
	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
-	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
+	ha->pci_attr = rd_reg_word(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;
@@ -2483,7 +2483,7 @@ qla24xx_pci_config(scsi_qla_host_t *vha)
	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
-	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
+	ha->pci_attr = rd_reg_dword(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;
@@ -2587,36 +2587,36 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
-		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			for (cnt = 0; cnt < 30000; cnt++) {
-				if ((RD_REG_WORD(&reg->hccr) &
+				if ((rd_reg_word(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
-			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
+			rd_reg_word(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}
		/* Select FPM registers. */
-		WRT_REG_WORD(&reg->ctrl_status, 0x20);
-		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
+		wrt_reg_word(&reg->ctrl_status, 0x20);
+		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */
		/* FPM Soft Reset. */
-		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
-		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */
+		wrt_reg_word(&reg->fpm_diag_config, 0x100);
+		rd_reg_word(&reg->fpm_diag_config);	/* PCI Posting. */
		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
-			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
-			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
+			wrt_reg_word(&reg->fpm_diag_config, 0x0);
+			rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */
		}
		/* Select frame buffer registers. */
-		WRT_REG_WORD(&reg->ctrl_status, 0x10);
-		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
+		wrt_reg_word(&reg->ctrl_status, 0x10);
+		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */
		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
@@ -2634,23 +2634,23 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
		}
		/* Select RISC module registers. */
-		WRT_REG_WORD(&reg->ctrl_status, 0);
-		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
+		wrt_reg_word(&reg->ctrl_status, 0);
+		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */
		/* Reset RISC processor. */
-		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
-		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
+		wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+		rd_reg_word(&reg->hccr);		/* PCI Posting. */
		/* Release RISC processor. */
-		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
-		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
+		wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
+		rd_reg_word(&reg->hccr);		/* PCI Posting. */
	}
-	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
-	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
+	wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+	wrt_reg_word(&reg->hccr, HCCR_CLR_HOST_INT);
	/* Reset ISP chip. */
-	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+	wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
@@ -2661,7 +2661,7 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
-			if ((RD_REG_WORD(&reg->ctrl_status) &
+			if ((rd_reg_word(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
@@ -2670,13 +2670,13 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
		udelay(10);
	/* Reset RISC processor. */
-	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
-	WRT_REG_WORD(&reg->semaphore, 0);
+	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+	wrt_reg_word(&reg->semaphore, 0);
	/* Release RISC processor. */
-	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
-	RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
+	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
+	rd_reg_word(&reg->hccr);		/* PCI Posting. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		for (cnt = 0; cnt < 30000; cnt++) {
@@ -2694,8 +2694,8 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
-		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
-		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
+		wrt_reg_word(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
+		rd_reg_word(&reg->hccr);		/* PCI Posting. */
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2740,32 +2740,32 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* Reset RISC. */
-	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+	wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
-		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
+		if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;
		udelay(10);
	}
-	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
	    "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
-	    RD_REG_DWORD(&reg->hccr),
-	    RD_REG_DWORD(&reg->ctrl_status),
-	    (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
+	    rd_reg_dword(&reg->hccr),
+	    rd_reg_dword(&reg->ctrl_status),
+	    (rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
-	WRT_REG_DWORD(&reg->ctrl_status,
+	wrt_reg_dword(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
	udelay(100);
	/* Wait for firmware to complete NVRAM accesses. */
-	RD_REG_WORD(&reg->mailbox0);
-	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+	rd_reg_word(&reg->mailbox0);
+	for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
@@ -2779,26 +2779,26 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
	    "HCCR: 0x%x, MailBox0 Status 0x%x\n",
-	    RD_REG_DWORD(&reg->hccr),
-	    RD_REG_DWORD(&reg->mailbox0));
+	    rd_reg_dword(&reg->hccr),
+	    rd_reg_word(&reg->mailbox0));
	/* Wait for soft-reset to complete. */
-	RD_REG_DWORD(&reg->ctrl_status);
+	rd_reg_dword(&reg->ctrl_status);
	for (cnt = 0; cnt < 60; cnt++) {
		barrier();
-		if ((RD_REG_DWORD(&reg->ctrl_status) &
+		if ((rd_reg_dword(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;
		udelay(5);
	}
-	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
	    "HCCR: 0x%x, Soft Reset status: 0x%x\n",
-	    RD_REG_DWORD(&reg->hccr),
-	    RD_REG_DWORD(&reg->ctrl_status));
+	    rd_reg_dword(&reg->hccr),
+	    rd_reg_dword(&reg->ctrl_status));
	/* If required, do an MPI FW reset now */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
@@ -2817,17 +2817,17 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
		}
	}
-	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
-	RD_REG_DWORD(&reg->hccr);
+	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
+	rd_reg_dword(&reg->hccr);
-	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
-	RD_REG_DWORD(&reg->hccr);
+	wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+	rd_reg_dword(&reg->hccr);
-	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
-	RD_REG_DWORD(&reg->hccr);
+	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
+	rd_reg_dword(&reg->hccr);
-	RD_REG_WORD(&reg->mailbox0);
-	for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
+	rd_reg_word(&reg->mailbox0);
+	for (cnt = 60; rd_reg_word(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
@@ -2840,8 +2840,8 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
	    "Host Risc 0x%x, mailbox0 0x%x\n",
-	    RD_REG_DWORD(&reg->hccr),
-	    RD_REG_WORD(&reg->mailbox0));
+	    rd_reg_dword(&reg->hccr),
+	    rd_reg_word(&reg->mailbox0));
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2860,9 +2860,8 @@ qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
 {
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
-	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
-	*data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
+	wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+	*data = rd_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET);
 }
 static void
@@ -2870,8 +2869,8 @@ qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
 {
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
-	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
-	WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
+	wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+	wrt_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data);
 }
 static void
@@ -2887,7 +2886,7 @@ qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
	    vha->hw->pdev->subsystem_device != 0x0240)
		return;
-	WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
+	wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
	udelay(100);
 attempt:
@@ -2989,7 +2988,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* Reset ISP chip. */
-	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+	wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
	/*
	 * We need to have a delay here since the card will not respond while
@@ -2999,7 +2998,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
	data = qla2x00_debounce_register(&reg->ctrl_status);
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
-		data = RD_REG_WORD(&reg->ctrl_status);
+		data = rd_reg_word(&reg->ctrl_status);
		barrier();
	}
@@ -3010,8 +3009,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
	    "Reset register cleared by chip reset.\n");
	/* Reset RISC processor. */
-	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
-	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
@@ -3339,6 +3338,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
		    dump_size / 1024);
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+			ha->mpi_fw_dump = (char *)fw_dump +
+				ha->fwdt[1].dump_size;
			mutex_unlock(&ha->optrom_mutex);
			return;
		}
@@ -3650,8 +3651,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity.  */
		spin_lock_irqsave(&ha->hardware_lock, flags);
-		WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
-		RD_REG_WORD(&reg->hccr);
+		wrt_reg_word(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
+		rd_reg_word(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}
@@ -3758,11 +3759,11 @@ enable_82xx_npiv:
			spin_lock_irqsave(&ha->hardware_lock, flags);
			if (IS_QLA2300(ha))
				/* SRAM parity */
-				WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
+				wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
			else
				/* SRAM, Instruction RAM and GP RAM parity */
-				WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
-			RD_REG_WORD(&reg->hccr);
+				wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
+			rd_reg_word(&reg->hccr);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
		}
@@ -4006,11 +4007,11 @@ qla2x00_config_rings(struct scsi_qla_host *vha)
	put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
	put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);
-	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
-	WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
-	WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
-	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
-	RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
+	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
+	wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
+	wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
+	wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
+	rd_reg_word(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
 }
 void
@@ -4072,15 +4073,15 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
		}
		icb->firmware_options_2 |= cpu_to_le32(BIT_23);
-		WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
-		WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
-		WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
-		WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
+		wrt_reg_dword(&reg->isp25mq.req_q_in, 0);
+		wrt_reg_dword(&reg->isp25mq.req_q_out, 0);
+		wrt_reg_dword(&reg->isp25mq.rsp_q_in, 0);
+		wrt_reg_dword(&reg->isp25mq.rsp_q_out, 0);
	} else {
-		WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
-		WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
-		WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
-		WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
+		wrt_reg_dword(&reg->isp24.req_q_in, 0);
+		wrt_reg_dword(&reg->isp24.req_q_out, 0);
+		wrt_reg_dword(&reg->isp24.rsp_q_in, 0);
+		wrt_reg_dword(&reg->isp24.rsp_q_out, 0);
	}
	qlt_24xx_config_rings(vha);
@@ -4090,11 +4091,11 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
		ql_dbg(ql_dbg_init, vha, 0x00fd,
		    "Speed set by user : %s Gbps \n",
		    qla2x00_get_link_speed_str(ha, ha->set_data_rate));
-		icb->firmware_options_3 = (ha->set_data_rate << 13);
+		icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
	}
	/* PCI posting */
-	RD_REG_DWORD(&ioreg->hccr);
+	rd_reg_word(&ioreg->hccr);
 }
 /**
@@ -4125,7 +4126,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
		req = ha->req_q_map[que];
		if (!req || !test_bit(que, ha->req_qid_map))
			continue;
-		req->out_ptr = (void *)(req->ring + req->length);
+		req->out_ptr = (uint16_t *)(req->ring + req->length);
		*req->out_ptr = 0;
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
			req->outstanding_cmds[cnt] = NULL;
@@ -4142,7 +4143,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
		rsp = ha->rsp_q_map[que];
		if (!rsp || !test_bit(que, ha->rsp_qid_map))
			continue;
-		rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+		rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
		*rsp->in_ptr = 0;
		/* Initialize response queue entries */
		if (IS_QLAFX00(ha))
@@ -4181,12 +4182,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
	mid_init_cb->init_cb.execution_throttle =
	    cpu_to_le16(ha->cur_fw_xcb_count);
	ha->flags.dport_enabled =
-	    (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
+	    (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
+	     BIT_7) != 0;
	ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
	    (ha->flags.dport_enabled) ? "enabled" : "disabled");
	/* FA-WWPN Status */
	ha->flags.fawwpn_enabled =
-	    (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
+	    (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
+	     BIT_6) != 0;
	ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
	    (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
 }
@@ -4565,7 +4568,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
	ha->nvram_size = sizeof(*nv);
	ha->nvram_base = 0;
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
-		if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
+		if ((rd_reg_word(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;
	/* Get NVRAM data and calculate checksum. */
@@ -5079,6 +5082,54 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
	return (rval);
 }
+static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
+	fc_port_t *fcport;
+	int rval;
+	if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
+		/* borrowing */
+		u32 *bp, sz;
+		memset(ha->init_cb, 0, ha->init_cb_size);
+		sz = min_t(int, sizeof(struct els_plogi_payload),
+		    ha->init_cb_size);
+		rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
+		    ha->init_cb, sz);
+		if (rval == QLA_SUCCESS) {
+			__be32 *q = &ha->plogi_els_payld.data[0];
+			bp = (uint32_t *)ha->init_cb;
+			cpu_to_be32_array(q, bp, sz / 4);
+			memcpy(bp, q, sizeof(ha->plogi_els_payld.data));
+		} else {
+			ql_dbg(ql_dbg_init, vha, 0x00d1,
+			    "PLOGI ELS param read fail.\n");
+			goto skip_login;
+		}
+	}
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		if (fcport->n2n_flag) {
+			qla24xx_fcport_handle_login(vha, fcport);
+			return QLA_SUCCESS;
+		}
+	}
+skip_login:
+	spin_lock_irqsave(&vha->work_lock, flags);
+	vha->scan.scan_retry++;
+	spin_unlock_irqrestore(&vha->work_lock, flags);
+	if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+	}
+	return QLA_FUNCTION_FAILED;
+}
 /*
  * qla2x00_configure_local_loop
  *	Updates Fibre Channel Device Database with local loop devices.
@@ -5096,7 +5147,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
	int found_devs;
	int found;
	fc_port_t *fcport, *new_fcport;
	uint16_t index;
	uint16_t entries;
	struct gid_list_info *gid;
@@ -5106,47 +5156,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
	unsigned long flags;
	/* Inititae N2N login. */
-	if (N2N_TOPO(ha)) {
-		if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
-			/* borrowing */
-			u32 *bp, sz;
-			memset(ha->init_cb, 0, ha->init_cb_size);
-			sz = min_t(int, sizeof(struct els_plogi_payload),
-			    ha->init_cb_size);
-			rval = qla24xx_get_port_login_templ(vha,
-			    ha->init_cb_dma, (void *)ha->init_cb, sz);
-			if (rval == QLA_SUCCESS) {
-				__be32 *q = &ha->plogi_els_payld.data[0];
-				bp = (uint32_t *)ha->init_cb;
-				cpu_to_be32_array(q, bp, sz / 4);
-				memcpy(bp, q, sizeof(ha->plogi_els_payld.data));
-			} else {
-				ql_dbg(ql_dbg_init, vha, 0x00d1,
-				    "PLOGI ELS param read fail.\n");
-				goto skip_login;
-			}
-		}
-		list_for_each_entry(fcport, &vha->vp_fcports, list) {
-			if (fcport->n2n_flag) {
-				qla24xx_fcport_handle_login(vha, fcport);
-				return QLA_SUCCESS;
-			}
-		}
-skip_login:
-		spin_lock_irqsave(&vha->work_lock, flags);
-		vha->scan.scan_retry++;
-		spin_unlock_irqrestore(&vha->work_lock, flags);
-		if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
-			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
-			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-		}
-		return QLA_FUNCTION_FAILED;
-	}
+	if (N2N_TOPO(ha))
+		return qla2x00_configure_n2n_loop(vha);
	found_devs = 0;
	new_fcport = NULL;
@@ -7078,10 +7089,10 @@ qla2x00_reset_adapter(scsi_qla_host_t *vha)
ha->isp_ops->disable_intrs(ha); ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags); spin_lock_irqsave(&ha->hardware_lock, flags);
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
RD_REG_WORD(&reg->hccr); /* PCI Posting. */ rd_reg_word(&reg->hccr); /* PCI Posting. */
WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC); wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
RD_REG_WORD(&reg->hccr); /* PCI Posting. */ rd_reg_word(&reg->hccr); /* PCI Posting. */
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS; return QLA_SUCCESS;
@@ -7102,10 +7113,10 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
ha->isp_ops->disable_intrs(ha); ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags); spin_lock_irqsave(&ha->hardware_lock, flags);
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET); wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
RD_REG_DWORD(&reg->hccr); rd_reg_dword(&reg->hccr);
WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE); wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr); rd_reg_dword(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (IS_NOPOLLING_TYPE(ha)) if (IS_NOPOLLING_TYPE(ha))
@@ -7143,7 +7154,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
int rval; int rval;
struct init_cb_24xx *icb; struct init_cb_24xx *icb;
struct nvram_24xx *nv; struct nvram_24xx *nv;
uint32_t *dptr; __le32 *dptr;
uint8_t *dptr1, *dptr2; uint8_t *dptr1, *dptr2;
uint32_t chksum; uint32_t chksum;
uint16_t cnt; uint16_t cnt;
@@ -7171,7 +7182,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
/* Get NVRAM data into cache and calculate checksum. */ /* Get NVRAM data into cache and calculate checksum. */
dptr = (uint32_t *)nv; dptr = (__force __le32 *)nv;
ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size); ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
chksum += le32_to_cpu(*dptr); chksum += le32_to_cpu(*dptr);
@@ -7199,7 +7210,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
memset(nv, 0, ha->nvram_size); memset(nv, 0, ha->nvram_size);
nv->nvram_version = cpu_to_le16(ICB_VERSION); nv->nvram_version = cpu_to_le16(ICB_VERSION);
nv->version = cpu_to_le16(ICB_VERSION); nv->version = cpu_to_le16(ICB_VERSION);
nv->frame_payload_size = 2048; nv->frame_payload_size = cpu_to_le16(2048);
nv->execution_throttle = cpu_to_le16(0xFFFF); nv->execution_throttle = cpu_to_le16(0xFFFF);
nv->exchange_count = cpu_to_le16(0); nv->exchange_count = cpu_to_le16(0);
nv->hard_address = cpu_to_le16(124); nv->hard_address = cpu_to_le16(124);
@@ -7367,7 +7378,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
ha->login_retry_count = ql2xloginretrycount; ha->login_retry_count = ql2xloginretrycount;
/* N2N: driver will initiate Login instead of FW */ /* N2N: driver will initiate Login instead of FW */
icb->firmware_options_3 |= BIT_8; icb->firmware_options_3 |= cpu_to_le32(BIT_8);
/* Enable ZIO. */ /* Enable ZIO. */
if (!vha->flags.init_done) { if (!vha->flags.init_done) {
@@ -7435,7 +7446,7 @@ qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
static ulong static ulong
qla27xx_image_status_checksum(struct qla27xx_image_status *image_status) qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
{ {
uint32_t *p = (void *)image_status; __le32 *p = (__force __le32 *)image_status;
uint n = sizeof(*image_status) / sizeof(*p); uint n = sizeof(*image_status) / sizeof(*p);
uint32_t sum = 0; uint32_t sum = 0;
@@ -7498,7 +7509,7 @@ qla28xx_get_aux_images(
goto check_sec_image; goto check_sec_image;
} }
qla24xx_read_flash_data(vha, (void *)&pri_aux_image_status, qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
ha->flt_region_aux_img_status_pri, ha->flt_region_aux_img_status_pri,
sizeof(pri_aux_image_status) >> 2); sizeof(pri_aux_image_status) >> 2);
qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status); qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
@@ -7531,7 +7542,7 @@ check_sec_image:
goto check_valid_image; goto check_valid_image;
} }
qla24xx_read_flash_data(vha, (void *)&sec_aux_image_status, qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
ha->flt_region_aux_img_status_sec, ha->flt_region_aux_img_status_sec,
sizeof(sec_aux_image_status) >> 2); sizeof(sec_aux_image_status) >> 2);
qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status); qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
@@ -7596,7 +7607,7 @@ qla27xx_get_active_image(struct scsi_qla_host *vha,
goto check_sec_image; goto check_sec_image;
} }
if (qla24xx_read_flash_data(vha, (void *)(&pri_image_status), if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status,
ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) != ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
QLA_SUCCESS) { QLA_SUCCESS) {
WARN_ON_ONCE(true); WARN_ON_ONCE(true);
@@ -7703,7 +7714,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
ql_dbg(ql_dbg_init, vha, 0x008b, ql_dbg(ql_dbg_init, vha, 0x008b,
"FW: Loading firmware from flash (%x).\n", faddr); "FW: Loading firmware from flash (%x).\n", faddr);
dcode = (void *)req->ring; dcode = (uint32_t *)req->ring;
qla24xx_read_flash_data(vha, dcode, faddr, 8); qla24xx_read_flash_data(vha, dcode, faddr, 8);
if (qla24xx_risc_firmware_invalid(dcode)) { if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_fatal, vha, 0x008c, ql_log(ql_log_fatal, vha, 0x008c,
@@ -7716,18 +7727,18 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
return QLA_FUNCTION_FAILED; return QLA_FUNCTION_FAILED;
} }
dcode = (void *)req->ring; dcode = (uint32_t *)req->ring;
*srisc_addr = 0; *srisc_addr = 0;
segments = FA_RISC_CODE_SEGMENTS; segments = FA_RISC_CODE_SEGMENTS;
for (j = 0; j < segments; j++) { for (j = 0; j < segments; j++) {
ql_dbg(ql_dbg_init, vha, 0x008d, ql_dbg(ql_dbg_init, vha, 0x008d,
"-> Loading segment %u...\n", j); "-> Loading segment %u...\n", j);
qla24xx_read_flash_data(vha, dcode, faddr, 10); qla24xx_read_flash_data(vha, dcode, faddr, 10);
risc_addr = be32_to_cpu(dcode[2]); risc_addr = be32_to_cpu((__force __be32)dcode[2]);
risc_size = be32_to_cpu(dcode[3]); risc_size = be32_to_cpu((__force __be32)dcode[3]);
if (!*srisc_addr) { if (!*srisc_addr) {
*srisc_addr = risc_addr; *srisc_addr = risc_addr;
risc_attr = be32_to_cpu(dcode[9]); risc_attr = be32_to_cpu((__force __be32)dcode[9]);
} }
dlen = ha->fw_transfer_size >> 2; dlen = ha->fw_transfer_size >> 2;
@@ -7767,9 +7778,9 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
fwdt->template = NULL; fwdt->template = NULL;
fwdt->length = 0; fwdt->length = 0;
dcode = (void *)req->ring; dcode = (uint32_t *)req->ring;
qla24xx_read_flash_data(vha, dcode, faddr, 7); qla24xx_read_flash_data(vha, dcode, faddr, 7);
risc_size = be32_to_cpu(dcode[2]); risc_size = be32_to_cpu((__force __be32)dcode[2]);
ql_dbg(ql_dbg_init, vha, 0x0161, ql_dbg(ql_dbg_init, vha, 0x0161,
"-> fwdt%u template array at %#x (%#x dwords)\n", "-> fwdt%u template array at %#x (%#x dwords)\n",
j, faddr, risc_size); j, faddr, risc_size);
@@ -7838,7 +7849,8 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{ {
int rval; int rval;
int i, fragment; int i, fragment;
uint16_t *wcode, *fwcode; uint16_t *wcode;
__be16 *fwcode;
uint32_t risc_addr, risc_size, fwclen, wlen, *seg; uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
struct fw_blob *blob; struct fw_blob *blob;
struct qla_hw_data *ha = vha->hw; struct qla_hw_data *ha = vha->hw;
@@ -7858,7 +7870,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
wcode = (uint16_t *)req->ring; wcode = (uint16_t *)req->ring;
*srisc_addr = 0; *srisc_addr = 0;
fwcode = (uint16_t *)blob->fw->data; fwcode = (__force __be16 *)blob->fw->data;
fwclen = 0; fwclen = 0;
/* Validate firmware image by checking version. */ /* Validate firmware image by checking version. */
@@ -7906,7 +7918,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
"words 0x%x.\n", risc_addr, wlen); "words 0x%x.\n", risc_addr, wlen);
for (i = 0; i < wlen; i++) for (i = 0; i < wlen; i++)
wcode[i] = swab16(fwcode[i]); wcode[i] = swab16((__force u32)fwcode[i]);
rval = qla2x00_load_ram(vha, req->dma, risc_addr, rval = qla2x00_load_ram(vha, req->dma, risc_addr,
wlen); wlen);
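The swab16() with a __force cast applies the same scheme to the legacy firmware loader: the blob is a stream of big-endian 16-bit words that must be byte-swapped into the request ring. A hedged sketch (copy_fw_words is a made-up helper):

static void copy_fw_words(u16 *dst, const u8 *blob, size_t nwords)
{
	const __be16 *src = (__force const __be16 *)blob;
	size_t i;

	/* byte-swap each halfword; __force documents the cast for sparse */
	for (i = 0; i < nwords; i++)
		dst[i] = swab16((__force u16)src[i]);
}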
@@ -7943,7 +7955,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
ulong i; ulong i;
uint j; uint j;
struct fw_blob *blob; struct fw_blob *blob;
uint32_t *fwcode; __be32 *fwcode;
struct qla_hw_data *ha = vha->hw; struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0]; struct req_que *req = ha->req_q_map[0];
struct fwdt *fwdt = ha->fwdt; struct fwdt *fwdt = ha->fwdt;
@@ -7959,8 +7971,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
return QLA_FUNCTION_FAILED; return QLA_FUNCTION_FAILED;
} }
fwcode = (void *)blob->fw->data; fwcode = (__force __be32 *)blob->fw->data;
dcode = fwcode; dcode = (__force uint32_t *)fwcode;
if (qla24xx_risc_firmware_invalid(dcode)) { if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_fatal, vha, 0x0093, ql_log(ql_log_fatal, vha, 0x0093,
"Unable to verify integrity of firmware image (%zd).\n", "Unable to verify integrity of firmware image (%zd).\n",
@@ -7971,7 +7983,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
return QLA_FUNCTION_FAILED; return QLA_FUNCTION_FAILED;
} }
dcode = (void *)req->ring; dcode = (uint32_t *)req->ring;
*srisc_addr = 0; *srisc_addr = 0;
segments = FA_RISC_CODE_SEGMENTS; segments = FA_RISC_CODE_SEGMENTS;
for (j = 0; j < segments; j++) { for (j = 0; j < segments; j++) {
@@ -7997,7 +8009,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
dlen); dlen);
for (i = 0; i < dlen; i++) for (i = 0; i < dlen; i++)
dcode[i] = swab32(fwcode[i]); dcode[i] = swab32((__force u32)fwcode[i]);
rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
if (rval) { if (rval) {
@@ -8051,7 +8063,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
dcode = fwdt->template; dcode = fwdt->template;
for (i = 0; i < risc_size; i++) for (i = 0; i < risc_size; i++)
dcode[i] = fwcode[i]; dcode[i] = (__force u32)fwcode[i];
if (!qla27xx_fwdt_template_valid(dcode)) { if (!qla27xx_fwdt_template_valid(dcode)) {
ql_log(ql_log_warn, vha, 0x0175, ql_log(ql_log_warn, vha, 0x0175,
@@ -8322,7 +8334,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
int rval; int rval;
struct init_cb_81xx *icb; struct init_cb_81xx *icb;
struct nvram_81xx *nv; struct nvram_81xx *nv;
uint32_t *dptr; __le32 *dptr;
uint8_t *dptr1, *dptr2; uint8_t *dptr1, *dptr2;
uint32_t chksum; uint32_t chksum;
uint16_t cnt; uint16_t cnt;
@@ -8369,7 +8381,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
"primary" : "secondary"); "primary" : "secondary");
ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size); ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
dptr = (uint32_t *)nv; dptr = (__force __le32 *)nv;
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
chksum += le32_to_cpu(*dptr); chksum += le32_to_cpu(*dptr);
@@ -8396,7 +8408,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
memset(nv, 0, ha->nvram_size); memset(nv, 0, ha->nvram_size);
nv->nvram_version = cpu_to_le16(ICB_VERSION); nv->nvram_version = cpu_to_le16(ICB_VERSION);
nv->version = cpu_to_le16(ICB_VERSION); nv->version = cpu_to_le16(ICB_VERSION);
nv->frame_payload_size = 2048; nv->frame_payload_size = cpu_to_le16(2048);
nv->execution_throttle = cpu_to_le16(0xFFFF); nv->execution_throttle = cpu_to_le16(0xFFFF);
nv->exchange_count = cpu_to_le16(0); nv->exchange_count = cpu_to_le16(0);
nv->port_name[0] = 0x21; nv->port_name[0] = 0x21;
@@ -8440,7 +8452,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
} }
if (IS_T10_PI_CAPABLE(ha)) if (IS_T10_PI_CAPABLE(ha))
nv->frame_payload_size &= ~7; nv->frame_payload_size &= cpu_to_le16(~7);
qlt_81xx_config_nvram_stage1(vha, nv); qlt_81xx_config_nvram_stage1(vha, nv);
@@ -8603,10 +8615,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
} }
/* enable RIDA Format2 */ /* enable RIDA Format2 */
icb->firmware_options_3 |= BIT_0; icb->firmware_options_3 |= cpu_to_le32(BIT_0);
/* N2N: driver will initiate Login instead of FW */ /* N2N: driver will initiate Login instead of FW */
icb->firmware_options_3 |= BIT_8; icb->firmware_options_3 |= cpu_to_le32(BIT_8);
/* Determine NVMe/FCP priority for target ports */ /* Determine NVMe/FCP priority for target ports */
ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha); ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);
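Several hunks above make the same class of fix: firmware_options_3 and frame_payload_size are declared __le32/__le16, so flag constants must be converted before any read-modify-write, or the bit lands in the wrong byte on big-endian hosts. A sketch under that assumption (icb_example only mirrors the relevant fields):

struct icb_example {
	__le32 firmware_options_3;
	__le16 frame_payload_size;
};

static void icb_fixups(struct icb_example *icb)
{
	icb->firmware_options_3 |= cpu_to_le32(BIT_8);	/* N2N: driver login */
	icb->frame_payload_size &= cpu_to_le16(~7);	/* keep 8-byte aligned */
}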


@@ -40,16 +40,16 @@ qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
* register value. * register value.
*/ */
static __inline__ uint16_t static __inline__ uint16_t
qla2x00_debounce_register(volatile uint16_t __iomem *addr) qla2x00_debounce_register(volatile __le16 __iomem *addr)
{ {
volatile uint16_t first; volatile uint16_t first;
volatile uint16_t second; volatile uint16_t second;
do { do {
first = RD_REG_WORD(addr); first = rd_reg_word(addr);
barrier(); barrier();
cpu_relax(); cpu_relax();
second = RD_REG_WORD(addr); second = rd_reg_word(addr);
} while (first != second); } while (first != second);
return (first); return (first);
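rd_reg_word() and its siblings, which replace the RD_REG_*/WRT_REG_* macros throughout, are essentially typed wrappers over readX()/writeX() that take __le16/__le32 __iomem pointers so sparse can type-check register accesses. Roughly (the driver's actual definitions may differ):

static inline u16 rd_reg_word(const volatile __le16 __iomem *addr)
{
	return readw(addr);	/* readw() already returns CPU order */
}

static inline void wrt_reg_word(volatile __le16 __iomem *addr, u16 data)
{
	writew(data, addr);
}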
@@ -329,7 +329,7 @@ qla_83xx_start_iocbs(struct qla_qpair *qpair)
} else } else
req->ring_ptr++; req->ring_ptr++;
WRT_REG_DWORD(req->req_q_in, req->ring_index); wrt_reg_dword(req->req_q_in, req->ring_index);
} }
static inline int static inline int


@@ -376,7 +376,7 @@ qla2x00_start_scsi(srb_t *sp)
/* Calculate the number of request entries needed. */ /* Calculate the number of request entries needed. */
req_cnt = ha->isp_ops->calc_req_entries(tot_dsds); req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
if (req->cnt < (req_cnt + 2)) { if (req->cnt < (req_cnt + 2)) {
cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg)); cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
if (req->ring_index < cnt) if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index; req->cnt = cnt - req->ring_index;
else else
@@ -428,8 +428,8 @@ qla2x00_start_scsi(srb_t *sp)
sp->flags |= SRB_DMA_VALID; sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */ /* Set chip new ring index. */
WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index); wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */ rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
/* Manage unprocessed RIO/ZIO commands in response queue. */ /* Manage unprocessed RIO/ZIO commands in response queue. */
if (vha->flags.process_response_queue && if (vha->flags.process_response_queue &&
@@ -472,21 +472,21 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
/* Set chip new ring index. */ /* Set chip new ring index. */
if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
WRT_REG_DWORD(req->req_q_in, req->ring_index); wrt_reg_dword(req->req_q_in, req->ring_index);
} else if (IS_QLA83XX(ha)) { } else if (IS_QLA83XX(ha)) {
WRT_REG_DWORD(req->req_q_in, req->ring_index); wrt_reg_dword(req->req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
} else if (IS_QLAFX00(ha)) { } else if (IS_QLAFX00(ha)) {
WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index); wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in); rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code); QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
} else if (IS_FWI2_CAPABLE(ha)) { } else if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index); wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in); rd_reg_dword_relaxed(&reg->isp24.req_q_in);
} else { } else {
WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
req->ring_index); req->ring_index);
RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp)); rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
} }
} }
} }
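The write-then-read pairs preserved through this hunk are the usual PCI posting flush: reading back the register just written forces the posted write out to the adapter before the driver continues. As a standalone sketch (ring_doorbell is illustrative only, assuming the driver's rd/wrt helpers):

static inline void ring_doorbell(volatile __le32 __iomem *req_q_in, u32 index)
{
	wrt_reg_dword(req_q_in, index);
	(void)rd_reg_dword_relaxed(req_q_in);	/* flush posted write */
}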
@@ -661,7 +661,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
cur_dsd->address = 0; cur_dsd->address = 0;
cur_dsd->length = 0; cur_dsd->length = 0;
cur_dsd++; cur_dsd++;
cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE; cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
return 0; return 0;
} }
@@ -755,8 +755,8 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
} }
struct fw_dif_context { struct fw_dif_context {
uint32_t ref_tag; __le32 ref_tag;
uint16_t app_tag; __le16 app_tag;
uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/ uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/ uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
}; };
@@ -1389,7 +1389,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts) uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{ {
struct dsd64 *cur_dsd; struct dsd64 *cur_dsd;
uint32_t *fcp_dl; __be32 *fcp_dl;
scsi_qla_host_t *vha; scsi_qla_host_t *vha;
struct scsi_cmnd *cmd; struct scsi_cmnd *cmd;
uint32_t total_bytes = 0; uint32_t total_bytes = 0;
@@ -1456,7 +1456,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
&crc_ctx_pkt->ref_tag, tot_prot_dsds); &crc_ctx_pkt->ref_tag, tot_prot_dsds);
put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address); put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW; cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
/* Determine SCSI command length -- align to 4 byte boundary */ /* Determine SCSI command length -- align to 4 byte boundary */
if (cmd->cmd_len > 16) { if (cmd->cmd_len > 16) {
@@ -1545,7 +1545,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
crc_ctx_pkt->guard_seed = cpu_to_le16(0); crc_ctx_pkt->guard_seed = cpu_to_le16(0);
/* Fibre channel byte count */ /* Fibre channel byte count */
cmd_pkt->byte_count = cpu_to_le32(total_bytes); cmd_pkt->byte_count = cpu_to_le32(total_bytes);
fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 + fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
additional_fcpcdb_len); additional_fcpcdb_len);
*fcp_dl = htonl(total_bytes); *fcp_dl = htonl(total_bytes);
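fcp_dl is the FCP data-length word that trails the (possibly extended) CDB inside the FCP command IU; it is big-endian on the wire, which is why the pointer becomes __be32 while the store stays htonl(). A sketch under those assumptions:

static void set_fcp_dl(u8 *fcp_cdb, u8 additional_cdb_len, u32 xfer_len)
{
	/* FCP_DL immediately follows the 16-byte CDB plus any extension */
	__be32 *fcp_dl = (__be32 *)(fcp_cdb + 16 + additional_cdb_len);

	*fcp_dl = htonl(xfer_len);
}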
@@ -1637,7 +1637,7 @@ qla24xx_start_scsi(srb_t *sp)
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) { if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
RD_REG_DWORD_RELAXED(req->req_q_out); rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt) if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index; req->cnt = cnt - req->ring_index;
else else
@@ -1698,7 +1698,7 @@ qla24xx_start_scsi(srb_t *sp)
sp->flags |= SRB_DMA_VALID; sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */ /* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index); wrt_reg_dword(req->req_q_in, req->ring_index);
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS; return QLA_SUCCESS;
@@ -1822,7 +1822,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
tot_dsds += nseg; tot_dsds += nseg;
if (req->cnt < (req_cnt + 2)) { if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
RD_REG_DWORD_RELAXED(req->req_q_out); rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt) if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index; req->cnt = cnt - req->ring_index;
else else
@@ -1881,7 +1881,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
req->ring_ptr++; req->ring_ptr++;
/* Set chip new ring index. */ /* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index); wrt_reg_dword(req->req_q_in, req->ring_index);
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1957,7 +1957,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) { if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
RD_REG_DWORD_RELAXED(req->req_q_out); rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt) if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index; req->cnt = cnt - req->ring_index;
else else
@@ -2018,7 +2018,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
sp->flags |= SRB_DMA_VALID; sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */ /* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index); wrt_reg_dword(req->req_q_in, req->ring_index);
spin_unlock_irqrestore(&qpair->qp_lock, flags); spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_SUCCESS; return QLA_SUCCESS;
@@ -2157,7 +2157,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
tot_dsds += nseg; tot_dsds += nseg;
if (req->cnt < (req_cnt + 2)) { if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
RD_REG_DWORD_RELAXED(req->req_q_out); rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt) if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index; req->cnt = cnt - req->ring_index;
else else
@@ -2214,7 +2214,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
req->ring_ptr++; req->ring_ptr++;
/* Set chip new ring index. */ /* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index); wrt_reg_dword(req->req_q_in, req->ring_index);
/* Manage unprocessed RIO/ZIO commands in response queue. */ /* Manage unprocessed RIO/ZIO commands in response queue. */
if (vha->flags.process_response_queue && if (vha->flags.process_response_queue &&
@@ -2266,13 +2266,13 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
cnt = *req->out_ptr; cnt = *req->out_ptr;
else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
IS_QLA28XX(ha)) IS_QLA28XX(ha))
cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out); cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
else if (IS_P3P_TYPE(ha)) else if (IS_P3P_TYPE(ha))
cnt = RD_REG_DWORD(&reg->isp82.req_q_out); cnt = rd_reg_dword(reg->isp82.req_q_out);
else if (IS_FWI2_CAPABLE(ha)) else if (IS_FWI2_CAPABLE(ha))
cnt = RD_REG_DWORD(&reg->isp24.req_q_out); cnt = rd_reg_dword(&reg->isp24.req_q_out);
else if (IS_QLAFX00(ha)) else if (IS_QLAFX00(ha))
cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out); cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
else else
cnt = qla2x00_debounce_register( cnt = qla2x00_debounce_register(
ISP_REQ_Q_OUT(ha, &reg->isp)); ISP_REQ_Q_OUT(ha, &reg->isp));
@@ -2305,8 +2305,8 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
pkt = req->ring_ptr; pkt = req->ring_ptr;
memset(pkt, 0, REQUEST_ENTRY_SIZE); memset(pkt, 0, REQUEST_ENTRY_SIZE);
if (IS_QLAFX00(ha)) { if (IS_QLAFX00(ha)) {
WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt); wrt_reg_byte((void __iomem *)&pkt->entry_count, req_cnt);
WRT_REG_WORD((void __iomem *)&pkt->handle, handle); wrt_reg_word((void __iomem *)&pkt->handle, handle);
} else { } else {
pkt->entry_count = req_cnt; pkt->entry_count = req_cnt;
pkt->handle = handle; pkt->handle = handle;
@@ -2344,9 +2344,10 @@ qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) { if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
logio->control_flags |= LCF_NVME_PRLI; logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
if (sp->vha->flags.nvme_first_burst) if (sp->vha->flags.nvme_first_burst)
logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST; logio->io_parameter[0] =
cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
} }
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
@@ -2680,7 +2681,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->entry_status = 0; els_iocb->entry_status = 0;
els_iocb->handle = sp->handle; els_iocb->handle = sp->handle;
els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
els_iocb->tx_dsd_count = 1; els_iocb->tx_dsd_count = cpu_to_le16(1);
els_iocb->vp_index = vha->vp_idx; els_iocb->vp_index = vha->vp_idx;
els_iocb->sof_type = EST_SOFI3; els_iocb->sof_type = EST_SOFI3;
els_iocb->rx_dsd_count = 0; els_iocb->rx_dsd_count = 0;
@@ -2700,7 +2701,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
cpu_to_le32(sizeof(struct els_plogi_payload)); cpu_to_le32(sizeof(struct els_plogi_payload));
put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma, put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
&els_iocb->tx_address); &els_iocb->tx_address);
els_iocb->rx_dsd_count = 1; els_iocb->rx_dsd_count = cpu_to_le16(1);
els_iocb->rx_byte_count = els_iocb->rx_len = els_iocb->rx_byte_count = els_iocb->rx_len =
cpu_to_le32(sizeof(struct els_plogi_payload)); cpu_to_le32(sizeof(struct els_plogi_payload));
put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma, put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
@@ -2712,7 +2713,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
(uint8_t *)els_iocb, (uint8_t *)els_iocb,
sizeof(*els_iocb)); sizeof(*els_iocb));
} else { } else {
els_iocb->control_flags = 1 << 13; els_iocb->control_flags = cpu_to_le16(1 << 13);
els_iocb->tx_byte_count = els_iocb->tx_byte_count =
cpu_to_le32(sizeof(struct els_logo_payload)); cpu_to_le32(sizeof(struct els_logo_payload));
put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma, put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
@@ -2787,7 +2788,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
struct qla_work_evt *e; struct qla_work_evt *e;
struct fc_port *conflict_fcport; struct fc_port *conflict_fcport;
port_id_t cid; /* conflict Nport id */ port_id_t cid; /* conflict Nport id */
u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status; const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
u16 lid; u16 lid;
ql_dbg(ql_dbg_disc, vha, 0x3072, ql_dbg(ql_dbg_disc, vha, 0x3072,
@@ -2800,7 +2801,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
if (sp->flags & SRB_WAKEUP_ON_COMP) if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&lio->u.els_plogi.comp); complete(&lio->u.els_plogi.comp);
else { else {
switch (fw_status[0]) { switch (le32_to_cpu(fw_status[0])) {
case CS_DATA_UNDERRUN: case CS_DATA_UNDERRUN:
case CS_COMPLETE: case CS_COMPLETE:
memset(&ea, 0, sizeof(ea)); memset(&ea, 0, sizeof(ea));
@@ -2810,9 +2811,9 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
break; break;
case CS_IOCB_ERROR: case CS_IOCB_ERROR:
switch (fw_status[1]) { switch (le32_to_cpu(fw_status[1])) {
case LSC_SCODE_PORTID_USED: case LSC_SCODE_PORTID_USED:
lid = fw_status[2] & 0xffff; lid = le32_to_cpu(fw_status[2]) & 0xffff;
qlt_find_sess_invalidate_other(vha, qlt_find_sess_invalidate_other(vha,
wwn_to_u64(fcport->port_name), wwn_to_u64(fcport->port_name),
fcport->d_id, lid, &conflict_fcport); fcport->d_id, lid, &conflict_fcport);
@@ -2846,9 +2847,11 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
break; break;
case LSC_SCODE_NPORT_USED: case LSC_SCODE_NPORT_USED:
cid.b.domain = (fw_status[2] >> 16) & 0xff; cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
cid.b.area = (fw_status[2] >> 8) & 0xff; & 0xff;
cid.b.al_pa = fw_status[2] & 0xff; cid.b.area = (le32_to_cpu(fw_status[2]) >> 8)
& 0xff;
cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff;
cid.b.rsvd_1 = 0; cid.b.rsvd_1 = 0;
ql_dbg(ql_dbg_disc, vha, 0x20ec, ql_dbg(ql_dbg_disc, vha, 0x20ec,
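The NPORT_USED branch shows the conversion discipline for multi-field words: fw_status[2] is now __le32, so it is converted once per extraction and the 24-bit D_ID is split in CPU order. A hypothetical helper capturing the same unpacking (using the driver's port_id_t fields as seen above):

static void unpack_cid(__le32 fw_word, port_id_t *cid)
{
	u32 v = le32_to_cpu(fw_word);

	cid->b.domain = (v >> 16) & 0xff;
	cid->b.area   = (v >> 8) & 0xff;
	cid->b.al_pa  = v & 0xff;
	cid->b.rsvd_1 = 0;
}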
@@ -3022,7 +3025,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->sys_define = 0; els_iocb->sys_define = 0;
els_iocb->entry_status = 0; els_iocb->entry_status = 0;
els_iocb->handle = sp->handle; els_iocb->handle = sp->handle;
els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
els_iocb->vp_index = sp->vha->vp_idx; els_iocb->vp_index = sp->vha->vp_idx;
els_iocb->sof_type = EST_SOFI3; els_iocb->sof_type = EST_SOFI3;
@@ -3216,7 +3219,7 @@ qla82xx_start_scsi(srb_t *sp)
uint16_t tot_dsds; uint16_t tot_dsds;
struct device_reg_82xx __iomem *reg; struct device_reg_82xx __iomem *reg;
uint32_t dbval; uint32_t dbval;
uint32_t *fcp_dl; __be32 *fcp_dl;
uint8_t additional_cdb_len; uint8_t additional_cdb_len;
struct ct6_dsd *ctx; struct ct6_dsd *ctx;
struct scsi_qla_host *vha = sp->vha; struct scsi_qla_host *vha = sp->vha;
@@ -3310,7 +3313,7 @@ sufficient_dsds:
req_cnt = 1; req_cnt = 1;
if (req->cnt < (req_cnt + 2)) { if (req->cnt < (req_cnt + 2)) {
cnt = (uint16_t)RD_REG_DWORD_RELAXED( cnt = (uint16_t)rd_reg_dword_relaxed(
&reg->req_q_out[0]); &reg->req_q_out[0]);
if (req->ring_index < cnt) if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index; req->cnt = cnt - req->ring_index;
@@ -3398,7 +3401,7 @@ sufficient_dsds:
memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
additional_cdb_len); additional_cdb_len);
*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
@@ -3419,7 +3422,7 @@ sufficient_dsds:
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) { if (req->cnt < (req_cnt + 2)) {
cnt = (uint16_t)RD_REG_DWORD_RELAXED( cnt = (uint16_t)rd_reg_dword_relaxed(
&reg->req_q_out[0]); &reg->req_q_out[0]);
if (req->ring_index < cnt) if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index; req->cnt = cnt - req->ring_index;
@@ -3495,10 +3498,10 @@ sufficient_dsds:
if (ql2xdbwr) if (ql2xdbwr)
qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval); qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
else { else {
WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb(); wmb();
while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb(); wmb();
} }
} }
@@ -3536,7 +3539,7 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE; abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1; abt_iocb->entry_count = 1;
abt_iocb->handle = cpu_to_le32(make_handle(req->id, sp->handle)); abt_iocb->handle = make_handle(req->id, sp->handle);
if (sp->fcport) { if (sp->fcport) {
abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -3544,10 +3547,10 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
} }
abt_iocb->handle_to_abort = abt_iocb->handle_to_abort =
cpu_to_le32(make_handle(aio->u.abt.req_que_no, make_handle(le16_to_cpu(aio->u.abt.req_que_no),
aio->u.abt.cmd_hndl)); aio->u.abt.cmd_hndl);
abt_iocb->vp_index = vha->vp_idx; abt_iocb->vp_index = vha->vp_idx;
abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no); abt_iocb->req_que_no = aio->u.abt.req_que_no;
/* Send the command to the firmware */ /* Send the command to the firmware */
wmb(); wmb();
} }
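make_handle(), now used without an extra cpu_to_le32() wrap, simply packs the request-queue number and the ring handle into one 32-bit cookie, approximately:

static inline u32 make_handle(u16 que_id, u16 handle)
{
	return ((u32)que_id << 16) | handle;
}

The surrounding changes drop the conversion presumably because the handle is an opaque cookie the firmware echoes back verbatim rather than a value it interprets.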
@@ -3562,7 +3565,7 @@ qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb)); sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
for (i = 0; i < sz; i++) for (i = 0; i < sz; i++)
mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]); mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
} }
static void static void
@@ -3586,7 +3589,7 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp,
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
nack->u.isp24.flags = ntfy->u.isp24.flags & nack->u.isp24.flags = ntfy->u.isp24.flags &
cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
} }
nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
nack->u.isp24.status = ntfy->u.isp24.status; nack->u.isp24.status = ntfy->u.isp24.status;
@@ -3604,32 +3607,29 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp,
/* /*
* Build NVME LS request * Build NVME LS request
*/ */
static int static void
qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt) qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
{ {
struct srb_iocb *nvme; struct srb_iocb *nvme;
int rval = QLA_SUCCESS;
nvme = &sp->u.iocb_cmd; nvme = &sp->u.iocb_cmd;
cmd_pkt->entry_type = PT_LS4_REQUEST; cmd_pkt->entry_type = PT_LS4_REQUEST;
cmd_pkt->entry_count = 1; cmd_pkt->entry_count = 1;
cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT; cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec); cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
cmd_pkt->vp_index = sp->fcport->vha->vp_idx; cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
cmd_pkt->tx_dseg_count = 1; cmd_pkt->tx_dseg_count = cpu_to_le16(1);
cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len; cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len; cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address); put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
cmd_pkt->rx_dseg_count = 1; cmd_pkt->rx_dseg_count = cpu_to_le16(1);
cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len; cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len; cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address); put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
return rval;
} }
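qla_nvme_ls() now returns void, since it cannot fail, and every PT_LS4 field gets an explicit conversion. The DSD fill pattern it uses, shared by the SCSI paths, is worth isolating (sketch; the struct name is illustrative):

struct dsd64_example {
	__le64 address;
	__le32 length;
};

static void fill_dsd(struct dsd64_example *dsd, dma_addr_t dma, u32 len)
{
	put_unaligned_le64(dma, &dsd->address);	/* 64-bit DMA address */
	dsd->length = cpu_to_le32(len);
}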
static void static void
@@ -3894,7 +3894,7 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Check for room on request queue. */ /* Check for room on request queue. */
if (req->cnt < req_cnt + 2) { if (req->cnt < req_cnt + 2) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
RD_REG_DWORD_RELAXED(req->req_q_out); rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt) if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index; req->cnt = cnt - req->ring_index;
else else


@@ -89,9 +89,9 @@ qla24xx_process_abts(struct scsi_qla_host *vha, void *pkt)
/* terminate exchange */ /* terminate exchange */
rsp_els->entry_type = ELS_IOCB_TYPE; rsp_els->entry_type = ELS_IOCB_TYPE;
rsp_els->entry_count = 1; rsp_els->entry_count = 1;
rsp_els->nport_handle = ~0; rsp_els->nport_handle = cpu_to_le16(~0);
rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort; rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
rsp_els->control_flags = EPD_RX_XCHG; rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
ql_dbg(ql_dbg_init, vha, 0x0283, ql_dbg(ql_dbg_init, vha, 0x0283,
"Sending ELS Response to terminate exchange %#x...\n", "Sending ELS Response to terminate exchange %#x...\n",
abts->rx_xch_addr_to_abort); abts->rx_xch_addr_to_abort);
@@ -141,7 +141,7 @@ qla24xx_process_abts(struct scsi_qla_host *vha, void *pkt)
abts_rsp->ox_id = abts->ox_id; abts_rsp->ox_id = abts->ox_id;
abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id; abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id; abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
abts_rsp->payload.ba_acc.high_seq_cnt = ~0; abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort; abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
ql_dbg(ql_dbg_init, vha, 0x028b, ql_dbg(ql_dbg_init, vha, 0x028b,
"Sending BA ACC response to ABTS %#x...\n", "Sending BA ACC response to ABTS %#x...\n",
@@ -204,7 +204,7 @@ qla2100_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags); spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev); vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) { for (iter = 50; iter--; ) {
hccr = RD_REG_WORD(&reg->hccr); hccr = rd_reg_word(&reg->hccr);
if (qla2x00_check_reg16_for_disconnect(vha, hccr)) if (qla2x00_check_reg16_for_disconnect(vha, hccr))
break; break;
if (hccr & HCCR_RISC_PAUSE) { if (hccr & HCCR_RISC_PAUSE) {
@@ -216,18 +216,18 @@ qla2100_intr_handler(int irq, void *dev_id)
* bit to be cleared. Schedule a big hammer to get * bit to be cleared. Schedule a big hammer to get
* out of the RISC PAUSED state. * out of the RISC PAUSED state.
*/ */
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
RD_REG_WORD(&reg->hccr); rd_reg_word(&reg->hccr);
ha->isp_ops->fw_dump(vha, 1); ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break; break;
} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0) } else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
break; break;
if (RD_REG_WORD(&reg->semaphore) & BIT_0) { if (rd_reg_word(&reg->semaphore) & BIT_0) {
WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
RD_REG_WORD(&reg->hccr); rd_reg_word(&reg->hccr);
/* Get mailbox data. */ /* Get mailbox data. */
mb[0] = RD_MAILBOX_REG(ha, reg, 0); mb[0] = RD_MAILBOX_REG(ha, reg, 0);
@@ -246,13 +246,13 @@ qla2100_intr_handler(int irq, void *dev_id)
mb[0]); mb[0]);
} }
/* Release mailbox registers. */ /* Release mailbox registers. */
WRT_REG_WORD(&reg->semaphore, 0); wrt_reg_word(&reg->semaphore, 0);
RD_REG_WORD(&reg->semaphore); rd_reg_word(&reg->semaphore);
} else { } else {
qla2x00_process_response_queue(rsp); qla2x00_process_response_queue(rsp);
WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
RD_REG_WORD(&reg->hccr); rd_reg_word(&reg->hccr);
} }
} }
qla2x00_handle_mbx_completion(ha, status); qla2x00_handle_mbx_completion(ha, status);
@@ -324,14 +324,14 @@ qla2300_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags); spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev); vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) { for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->u.isp2300.host_status); stat = rd_reg_dword(&reg->u.isp2300.host_status);
if (qla2x00_check_reg32_for_disconnect(vha, stat)) if (qla2x00_check_reg32_for_disconnect(vha, stat))
break; break;
if (stat & HSR_RISC_PAUSED) { if (stat & HSR_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev))) if (unlikely(pci_channel_offline(ha->pdev)))
break; break;
hccr = RD_REG_WORD(&reg->hccr); hccr = rd_reg_word(&reg->hccr);
if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
ql_log(ql_log_warn, vha, 0x5026, ql_log(ql_log_warn, vha, 0x5026,
@@ -347,10 +347,10 @@ qla2300_intr_handler(int irq, void *dev_id)
* interrupt bit to be cleared. Schedule a big * interrupt bit to be cleared. Schedule a big
* hammer to get out of the RISC PAUSED state. * hammer to get out of the RISC PAUSED state.
*/ */
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
RD_REG_WORD(&reg->hccr); rd_reg_word(&reg->hccr);
ha->isp_ops->fw_dump(vha, 1); ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break; break;
} else if ((stat & HSR_RISC_INT) == 0) } else if ((stat & HSR_RISC_INT) == 0)
@@ -365,7 +365,7 @@ qla2300_intr_handler(int irq, void *dev_id)
status |= MBX_INTERRUPT; status |= MBX_INTERRUPT;
/* Release mailbox registers. */ /* Release mailbox registers. */
WRT_REG_WORD(&reg->semaphore, 0); wrt_reg_word(&reg->semaphore, 0);
break; break;
case 0x12: case 0x12:
mb[0] = MSW(stat); mb[0] = MSW(stat);
@@ -393,8 +393,8 @@ qla2300_intr_handler(int irq, void *dev_id)
"Unrecognized interrupt type (%d).\n", stat & 0xff); "Unrecognized interrupt type (%d).\n", stat & 0xff);
break; break;
} }
WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
RD_REG_WORD_RELAXED(&reg->hccr); rd_reg_word_relaxed(&reg->hccr);
} }
qla2x00_handle_mbx_completion(ha, status); qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -412,7 +412,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{ {
uint16_t cnt; uint16_t cnt;
uint32_t mboxes; uint32_t mboxes;
uint16_t __iomem *wptr; __le16 __iomem *wptr;
struct qla_hw_data *ha = vha->hw; struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -428,15 +428,15 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
ha->flags.mbox_int = 1; ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0; ha->mailbox_out[0] = mb0;
mboxes >>= 1; mboxes >>= 1;
wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1); wptr = MAILBOX_REG(ha, reg, 1);
for (cnt = 1; cnt < ha->mbx_count; cnt++) { for (cnt = 1; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8) if (IS_QLA2200(ha) && cnt == 8)
wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8); wptr = MAILBOX_REG(ha, reg, 8);
if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
else if (mboxes & BIT_0) else if (mboxes & BIT_0)
ha->mailbox_out[cnt] = RD_REG_WORD(wptr); ha->mailbox_out[cnt] = rd_reg_word(wptr);
wptr++; wptr++;
mboxes >>= 1; mboxes >>= 1;
@@ -451,19 +451,19 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
int rval; int rval;
struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82; struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
uint16_t __iomem *wptr; __le16 __iomem *wptr;
uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
/* Seed data -- mailbox1 -> mailbox7. */ /* Seed data -- mailbox1 -> mailbox7. */
if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
wptr = (uint16_t __iomem *)&reg24->mailbox1; wptr = &reg24->mailbox1;
else if (IS_QLA8044(vha->hw)) else if (IS_QLA8044(vha->hw))
wptr = (uint16_t __iomem *)&reg82->mailbox_out[1]; wptr = &reg82->mailbox_out[1];
else else
return; return;
for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
mb[cnt] = RD_REG_WORD(wptr); mb[cnt] = rd_reg_word(wptr);
ql_dbg(ql_dbg_async, vha, 0x5021, ql_dbg(ql_dbg_async, vha, 0x5021,
"Inter-Driver Communication %s -- " "Inter-Driver Communication %s -- "
@@ -756,6 +756,39 @@ qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
return NULL; return NULL;
} }
/* Shall be called only on supported adapters. */
static void
qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
struct qla_hw_data *ha = vha->hw;
bool reset_isp_needed = 0;
ql_log(ql_log_warn, vha, 0x02f0,
"MPI Heartbeat stop. MPI reset is%s needed. "
"MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
mb[0] & BIT_8 ? "" : " not",
mb[0], mb[1], mb[2], mb[3]);
if ((mb[1] & BIT_8) == 0)
return;
ql_log(ql_log_warn, vha, 0x02f1,
"MPI Heartbeat stop. FW dump needed\n");
if (ql2xfulldump_on_mpifail) {
ha->isp_ops->fw_dump(vha);
reset_isp_needed = 1;
}
ha->isp_ops->mpi_fw_dump(vha, 1);
if (reset_isp_needed) {
vha->hw->flags.fw_init_done = 0;
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
}
/** /**
* qla2x00_async_event() - Process asynchronous events. * qla2x00_async_event() - Process asynchronous events.
* @vha: SCSI driver HA context * @vha: SCSI driver HA context
@@ -785,7 +818,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
goto skip_rio; goto skip_rio;
switch (mb[0]) { switch (mb[0]) {
case MBA_SCSI_COMPLETION: case MBA_SCSI_COMPLETION:
handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); handles[0] = make_handle(mb[2], mb[1]);
handle_cnt = 1; handle_cnt = 1;
break; break;
case MBA_CMPLT_1_16BIT: case MBA_CMPLT_1_16BIT:
@@ -824,10 +857,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
mb[0] = MBA_SCSI_COMPLETION; mb[0] = MBA_SCSI_COMPLETION;
break; break;
case MBA_CMPLT_2_32BIT: case MBA_CMPLT_2_32BIT:
handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); handles[0] = make_handle(mb[2], mb[1]);
handles[1] = le32_to_cpu( handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) | RD_MAILBOX_REG(ha, reg, 6));
RD_MAILBOX_REG(ha, reg, 6));
handle_cnt = 2; handle_cnt = 2;
mb[0] = MBA_SCSI_COMPLETION; mb[0] = MBA_SCSI_COMPLETION;
break; break;
@@ -858,10 +890,10 @@ skip_rio:
IS_QLA27XX(ha) || IS_QLA28XX(ha)) { IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
u16 m[4]; u16 m[4];
m[0] = RD_REG_WORD(&reg24->mailbox4); m[0] = rd_reg_word(&reg24->mailbox4);
m[1] = RD_REG_WORD(&reg24->mailbox5); m[1] = rd_reg_word(&reg24->mailbox5);
m[2] = RD_REG_WORD(&reg24->mailbox6); m[2] = rd_reg_word(&reg24->mailbox6);
mbx = m[3] = RD_REG_WORD(&reg24->mailbox7); mbx = m[3] = rd_reg_word(&reg24->mailbox7);
ql_log(ql_log_warn, vha, 0x5003, ql_log(ql_log_warn, vha, 0x5003,
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n", "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
@@ -871,10 +903,10 @@ skip_rio:
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ", "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ",
mb[1], mb[2], mb[3]); mb[1], mb[2], mb[3]);
ha->fw_dump_mpi = if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
(IS_QLA27XX(ha) || IS_QLA28XX(ha)) && rd_reg_word(&reg24->mailbox7) & BIT_8)
RD_REG_WORD(&reg24->mailbox7) & BIT_8; ha->isp_ops->mpi_fw_dump(vha, 1);
ha->isp_ops->fw_dump(vha, 1); ha->isp_ops->fw_dump(vha);
ha->flags.fw_init_done = 0; ha->flags.fw_init_done = 0;
QLA_FW_STOPPED(ha); QLA_FW_STOPPED(ha);
@@ -979,8 +1011,8 @@ skip_rio:
ha->current_topology = 0; ha->current_topology = 0;
mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
? RD_REG_WORD(&reg24->mailbox4) : 0; ? rd_reg_word(&reg24->mailbox4) : 0;
mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4]) mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
: mbx; : mbx;
ql_log(ql_log_info, vha, 0x500b, ql_log(ql_log_info, vha, 0x500b,
"LOOP DOWN detected (%x %x %x %x).\n", "LOOP DOWN detected (%x %x %x %x).\n",
@@ -1347,7 +1379,7 @@ global_port_update:
break; break;
case MBA_IDC_NOTIFY: case MBA_IDC_NOTIFY:
if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
mb[4] = RD_REG_WORD(&reg24->mailbox4); mb[4] = rd_reg_word(&reg24->mailbox4);
if (((mb[2] & 0x7fff) == MBC_PORT_RESET || if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
(mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
(mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
@@ -1374,25 +1406,12 @@ global_port_update:
case MBA_IDC_AEN: case MBA_IDC_AEN:
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
ha->flags.fw_init_done = 0; qla27xx_handle_8200_aen(vha, mb);
ql_log(ql_log_warn, vha, 0xffff,
"MPI Heartbeat stop. Chip reset needed. MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
mb[0], mb[1], mb[2], mb[3]);
if ((mb[1] & BIT_8) ||
(mb[2] & BIT_8)) {
ql_log(ql_log_warn, vha, 0xd013,
"MPI Heartbeat stop. FW dump needed\n");
ha->fw_dump_mpi = 1;
ha->isp_ops->fw_dump(vha, 1);
}
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
} else if (IS_QLA83XX(ha)) { } else if (IS_QLA83XX(ha)) {
mb[4] = RD_REG_WORD(&reg24->mailbox4); mb[4] = rd_reg_word(&reg24->mailbox4);
mb[5] = RD_REG_WORD(&reg24->mailbox5); mb[5] = rd_reg_word(&reg24->mailbox5);
mb[6] = RD_REG_WORD(&reg24->mailbox6); mb[6] = rd_reg_word(&reg24->mailbox6);
mb[7] = RD_REG_WORD(&reg24->mailbox7); mb[7] = rd_reg_word(&reg24->mailbox7);
qla83xx_handle_8200_aen(vha, mb); qla83xx_handle_8200_aen(vha, mb);
} else { } else {
ql_dbg(ql_dbg_async, vha, 0x5052, ql_dbg(ql_dbg_async, vha, 0x5052,
@@ -1646,7 +1665,7 @@ qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb)); sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
for (i = 0; i < sz; i++) for (i = 0; i < sz; i++)
si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]); si->u.mbx.in_mb[i] = pkt->mb[i];
res = (si->u.mbx.in_mb[0] & MBS_MASK); res = (si->u.mbx.in_mb[0] & MBS_MASK);
@@ -1747,6 +1766,7 @@ static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
struct sts_entry_24xx *pkt, int iocb_type) struct sts_entry_24xx *pkt, int iocb_type)
{ {
struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
const char func[] = "ELS_CT_IOCB"; const char func[] = "ELS_CT_IOCB";
const char *type; const char *type;
srb_t *sp; srb_t *sp;
@@ -1796,23 +1816,22 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
} }
comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1); fw_status[1] = le32_to_cpu(ese->error_subcode_1);
fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2); fw_status[2] = le32_to_cpu(ese->error_subcode_2);
if (iocb_type == ELS_IOCB_TYPE) { if (iocb_type == ELS_IOCB_TYPE) {
els = &sp->u.iocb_cmd; els = &sp->u.iocb_cmd;
els->u.els_plogi.fw_status[0] = fw_status[0]; els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
els->u.els_plogi.fw_status[1] = fw_status[1]; els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
els->u.els_plogi.fw_status[2] = fw_status[2]; els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
els->u.els_plogi.comp_status = fw_status[0]; els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
if (comp_status == CS_COMPLETE) { if (comp_status == CS_COMPLETE) {
res = DID_OK << 16; res = DID_OK << 16;
} else { } else {
if (comp_status == CS_DATA_UNDERRUN) { if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16; res = DID_OK << 16;
els->u.els_plogi.len = els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
le16_to_cpu(((struct els_sts_entry_24xx *) ese->total_byte_count));
pkt)->total_byte_count);
} else { } else {
els->u.els_plogi.len = 0; els->u.els_plogi.len = 0;
res = DID_ERROR << 16; res = DID_ERROR << 16;
@@ -1821,8 +1840,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
ql_dbg(ql_dbg_user, vha, 0x503f, ql_dbg(ql_dbg_user, vha, 0x503f,
"ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n", "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
type, sp->handle, comp_status, fw_status[1], fw_status[2], type, sp->handle, comp_status, fw_status[1], fw_status[2],
le16_to_cpu(((struct els_sts_entry_24xx *) le32_to_cpu(ese->total_byte_count));
pkt)->total_byte_count));
goto els_ct_done; goto els_ct_done;
} }
@@ -1838,23 +1856,20 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
if (comp_status == CS_DATA_UNDERRUN) { if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16; res = DID_OK << 16;
bsg_reply->reply_payload_rcv_len = bsg_reply->reply_payload_rcv_len =
le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count); le32_to_cpu(ese->total_byte_count);
ql_dbg(ql_dbg_user, vha, 0x503f, ql_dbg(ql_dbg_user, vha, 0x503f,
"ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
type, sp->handle, comp_status, fw_status[1], fw_status[2], type, sp->handle, comp_status, fw_status[1], fw_status[2],
le16_to_cpu(((struct els_sts_entry_24xx *) le32_to_cpu(ese->total_byte_count));
pkt)->total_byte_count));
} else { } else {
ql_dbg(ql_dbg_user, vha, 0x5040, ql_dbg(ql_dbg_user, vha, 0x5040,
"ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x.\n", "error subcode 1=0x%x error subcode 2=0x%x.\n",
type, sp->handle, comp_status, type, sp->handle, comp_status,
le16_to_cpu(((struct els_sts_entry_24xx *) le32_to_cpu(ese->error_subcode_1),
pkt)->error_subcode_1), le32_to_cpu(ese->error_subcode_2));
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->error_subcode_2));
res = DID_ERROR << 16; res = DID_ERROR << 16;
bsg_reply->reply_payload_rcv_len = 0; bsg_reply->reply_payload_rcv_len = 0;
} }
@@ -2062,7 +2077,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
uint16_t state_flags; uint16_t state_flags;
struct nvmefc_fcp_req *fd; struct nvmefc_fcp_req *fd;
uint16_t ret = QLA_SUCCESS; uint16_t ret = QLA_SUCCESS;
uint16_t comp_status = le16_to_cpu(sts->comp_status); __le16 comp_status = sts->comp_status;
int logit = 0; int logit = 0;
iocb = &sp->u.iocb_cmd; iocb = &sp->u.iocb_cmd;
@@ -2093,7 +2108,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
} else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) == } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
(SF_FCP_RSP_DMA | SF_NVME_ERSP)) { (SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
/* Response already DMA'd to fd->rspaddr. */ /* Response already DMA'd to fd->rspaddr. */
iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len); iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
} else if ((state_flags & SF_FCP_RSP_DMA)) { } else if ((state_flags & SF_FCP_RSP_DMA)) {
/* /*
* Non-zero value in first 12 bytes of NVMe_RSP IU, treat this * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this
@@ -2110,8 +2125,8 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
inbuf = (uint32_t *)&sts->nvme_ersp_data; inbuf = (uint32_t *)&sts->nvme_ersp_data;
outbuf = (uint32_t *)fd->rspaddr; outbuf = (uint32_t *)fd->rspaddr;
iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len); iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
if (unlikely(iocb->u.nvme.rsp_pyld_len > if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
sizeof(struct nvme_fc_ersp_iu))) { sizeof(struct nvme_fc_ersp_iu))) {
if (ql_mask_match(ql_dbg_io)) { if (ql_mask_match(ql_dbg_io)) {
WARN_ONCE(1, "Unexpected response payload length %u.\n", WARN_ONCE(1, "Unexpected response payload length %u.\n",
@@ -2121,9 +2136,9 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
iocb->u.nvme.rsp_pyld_len); iocb->u.nvme.rsp_pyld_len);
} }
iocb->u.nvme.rsp_pyld_len = iocb->u.nvme.rsp_pyld_len =
sizeof(struct nvme_fc_ersp_iu); cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
} }
iter = iocb->u.nvme.rsp_pyld_len >> 2; iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
for (; iter; iter--) for (; iter; iter--)
*outbuf++ = swab32(*inbuf++); *outbuf++ = swab32(*inbuf++);
} }
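rsp_pyld_len is now kept in wire order (__le16) and converted only where an arithmetic value is needed, including the clamp against the ERSP IU size. A sketch of that clamp in isolation:

static __le16 clamp_ersp_len(__le16 len)
{
	if (le16_to_cpu(len) > sizeof(struct nvme_fc_ersp_iu))
		len = cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));

	return len;	/* still wire order; convert again at use */
}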
@@ -2138,7 +2153,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
"Dropped frame(s) detected (sent/rcvd=%u/%u).\n", "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
tgt_xfer_len, fd->transferred_length); tgt_xfer_len, fd->transferred_length);
logit = 1; logit = 1;
} else if (comp_status == CS_DATA_UNDERRUN) { } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
/* /*
* Do not log if this is just an underflow and there * Do not log if this is just an underflow and there
* is no data loss. * is no data loss.
@@ -2158,7 +2173,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
* If transport error then Failure (HBA rejects request) * If transport error then Failure (HBA rejects request)
* otherwise transport will handle. * otherwise transport will handle.
*/ */
switch (comp_status) { switch (le16_to_cpu(comp_status)) {
case CS_COMPLETE: case CS_COMPLETE:
break; break;
@@ -2300,7 +2315,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
} }
/* Adjust ring index */ /* Adjust ring index */
WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
} }
static inline void static inline void
@@ -2391,9 +2406,9 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
* For type 3: ref & app tag is all 'f's * For type 3: ref & app tag is all 'f's
* For type 0,1,2: app tag is all 'f's * For type 0,1,2: app tag is all 'f's
*/ */
if ((a_app_tag == T10_PI_APP_ESCAPE) && if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
(a_ref_tag == T10_PI_REF_ESCAPE))) { a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
uint32_t blocks_done, resid; uint32_t blocks_done, resid;
sector_t lba_s = scsi_get_lba(cmd); sector_t lba_s = scsi_get_lba(cmd);
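T10_PI_APP_ESCAPE and T10_PI_REF_ESCAPE are declared big-endian in <linux/t10-pi.h>, while a_app_tag/a_ref_tag were already pulled into CPU order, so the constants must be converted to match before comparing. Roughly:

static bool dif_tags_escaped(u16 app_tag, u32 ref_tag, bool dif_type3)
{
	return app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
	       (!dif_type3 || ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE));
}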
@@ -2751,6 +2766,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
sense_len = par_sense_len = rsp_info_len = resid_len = sense_len = par_sense_len = rsp_info_len = resid_len =
fw_resid_len = 0; fw_resid_len = 0;
if (IS_FWI2_CAPABLE(ha)) { if (IS_FWI2_CAPABLE(ha)) {
u16 sts24_retry_delay = le16_to_cpu(sts24->retry_delay);
if (scsi_status & SS_SENSE_LEN_VALID) if (scsi_status & SS_SENSE_LEN_VALID)
sense_len = le32_to_cpu(sts24->sense_len); sense_len = le32_to_cpu(sts24->sense_len);
if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
@@ -2765,11 +2782,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
ox_id = le16_to_cpu(sts24->ox_id); ox_id = le16_to_cpu(sts24->ox_id);
par_sense_len = sizeof(sts24->data); par_sense_len = sizeof(sts24->data);
/* Valid values of the retry delay timer are 0x1-0xffef */ /* Valid values of the retry delay timer are 0x1-0xffef */
if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) { if (sts24_retry_delay > 0 && sts24_retry_delay < 0xfff1) {
retry_delay = sts24->retry_delay & 0x3fff; retry_delay = sts24_retry_delay & 0x3fff;
ql_dbg(ql_dbg_io, sp->vha, 0x3033, ql_dbg(ql_dbg_io, sp->vha, 0x3033,
"%s: scope=%#x retry_delay=%#x\n", __func__, "%s: scope=%#x retry_delay=%#x\n", __func__,
sts24->retry_delay >> 14, retry_delay); sts24_retry_delay >> 14, retry_delay);
} }
} else { } else {
if (scsi_status & SS_SENSE_LEN_VALID) if (scsi_status & SS_SENSE_LEN_VALID)
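The retry-delay fix is the convert-once pattern: pull the __le16 field into a CPU-order local up front, then do all range checks and bit slicing on the local. In isolation:

static u16 parse_retry_delay(__le16 wire)
{
	u16 v = le16_to_cpu(wire);	/* convert once */

	/* valid encodings are 0x1-0xffef; top 2 bits carry the scope */
	return (v > 0 && v < 0xfff1) ? (v & 0x3fff) : 0;
}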
@@ -3143,7 +3160,7 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{ {
uint16_t cnt; uint16_t cnt;
uint32_t mboxes; uint32_t mboxes;
uint16_t __iomem *wptr; __le16 __iomem *wptr;
struct qla_hw_data *ha = vha->hw; struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -3159,11 +3176,11 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
ha->flags.mbox_int = 1; ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0; ha->mailbox_out[0] = mb0;
mboxes >>= 1; mboxes >>= 1;
wptr = (uint16_t __iomem *)&reg->mailbox1; wptr = &reg->mailbox1;
for (cnt = 1; cnt < ha->mbx_count; cnt++) { for (cnt = 1; cnt < ha->mbx_count; cnt++) {
if (mboxes & BIT_0) if (mboxes & BIT_0)
ha->mailbox_out[cnt] = RD_REG_WORD(wptr); ha->mailbox_out[cnt] = rd_reg_word(wptr);
mboxes >>= 1; mboxes >>= 1;
wptr++; wptr++;
@@ -3183,7 +3200,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
return; return;
abt = &sp->u.iocb_cmd; abt = &sp->u.iocb_cmd;
abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle); abt->u.abt.comp_status = pkt->nport_handle;
sp->done(sp, 0); sp->done(sp, 0);
} }
@@ -3340,9 +3357,9 @@ process_err:
if (IS_P3P_TYPE(ha)) { if (IS_P3P_TYPE(ha)) {
struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index); wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
} else { } else {
WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
} }
} }
@@ -3359,13 +3376,13 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
return; return;
rval = QLA_SUCCESS; rval = QLA_SUCCESS;
WRT_REG_DWORD(&reg->iobase_addr, 0x7C00); wrt_reg_dword(&reg->iobase_addr, 0x7C00);
RD_REG_DWORD(&reg->iobase_addr); rd_reg_dword(&reg->iobase_addr);
WRT_REG_DWORD(&reg->iobase_window, 0x0001); wrt_reg_dword(&reg->iobase_window, 0x0001);
for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 && for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
rval == QLA_SUCCESS; cnt--) { rval == QLA_SUCCESS; cnt--) {
if (cnt) { if (cnt) {
WRT_REG_DWORD(&reg->iobase_window, 0x0001); wrt_reg_dword(&reg->iobase_window, 0x0001);
udelay(10); udelay(10);
} else } else
rval = QLA_FUNCTION_TIMEOUT; rval = QLA_FUNCTION_TIMEOUT;
@@ -3374,11 +3391,11 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
goto next_test; goto next_test;
rval = QLA_SUCCESS; rval = QLA_SUCCESS;
WRT_REG_DWORD(&reg->iobase_window, 0x0003); wrt_reg_dword(&reg->iobase_window, 0x0003);
for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 && for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
rval == QLA_SUCCESS; cnt--) { rval == QLA_SUCCESS; cnt--) {
if (cnt) { if (cnt) {
WRT_REG_DWORD(&reg->iobase_window, 0x0003); wrt_reg_dword(&reg->iobase_window, 0x0003);
udelay(10); udelay(10);
} else } else
rval = QLA_FUNCTION_TIMEOUT; rval = QLA_FUNCTION_TIMEOUT;
@@ -3387,13 +3404,13 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
goto done; goto done;
next_test: next_test:
if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3) if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
ql_log(ql_log_info, vha, 0x504c, ql_log(ql_log_info, vha, 0x504c,
"Additional code -- 0x55AA.\n"); "Additional code -- 0x55AA.\n");
done: done:
WRT_REG_DWORD(&reg->iobase_window, 0x0000); wrt_reg_dword(&reg->iobase_window, 0x0000);
RD_REG_DWORD(&reg->iobase_window); rd_reg_dword(&reg->iobase_window);
} }
/** /**
@@ -3437,14 +3454,14 @@ qla24xx_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags); spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev); vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) { for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->host_status); stat = rd_reg_dword(&reg->host_status);
if (qla2x00_check_reg32_for_disconnect(vha, stat)) if (qla2x00_check_reg32_for_disconnect(vha, stat))
break; break;
if (stat & HSRX_RISC_PAUSED) { if (stat & HSRX_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev))) if (unlikely(pci_channel_offline(ha->pdev)))
break; break;
hccr = RD_REG_DWORD(&reg->hccr); hccr = rd_reg_dword(&reg->hccr);
ql_log(ql_log_warn, vha, 0x504b, ql_log(ql_log_warn, vha, 0x504b,
"RISC paused -- HCCR=%x, Dumping firmware.\n", "RISC paused -- HCCR=%x, Dumping firmware.\n",
@@ -3452,7 +3469,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
qla2xxx_check_risc_status(vha); qla2xxx_check_risc_status(vha);
ha->isp_ops->fw_dump(vha, 1); ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break; break;
} else if ((stat & HSRX_RISC_INT) == 0) } else if ((stat & HSRX_RISC_INT) == 0)
@@ -3469,9 +3486,9 @@ qla24xx_intr_handler(int irq, void *dev_id)
break; break;
case INTR_ASYNC_EVENT: case INTR_ASYNC_EVENT:
mb[0] = MSW(stat); mb[0] = MSW(stat);
mb[1] = RD_REG_WORD(&reg->mailbox1); mb[1] = rd_reg_word(&reg->mailbox1);
mb[2] = RD_REG_WORD(&reg->mailbox2); mb[2] = rd_reg_word(&reg->mailbox2);
mb[3] = RD_REG_WORD(&reg->mailbox3); mb[3] = rd_reg_word(&reg->mailbox3);
qla2x00_async_event(vha, rsp, mb); qla2x00_async_event(vha, rsp, mb);
break; break;
case INTR_RSP_QUE_UPDATE: case INTR_RSP_QUE_UPDATE:
@@ -3491,8 +3508,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
"Unrecognized interrupt type (%d).\n", stat * 0xff); "Unrecognized interrupt type (%d).\n", stat * 0xff);
break; break;
} }
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
RD_REG_DWORD_RELAXED(&reg->hccr); rd_reg_dword_relaxed(&reg->hccr);
if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
ndelay(3500); ndelay(3500);
} }
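Two related API changes recur from here on: the fw_dump() method loses its hardware_locked flag, and former fw_dump(vha, 0) callers go through a new qla2xxx_dump_fw() wrapper, which presumably takes the hardware lock itself now that the flag no longer selects the behaviour. The call-site pattern, sketched:

	/* interrupt path: hardware_lock is already held, call the method directly */
	ha->isp_ops->fw_dump(vha);

	/* process context: the wrapper is assumed to handle locking internally */
	qla2xxx_dump_fw(vha);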
@@ -3531,8 +3548,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev); vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, rsp); qla24xx_process_response_queue(vha, rsp);
if (!ha->flags.disable_msix_handshake) { if (!ha->flags.disable_msix_handshake) {
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
RD_REG_DWORD_RELAXED(&reg->hccr); rd_reg_dword_relaxed(&reg->hccr);
} }
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3566,14 +3583,14 @@ qla24xx_msix_default(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags); spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev); vha = pci_get_drvdata(ha->pdev);
do { do {
stat = RD_REG_DWORD(&reg->host_status); stat = rd_reg_dword(&reg->host_status);
if (qla2x00_check_reg32_for_disconnect(vha, stat)) if (qla2x00_check_reg32_for_disconnect(vha, stat))
break; break;
if (stat & HSRX_RISC_PAUSED) { if (stat & HSRX_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev))) if (unlikely(pci_channel_offline(ha->pdev)))
break; break;
hccr = RD_REG_DWORD(&reg->hccr); hccr = rd_reg_dword(&reg->hccr);
ql_log(ql_log_info, vha, 0x5050, ql_log(ql_log_info, vha, 0x5050,
"RISC paused -- HCCR=%x, Dumping firmware.\n", "RISC paused -- HCCR=%x, Dumping firmware.\n",
@@ -3581,7 +3598,7 @@ qla24xx_msix_default(int irq, void *dev_id)
qla2xxx_check_risc_status(vha); qla2xxx_check_risc_status(vha);
ha->isp_ops->fw_dump(vha, 1); ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break; break;
} else if ((stat & HSRX_RISC_INT) == 0) } else if ((stat & HSRX_RISC_INT) == 0)
@@ -3598,9 +3615,9 @@ qla24xx_msix_default(int irq, void *dev_id)
break; break;
case INTR_ASYNC_EVENT: case INTR_ASYNC_EVENT:
mb[0] = MSW(stat); mb[0] = MSW(stat);
mb[1] = RD_REG_WORD(&reg->mailbox1); mb[1] = rd_reg_word(&reg->mailbox1);
mb[2] = RD_REG_WORD(&reg->mailbox2); mb[2] = rd_reg_word(&reg->mailbox2);
mb[3] = RD_REG_WORD(&reg->mailbox3); mb[3] = rd_reg_word(&reg->mailbox3);
qla2x00_async_event(vha, rsp, mb); qla2x00_async_event(vha, rsp, mb);
break; break;
case INTR_RSP_QUE_UPDATE: case INTR_RSP_QUE_UPDATE:
@@ -3620,7 +3637,7 @@ qla24xx_msix_default(int irq, void *dev_id)
"Unrecognized interrupt type (%d).\n", stat & 0xff); "Unrecognized interrupt type (%d).\n", stat & 0xff);
break; break;
} }
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
} while (0); } while (0);
qla2x00_handle_mbx_completion(ha, status); qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3671,7 +3688,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
reg = &ha->iobase->isp24; reg = &ha->iobase->isp24;
spin_lock_irqsave(&ha->hardware_lock, flags); spin_lock_irqsave(&ha->hardware_lock, flags);
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
queue_work(ha->wq, &qpair->q_work); queue_work(ha->wq, &qpair->q_work);
@@ -3932,7 +3949,7 @@ clear_risc_ints:
goto fail; goto fail;
spin_lock_irq(&ha->hardware_lock); spin_lock_irq(&ha->hardware_lock);
WRT_REG_WORD(&reg->isp.semaphore, 0); wrt_reg_word(&reg->isp.semaphore, 0);
spin_unlock_irq(&ha->hardware_lock); spin_unlock_irq(&ha->hardware_lock);
fail: fail:
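The RD_REG_*/WRT_REG_* to rd_reg_*/wrt_reg_* rename that dominates this file is not purely cosmetic: the new helpers take __le16/__le32 __iomem pointers, which is why wptr above becomes __le16 __iomem * and the casts disappear. A minimal sketch of the shape such accessors plausibly take (the exact qla_def.h definitions may differ; readw()/writew() are the kernel's little-endian MMIO primitives, so no explicit byte swap is needed here):

	#include <linux/io.h>
	#include <linux/types.h>

	static inline u16 rd_reg_word(const volatile __le16 __iomem *addr)
	{
		return readw(addr);	/* readw() returns CPU-order data from an LE register */
	}

	static inline void wrt_reg_word(volatile __le16 __iomem *addr, u16 data)
	{
		writew(data, addr);	/* writew() stores CPU-order data as little endian */
	}

With register pointers typed __le16 __iomem *, a sparse run (make C=1) can now flag code that mixes register and plain-memory pointers or forgets a conversion.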

View File

@@ -106,7 +106,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
uint8_t io_lock_on; uint8_t io_lock_on;
uint16_t command = 0; uint16_t command = 0;
uint16_t *iptr; uint16_t *iptr;
uint16_t __iomem *optr; __le16 __iomem *optr;
uint32_t cnt; uint32_t cnt;
uint32_t mboxes; uint32_t mboxes;
unsigned long wait_time; unsigned long wait_time;
@@ -208,11 +208,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Load mailbox registers. */ /* Load mailbox registers. */
if (IS_P3P_TYPE(ha)) if (IS_P3P_TYPE(ha))
optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0]; optr = &reg->isp82.mailbox_in[0];
else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
optr = (uint16_t __iomem *)&reg->isp24.mailbox0; optr = &reg->isp24.mailbox0;
else else
optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0); optr = MAILBOX_REG(ha, &reg->isp, 0);
iptr = mcp->mb; iptr = mcp->mb;
command = mcp->mb[0]; command = mcp->mb[0];
@@ -222,12 +222,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"Mailbox registers (OUT):\n"); "Mailbox registers (OUT):\n");
for (cnt = 0; cnt < ha->mbx_count; cnt++) { for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8) if (IS_QLA2200(ha) && cnt == 8)
optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8); optr = MAILBOX_REG(ha, &reg->isp, 8);
if (mboxes & BIT_0) { if (mboxes & BIT_0) {
ql_dbg(ql_dbg_mbx, vha, 0x1112, ql_dbg(ql_dbg_mbx, vha, 0x1112,
"mbox[%d]<-0x%04x\n", cnt, *iptr); "mbox[%d]<-0x%04x\n", cnt, *iptr);
WRT_REG_WORD(optr, *iptr); wrt_reg_word(optr, *iptr);
} }
mboxes >>= 1; mboxes >>= 1;
@@ -253,11 +252,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
if (IS_P3P_TYPE(ha)) if (IS_P3P_TYPE(ha))
WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING); wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
else if (IS_FWI2_CAPABLE(ha)) else if (IS_FWI2_CAPABLE(ha))
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT); wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else else
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_time = jiffies; wait_time = jiffies;
@@ -300,7 +299,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"Cmd=%x Polling Mode.\n", command); "Cmd=%x Polling Mode.\n", command);
if (IS_P3P_TYPE(ha)) { if (IS_P3P_TYPE(ha)) {
if (RD_REG_DWORD(&reg->isp82.hint) & if (rd_reg_dword(&reg->isp82.hint) &
HINT_MBX_INT_PENDING) { HINT_MBX_INT_PENDING) {
ha->flags.mbox_busy = 0; ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock, spin_unlock_irqrestore(&ha->hardware_lock,
@@ -311,11 +310,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_FUNCTION_TIMEOUT; rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit; goto premature_exit;
} }
WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING); wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
} else if (IS_FWI2_CAPABLE(ha)) } else if (IS_FWI2_CAPABLE(ha))
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT); wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else else
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
@@ -413,14 +412,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
uint16_t w; uint16_t w;
if (IS_FWI2_CAPABLE(ha)) { if (IS_FWI2_CAPABLE(ha)) {
mb[0] = RD_REG_WORD(&reg->isp24.mailbox0); mb[0] = rd_reg_word(&reg->isp24.mailbox0);
mb[1] = RD_REG_WORD(&reg->isp24.mailbox1); mb[1] = rd_reg_word(&reg->isp24.mailbox1);
mb[2] = RD_REG_WORD(&reg->isp24.mailbox2); mb[2] = rd_reg_word(&reg->isp24.mailbox2);
mb[3] = RD_REG_WORD(&reg->isp24.mailbox3); mb[3] = rd_reg_word(&reg->isp24.mailbox3);
mb[7] = RD_REG_WORD(&reg->isp24.mailbox7); mb[7] = rd_reg_word(&reg->isp24.mailbox7);
ictrl = RD_REG_DWORD(&reg->isp24.ictrl); ictrl = rd_reg_dword(&reg->isp24.ictrl);
host_status = RD_REG_DWORD(&reg->isp24.host_status); host_status = rd_reg_dword(&reg->isp24.host_status);
hccr = RD_REG_DWORD(&reg->isp24.hccr); hccr = rd_reg_dword(&reg->isp24.hccr);
ql_log(ql_log_warn, vha, 0xd04c, ql_log(ql_log_warn, vha, 0xd04c,
"MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
@@ -430,7 +429,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
} else { } else {
mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0); mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
ictrl = RD_REG_WORD(&reg->isp.ictrl); ictrl = rd_reg_word(&reg->isp.ictrl);
ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
"MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
"mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]); "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
@@ -462,7 +461,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
* a dump * a dump
*/ */
if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
ha->isp_ops->fw_dump(vha, 0); qla2xxx_dump_fw(vha);
rval = QLA_FUNCTION_TIMEOUT; rval = QLA_FUNCTION_TIMEOUT;
} }
} }
@@ -573,15 +572,15 @@ mbx_done:
if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) { if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
ql_dbg(ql_dbg_mbx, vha, 0x1198, ql_dbg(ql_dbg_mbx, vha, 0x1198,
"host_status=%#x intr_ctrl=%#x intr_status=%#x\n", "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
RD_REG_DWORD(&reg->isp24.host_status), rd_reg_dword(&reg->isp24.host_status),
RD_REG_DWORD(&reg->isp24.ictrl), rd_reg_dword(&reg->isp24.ictrl),
RD_REG_DWORD(&reg->isp24.istatus)); rd_reg_dword(&reg->isp24.istatus));
} else { } else {
ql_dbg(ql_dbg_mbx, vha, 0x1206, ql_dbg(ql_dbg_mbx, vha, 0x1206,
"ctrl_status=%#x ictrl=%#x istatus=%#x\n", "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
RD_REG_WORD(&reg->isp.ctrl_status), rd_reg_word(&reg->isp.ctrl_status),
RD_REG_WORD(&reg->isp.ictrl), rd_reg_word(&reg->isp.ictrl),
RD_REG_WORD(&reg->isp.istatus)); rd_reg_word(&reg->isp.istatus));
} }
} else { } else {
ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__); ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
@@ -3038,7 +3037,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
int rval; int rval;
mbx_cmd_t mc; mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc; mbx_cmd_t *mcp = &mc;
uint32_t *iter = (void *)stats; uint32_t *iter = (uint32_t *)stats;
ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
struct qla_hw_data *ha = vha->hw; struct qla_hw_data *ha = vha->hw;
@@ -3097,7 +3096,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
int rval; int rval;
mbx_cmd_t mc; mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc; mbx_cmd_t *mcp = &mc;
uint32_t *iter = (void *)stats; uint32_t *iter = (uint32_t *)stats;
ushort dwords = sizeof(*stats)/sizeof(*iter); ushort dwords = sizeof(*stats)/sizeof(*iter);
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
@@ -3110,8 +3109,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
mc.mb[6] = MSW(MSD(stats_dma)); mc.mb[6] = MSW(MSD(stats_dma));
mc.mb[7] = LSW(MSD(stats_dma)); mc.mb[7] = LSW(MSD(stats_dma));
mc.mb[8] = dwords; mc.mb[8] = dwords;
mc.mb[9] = cpu_to_le16(vha->vp_idx); mc.mb[9] = vha->vp_idx;
mc.mb[10] = cpu_to_le16(options); mc.mb[10] = options;
rval = qla24xx_send_mb_cmd(vha, &mc); rval = qla24xx_send_mb_cmd(vha, &mc);
@@ -3204,7 +3203,7 @@ qla24xx_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx, vha, 0x1090, ql_dbg(ql_dbg_mbx, vha, 0x1090,
"Failed to complete IOCB -- completion status (%x).\n", "Failed to complete IOCB -- completion status (%x).\n",
le16_to_cpu(abt->nport_handle)); le16_to_cpu(abt->nport_handle));
if (abt->nport_handle == CS_IOCB_ERROR) if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
rval = QLA_FUNCTION_PARAMETER_ERROR; rval = QLA_FUNCTION_PARAMETER_ERROR;
else else
rval = QLA_FUNCTION_FAILED; rval = QLA_FUNCTION_FAILED;
@@ -4427,9 +4426,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
spin_lock_irqsave(&ha->hardware_lock, flags); spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(req->options & BIT_0)) { if (!(req->options & BIT_0)) {
WRT_REG_DWORD(req->req_q_in, 0); wrt_reg_dword(req->req_q_in, 0);
if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
WRT_REG_DWORD(req->req_q_out, 0); wrt_reg_dword(req->req_q_out, 0);
} }
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4498,9 +4497,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
spin_lock_irqsave(&ha->hardware_lock, flags); spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(rsp->options & BIT_0)) { if (!(rsp->options & BIT_0)) {
WRT_REG_DWORD(rsp->rsp_q_out, 0); wrt_reg_dword(rsp->rsp_q_out, 0);
if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
WRT_REG_DWORD(rsp->rsp_q_in, 0); wrt_reg_dword(rsp->rsp_q_in, 0);
} }
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4727,7 +4726,7 @@ qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
mbx_cmd_t *mcp = &mc; mbx_cmd_t *mcp = &mc;
int i; int i;
int len; int len;
uint16_t *str; __le16 *str;
struct qla_hw_data *ha = vha->hw; struct qla_hw_data *ha = vha->hw;
if (!IS_P3P_TYPE(ha)) if (!IS_P3P_TYPE(ha))
@@ -4736,14 +4735,14 @@ qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
"Entered %s.\n", __func__); "Entered %s.\n", __func__);
str = (void *)version; str = (__force __le16 *)version;
len = strlen(version); len = strlen(version);
mcp->mb[0] = MBC_SET_RNID_PARAMS; mcp->mb[0] = MBC_SET_RNID_PARAMS;
mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
mcp->out_mb = MBX_1|MBX_0; mcp->out_mb = MBX_1|MBX_0;
for (i = 4; i < 16 && len; i++, str++, len -= 2) { for (i = 4; i < 16 && len; i++, str++, len -= 2) {
mcp->mb[i] = cpu_to_le16p(str); mcp->mb[i] = le16_to_cpup(str);
mcp->out_mb |= 1<<i; mcp->out_mb |= 1<<i;
} }
for (; i < 16; i++) { for (; i < 16; i++) {
@@ -4861,7 +4860,7 @@ qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
"Done %s.\n", __func__); "Done %s.\n", __func__);
bp = (uint32_t *) buf; bp = (uint32_t *) buf;
for (i = 0; i < (bufsiz-4)/4; i++, bp++) for (i = 0; i < (bufsiz-4)/4; i++, bp++)
*bp = le32_to_cpu(*bp); *bp = le32_to_cpu((__force __le32)*bp);
} }
return rval; return rval;
@@ -5411,18 +5410,18 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
/* Write the MBC data to the registers */ /* Write the MBC data to the registers */
WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER); wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
WRT_REG_WORD(&reg->mailbox1, mb[0]); wrt_reg_word(&reg->mailbox1, mb[0]);
WRT_REG_WORD(&reg->mailbox2, mb[1]); wrt_reg_word(&reg->mailbox2, mb[1]);
WRT_REG_WORD(&reg->mailbox3, mb[2]); wrt_reg_word(&reg->mailbox3, mb[2]);
WRT_REG_WORD(&reg->mailbox4, mb[3]); wrt_reg_word(&reg->mailbox4, mb[3]);
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT); wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
/* Poll for MBC interrupt */ /* Poll for MBC interrupt */
for (timer = 6000000; timer; timer--) { for (timer = 6000000; timer; timer--) {
/* Check for pending interrupts. */ /* Check for pending interrupts. */
stat = RD_REG_DWORD(&reg->host_status); stat = rd_reg_dword(&reg->host_status);
if (stat & HSRX_RISC_INT) { if (stat & HSRX_RISC_INT) {
stat &= 0xff; stat &= 0xff;
@@ -5430,10 +5429,10 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
stat == 0x10 || stat == 0x11) { stat == 0x10 || stat == 0x11) {
set_bit(MBX_INTERRUPT, set_bit(MBX_INTERRUPT,
&ha->mbx_cmd_flags); &ha->mbx_cmd_flags);
mb0 = RD_REG_WORD(&reg->mailbox0); mb0 = rd_reg_word(&reg->mailbox0);
WRT_REG_DWORD(&reg->hccr, wrt_reg_dword(&reg->hccr,
HCCRX_CLR_RISC_INT); HCCRX_CLR_RISC_INT);
RD_REG_DWORD(&reg->hccr); rd_reg_dword(&reg->hccr);
break; break;
} }
} }
@@ -6211,7 +6210,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1144, ql_dbg(ql_dbg_mbx, vha, 0x1144,
"Failed=%x mb[0]=%x mb[1]=%x.\n", "Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]); rval, mcp->mb[0], mcp->mb[1]);
ha->isp_ops->fw_dump(vha, 0); qla2xxx_dump_fw(vha);
} else { } else {
ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__); ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
} }
@@ -6256,7 +6255,7 @@ qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
"Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
mcp->mb[4]); mcp->mb[4]);
ha->isp_ops->fw_dump(vha, 0); qla2xxx_dump_fw(vha);
} else { } else {
if (subcode & BIT_5) if (subcode & BIT_5)
*sector_size = mcp->mb[1]; *sector_size = mcp->mb[1];
@@ -6470,13 +6469,13 @@ int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
memset(&mc, 0, sizeof(mc)); memset(&mc, 0, sizeof(mc));
mc.mb[0] = MBC_GET_PORT_DATABASE; mc.mb[0] = MBC_GET_PORT_DATABASE;
mc.mb[1] = cpu_to_le16(fcport->loop_id); mc.mb[1] = fcport->loop_id;
mc.mb[2] = MSW(pd_dma); mc.mb[2] = MSW(pd_dma);
mc.mb[3] = LSW(pd_dma); mc.mb[3] = LSW(pd_dma);
mc.mb[6] = MSW(MSD(pd_dma)); mc.mb[6] = MSW(MSD(pd_dma));
mc.mb[7] = LSW(MSD(pd_dma)); mc.mb[7] = LSW(MSD(pd_dma));
mc.mb[9] = cpu_to_le16(vha->vp_idx); mc.mb[9] = vha->vp_idx;
mc.mb[10] = cpu_to_le16((uint16_t)opt); mc.mb[10] = opt;
rval = qla24xx_send_mb_cmd(vha, &mc); rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval != QLA_SUCCESS) { if (rval != QLA_SUCCESS) {
@@ -6587,7 +6586,7 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
mc.mb[6] = MSW(MSD(id_list_dma)); mc.mb[6] = MSW(MSD(id_list_dma));
mc.mb[7] = LSW(MSD(id_list_dma)); mc.mb[7] = LSW(MSD(id_list_dma));
mc.mb[8] = 0; mc.mb[8] = 0;
mc.mb[9] = cpu_to_le16(vha->vp_idx); mc.mb[9] = vha->vp_idx;
rval = qla24xx_send_mb_cmd(vha, &mc); rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval != QLA_SUCCESS) { if (rval != QLA_SUCCESS) {
@@ -6613,8 +6612,8 @@ int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
memset(mcp->mb, 0 , sizeof(mcp->mb)); memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
mcp->mb[1] = cpu_to_le16(1); mcp->mb[1] = 1;
mcp->mb[2] = cpu_to_le16(value); mcp->mb[2] = value;
mcp->out_mb = MBX_2 | MBX_1 | MBX_0; mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
mcp->in_mb = MBX_2 | MBX_0; mcp->in_mb = MBX_2 | MBX_0;
mcp->tov = MBX_TOV_SECONDS; mcp->tov = MBX_TOV_SECONDS;
@@ -6639,7 +6638,7 @@ int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
memset(mcp->mb, 0, sizeof(mcp->mb)); memset(mcp->mb, 0, sizeof(mcp->mb));
mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
mcp->mb[1] = cpu_to_le16(0); mcp->mb[1] = 0;
mcp->out_mb = MBX_1 | MBX_0; mcp->out_mb = MBX_1 | MBX_0;
mcp->in_mb = MBX_2 | MBX_0; mcp->in_mb = MBX_2 | MBX_0;
mcp->tov = MBX_TOV_SECONDS; mcp->tov = MBX_TOV_SECONDS;
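Most of the qla_mbx.c churn above is one fix repeated: mcp->mb[]/mc.mb[] are uint16_t arrays kept in host byte order, and the endianness conversion happens exactly once, inside wrt_reg_word()/rd_reg_word(), when the value crosses the bus. Wrapping individual entries in cpu_to_le16() was therefore a sparse type error, and on a big-endian host the register would have ended up byte-swapped. The corrected flow, sketched with mailbox 9 as the example:

	mc.mb[9] = vha->vp_idx;		/* shadow array: CPU byte order */
	...
	wrt_reg_word(optr, mc.mb[9]);	/* LE conversion happens at the register */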

View File

@@ -770,7 +770,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->req_q_in = &reg->isp25mq.req_q_in; req->req_q_in = &reg->isp25mq.req_q_in;
req->req_q_out = &reg->isp25mq.req_q_out; req->req_q_out = &reg->isp25mq.req_q_out;
req->max_q_depth = ha->req_q_map[0]->max_q_depth; req->max_q_depth = ha->req_q_map[0]->max_q_depth;
req->out_ptr = (void *)(req->ring + req->length); req->out_ptr = (uint16_t *)(req->ring + req->length);
mutex_unlock(&ha->mq_lock); mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc004, ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
"ring_ptr=%p ring_index=%d, " "ring_ptr=%p ring_index=%d, "
@@ -884,7 +884,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
reg = ISP_QUE_REG(ha, que_id); reg = ISP_QUE_REG(ha, que_id);
rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
rsp->rsp_q_out = &reg->isp25mq.rsp_q_out; rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
rsp->in_ptr = (void *)(rsp->ring + rsp->length); rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
mutex_unlock(&ha->mq_lock); mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc00b, ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
"options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n", "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",

View File

@@ -46,7 +46,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
uint8_t io_lock_on; uint8_t io_lock_on;
uint16_t command = 0; uint16_t command = 0;
uint32_t *iptr; uint32_t *iptr;
uint32_t __iomem *optr; __le32 __iomem *optr;
uint32_t cnt; uint32_t cnt;
uint32_t mboxes; uint32_t mboxes;
unsigned long wait_time; unsigned long wait_time;
@@ -109,7 +109,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags); spin_lock_irqsave(&ha->hardware_lock, flags);
/* Load mailbox registers. */ /* Load mailbox registers. */
optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0; optr = &reg->ispfx00.mailbox0;
iptr = mcp->mb; iptr = mcp->mb;
command = mcp->mb[0]; command = mcp->mb[0];
@@ -117,7 +117,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
for (cnt = 0; cnt < ha->mbx_count; cnt++) { for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (mboxes & BIT_0) if (mboxes & BIT_0)
WRT_REG_DWORD(optr, *iptr); wrt_reg_dword(optr, *iptr);
mboxes >>= 1; mboxes >>= 1;
optr++; optr++;
@@ -676,14 +676,14 @@ qlafx00_config_rings(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw; struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
WRT_REG_DWORD(&reg->req_q_in, 0); wrt_reg_dword(&reg->req_q_in, 0);
WRT_REG_DWORD(&reg->req_q_out, 0); wrt_reg_dword(&reg->req_q_out, 0);
WRT_REG_DWORD(&reg->rsp_q_in, 0); wrt_reg_dword(&reg->rsp_q_in, 0);
WRT_REG_DWORD(&reg->rsp_q_out, 0); wrt_reg_dword(&reg->rsp_q_out, 0);
/* PCI posting */ /* PCI posting */
RD_REG_DWORD(&reg->rsp_q_out); rd_reg_dword(&reg->rsp_q_out);
} }
char * char *
@@ -912,9 +912,9 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
/* 30 seconds wait - Adjust if required */ /* 30 seconds wait - Adjust if required */
wait_time = 30; wait_time = 30;
pseudo_aen = RD_REG_DWORD(&reg->pseudoaen); pseudo_aen = rd_reg_dword(&reg->pseudoaen);
if (pseudo_aen == 1) { if (pseudo_aen == 1) {
aenmbx7 = RD_REG_DWORD(&reg->initval7); aenmbx7 = rd_reg_dword(&reg->initval7);
ha->mbx_intr_code = MSW(aenmbx7); ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7); ha->rqstq_intr_code = LSW(aenmbx7);
rval = qlafx00_driver_shutdown(vha, 10); rval = qlafx00_driver_shutdown(vha, 10);
@@ -925,7 +925,7 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
/* wait time before firmware ready */ /* wait time before firmware ready */
wtime = jiffies + (wait_time * HZ); wtime = jiffies + (wait_time * HZ);
do { do {
aenmbx = RD_REG_DWORD(&reg->aenmailbox0); aenmbx = rd_reg_dword(&reg->aenmailbox0);
barrier(); barrier();
ql_dbg(ql_dbg_mbx, vha, 0x0133, ql_dbg(ql_dbg_mbx, vha, 0x0133,
"aenmbx: 0x%x\n", aenmbx); "aenmbx: 0x%x\n", aenmbx);
@@ -944,15 +944,15 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
case MBA_FW_RESTART_CMPLT: case MBA_FW_RESTART_CMPLT:
/* Set the mbx and rqstq intr code */ /* Set the mbx and rqstq intr code */
aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7); aenmbx7 = rd_reg_dword(&reg->aenmailbox7);
ha->mbx_intr_code = MSW(aenmbx7); ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7); ha->rqstq_intr_code = LSW(aenmbx7);
ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1); ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3); ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5); ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6); ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
WRT_REG_DWORD(&reg->aenmailbox0, 0); wrt_reg_dword(&reg->aenmailbox0, 0);
RD_REG_DWORD_RELAXED(&reg->aenmailbox0); rd_reg_dword_relaxed(&reg->aenmailbox0);
ql_dbg(ql_dbg_init, vha, 0x0134, ql_dbg(ql_dbg_init, vha, 0x0134,
"f/w returned mbx_intr_code: 0x%x, " "f/w returned mbx_intr_code: 0x%x, "
"rqstq_intr_code: 0x%x\n", "rqstq_intr_code: 0x%x\n",
@@ -982,13 +982,13 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
* 3. issue Get FW State Mbox cmd to determine fw state * 3. issue Get FW State Mbox cmd to determine fw state
* Set the mbx and rqstq intr code from Shadow Regs * Set the mbx and rqstq intr code from Shadow Regs
*/ */
aenmbx7 = RD_REG_DWORD(&reg->initval7); aenmbx7 = rd_reg_dword(&reg->initval7);
ha->mbx_intr_code = MSW(aenmbx7); ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7); ha->rqstq_intr_code = LSW(aenmbx7);
ha->req_que_off = RD_REG_DWORD(&reg->initval1); ha->req_que_off = rd_reg_dword(&reg->initval1);
ha->rsp_que_off = RD_REG_DWORD(&reg->initval3); ha->rsp_que_off = rd_reg_dword(&reg->initval3);
ha->req_que_len = RD_REG_DWORD(&reg->initval5); ha->req_que_len = rd_reg_dword(&reg->initval5);
ha->rsp_que_len = RD_REG_DWORD(&reg->initval6); ha->rsp_que_len = rd_reg_dword(&reg->initval6);
ql_dbg(ql_dbg_init, vha, 0x0135, ql_dbg(ql_dbg_init, vha, 0x0135,
"f/w returned mbx_intr_code: 0x%x, " "f/w returned mbx_intr_code: 0x%x, "
"rqstq_intr_code: 0x%x\n", "rqstq_intr_code: 0x%x\n",
@@ -1034,7 +1034,7 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
if (time_after_eq(jiffies, wtime)) { if (time_after_eq(jiffies, wtime)) {
ql_dbg(ql_dbg_init, vha, 0x0137, ql_dbg(ql_dbg_init, vha, 0x0137,
"Init f/w failed: aen[7]: 0x%x\n", "Init f/w failed: aen[7]: 0x%x\n",
RD_REG_DWORD(&reg->aenmailbox7)); rd_reg_dword(&reg->aenmailbox7));
rval = QLA_FUNCTION_FAILED; rval = QLA_FUNCTION_FAILED;
done = true; done = true;
break; break;
@@ -1428,7 +1428,7 @@ qlafx00_init_response_q_entries(struct rsp_que *rsp)
pkt = rsp->ring_ptr; pkt = rsp->ring_ptr;
for (cnt = 0; cnt < rsp->length; cnt++) { for (cnt = 0; cnt < rsp->length; cnt++) {
pkt->signature = RESPONSE_PROCESSED; pkt->signature = RESPONSE_PROCESSED;
WRT_REG_DWORD((void __force __iomem *)&pkt->signature, wrt_reg_dword((void __force __iomem *)&pkt->signature,
RESPONSE_PROCESSED); RESPONSE_PROCESSED);
pkt++; pkt++;
} }
@@ -1444,13 +1444,13 @@ qlafx00_rescan_isp(scsi_qla_host_t *vha)
qla2x00_request_irqs(ha, ha->rsp_q_map[0]); qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7); aenmbx7 = rd_reg_dword(&reg->aenmailbox7);
ha->mbx_intr_code = MSW(aenmbx7); ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7); ha->rqstq_intr_code = LSW(aenmbx7);
ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1); ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3); ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5); ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6); ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
ql_dbg(ql_dbg_disc, vha, 0x2094, ql_dbg(ql_dbg_disc, vha, 0x2094,
"fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x " "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
@@ -1495,7 +1495,7 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
(!test_bit(UNLOADING, &vha->dpc_flags)) && (!test_bit(UNLOADING, &vha->dpc_flags)) &&
(!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
(ha->mr.fw_hbt_en)) { (ha->mr.fw_hbt_en)) {
fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat); fw_heart_beat = rd_reg_dword(&reg->fwheartbeat);
if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) { if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
ha->mr.old_fw_hbt_cnt = fw_heart_beat; ha->mr.old_fw_hbt_cnt = fw_heart_beat;
ha->mr.fw_hbt_miss_cnt = 0; ha->mr.fw_hbt_miss_cnt = 0;
@@ -1515,7 +1515,7 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) { if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
/* Reset recovery to be performed in timer routine */ /* Reset recovery to be performed in timer routine */
aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0); aenmbx0 = rd_reg_dword(&reg->aenmailbox0);
if (ha->mr.fw_reset_timer_exp) { if (ha->mr.fw_reset_timer_exp) {
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha); qla2xxx_wake_dpc(vha);
@@ -1710,10 +1710,9 @@ qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
return; return;
} }
int void
qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt) qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
{ {
int rval = 0;
uint32_t aen_code, aen_data; uint32_t aen_code, aen_data;
aen_code = FCH_EVT_VENDOR_UNIQUE; aen_code = FCH_EVT_VENDOR_UNIQUE;
@@ -1764,8 +1763,6 @@ qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
fc_host_post_event(vha->host, fc_get_event_number(), fc_host_post_event(vha->host, fc_get_event_number(),
aen_code, aen_data); aen_code, aen_data);
return rval;
} }
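qlafx00_process_aen() returned a hard-coded 0, a value its caller presumably never consumed, so the conversion to void and the removal of the rval local lose nothing; the signature simply becomes:

	void qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt);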
static void static void
@@ -2721,7 +2718,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
uint16_t lreq_q_in = 0; uint16_t lreq_q_in = 0;
uint16_t lreq_q_out = 0; uint16_t lreq_q_out = 0;
lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in); lreq_q_in = rd_reg_dword(rsp->rsp_q_in);
lreq_q_out = rsp->ring_index; lreq_q_out = rsp->ring_index;
while (lreq_q_in != lreq_q_out) { while (lreq_q_in != lreq_q_out) {
@@ -2783,7 +2780,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
} }
/* Adjust ring index */ /* Adjust ring index */
WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
} }
/** /**
@@ -2814,9 +2811,9 @@ qlafx00_async_event(scsi_qla_host_t *vha)
break; break;
case QLAFX00_MBA_PORT_UPDATE: /* Port database update */ case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1); ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2); ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3); ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
ql_dbg(ql_dbg_async, vha, 0x5077, ql_dbg(ql_dbg_async, vha, 0x5077,
"Asynchronous port Update received " "Asynchronous port Update received "
"aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n", "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
@@ -2846,13 +2843,13 @@ qlafx00_async_event(scsi_qla_host_t *vha)
break; break;
default: default:
ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1); ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2); ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3); ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4); ha->aenmb[4] = rd_reg_dword(&reg->aenmailbox4);
ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5); ha->aenmb[5] = rd_reg_dword(&reg->aenmailbox5);
ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6); ha->aenmb[6] = rd_reg_dword(&reg->aenmailbox6);
ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7); ha->aenmb[7] = rd_reg_dword(&reg->aenmailbox7);
ql_dbg(ql_dbg_async, vha, 0x5078, ql_dbg(ql_dbg_async, vha, 0x5078,
"AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n", "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3], ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
@@ -2872,7 +2869,7 @@ static void
qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0) qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
{ {
uint16_t cnt; uint16_t cnt;
uint32_t __iomem *wptr; __le32 __iomem *wptr;
struct qla_hw_data *ha = vha->hw; struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
@@ -2882,10 +2879,10 @@ qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
/* Load return mailbox registers. */ /* Load return mailbox registers. */
ha->flags.mbox_int = 1; ha->flags.mbox_int = 1;
ha->mailbox_out32[0] = mb0; ha->mailbox_out32[0] = mb0;
wptr = (uint32_t __iomem *)&reg->mailbox17; wptr = &reg->mailbox17;
for (cnt = 1; cnt < ha->mbx_count; cnt++) { for (cnt = 1; cnt < ha->mbx_count; cnt++) {
ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr); ha->mailbox_out32[cnt] = rd_reg_dword(wptr);
wptr++; wptr++;
} }
} }
@@ -2939,13 +2936,13 @@ qlafx00_intr_handler(int irq, void *dev_id)
break; break;
if (stat & QLAFX00_INTR_MB_CMPLT) { if (stat & QLAFX00_INTR_MB_CMPLT) {
mb[0] = RD_REG_WORD(&reg->mailbox16); mb[0] = rd_reg_dword(&reg->mailbox16);
qlafx00_mbx_completion(vha, mb[0]); qlafx00_mbx_completion(vha, mb[0]);
status |= MBX_INTERRUPT; status |= MBX_INTERRUPT;
clr_intr |= QLAFX00_INTR_MB_CMPLT; clr_intr |= QLAFX00_INTR_MB_CMPLT;
} }
if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) { if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0); ha->aenmb[0] = rd_reg_dword(&reg->aenmailbox0);
qlafx00_async_event(vha); qlafx00_async_event(vha);
clr_intr |= QLAFX00_INTR_ASYNC_CMPLT; clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
} }
@@ -3113,7 +3110,7 @@ qlafx00_start_scsi(srb_t *sp)
tot_dsds = nseg; tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) { if (req->cnt < (req_cnt + 2)) {
cnt = RD_REG_DWORD_RELAXED(req->req_q_out); cnt = rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt) if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index; req->cnt = cnt - req->ring_index;
@@ -3178,7 +3175,7 @@ qlafx00_start_scsi(srb_t *sp)
sp->flags |= SRB_DMA_VALID; sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */ /* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index); wrt_reg_dword(req->req_q_in, req->ring_index);
QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code); QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3205,7 +3202,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00)); memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00; tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
tm_iocb.entry_count = 1; tm_iocb.entry_count = 1;
tm_iocb.handle = cpu_to_le32(make_handle(req->id, sp->handle)); tm_iocb.handle = make_handle(req->id, sp->handle);
tm_iocb.reserved_0 = 0; tm_iocb.reserved_0 = 0;
tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id); tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags); tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
@@ -3215,7 +3212,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
sizeof(struct scsi_lun)); sizeof(struct scsi_lun));
} }
memcpy((void *)ptm_iocb, &tm_iocb, memcpy(ptm_iocb, &tm_iocb,
sizeof(struct tsk_mgmt_entry_fx00)); sizeof(struct tsk_mgmt_entry_fx00));
wmb(); wmb();
} }
@@ -3231,13 +3228,12 @@ qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00)); memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00; abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
abt_iocb.entry_count = 1; abt_iocb.entry_count = 1;
abt_iocb.handle = cpu_to_le32(make_handle(req->id, sp->handle)); abt_iocb.handle = make_handle(req->id, sp->handle);
abt_iocb.abort_handle = cpu_to_le32(make_handle(req->id, fxio->u.abt.cmd_hndl)); abt_iocb.abort_handle = make_handle(req->id, fxio->u.abt.cmd_hndl);
abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id); abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
abt_iocb.req_que_no = cpu_to_le16(req->id); abt_iocb.req_que_no = cpu_to_le16(req->id);
memcpy((void *)pabt_iocb, &abt_iocb, memcpy(pabt_iocb, &abt_iocb,
sizeof(struct abort_iocb_entry_fx00)); sizeof(struct abort_iocb_entry_fx00));
wmb(); wmb();
} }
@@ -3254,7 +3250,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00)); memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
fx_iocb.entry_type = FX00_IOCB_TYPE; fx_iocb.entry_type = FX00_IOCB_TYPE;
fx_iocb.handle = cpu_to_le32(sp->handle); fx_iocb.handle = sp->handle;
fx_iocb.entry_count = entry_cnt; fx_iocb.entry_count = entry_cnt;
if (sp->type == SRB_FXIOCB_DCMD) { if (sp->type == SRB_FXIOCB_DCMD) {
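The cpu_to_le32() wrappers dropped around the IOCB handles in this file pair with the structure re-typing in the next one: handle/abort_handle become plain uint32_t because they are opaque cookies that the driver packs and the firmware echoes back verbatim, so their byte order is never interpreted in flight. A sketch of the packing helper's likely shape (the real make_handle() lives in the driver's headers):

	/* pack request-queue id and ring index into one opaque 32-bit cookie */
	static inline uint32_t make_handle(uint16_t x, uint16_t y)
	{
		return ((uint32_t)x << 16) | y;
	}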

View File

@@ -96,7 +96,7 @@ struct tsk_mgmt_entry_fx00 {
uint8_t sys_define; uint8_t sys_define;
uint8_t entry_status; /* Entry Status. */ uint8_t entry_status; /* Entry Status. */
__le32 handle; /* System handle. */ uint32_t handle; /* System handle. */
uint32_t reserved_0; uint32_t reserved_0;
@@ -121,13 +121,13 @@ struct abort_iocb_entry_fx00 {
uint8_t sys_define; /* System defined. */ uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */ uint8_t entry_status; /* Entry Status. */
__le32 handle; /* System handle. */ uint32_t handle; /* System handle. */
__le32 reserved_0; __le32 reserved_0;
__le16 tgt_id_sts; /* Completion status. */ __le16 tgt_id_sts; /* Completion status. */
__le16 options; __le16 options;
__le32 abort_handle; /* System handle. */ uint32_t abort_handle; /* System handle. */
__le32 reserved_2; __le32 reserved_2;
__le16 req_que_no; __le16 req_que_no;
@@ -166,7 +166,7 @@ struct fxdisc_entry_fx00 {
uint8_t sys_define; /* System Defined. */ uint8_t sys_define; /* System Defined. */
uint8_t entry_status; /* Entry Status. */ uint8_t entry_status; /* Entry Status. */
__le32 handle; /* System handle. */ uint32_t handle; /* System handle. */
__le32 reserved_0; /* System handle. */ __le32 reserved_0; /* System handle. */
__le16 func_num; __le16 func_num;
@@ -359,47 +359,47 @@ struct config_info_data {
#define CONTINUE_A64_TYPE_FX00 0x03 /* Continuation entry. */ #define CONTINUE_A64_TYPE_FX00 0x03 /* Continuation entry. */
#define QLAFX00_SET_HST_INTR(ha, value) \ #define QLAFX00_SET_HST_INTR(ha, value) \
WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \ wrt_reg_dword((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \
value) value)
#define QLAFX00_CLR_HST_INTR(ha, value) \ #define QLAFX00_CLR_HST_INTR(ha, value) \
WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
~value) ~value)
#define QLAFX00_RD_INTR_REG(ha) \ #define QLAFX00_RD_INTR_REG(ha) \
RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG) rd_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG)
#define QLAFX00_CLR_INTR_REG(ha, value) \ #define QLAFX00_CLR_INTR_REG(ha, value) \
WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
~value) ~value)
#define QLAFX00_SET_HBA_SOC_REG(ha, off, val)\ #define QLAFX00_SET_HBA_SOC_REG(ha, off, val)\
WRT_REG_DWORD((ha)->cregbase + off, val) wrt_reg_dword((ha)->cregbase + off, val)
#define QLAFX00_GET_HBA_SOC_REG(ha, off)\ #define QLAFX00_GET_HBA_SOC_REG(ha, off)\
RD_REG_DWORD((ha)->cregbase + off) rd_reg_dword((ha)->cregbase + off)
#define QLAFX00_HBA_RST_REG(ha, val)\ #define QLAFX00_HBA_RST_REG(ha, val)\
WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_RST_REG, val) wrt_reg_dword((ha)->cregbase + QLAFX00_HST_RST_REG, val)
#define QLAFX00_RD_ICNTRL_REG(ha) \ #define QLAFX00_RD_ICNTRL_REG(ha) \
RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG) rd_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG)
#define QLAFX00_ENABLE_ICNTRL_REG(ha) \ #define QLAFX00_ENABLE_ICNTRL_REG(ha) \
WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) | \ (QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) | \
QLAFX00_ICR_ENB_MASK)) QLAFX00_ICR_ENB_MASK))
#define QLAFX00_DISABLE_ICNTRL_REG(ha) \ #define QLAFX00_DISABLE_ICNTRL_REG(ha) \
WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) & \ (QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) & \
QLAFX00_ICR_DIS_MASK)) QLAFX00_ICR_DIS_MASK))
#define QLAFX00_RD_REG(ha, off) \ #define QLAFX00_RD_REG(ha, off) \
RD_REG_DWORD((ha)->cregbase + off) rd_reg_dword((ha)->cregbase + off)
#define QLAFX00_WR_REG(ha, off, val) \ #define QLAFX00_WR_REG(ha, off, val) \
WRT_REG_DWORD((ha)->cregbase + off, val) wrt_reg_dword((ha)->cregbase + off, val)
struct qla_mt_iocb_rqst_fx00 { struct qla_mt_iocb_rqst_fx00 {
__le32 reserved_0; __le32 reserved_0;
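The pattern across these structure diffs is consistent: fields the firmware parses (tgt_id_sts, options, req_que_no) keep their __le16/__le32 annotations and must be written through cpu_to_le*(), while echoed-back cookies become plain uint32_t. Under sparse the annotations are load-bearing, so assigning across the boundary without a conversion now draws a warning. Both sides of the rule, taken from the hunks above:

	abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);	/* firmware-parsed */
	abt_iocb.handle = make_handle(req->id, sp->handle);	/* opaque, host order */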

View File

@@ -138,7 +138,7 @@ static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
priv->sp = NULL; priv->sp = NULL;
sp->priv = NULL; sp->priv = NULL;
if (priv->comp_status == QLA_SUCCESS) { if (priv->comp_status == QLA_SUCCESS) {
fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len; fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
} else { } else {
fd->rcv_rsplen = 0; fd->rcv_rsplen = 0;
fd->transferred_length = 0; fd->transferred_length = 0;
@@ -295,7 +295,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
sp->name = "nvme_ls"; sp->name = "nvme_ls";
sp->done = qla_nvme_sp_ls_done; sp->done = qla_nvme_sp_ls_done;
sp->put_fn = qla_nvme_release_ls_cmd_kref; sp->put_fn = qla_nvme_release_ls_cmd_kref;
sp->priv = (void *)priv; sp->priv = priv;
priv->sp = sp; priv->sp = sp;
kref_init(&sp->cmd_kref); kref_init(&sp->cmd_kref);
spin_lock_init(&priv->cmd_lock); spin_lock_init(&priv->cmd_lock);
@@ -384,7 +384,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) { if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
RD_REG_DWORD_RELAXED(req->req_q_out); rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt) if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index; req->cnt = cnt - req->ring_index;
@@ -426,11 +426,11 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
/* No data transfer how do we check buffer len == 0?? */ /* No data transfer how do we check buffer len == 0?? */
if (fd->io_dir == NVMEFC_FCP_READ) { if (fd->io_dir == NVMEFC_FCP_READ) {
cmd_pkt->control_flags = CF_READ_DATA; cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
vha->qla_stats.input_bytes += fd->payload_length; vha->qla_stats.input_bytes += fd->payload_length;
vha->qla_stats.input_requests++; vha->qla_stats.input_requests++;
} else if (fd->io_dir == NVMEFC_FCP_WRITE) { } else if (fd->io_dir == NVMEFC_FCP_WRITE) {
cmd_pkt->control_flags = CF_WRITE_DATA; cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
if ((vha->flags.nvme_first_burst) && if ((vha->flags.nvme_first_burst) &&
(sp->fcport->nvme_prli_service_param & (sp->fcport->nvme_prli_service_param &
NVME_PRLI_SP_FIRST_BURST)) { NVME_PRLI_SP_FIRST_BURST)) {
@@ -438,7 +438,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
sp->fcport->nvme_first_burst_size) || sp->fcport->nvme_first_burst_size) ||
(sp->fcport->nvme_first_burst_size == 0)) (sp->fcport->nvme_first_burst_size == 0))
cmd_pkt->control_flags |= cmd_pkt->control_flags |=
CF_NVME_FIRST_BURST_ENABLE; cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
} }
vha->qla_stats.output_bytes += fd->payload_length; vha->qla_stats.output_bytes += fd->payload_length;
vha->qla_stats.output_requests++; vha->qla_stats.output_requests++;
@@ -514,7 +514,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
} }
/* Set chip new ring index. */ /* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index); wrt_reg_dword(req->req_q_in, req->ring_index);
queuing_error: queuing_error:
spin_unlock_irqrestore(&qpair->qp_lock, flags); spin_unlock_irqrestore(&qpair->qp_lock, flags);
@@ -560,7 +560,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
init_waitqueue_head(&sp->nvme_ls_waitq); init_waitqueue_head(&sp->nvme_ls_waitq);
kref_init(&sp->cmd_kref); kref_init(&sp->cmd_kref);
spin_lock_init(&priv->cmd_lock); spin_lock_init(&priv->cmd_lock);
sp->priv = (void *)priv; sp->priv = priv;
priv->sp = sp; priv->sp = sp;
sp->type = SRB_NVME_CMD; sp->type = SRB_NVME_CMD;
sp->name = "nvme_cmd"; sp->name = "nvme_cmd";
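control_flags in the NVMe command IOCB is a __le16 on the wire, so the fixes above convert the flag constants at assignment time; note that OR-ing in the first-burst bit must use the converted form too, since a host-order constant would set the wrong bit on a big-endian host. The corrected pattern (want_first_burst is a hypothetical stand-in for the PRLI checks):

	cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
	if (want_first_burst)
		cmd_pkt->control_flags |= cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);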

Some files were not shown because too many files have changed in this diff.