SCSI misc on 20180610

This is mostly updates to the usual drivers: ufs, qedf, mpt3sas, lpfc,
 zfcp, hisi_sas, cxlflash, qla2xxx.  In the absence of Nic, we're also
 taking target updates which are mostly minor except for the tcmu
 refactor. The only real core change to worry about is the removal of
 high page bouncing (in sas, storvsc and iscsi).  This has been well
 tested and no problems have shown up so far.
 
 Signed-off-by: James E.J. Bottomley <jejb@linux.vnet.ibm.com>
 -----BEGIN PGP SIGNATURE-----
 
 iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCWx1pbCYcamFtZXMuYm90
 dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishUucAP42pccS
 ziKyiOizuxv9fZ4Q+nXd1A9zhI5tqqpkHjcQegEA40qiZSi3EKGKR8W0UpX7Ntmo
 tqrZJGojx9lnrAM2RbQ=
 =NMXg
 -----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This is mostly updates to the usual drivers: ufs, qedf, mpt3sas, lpfc,
  zfcp, hisi_sas, cxlflash, qla2xxx.

  In the absence of Nic, we're also taking target updates which are
  mostly minor except for the tcmu refactor.

  The only real core change to worry about is the removal of high page
  bouncing (in sas, storvsc and iscsi). This has been well tested and no
  problems have shown up so far"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (268 commits)
  scsi: lpfc: update driver version to 12.0.0.4
  scsi: lpfc: Fix port initialization failure.
  scsi: lpfc: Fix 16gb hbas failing cq create.
  scsi: lpfc: Fix crash in blk_mq layer when executing modprobe -r lpfc
  scsi: lpfc: correct oversubscription of nvme io requests for an adapter
  scsi: lpfc: Fix MDS diagnostics failure (Rx < Tx)
  scsi: hisi_sas: Mark PHY as in reset for nexus reset
  scsi: hisi_sas: Fix return value when get_free_slot() failed
  scsi: hisi_sas: Terminate STP reject quickly for v2 hw
  scsi: hisi_sas: Add v2 hw force PHY function for internal ATA command
  scsi: hisi_sas: Include TMF elements in struct hisi_sas_slot
  scsi: hisi_sas: Try wait commands before controller reset
  scsi: hisi_sas: Init disks after controller reset
  scsi: hisi_sas: Create a scsi_host_template per HW module
  scsi: hisi_sas: Reset disks when discovered
  scsi: hisi_sas: Add LED feature for v3 hw
  scsi: hisi_sas: Change common allocation mode of device id
  scsi: hisi_sas: change slot index allocation mode
  scsi: hisi_sas: Introduce hisi_sas_phy_set_linkrate()
  scsi: hisi_sas: fix a typo in hisi_sas_task_prep()
  ...
Linus Torvalds 2018-06-10 13:01:12 -07:00
commit 5f85942c2e
152 changed files with 7237 additions and 2020 deletions
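
A note on the core change called out in the message above: "high page
bouncing" is the legacy block-layer behavior of copying data from highmem
pages into lowmem bounce buffers before handing a request to a driver that
cannot DMA to high memory. As an illustrative sketch only (not code from
this series), a driver opted in and out of bouncing roughly like this:

	#include <linux/blkdev.h>

	/* legacy: force lowmem bounce buffers for all highmem pages */
	blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_HIGH);

	/* after the removal: accept any page the DMA mask allows */
	blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);

Dropping the limit in sas, storvsc and iscsi avoids the extra copy on
32-bit configurations with highmem.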


@ -36,6 +36,7 @@ available subsections can be seen below.
edac
scsi
libata
target
mtdnand
miscellaneous
w1


@ -334,5 +334,5 @@ todo
~~~~
Parallel (fast/wide/ultra) SCSI, USB, SATA, SAS, Fibre Channel,
FireWire, ATAPI devices, Infiniband, I2O, iSCSI, Parallel ports,
FireWire, ATAPI devices, Infiniband, I2O, Parallel ports,
netlink...


@ -0,0 +1,64 @@
=================================
target and iSCSI Interfaces Guide
=================================
Introduction and Overview
=========================
TBD
Target core device interfaces
=============================
.. kernel-doc:: drivers/target/target_core_device.c
:export:
Target core transport interfaces
================================
.. kernel-doc:: drivers/target/target_core_transport.c
:export:
Target-supported userspace I/O
==============================
.. kernel-doc:: drivers/target/target_core_user.c
:doc: Userspace I/O
.. kernel-doc:: include/uapi/linux/target_core_user.h
:doc: Ring Design
iSCSI helper functions
======================
.. kernel-doc:: drivers/scsi/libiscsi.c
:export:
iSCSI boot information
======================
.. kernel-doc:: drivers/scsi/iscsi_boot_sysfs.c
:export:
iSCSI transport class
=====================
The file drivers/scsi/scsi_transport_iscsi.c defines transport
attributes for the iSCSI class, which sends SCSI packets over TCP/IP
connections.
.. kernel-doc:: drivers/scsi/scsi_transport_iscsi.c
:export:
iSCSI TCP interfaces
====================
.. kernel-doc:: drivers/scsi/iscsi_tcp.c
:internal:
.. kernel-doc:: drivers/scsi/libiscsi_tcp.c
:export:
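
The kernel-doc directives in this new file pull documentation out of
comments in the named source files; the :export: option restricts
extraction to exported symbols. A minimal sketch of the comment style
these directives consume, using a hypothetical function name:

	/**
	 * target_example_submit() - One-line summary of the function.
	 * @cmd: Description of the parameter.
	 *
	 * Free-form description of behavior and context.
	 *
	 * Return: 0 on success, negative errno on failure.
	 */
	int target_example_submit(struct se_cmd *cmd)
	{
		return 0;
	}
	EXPORT_SYMBOL_GPL(target_example_submit);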


@ -151,7 +151,7 @@ Code Seq#(hex) Include File Comments
'J' 00-1F drivers/scsi/gdth_ioctl.h
'K' all linux/kd.h
'L' 00-1F linux/loop.h conflict!
'L' 10-1F drivers/scsi/mpt2sas/mpt2sas_ctl.h conflict!
'L' 10-1F drivers/scsi/mpt3sas/mpt3sas_ctl.h conflict!
'L' 20-2F linux/lightnvm.h
'L' E0-FF linux/ppdd.h encrypted disk device driver
<http://linux01.gwdg.de/~alatham/ppdd.html>


@ -8446,7 +8446,6 @@ L: linux-scsi@vger.kernel.org
W: http://www.avagotech.com/support/
S: Supported
F: drivers/message/fusion/
F: drivers/scsi/mpt2sas/
F: drivers/scsi/mpt3sas/
LSILOGIC/SYMBIOS/NCR 53C8XX and 53C1010 PCI-SCSI drivers


@ -5054,6 +5054,18 @@ int ata_sas_port_init(struct ata_port *ap)
}
EXPORT_SYMBOL_GPL(ata_sas_port_init);
int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
{
return ata_tport_add(parent, ap);
}
EXPORT_SYMBOL_GPL(ata_sas_tport_add);
void ata_sas_tport_delete(struct ata_port *ap)
{
ata_tport_delete(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
/**
* ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
* @ap: SATA port to destroy


@ -227,6 +227,8 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
ring_info->ring_buffer->feature_bits.value = 1;
ring_info->ring_size = page_cnt << PAGE_SHIFT;
ring_info->ring_size_div10_reciprocal =
reciprocal_value(ring_info->ring_size / 10);
ring_info->ring_datasize = ring_info->ring_size -
sizeof(struct hv_ring_buffer);
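
Caching a precomputed reciprocal here lets hot-path callers (see the
netvsc hunks below) turn "bytes available to write" into a percentage
without an integer division. A sketch of such a helper, assuming the
accessor names used elsewhere in this series (the real inline lives in
include/linux/hyperv.h):

	#include <linux/reciprocal_div.h>

	static inline u32 hv_get_avail_to_write_percent(
			const struct hv_ring_buffer_info *rbi)
	{
		u32 avail_write = hv_get_bytes_to_write(rbi);

		/*
		 * percent = avail * 100 / ring_size
		 *         = avail * 10 / (ring_size / 10),
		 * where ring_size_div10_reciprocal caches
		 * reciprocal_value(ring_size / 10).
		 */
		return reciprocal_divide(avail_write * 10,
					 rbi->ring_size_div10_reciprocal);
	}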


@ -1802,13 +1802,13 @@ typedef struct _CONFIG_PAGE_FC_PORT_0
#define MPI_FCPORTPAGE0_SUPPORT_CLASS_2 (0x00000002)
#define MPI_FCPORTPAGE0_SUPPORT_CLASS_3 (0x00000004)
#define MPI_FCPORTPAGE0_SUPPORT_SPEED_UKNOWN (0x00000000) /* (SNIA)HBA_PORTSPEED_UNKNOWN 0 Unknown - transceiver incapable of reporting */
#define MPI_FCPORTPAGE0_SUPPORT_SPEED_UNKNOWN (0x00000000) /* (SNIA)HBA_PORTSPEED_UNKNOWN 0 Unknown - transceiver incapable of reporting */
#define MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED (0x00000001) /* (SNIA)HBA_PORTSPEED_1GBIT 1 1 GBit/sec */
#define MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED (0x00000002) /* (SNIA)HBA_PORTSPEED_2GBIT 2 2 GBit/sec */
#define MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED (0x00000004) /* (SNIA)HBA_PORTSPEED_10GBIT 4 10 GBit/sec */
#define MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED (0x00000008) /* (SNIA)HBA_PORTSPEED_4GBIT 8 4 GBit/sec */
#define MPI_FCPORTPAGE0_CURRENT_SPEED_UKNOWN MPI_FCPORTPAGE0_SUPPORT_SPEED_UKNOWN
#define MPI_FCPORTPAGE0_CURRENT_SPEED_UNKNOWN MPI_FCPORTPAGE0_SUPPORT_SPEED_UNKNOWN
#define MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED
#define MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED
#define MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED


@ -7600,7 +7600,7 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Initiator Device Table Overflow: max initiators=%02d "
"current initators=%02d",
"current initiators=%02d",
max_init, current_init);
break;
}


@ -693,7 +693,7 @@ mptfc_display_port_link_speed(MPT_ADAPTER *ioc, int portnum, FCPortPage0_t *pp0d
state = pp0dest->PortState;
if (state != MPI_FCPORTPAGE0_PORTSTATE_OFFLINE &&
new_speed != MPI_FCPORTPAGE0_CURRENT_SPEED_UKNOWN) {
new_speed != MPI_FCPORTPAGE0_CURRENT_SPEED_UNKNOWN) {
old = old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT ? "1 Gbps" :
old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT ? "2 Gbps" :


@ -670,7 +670,7 @@ out:
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
static netdev_tx_t
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
struct mpt_lan_priv *priv = netdev_priv(dev);


@ -4320,7 +4320,7 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID ==
hot_plug_info->id) {
printk(MYIOC_s_WARN_FMT "firmware bug: unable "
"to add hidden disk - target_id matchs "
"to add hidden disk - target_id matches "
"volume_id\n", ioc->name);
mptsas_free_fw_event(ioc, fw_event);
return;


@ -189,7 +189,6 @@ struct netvsc_device;
struct net_device_context;
extern u32 netvsc_ring_bytes;
extern struct reciprocal_value netvsc_ring_reciprocal;
struct netvsc_device *netvsc_device_add(struct hv_device *device,
const struct netvsc_device_info *info);


@ -31,7 +31,6 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <linux/reciprocal_div.h>
#include <asm/sync_bitops.h>
@ -635,17 +634,6 @@ void netvsc_device_remove(struct hv_device *device)
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10
/*
* Get the percentage of available bytes to write in the ring.
* The return value is in range from 0 to 100.
*/
static u32 hv_ringbuf_avail_percent(const struct hv_ring_buffer_info *ring_info)
{
u32 avail_write = hv_get_bytes_to_write(ring_info);
return reciprocal_divide(avail_write * 100, netvsc_ring_reciprocal);
}
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
u32 index)
{
@ -694,8 +682,8 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
if (netif_tx_queue_stopped(txq) &&
(hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
queue_sends < 1)) {
(hv_get_avail_to_write_percent(&channel->outbound) >
RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
netif_tx_wake_queue(txq);
ndev_ctx->eth_stats.wake_queue++;
}
@ -802,7 +790,7 @@ static inline int netvsc_send_pkt(
struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
u64 req_id;
int ret;
u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
if (skb)


@ -35,7 +35,6 @@
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>
#include <linux/reciprocal_div.h>
#include <net/arp.h>
#include <net/route.h>
@ -59,7 +58,6 @@ static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;
struct reciprocal_value netvsc_ring_reciprocal __ro_after_init;
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
@ -2130,7 +2128,6 @@ static int __init netvsc_drv_init(void)
ring_size);
}
netvsc_ring_bytes = ring_size * PAGE_SIZE;
netvsc_ring_reciprocal = reciprocal_value(netvsc_ring_bytes);
ret = vmbus_driver_register(&netvsc_drv);
if (ret)


@ -285,6 +285,8 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
struct list_head *entry;
unsigned long flags;
lockdep_assert_held(&adapter->erp_lock);
if (unlikely(!debug_level_enabled(dbf->rec, level)))
return;
@ -599,16 +601,18 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
}
/**
* zfcp_dbf_scsi - trace event for scsi commands
* @tag: identifier for event
* @sc: pointer to struct scsi_cmnd
* @fsf: pointer to struct zfcp_fsf_req
* zfcp_dbf_scsi_common() - Common trace event helper for scsi.
* @tag: Identifier for event.
* @level: trace level of event.
* @sdev: Pointer to SCSI device as context for this event.
* @sc: Pointer to SCSI command, or NULL with task management function (TMF).
* @fsf: Pointer to FSF request, or NULL.
*/
void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
struct zfcp_fsf_req *fsf)
void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev,
struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) sc->device->host->hostdata[0];
(struct zfcp_adapter *) sdev->host->hostdata[0];
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
struct fcp_resp_with_ext *fcp_rsp;
@ -620,16 +624,28 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_SCSI_CMND;
rec->scsi_result = sc->result;
rec->scsi_retries = sc->retries;
rec->scsi_allowed = sc->allowed;
rec->scsi_id = sc->device->id;
rec->scsi_lun = (u32)sc->device->lun;
rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
rec->host_scribble = (unsigned long)sc->host_scribble;
if (sc) {
rec->scsi_result = sc->result;
rec->scsi_retries = sc->retries;
rec->scsi_allowed = sc->allowed;
rec->scsi_id = sc->device->id;
rec->scsi_lun = (u32)sc->device->lun;
rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
rec->host_scribble = (unsigned long)sc->host_scribble;
memcpy(rec->scsi_opcode, sc->cmnd,
min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
memcpy(rec->scsi_opcode, sc->cmnd,
min_t(int, sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
} else {
rec->scsi_result = ~0;
rec->scsi_retries = ~0;
rec->scsi_allowed = ~0;
rec->scsi_id = sdev->id;
rec->scsi_lun = (u32)sdev->lun;
rec->scsi_lun_64_hi = (u32)(sdev->lun >> 32);
rec->host_scribble = ~0;
memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
}
if (fsf) {
rec->fsf_req_id = fsf->req_id;
@ -664,6 +680,46 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
/**
* zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
* @tag: Identifier for event.
* @adapter: Pointer to zfcp adapter as context for this event.
* @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
* @ret: Return value of calling function.
*
* This SCSI trace variant does not depend on any of:
* scsi_cmnd, zfcp_fsf_req, scsi_device.
*/
void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
unsigned int scsi_id, int ret)
{
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
unsigned long flags;
static int const level = 1;
if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
return;
spin_lock_irqsave(&dbf->scsi_lock, flags);
memset(rec, 0, sizeof(*rec));
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_SCSI_CMND;
rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
rec->scsi_retries = ~0;
rec->scsi_allowed = ~0;
rec->fcp_rsp_info = ~0;
rec->scsi_id = scsi_id;
rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
rec->host_scribble = ~0;
memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
debug_event(dbf->scsi, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
struct debug_info *d;


@ -359,7 +359,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
scmd->device->host->hostdata[0];
if (debug_level_enabled(adapter->dbf->scsi, level))
zfcp_dbf_scsi(tag, level, scmd, req);
zfcp_dbf_scsi_common(tag, level, scmd->device, scmd, req);
}
/**
@ -402,16 +402,23 @@ void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
}
/**
* zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset
* @tag: tag indicating success or failure of reset operation
* @scmnd: SCSI command which caused this error recovery
* @flag: indicates type of reset (Target Reset, Logical Unit Reset)
* zfcp_dbf_scsi_devreset() - Trace event for Logical Unit or Target Reset.
* @tag: Tag indicating success or failure of reset operation.
* @sdev: Pointer to SCSI device as context for this event.
* @flag: Indicates type of reset (Target Reset, Logical Unit Reset).
* @fsf_req: Pointer to FSF request representing the TMF, or NULL.
*/
static inline
void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
void zfcp_dbf_scsi_devreset(char *tag, struct scsi_device *sdev, u8 flag,
struct zfcp_fsf_req *fsf_req)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *)
sdev->host->hostdata[0];
char tmp_tag[ZFCP_DBF_TAG_LEN];
static int const level = 1;
if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
return;
if (flag == FCP_TMF_TGT_RESET)
memcpy(tmp_tag, "tr_", 3);
@ -419,7 +426,7 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
memcpy(tmp_tag, "lr_", 3);
memcpy(&tmp_tag[3], tag, 4);
_zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req);
zfcp_dbf_scsi_common(tmp_tag, level, sdev, NULL, fsf_req);
}
/**


@ -19,7 +19,6 @@
enum zfcp_erp_act_flags {
ZFCP_STATUS_ERP_TIMEDOUT = 0x10000000,
ZFCP_STATUS_ERP_CLOSE_ONLY = 0x01000000,
ZFCP_STATUS_ERP_DISMISSING = 0x00100000,
ZFCP_STATUS_ERP_DISMISSED = 0x00200000,
ZFCP_STATUS_ERP_LOWMEM = 0x00400000,
ZFCP_STATUS_ERP_NO_REF = 0x00800000,
@ -27,7 +26,6 @@ enum zfcp_erp_act_flags {
enum zfcp_erp_steps {
ZFCP_ERP_STEP_UNINITIALIZED = 0x0000,
ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001,
ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
@ -35,16 +33,28 @@ enum zfcp_erp_steps {
ZFCP_ERP_STEP_LUN_OPENING = 0x2000,
};
/**
* enum zfcp_erp_act_type - Type of ERP action object.
* @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
* @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
* @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
* @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
* @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with
* either of the first four enum values.
* Used to indicate that an ERP action could not be
* set up despite a detected need for some recovery.
* @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with
* either of the first four enum values.
* Used to indicate that ERP not needed because
* the object has ZFCP_STATUS_COMMON_ERP_FAILED.
*/
enum zfcp_erp_act_type {
ZFCP_ERP_ACTION_REOPEN_LUN = 1,
ZFCP_ERP_ACTION_REOPEN_PORT = 2,
ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
};
enum zfcp_erp_act_state {
ZFCP_ERP_ACTION_RUNNING = 1,
ZFCP_ERP_ACTION_READY = 2,
ZFCP_ERP_ACTION_NONE = 0xc0,
ZFCP_ERP_ACTION_FAILED = 0xe0,
};
enum zfcp_erp_act_result {
@ -62,14 +72,14 @@ static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask)
ZFCP_STATUS_COMMON_UNBLOCKED | mask);
}
static int zfcp_erp_action_exists(struct zfcp_erp_action *act)
static bool zfcp_erp_action_is_running(struct zfcp_erp_action *act)
{
struct zfcp_erp_action *curr_act;
list_for_each_entry(curr_act, &act->adapter->erp_running_head, list)
if (act == curr_act)
return ZFCP_ERP_ACTION_RUNNING;
return 0;
return true;
return false;
}
static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
@ -85,7 +95,7 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
{
act->status |= ZFCP_STATUS_ERP_DISMISSED;
if (zfcp_erp_action_exists(act) == ZFCP_ERP_ACTION_RUNNING)
if (zfcp_erp_action_is_running(act))
zfcp_erp_action_ready(act);
}
@ -126,6 +136,49 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
}
}
static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev)
{
int need = want;
struct zfcp_scsi_dev *zsdev;
switch (want) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
zsdev = sdev_to_zfcp(sdev);
if (atomic_read(&zsdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
need = 0;
break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
need = 0;
break;
case ZFCP_ERP_ACTION_REOPEN_PORT:
if (atomic_read(&port->status) &
ZFCP_STATUS_COMMON_ERP_FAILED) {
need = 0;
/* ensure propagation of failed status to new devices */
zfcp_erp_set_port_status(
port, ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
if (atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_ERP_FAILED) {
need = 0;
/* ensure propagation of failed status to new devices */
zfcp_erp_set_adapter_status(
adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
default:
need = 0;
break;
}
return need;
}
static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev)
@ -241,48 +294,70 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
return erp_action;
}
static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev,
char *id, u32 act_status)
static void zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev,
char *id, u32 act_status)
{
int retval = 1, need;
int need;
struct zfcp_erp_action *act;
if (!adapter->erp_thread)
return -EIO;
need = zfcp_erp_handle_failed(want, adapter, port, sdev);
if (!need) {
need = ZFCP_ERP_ACTION_FAILED; /* marker for trace */
goto out;
}
if (!adapter->erp_thread) {
need = ZFCP_ERP_ACTION_NONE; /* marker for trace */
goto out;
}
need = zfcp_erp_required_act(want, adapter, port, sdev);
if (!need)
goto out;
act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
if (!act)
if (!act) {
need |= ZFCP_ERP_ACTION_NONE; /* marker for trace */
goto out;
}
atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
++adapter->erp_total_count;
list_add_tail(&act->list, &adapter->erp_ready_head);
wake_up(&adapter->erp_ready_wq);
retval = 0;
out:
zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
return retval;
}
static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
u64 port_name, u32 port_id)
{
unsigned long flags;
static /* don't waste stack */ struct zfcp_port tmpport;
write_lock_irqsave(&adapter->erp_lock, flags);
/* Stand-in zfcp port with fields just good enough for
* zfcp_dbf_rec_trig() and zfcp_dbf_set_common().
* Under lock because tmpport is static.
*/
atomic_set(&tmpport.status, -1); /* unknown */
tmpport.wwpn = port_name;
tmpport.d_id = port_id;
zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL,
ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
ZFCP_ERP_ACTION_NONE);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
static void _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
int clear_mask, char *id)
{
zfcp_erp_adapter_block(adapter, clear_mask);
zfcp_scsi_schedule_rports_block(adapter);
/* ensure propagation of failed status to new devices */
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
zfcp_erp_set_adapter_status(adapter,
ZFCP_STATUS_COMMON_ERP_FAILED);
return -EIO;
}
return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
adapter, NULL, NULL, id, 0);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
adapter, NULL, NULL, id, 0);
}
/**
@ -299,12 +374,8 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
zfcp_scsi_schedule_rports_block(adapter);
write_lock_irqsave(&adapter->erp_lock, flags);
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
zfcp_erp_set_adapter_status(adapter,
ZFCP_STATUS_COMMON_ERP_FAILED);
else
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
NULL, NULL, id, 0);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
NULL, NULL, id, 0);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
@ -345,9 +416,6 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
zfcp_erp_port_block(port, clear);
zfcp_scsi_schedule_rport_block(port);
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
return;
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
port->adapter, port, NULL, id, 0);
}
@ -368,19 +436,13 @@ void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id)
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
static void _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
{
zfcp_erp_port_block(port, clear);
zfcp_scsi_schedule_rport_block(port);
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
/* ensure propagation of failed status to new devices */
zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
return -EIO;
}
return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
port->adapter, port, NULL, id, 0);
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
port->adapter, port, NULL, id, 0);
}
/**
@ -388,20 +450,15 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
* @port: port to recover
* @clear_mask: flags in port status to be cleared
* @id: Id for debug trace event.
*
* Returns 0 if recovery has been triggered, < 0 if not.
*/
int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
{
int retval;
unsigned long flags;
struct zfcp_adapter *adapter = port->adapter;
write_lock_irqsave(&adapter->erp_lock, flags);
retval = _zfcp_erp_port_reopen(port, clear, id);
_zfcp_erp_port_reopen(port, clear, id);
write_unlock_irqrestore(&adapter->erp_lock, flags);
return retval;
}
static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
@ -418,9 +475,6 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
zfcp_erp_lun_block(sdev, clear);
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
return;
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
zfcp_sdev->port, sdev, id, act_status);
}
@ -482,21 +536,23 @@ void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
zfcp_erp_wait(adapter);
}
static int status_change_set(unsigned long mask, atomic_t *status)
static int zfcp_erp_status_change_set(unsigned long mask, atomic_t *status)
{
return (atomic_read(status) ^ mask) & mask;
}
static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
{
if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED,
&adapter->status))
zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
}
static void zfcp_erp_port_unblock(struct zfcp_port *port)
{
if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED,
&port->status))
zfcp_dbf_rec_run("erpubl1", &port->erp_action);
atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
}
@ -505,7 +561,8 @@ static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED,
&zfcp_sdev->status))
zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
}
@ -553,7 +610,7 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
unsigned long flags;
write_lock_irqsave(&adapter->erp_lock, flags);
if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
if (zfcp_erp_action_is_running(erp_action)) {
erp_action->status |= set_mask;
zfcp_erp_action_ready(erp_action);
}
@ -1634,3 +1691,14 @@ void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
atomic_set(&zfcp_sdev->erp_counter, 0);
}
/**
* zfcp_erp_adapter_reset_sync() - Really reopen adapter and wait.
* @adapter: Pointer to zfcp_adapter to reopen.
* @id: Trace tag string of length %ZFCP_DBF_TAG_LEN.
*/
void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id)
{
zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, id);
zfcp_erp_wait(adapter);
}


@ -50,17 +50,23 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
struct zfcp_fsf_req *);
extern void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev,
struct scsi_cmnd *sc,
struct zfcp_fsf_req *fsf);
extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
unsigned int scsi_id, int ret);
/* zfcp_erp.c */
extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
extern void zfcp_erp_port_forced_no_port_dbf(char *id,
struct zfcp_adapter *adapter,
u64 port_name, u32 port_id);
extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *);
extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id);
extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
@ -73,6 +79,7 @@ extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
extern void zfcp_erp_wait(struct zfcp_adapter *);
extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
extern void zfcp_erp_timeout_handler(struct timer_list *t);
extern void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id);
/* zfcp_fc.c */
extern struct kmem_cache *zfcp_fc_req_cache;
@ -120,7 +127,8 @@ extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
struct zfcp_fsf_ct_els *, unsigned int);
extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *, u8);
extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
u8 tm_flags);
extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);


@ -111,11 +111,10 @@ void zfcp_fc_post_event(struct work_struct *work)
list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
event->code, event->data);
event->code, event->data);
list_del(&event->list);
kfree(event);
}
}
/**
@ -126,7 +125,7 @@ void zfcp_fc_post_event(struct work_struct *work)
* @event_data: The event data (e.g. n_port page in case of els)
*/
void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
enum fc_host_event_code event_code, u32 event_data)
enum fc_host_event_code event_code, u32 event_data)
{
struct zfcp_fc_event *event;
@ -425,6 +424,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
struct zfcp_port *port = container_of(work, struct zfcp_port,
gid_pn_work);
set_worker_desc("zgidpn%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
ret = zfcp_fc_ns_gid_pn(port);
if (ret) {
/* could not issue gid_pn for some reason */
@ -559,6 +559,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
container_of(work, struct zfcp_port, test_link_work);
int retval;
set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
get_device(&port->dev);
port->rport_task = RPORT_DEL;
zfcp_scsi_rport_work(&port->rport_work);
@ -596,7 +597,7 @@ void zfcp_fc_test_link(struct zfcp_port *port)
put_device(&port->dev);
}
static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num)
static struct zfcp_fc_req *zfcp_fc_alloc_sg_env(int buf_num)
{
struct zfcp_fc_req *fc_req;
@ -748,7 +749,7 @@ void zfcp_fc_scan_ports(struct work_struct *work)
if (zfcp_fc_wka_port_get(&adapter->gs->ds))
return;
fc_req = zfcp_alloc_sg_env(buf_num);
fc_req = zfcp_fc_alloc_sg_env(buf_num);
if (!fc_req)
goto out;


@ -207,21 +207,14 @@ struct zfcp_fc_wka_ports {
* zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
* @fcp: fcp_cmnd to setup
* @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB
* @tm: task management flags to setup task management command
*/
static inline
void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
u8 tm_flags)
void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
{
u32 datalen;
int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
if (unlikely(tm_flags)) {
fcp->fc_tm_flags = tm_flags;
return;
}
fcp->fc_pri_ta = FCP_PTA_SIMPLE;
if (scsi->sc_data_direction == DMA_FROM_DEVICE)
@ -240,6 +233,19 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
}
}
/**
* zfcp_fc_fcp_tm() - Setup FCP command as task management command.
* @fcp: Pointer to FCP_CMND IU to set up.
* @dev: Pointer to SCSI_device where to send the task management command.
* @tm_flags: Task management flags to setup tm command.
*/
static inline
void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags)
{
int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun);
fcp->fc_tm_flags = tm_flags;
}
/**
* zfcp_fc_evap_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
* @fcp_rsp: FCP RSP IU to evaluate


@ -4,7 +4,7 @@
*
* Implementation of FSF commands.
*
* Copyright IBM Corp. 2002, 2017
* Copyright IBM Corp. 2002, 2018
*/
#define KMSG_COMPONENT "zfcp"
@ -437,6 +437,9 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
#define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3)
#define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4)
#define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5)
#define ZFCP_FSF_PORTSPEED_32GBIT (1 << 6)
#define ZFCP_FSF_PORTSPEED_64GBIT (1 << 7)
#define ZFCP_FSF_PORTSPEED_128GBIT (1 << 8)
#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
@ -454,6 +457,12 @@ static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
fdmi_speed |= FC_PORTSPEED_8GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
fdmi_speed |= FC_PORTSPEED_16GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_32GBIT)
fdmi_speed |= FC_PORTSPEED_32GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_64GBIT)
fdmi_speed |= FC_PORTSPEED_64GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_128GBIT)
fdmi_speed |= FC_PORTSPEED_128GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
return fdmi_speed;
@ -662,7 +671,7 @@ static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
return req;
}
static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool)
{
struct fsf_qtcb *qtcb;
@ -701,9 +710,10 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
if (likely(pool))
req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
req->qtcb = zfcp_fsf_qtcb_alloc(
adapter->pool.qtcb_pool);
else
req->qtcb = zfcp_qtcb_alloc(NULL);
req->qtcb = zfcp_fsf_qtcb_alloc(NULL);
if (unlikely(!req->qtcb)) {
zfcp_fsf_req_free(req);
@ -2036,10 +2046,14 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
sizeof(blktrc));
}
static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
/**
* zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF.
* @req: Pointer to FSF request.
* @sdev: Pointer to SCSI device as request context.
*/
static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
struct scsi_device *sdev)
{
struct scsi_cmnd *scmnd = req->data;
struct scsi_device *sdev = scmnd->device;
struct zfcp_scsi_dev *zfcp_sdev;
struct fsf_qtcb_header *header = &req->qtcb->header;
@ -2051,7 +2065,7 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_HANDLE_MISMATCH:
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_FCPLUN_NOT_VALID:
@ -2069,8 +2083,7 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
req->qtcb->bottom.io.data_direction,
(unsigned long long)zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn);
zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
"fssfch3");
zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_CMND_LENGTH_NOT_VALID:
@ -2080,8 +2093,7 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
req->qtcb->bottom.io.fcp_cmnd_length,
(unsigned long long)zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn);
zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
"fssfch4");
zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_PORT_BOXED:
@ -2120,7 +2132,7 @@ static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
return;
}
zfcp_fsf_fcp_handler_common(req);
zfcp_fsf_fcp_handler_common(req, scpnt->device);
if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
@ -2258,7 +2270,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE);
fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
scsi_prot_sg_count(scsi_cmnd)) {
@ -2297,10 +2309,11 @@ out:
static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
{
struct scsi_device *sdev = req->data;
struct fcp_resp_with_ext *fcp_rsp;
struct fcp_resp_rsp_info *rsp_info;
zfcp_fsf_fcp_handler_common(req);
zfcp_fsf_fcp_handler_common(req, sdev);
fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
@ -2311,17 +2324,18 @@ static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
}
/**
* zfcp_fsf_fcp_task_mgmt - send SCSI task management command
* @scmnd: SCSI command to send the task management command for
* @tm_flags: unsigned byte for task management flags
* Returns: on success pointer to struct fsf_req, NULL otherwise
* zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF).
* @sdev: Pointer to SCSI device to send the task management command to.
* @tm_flags: Unsigned byte for task management flags.
*
* Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise.
*/
struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
u8 tm_flags)
{
struct zfcp_fsf_req *req = NULL;
struct fcp_cmnd *fcp_cmnd;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
if (unlikely(!(atomic_read(&zfcp_sdev->status) &
@ -2341,7 +2355,8 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
goto out;
}
req->data = scmnd;
req->data = sdev;
req->handler = zfcp_fsf_fcp_task_mgmt_handler;
req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
req->qtcb->header.port_handle = zfcp_sdev->port->handle;
@ -2352,7 +2367,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
if (!zfcp_fsf_req_send(req))


@ -4,7 +4,7 @@
*
* Interface to the FSF support functions.
*
* Copyright IBM Corp. 2002, 2017
* Copyright IBM Corp. 2002, 2018
*/
#ifndef FSF_H
@ -356,7 +356,7 @@ struct fsf_qtcb_bottom_config {
u32 adapter_features;
u32 connection_features;
u32 fc_topology;
u32 fc_link_speed;
u32 fc_link_speed; /* one of ZFCP_FSF_PORTSPEED_* */
u32 adapter_type;
u8 res0;
u8 peer_d_id[3];
@ -382,7 +382,7 @@ struct fsf_qtcb_bottom_port {
u32 class_of_service; /* should be 0x00000006 for class 2 and 3 */
u8 supported_fc4_types[32]; /* should be 0x00000100 for scsi fcp */
u8 active_fc4_types[32];
u32 supported_speed; /* 0x0001 for 1 GBit/s or 0x0002 for 2 GBit/s */
u32 supported_speed; /* any combination of ZFCP_FSF_PORTSPEED_* */
u32 maximum_frame_size; /* fixed value of 2112 */
u64 seconds_since_last_reset;
u64 tx_frames;


@ -181,6 +181,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
if (abrt_req)
break;
zfcp_dbf_scsi_abort("abrt_wt", scpnt, NULL);
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
if (ret) {
@ -264,44 +265,52 @@ static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
write_unlock_irqrestore(&adapter->abort_lock, flags);
}
static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
/**
* zfcp_scsi_task_mgmt_function() - Send a task management function (sync).
* @sdev: Pointer to SCSI device to send the task management command to.
* @tm_flags: Task management flags,
* here we only handle %FCP_TMF_TGT_RESET or %FCP_TMF_LUN_RESET.
*/
static int zfcp_scsi_task_mgmt_function(struct scsi_device *sdev, u8 tm_flags)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct zfcp_fsf_req *fsf_req = NULL;
int retval = SUCCESS, ret;
int retry = 3;
while (retry--) {
fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
fsf_req = zfcp_fsf_fcp_task_mgmt(sdev, tm_flags);
if (fsf_req)
break;
zfcp_dbf_scsi_devreset("wait", sdev, tm_flags, NULL);
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
ret = fc_block_rport(rport);
if (ret) {
zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL);
zfcp_dbf_scsi_devreset("fiof", sdev, tm_flags, NULL);
return ret;
}
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL);
zfcp_dbf_scsi_devreset("nres", sdev, tm_flags, NULL);
return SUCCESS;
}
}
if (!fsf_req) {
zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL);
zfcp_dbf_scsi_devreset("reqf", sdev, tm_flags, NULL);
return FAILED;
}
wait_for_completion(&fsf_req->completion);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req);
zfcp_dbf_scsi_devreset("fail", sdev, tm_flags, fsf_req);
retval = FAILED;
} else {
zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req);
zfcp_dbf_scsi_devreset("okay", sdev, tm_flags, fsf_req);
zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
}
@ -311,27 +320,81 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
struct scsi_device *sdev = scpnt->device;
return zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_LUN_RESET);
}
static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
struct scsi_target *starget = scsi_target(scpnt->device);
struct fc_rport *rport = starget_to_rport(starget);
struct Scsi_Host *shost = rport_to_shost(rport);
struct scsi_device *sdev = NULL, *tmp_sdev;
struct zfcp_adapter *adapter =
(struct zfcp_adapter *)shost->hostdata[0];
int ret;
shost_for_each_device(tmp_sdev, shost) {
if (tmp_sdev->id == starget->id) {
sdev = tmp_sdev;
break;
}
}
if (!sdev) {
ret = FAILED;
zfcp_dbf_scsi_eh("tr_nosd", adapter, starget->id, ret);
return ret;
}
ret = zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_TGT_RESET);
/* release reference from above shost_for_each_device */
if (sdev)
scsi_device_put(tmp_sdev);
return ret;
}
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
int ret;
int ret = SUCCESS, fc_ret;
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
if (ret)
return ret;
fc_ret = fc_block_scsi_eh(scpnt);
if (fc_ret)
ret = fc_ret;
return SUCCESS;
zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret);
return ret;
}
/**
* zfcp_scsi_sysfs_host_reset() - Support scsi_host sysfs attribute host_reset.
* @shost: Pointer to Scsi_Host to perform action on.
* @reset_type: We support %SCSI_ADAPTER_RESET but not %SCSI_FIRMWARE_RESET.
*
* Return: 0 on %SCSI_ADAPTER_RESET, -%EOPNOTSUPP otherwise.
*
* This is similar to zfcp_sysfs_adapter_failed_store().
*/
static int zfcp_scsi_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *)shost->hostdata[0];
int ret = 0;
if (reset_type != SCSI_ADAPTER_RESET) {
ret = -EOPNOTSUPP;
zfcp_dbf_scsi_eh("scshr_n", adapter, ~0, ret);
return ret;
}
zfcp_erp_adapter_reset_sync(adapter, "scshr_y");
return ret;
}
struct scsi_transport_template *zfcp_scsi_transport_template;
@ -349,6 +412,7 @@ static struct scsi_host_template zfcp_scsi_host_template = {
.slave_configure = zfcp_scsi_slave_configure,
.slave_destroy = zfcp_scsi_slave_destroy,
.change_queue_depth = scsi_change_queue_depth,
.host_reset = zfcp_scsi_sysfs_host_reset,
.proc_name = "zfcp",
.can_queue = 4096,
.this_id = -1,
@ -363,6 +427,7 @@ static struct scsi_host_template zfcp_scsi_host_template = {
.shost_attrs = zfcp_sysfs_shost_attrs,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
.track_queue_depth = 1,
.supported_mode = MODE_INITIATOR,
};
/**
@ -430,7 +495,7 @@ void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
}
static struct fc_host_statistics*
zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
zfcp_scsi_init_fc_host_stats(struct zfcp_adapter *adapter)
{
struct fc_host_statistics *fc_stats;
@ -444,9 +509,9 @@ zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
return adapter->fc_stats;
}
static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
struct fsf_qtcb_bottom_port *data,
struct fsf_qtcb_bottom_port *old)
static void zfcp_scsi_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
struct fsf_qtcb_bottom_port *data,
struct fsf_qtcb_bottom_port *old)
{
fc_stats->seconds_since_last_reset =
data->seconds_since_last_reset - old->seconds_since_last_reset;
@ -477,8 +542,8 @@ static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
}
static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
struct fsf_qtcb_bottom_port *data)
static void zfcp_scsi_set_fc_host_stats(struct fc_host_statistics *fc_stats,
struct fsf_qtcb_bottom_port *data)
{
fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
fc_stats->tx_frames = data->tx_frames;
@ -502,7 +567,8 @@ static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
fc_stats->fcp_output_megabytes = data->output_mb;
}
static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
static struct fc_host_statistics *
zfcp_scsi_get_fc_host_stats(struct Scsi_Host *host)
{
struct zfcp_adapter *adapter;
struct fc_host_statistics *fc_stats;
@ -510,7 +576,7 @@ static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
int ret;
adapter = (struct zfcp_adapter *)host->hostdata[0];
fc_stats = zfcp_init_fc_host_stats(adapter);
fc_stats = zfcp_scsi_init_fc_host_stats(adapter);
if (!fc_stats)
return NULL;
@ -527,16 +593,16 @@ static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
if (adapter->stats_reset &&
((jiffies/HZ - adapter->stats_reset) <
data->seconds_since_last_reset))
zfcp_adjust_fc_host_stats(fc_stats, data,
adapter->stats_reset_data);
zfcp_scsi_adjust_fc_host_stats(fc_stats, data,
adapter->stats_reset_data);
else
zfcp_set_fc_host_stats(fc_stats, data);
zfcp_scsi_set_fc_host_stats(fc_stats, data);
kfree(data);
return fc_stats;
}
static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
static void zfcp_scsi_reset_fc_host_stats(struct Scsi_Host *shost)
{
struct zfcp_adapter *adapter;
struct fsf_qtcb_bottom_port *data;
@ -558,7 +624,7 @@ static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
}
}
static void zfcp_get_host_port_state(struct Scsi_Host *shost)
static void zfcp_scsi_get_host_port_state(struct Scsi_Host *shost)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *)shost->hostdata[0];
@ -575,7 +641,8 @@ static void zfcp_get_host_port_state(struct Scsi_Host *shost)
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
}
static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
static void zfcp_scsi_set_rport_dev_loss_tmo(struct fc_rport *rport,
u32 timeout)
{
rport->dev_loss_tmo = timeout;
}
@ -602,6 +669,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
if (port) {
zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
put_device(&port->dev);
} else {
zfcp_erp_port_forced_no_port_dbf(
"sctrpin", adapter,
rport->port_name /* zfcp_scsi_rport_register */,
rport->port_id /* zfcp_scsi_rport_register */);
}
}
@ -687,6 +759,9 @@ void zfcp_scsi_rport_work(struct work_struct *work)
struct zfcp_port *port = container_of(work, struct zfcp_port,
rport_work);
set_worker_desc("zrp%c-%16llx",
(port->rport_task == RPORT_ADD) ? 'a' : 'd',
port->wwpn); /* < WORKER_DESC_LEN=24 */
while (port->rport_task) {
if (port->rport_task == RPORT_ADD) {
port->rport_task = RPORT_NONE;
@ -761,10 +836,10 @@ struct fc_function_template zfcp_transport_functions = {
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
.show_host_serial_number = 1,
.get_fc_host_stats = zfcp_get_fc_host_stats,
.reset_fc_host_stats = zfcp_reset_fc_host_stats,
.set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo,
.get_host_port_state = zfcp_get_host_port_state,
.get_fc_host_stats = zfcp_scsi_get_fc_host_stats,
.reset_fc_host_stats = zfcp_scsi_reset_fc_host_stats,
.set_rport_dev_loss_tmo = zfcp_scsi_set_rport_dev_loss_tmo,
.get_host_port_state = zfcp_scsi_get_host_port_state,
.terminate_rport_io = zfcp_scsi_terminate_rport_io,
.show_host_port_state = 1,
.show_host_active_fc4s = 1,


@ -200,10 +200,7 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
goto out;
}
zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
"syafai2");
zfcp_erp_wait(adapter);
zfcp_erp_adapter_reset_sync(adapter, "syafai2");
out:
zfcp_ccw_adapter_put(adapter);
return retval ? retval : (ssize_t) count;


@ -882,6 +882,11 @@ static int twa_chrdev_open(struct inode *inode, struct file *file)
unsigned int minor_number;
int retval = TW_IOCTL_ERROR_OS_ENODEV;
if (!capable(CAP_SYS_ADMIN)) {
retval = -EACCES;
goto out;
}
minor_number = iminor(inode);
if (minor_number >= twa_device_extension_count)
goto out;


@ -1033,6 +1033,9 @@ static int tw_chrdev_open(struct inode *inode, struct file *file)
dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n");
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
minor_number = iminor(inode);
if (minor_number >= tw_device_extension_count)
return -ENODEV;

View File

@ -1351,6 +1351,20 @@ config SCSI_ZORRO7XX
accelerator card for the Amiga 1200,
- the SCSI controller on the GVP Turbo 040/060 accelerator.
config SCSI_ZORRO_ESP
tristate "Zorro ESP SCSI support"
depends on ZORRO && SCSI
select SCSI_SPI_ATTRS
help
Support for various NCR53C9x (ESP) based SCSI controllers on Zorro
expansion boards for the Amiga.
This includes:
- the Phase5 Blizzard 1230 II and IV SCSI controllers,
- the Phase5 Blizzard 2060 SCSI controller,
- the Phase5 Blizzard Cyberstorm and Cyberstorm II SCSI
controllers,
- the Fastlane Zorro III SCSI controller.
config ATARI_SCSI
tristate "Atari native SCSI support"
depends on ATARI && SCSI


@ -48,6 +48,7 @@ obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o
obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o
obj-$(CONFIG_SCSI_ZORRO7XX) += 53c700.o zorro7xx.o
obj-$(CONFIG_SCSI_ZORRO_ESP) += esp_scsi.o zorro_esp.o
obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o
obj-$(CONFIG_A2091_SCSI) += a2091.o wd33c93.o
obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o
@ -189,7 +190,7 @@ $(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
$(obj)/scsi_sysfs.o: $(obj)/scsi_devinfo_tbl.c
quiet_cmd_bflags = GEN $@
cmd_bflags = sed -n 's/.*BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@
cmd_bflags = sed -n 's/.*define *BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@
$(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h
$(call if_changed,bflags)
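
The tightened sed expression only generates BLIST_FLAG_NAME() entries
from header lines that actually define a flag. A hypothetical pair of
lines showing the difference (only the first defines a BLIST_ flag; the
old, looser pattern would also have matched the second and emitted a
bogus entry for it):

	#define BLIST_NOLUN	((__force blist_flags_t)(1 << 0))
	#define __BLIST_UNUSED_MASK	(BLIST_NOLUN | BLIST_BORKEN)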


@ -1222,19 +1222,8 @@ static struct pci_driver inia100_pci_driver = {
.remove = inia100_remove_one,
};
static int __init inia100_init(void)
{
return pci_register_driver(&inia100_pci_driver);
}
static void __exit inia100_exit(void)
{
pci_unregister_driver(&inia100_pci_driver);
}
module_pci_driver(inia100_pci_driver);
MODULE_DESCRIPTION("Initio A100U2W SCSI driver");
MODULE_AUTHOR("Initio Corporation");
MODULE_LICENSE("Dual BSD/GPL");
module_init(inia100_init);
module_exit(inia100_exit);
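
module_pci_driver() generates exactly the boilerplate deleted here (the
am53c974 driver below gets the same conversion). Going by module_driver()
in include/linux/device.h, the macro expands to roughly:

	static int __init inia100_pci_driver_init(void)
	{
		return pci_register_driver(&inia100_pci_driver);
	}
	module_init(inia100_pci_driver_init);

	static void __exit inia100_pci_driver_exit(void)
	{
		pci_unregister_driver(&inia100_pci_driver);
	}
	module_exit(inia100_pci_driver_exit);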


@ -556,15 +556,7 @@ static struct pci_driver am53c974_driver = {
.remove = pci_esp_remove_one,
};
static int __init am53c974_module_init(void)
{
return pci_register_driver(&am53c974_driver);
}
static void __exit am53c974_module_exit(void)
{
pci_unregister_driver(&am53c974_driver);
}
module_pci_driver(am53c974_driver);
MODULE_DESCRIPTION("AM53C974 SCSI driver");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
@ -577,6 +569,3 @@ MODULE_PARM_DESC(am53c974_debug, "Enable debugging");
module_param(am53c974_fenab, bool, 0444);
MODULE_PARM_DESC(am53c974_fenab, "Enable 24-bit DMA transfer sizes");
module_init(am53c974_module_init);
module_exit(am53c974_module_exit);


@ -4,7 +4,7 @@
config CXLFLASH
tristate "Support for IBM CAPI Flash"
depends on PCI && SCSI && CXL && EEH
depends on PCI && SCSI && (CXL || OCXL) && EEH
select IRQ_POLL
default m
help


@ -1,2 +1,4 @@
obj-$(CONFIG_CXLFLASH) += cxlflash.o
cxlflash-y += main.o superpipe.o lunmgt.o vlun.o cxl_hw.o
cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
cxlflash-$(CONFIG_CXL) += cxl_hw.o
cxlflash-$(CONFIG_OCXL) += ocxl_hw.o


@ -12,30 +12,41 @@
* 2 of the License, or (at your option) any later version.
*/
#ifndef _CXLFLASH_BACKEND_H
#define _CXLFLASH_BACKEND_H
extern const struct cxlflash_backend_ops cxlflash_cxl_ops;
extern const struct cxlflash_backend_ops cxlflash_ocxl_ops;
struct cxlflash_backend_ops {
struct module *module;
void __iomem * (*psa_map)(void *);
void (*psa_unmap)(void __iomem *);
int (*process_element)(void *);
int (*map_afu_irq)(void *, int, irq_handler_t, void *, char *);
void (*unmap_afu_irq)(void *, int, void *);
int (*start_context)(void *);
int (*stop_context)(void *);
int (*afu_reset)(void *);
void (*set_master)(void *);
void * (*get_context)(struct pci_dev *, void *);
void * (*dev_context_init)(struct pci_dev *, void *);
int (*release_context)(void *);
void (*perst_reloads_same_image)(void *, bool);
ssize_t (*read_adapter_vpd)(struct pci_dev *, void *, size_t);
int (*allocate_afu_irqs)(void *, int);
void (*free_afu_irqs)(void *);
void * (*create_afu)(struct pci_dev *);
struct file * (*get_fd)(void *, struct file_operations *, int *);
void * (*fops_get_context)(struct file *);
int (*start_work)(void *, u64);
int (*fd_mmap)(struct file *, struct vm_area_struct *);
int (*fd_release)(struct inode *, struct file *);
void __iomem * (*psa_map)(void *ctx_cookie);
void (*psa_unmap)(void __iomem *addr);
int (*process_element)(void *ctx_cookie);
int (*map_afu_irq)(void *ctx_cookie, int num, irq_handler_t handler,
void *cookie, char *name);
void (*unmap_afu_irq)(void *ctx_cookie, int num, void *cookie);
u64 (*get_irq_objhndl)(void *ctx_cookie, int irq);
int (*start_context)(void *ctx_cookie);
int (*stop_context)(void *ctx_cookie);
int (*afu_reset)(void *ctx_cookie);
void (*set_master)(void *ctx_cookie);
void * (*get_context)(struct pci_dev *dev, void *afu_cookie);
void * (*dev_context_init)(struct pci_dev *dev, void *afu_cookie);
int (*release_context)(void *ctx_cookie);
void (*perst_reloads_same_image)(void *afu_cookie, bool image);
ssize_t (*read_adapter_vpd)(struct pci_dev *dev, void *buf,
size_t count);
int (*allocate_afu_irqs)(void *ctx_cookie, int num);
void (*free_afu_irqs)(void *ctx_cookie);
void * (*create_afu)(struct pci_dev *dev);
void (*destroy_afu)(void *afu_cookie);
struct file * (*get_fd)(void *ctx_cookie, struct file_operations *fops,
int *fd);
void * (*fops_get_context)(struct file *file);
int (*start_work)(void *ctx_cookie, u64 irqs);
int (*fd_mmap)(struct file *file, struct vm_area_struct *vm);
int (*fd_release)(struct inode *inode, struct file *file);
};
#endif /* _CXLFLASH_BACKEND_H */
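
This ops table is what lets the cxlflash core drive either a CXL or an
OCXL transport without compile-time knowledge of which one backs the
device. A sketch of a call through the table, assuming cfg->ops and
cfg->afu_cookie are wired up at probe time from the matched backend:

	/* sketch; locking and error paths omitted */
	void *ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
	int rc = cfg->ops->start_context(ctx);

	if (rc)
		cfg->ops->release_context(ctx);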


@ -211,6 +211,7 @@ struct hwq {
struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
ctx_hndl_t ctx_hndl; /* master's context handle */
u32 index; /* Index of this hwq */
int num_irqs; /* Number of interrupts requested for context */
struct list_head pending_cmds; /* Commands pending completion */
atomic_t hsq_credits;
@ -223,6 +224,7 @@ struct hwq {
u64 *hrrq_end;
u64 *hrrq_curr;
bool toggle;
bool hrrq_online;
s64 room;
@ -231,13 +233,14 @@ struct hwq {
struct afu {
struct hwq hwqs[CXLFLASH_MAX_HWQS];
int (*send_cmd)(struct afu *, struct afu_cmd *);
int (*context_reset)(struct hwq *);
int (*send_cmd)(struct afu *afu, struct afu_cmd *cmd);
int (*context_reset)(struct hwq *hwq);
/* AFU HW */
struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */
atomic_t cmds_active; /* Number of currently active AFU commands */
struct mutex sync_active; /* Mutex to serialize AFU commands */
u64 hb;
u32 internal_lun; /* User-desired LUN mode for this AFU */
@ -272,6 +275,11 @@ static inline bool afu_has_cap(struct afu *afu, u64 cap)
return afu_cap & cap;
}
static inline bool afu_is_ocxl_lisn(struct afu *afu)
{
return afu_has_cap(afu, SISL_INTVER_CAP_OCXL_LISN);
}
static inline bool afu_is_afu_debug(struct afu *afu)
{
return afu_has_cap(afu, SISL_INTVER_CAP_AFU_DEBUG);


@ -49,6 +49,12 @@ static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
cxl_unmap_afu_irq(ctx_cookie, num, cookie);
}
static u64 cxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
{
/* Dummy fop for cxl */
return 0;
}
static int cxlflash_start_context(void *ctx_cookie)
{
return cxl_start_context(ctx_cookie, 0, NULL);
@ -110,6 +116,11 @@ static void *cxlflash_create_afu(struct pci_dev *dev)
return cxl_pci_to_afu(dev);
}
static void cxlflash_destroy_afu(void *afu)
{
/* Dummy fop for cxl */
}
static struct file *cxlflash_get_fd(void *ctx_cookie,
struct file_operations *fops, int *fd)
{
@ -148,6 +159,7 @@ const struct cxlflash_backend_ops cxlflash_cxl_ops = {
.process_element = cxlflash_process_element,
.map_afu_irq = cxlflash_map_afu_irq,
.unmap_afu_irq = cxlflash_unmap_afu_irq,
.get_irq_objhndl = cxlflash_get_irq_objhndl,
.start_context = cxlflash_start_context,
.stop_context = cxlflash_stop_context,
.afu_reset = cxlflash_afu_reset,
@ -160,6 +172,7 @@ const struct cxlflash_backend_ops cxlflash_cxl_ops = {
.allocate_afu_irqs = cxlflash_allocate_afu_irqs,
.free_afu_irqs = cxlflash_free_afu_irqs,
.create_afu = cxlflash_create_afu,
.destroy_afu = cxlflash_destroy_afu,
.get_fd = cxlflash_get_fd,
.fops_get_context = cxlflash_fops_get_context,
.start_work = cxlflash_start_work,


@ -12,9 +12,11 @@
* 2 of the License, or (at your option) any later version.
*/
#include <misc/cxl.h>
#include <asm/unaligned.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>


@ -19,8 +19,6 @@
#include <asm/unaligned.h>
#include <misc/cxl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>
@ -339,8 +337,8 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
__func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
return rc;
}
@ -473,6 +471,7 @@ static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
struct afu_cmd *cmd = NULL;
struct device *dev = &cfg->dev->dev;
struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
bool needs_deletion = false;
char *buf = NULL;
ulong lock_flags;
int rc = 0;
@ -527,6 +526,7 @@ static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
if (!to) {
dev_err(dev, "%s: TMF timed out\n", __func__);
rc = -ETIMEDOUT;
needs_deletion = true;
} else if (cmd->cmd_aborted) {
dev_err(dev, "%s: TMF aborted\n", __func__);
rc = -EAGAIN;
@ -537,6 +537,12 @@ static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
}
cfg->tmf_active = false;
spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
if (needs_deletion) {
spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
list_del(&cmd->list);
spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}
out:
kfree(buf);
return rc;
@ -608,6 +614,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
rc = 0;
goto out;
default:
atomic_inc(&afu->cmds_active);
break;
}
@ -633,6 +640,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
rc = afu->send_cmd(afu, cmd);
atomic_dec(&afu->cmds_active);
out:
return rc;
}
@ -793,6 +801,10 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
hwq->ctx_cookie = NULL;
spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
hwq->hrrq_online = false;
spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);
spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
flush_pending_cmds(hwq);
spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
@ -946,9 +958,9 @@ static void cxlflash_remove(struct pci_dev *pdev)
return;
}
/* If a Task Management Function is active, wait for it to complete
* before continuing with remove.
*/
/* Yield to running recovery threads before continuing with remove */
wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
cfg->state != STATE_PROBING);
spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
if (cfg->tmf_active)
wait_event_interruptible_lock_irq(cfg->tmf_waitq,
@ -971,6 +983,7 @@ static void cxlflash_remove(struct pci_dev *pdev)
case INIT_STATE_AFU:
term_afu(cfg);
case INIT_STATE_PCI:
cfg->ops->destroy_afu(cfg->afu_cookie);
pci_disable_device(pdev);
case INIT_STATE_NONE:
free_mem(cfg);
@ -1303,7 +1316,10 @@ static void afu_err_intr_init(struct afu *afu)
for (i = 0; i < afu->num_hwqs; i++) {
hwq = get_hwq(afu, i);
writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl);
reg = readq_be(&hwq->host_map->ctx_ctrl);
WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
reg |= SISL_MSI_SYNC_ERROR;
writeq_be(reg, &hwq->host_map->ctx_ctrl);
writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
}
}
@ -1463,6 +1479,12 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
/* Silently drop spurious interrupts when queue is not online */
if (!hwq->hrrq_online) {
spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
return IRQ_HANDLED;
}
if (afu_is_irqpoll_enabled(afu)) {
irq_poll_sched(&hwq->irqpoll);
spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
@ -1752,6 +1774,8 @@ static int init_global(struct cxlflash_cfg *cfg)
u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */
int i = 0, num_ports = 0;
int rc = 0;
int j;
void *ctx;
u64 reg;
rc = read_vpd(cfg, &wwpn[0]);
@ -1767,6 +1791,7 @@ static int init_global(struct cxlflash_cfg *cfg)
writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
hwq->hrrq_online = true;
if (afu_is_sq_cmd_mode(afu)) {
writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
@ -1812,6 +1837,25 @@ static int init_global(struct cxlflash_cfg *cfg)
msleep(100);
}
if (afu_is_ocxl_lisn(afu)) {
/* Set up the LISN effective address for each master */
for (i = 0; i < afu->num_hwqs; i++) {
hwq = get_hwq(afu, i);
ctx = hwq->ctx_cookie;
for (j = 0; j < hwq->num_irqs; j++) {
reg = cfg->ops->get_irq_objhndl(ctx, j);
writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
}
reg = hwq->ctx_hndl;
writeq_be(SISL_LISN_PASID(reg, reg),
&hwq->ctrl_map->lisn_pasid[0]);
writeq_be(SISL_LISN_PASID(0UL, reg),
&hwq->ctrl_map->lisn_pasid[1]);
}
}
/* Set up master's own CTX_CAP to allow real mode, host translation */
/* tables, afu cmds and read/write GSCSI cmds. */
/* First, unlock ctx_cap write by reading mbox */
@ -1911,7 +1955,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
int rc = 0;
enum undo_level level = UNDO_NOOP;
bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
int num_irqs = is_primary_hwq ? 3 : 2;
int num_irqs = hwq->num_irqs;
rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
if (unlikely(rc)) {
@ -1965,16 +2009,20 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
struct device *dev = &cfg->dev->dev;
struct hwq *hwq = get_hwq(cfg->afu, index);
int rc = 0;
int num_irqs;
enum undo_level level;
hwq->afu = cfg->afu;
hwq->index = index;
INIT_LIST_HEAD(&hwq->pending_cmds);
if (index == PRIMARY_HWQ)
if (index == PRIMARY_HWQ) {
ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
else
num_irqs = 3;
} else {
ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
num_irqs = 2;
}
if (IS_ERR_OR_NULL(ctx)) {
rc = -ENOMEM;
goto err1;
@ -1982,6 +2030,7 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
WARN_ON(hwq->ctx_cookie);
hwq->ctx_cookie = ctx;
hwq->num_irqs = num_irqs;
/* Set it up as a master with the CXL */
cfg->ops->set_master(ctx);
@ -2075,6 +2124,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
mutex_init(&afu->sync_active);
afu->num_hwqs = afu->desired_hwqs;
for (i = 0; i < afu->num_hwqs; i++) {
rc = init_mc(cfg, i);
@ -2254,10 +2304,10 @@ static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
struct device *dev = &cfg->dev->dev;
struct afu_cmd *cmd = NULL;
struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
ulong lock_flags;
char *buf = NULL;
int rc = 0;
int nretry = 0;
static DEFINE_MUTEX(sync_active);
if (cfg->state != STATE_NORMAL) {
dev_dbg(dev, "%s: Sync not required state=%u\n",
@ -2265,7 +2315,7 @@ static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
return 0;
}
mutex_lock(&sync_active);
mutex_lock(&afu->sync_active);
atomic_inc(&afu->cmds_active);
buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
if (unlikely(!buf)) {
@ -2299,6 +2349,11 @@ retry:
case -ETIMEDOUT:
rc = afu->context_reset(hwq);
if (rc) {
/* Delete the command from pending_cmds list */
spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
list_del(&cmd->list);
spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
cxlflash_schedule_async_reset(cfg);
break;
}
@ -2315,7 +2370,7 @@ retry:
*rcb->ioasa = cmd->sa;
out:
atomic_dec(&afu->cmds_active);
mutex_unlock(&sync_active);
mutex_unlock(&afu->sync_active);
kfree(buf);
dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
@ -3138,7 +3193,8 @@ static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
CXLFLASH_NOTIFY_SHUTDOWN };
static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
CXLFLASH_NOTIFY_SHUTDOWN };
(CXLFLASH_NOTIFY_SHUTDOWN |
CXLFLASH_OCXL_DEV) };
/*
* PCI device binding table
@ -3649,8 +3705,9 @@ static int cxlflash_probe(struct pci_dev *pdev,
cfg->init_state = INIT_STATE_NONE;
cfg->dev = pdev;
cfg->ops = &cxlflash_cxl_ops;
cfg->cxl_fops = cxlflash_cxl_fops;
cfg->ops = cxlflash_assign_ops(ddv);
WARN_ON_ONCE(!cfg->ops);
/*
* Promoted LUNs move to the top of the LUN table. The rest stay on
@ -3681,8 +3738,6 @@ static int cxlflash_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, cfg);
cfg->afu_cookie = cfg->ops->create_afu(pdev);
rc = init_pci(cfg);
if (rc) {
dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
@ -3690,6 +3745,12 @@ static int cxlflash_probe(struct pci_dev *pdev,
}
cfg->init_state = INIT_STATE_PCI;
cfg->afu_cookie = cfg->ops->create_afu(pdev);
if (unlikely(!cfg->afu_cookie)) {
dev_err(dev, "%s: create_afu failed\n", __func__);
goto out_remove;
}
rc = init_afu(cfg);
if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);


@ -20,6 +20,8 @@
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include "backend.h"
#define CXLFLASH_NAME "cxlflash"
#define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter"
#define CXLFLASH_MAX_ADAPTERS 32
@ -97,8 +99,27 @@ struct dev_dependent_vals {
u64 flags;
#define CXLFLASH_NOTIFY_SHUTDOWN 0x0000000000000001ULL
#define CXLFLASH_WWPN_VPD_REQUIRED 0x0000000000000002ULL
#define CXLFLASH_OCXL_DEV 0x0000000000000004ULL
};
static inline const struct cxlflash_backend_ops *
cxlflash_assign_ops(struct dev_dependent_vals *ddv)
{
const struct cxlflash_backend_ops *ops = NULL;
#ifdef CONFIG_OCXL
if (ddv->flags & CXLFLASH_OCXL_DEV)
ops = &cxlflash_ocxl_ops;
#endif
#ifdef CONFIG_CXL
if (!(ddv->flags & CXLFLASH_OCXL_DEV))
ops = &cxlflash_cxl_ops;
#endif
return ops;
}
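
A hedged usage sketch: at probe time the backend is resolved from the
device-dependent values, and a NULL return means neither transport was built
in (example_probe is hypothetical; the series itself checks the result with
WARN_ON_ONCE):

	static int example_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id)
	{
		struct dev_dependent_vals *ddv =
			(struct dev_dependent_vals *)id->driver_data;
		const struct cxlflash_backend_ops *ops = cxlflash_assign_ops(ddv);

		if (!ops)
			return -ENODEV;	/* neither CONFIG_CXL nor CONFIG_OCXL */

		/* ... continue probe using ops ... */
		return 0;
	}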
struct asyc_intr_info {
u64 status;
char *desc;

File diff suppressed because it is too large


@ -0,0 +1,77 @@
/*
* CXL Flash Device Driver
*
* Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
* Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
*
* Copyright (C) 2018 IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define OCXL_MAX_IRQS 4 /* Max interrupts per process */
struct ocxlflash_irqs {
int hwirq;
u32 virq;
u64 ptrig;
void __iomem *vtrig;
};
/* OCXL hardware AFU associated with the host */
struct ocxl_hw_afu {
struct ocxlflash_context *ocxl_ctx; /* Host context */
struct pci_dev *pdev; /* PCI device */
struct device *dev; /* Generic device */
bool perst_same_image; /* Same image loaded on perst */
struct ocxl_fn_config fcfg; /* DVSEC config of the function */
struct ocxl_afu_config acfg; /* AFU configuration data */
int fn_actag_base; /* Function acTag base */
int fn_actag_enabled; /* Function acTag number enabled */
int afu_actag_base; /* AFU acTag base */
int afu_actag_enabled; /* AFU acTag number enabled */
phys_addr_t ppmmio_phys; /* Per process MMIO space */
phys_addr_t gmmio_phys; /* Global AFU MMIO space */
void __iomem *gmmio_virt; /* Global MMIO map */
void *link_token; /* Link token for the SPA */
struct idr idr; /* IDR to manage contexts */
int max_pasid; /* Maximum number of contexts */
bool is_present; /* Function has AFUs defined */
};
enum ocxlflash_ctx_state {
CLOSED,
OPENED,
STARTED
};
struct ocxlflash_context {
struct ocxl_hw_afu *hw_afu; /* HW AFU back pointer */
struct address_space *mapping; /* Mapping for pseudo filesystem */
bool master; /* Whether this is a master context */
int pe; /* Process element */
phys_addr_t psn_phys; /* Process mapping */
u64 psn_size; /* Process mapping size */
spinlock_t slock; /* Protects irq/fault/event updates */
wait_queue_head_t wq; /* Wait queue for poll and interrupts */
struct mutex state_mutex; /* Mutex to update context state */
enum ocxlflash_ctx_state state; /* Context state */
struct ocxlflash_irqs *irqs; /* Pointer to array of structures */
int num_irqs; /* Number of interrupts */
bool pending_irq; /* Pending interrupt on the context */
ulong irq_bitmap; /* Bits indicating pending irq num */
u64 fault_addr; /* Address that triggered the fault */
u64 fault_dsisr; /* Value of dsisr register at fault */
bool pending_fault; /* Pending translation fault */
};
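
The enum above suggests a context lifecycle of CLOSED -> OPENED -> STARTED,
serialized by state_mutex. A hedged sketch of the kind of guarded transition
this enables (example_start is a hypothetical helper, not part of the series):

	static int example_start(struct ocxlflash_context *ctx)
	{
		int rc = 0;

		mutex_lock(&ctx->state_mutex);
		if (ctx->state != OPENED)	/* only an opened context may start */
			rc = -EIO;
		else
			ctx->state = STARTED;
		mutex_unlock(&ctx->state_mutex);

		return rc;
	}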


@ -258,23 +258,30 @@ struct sisl_host_map {
* exit since there is no way to tell which
* command caused the error.
*/
#define SISL_ISTATUS_PERM_ERR_CMDROOM 0x0010ULL /* b59, user error */
#define SISL_ISTATUS_PERM_ERR_RCB_READ 0x0008ULL /* b60, user error */
#define SISL_ISTATUS_PERM_ERR_SA_WRITE 0x0004ULL /* b61, user error */
#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE 0x0002ULL /* b62, user error */
#define SISL_ISTATUS_PERM_ERR_LISN_3_EA 0x0400ULL /* b53, user error */
#define SISL_ISTATUS_PERM_ERR_LISN_2_EA 0x0200ULL /* b54, user error */
#define SISL_ISTATUS_PERM_ERR_LISN_1_EA 0x0100ULL /* b55, user error */
#define SISL_ISTATUS_PERM_ERR_LISN_3_PASID 0x0080ULL /* b56, user error */
#define SISL_ISTATUS_PERM_ERR_LISN_2_PASID 0x0040ULL /* b57, user error */
#define SISL_ISTATUS_PERM_ERR_LISN_1_PASID 0x0020ULL /* b58, user error */
#define SISL_ISTATUS_PERM_ERR_CMDROOM 0x0010ULL /* b59, user error */
#define SISL_ISTATUS_PERM_ERR_RCB_READ 0x0008ULL /* b60, user error */
#define SISL_ISTATUS_PERM_ERR_SA_WRITE 0x0004ULL /* b61, user error */
#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE 0x0002ULL /* b62, user error */
/* Page in wait accessing RCB/IOASA/RRQ is reported in b63.
* Same error in data/LXT/RHT access is reported via IOASA.
*/
#define SISL_ISTATUS_TEMP_ERR_PAGEIN 0x0001ULL /* b63, can be generated
* only when AFU auto
* retry is disabled.
* If user can determine
* the command that
* caused the error, it
* can be retried.
*/
#define SISL_ISTATUS_UNMASK (0x001FULL) /* 1 means unmasked */
#define SISL_ISTATUS_MASK ~(SISL_ISTATUS_UNMASK) /* 1 means masked */
#define SISL_ISTATUS_TEMP_ERR_PAGEIN 0x0001ULL /* b63, can only be
* generated when AFU
* auto retry is
* disabled. If user
* can determine the
* command that caused
* the error, it can
* be retried.
*/
#define SISL_ISTATUS_UNMASK (0x07FFULL) /* 1 means unmasked */
#define SISL_ISTATUS_MASK ~(SISL_ISTATUS_UNMASK) /* 1 means masked */
__be64 intr_clear;
__be64 intr_mask;
@ -284,6 +291,7 @@ struct sisl_host_map {
__be64 cmd_room;
__be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */
#define SISL_CTX_CTRL_UNMAP_SECTOR 0x8000000000000000ULL /* b0 */
#define SISL_CTX_CTRL_LISN_MASK (0xFFULL)
__be64 mbox_w; /* restricted use */
__be64 sq_start; /* Submission Queue (R/W): write sequence and */
__be64 sq_end; /* inclusion semantics are the same as RRQ */
@ -309,6 +317,10 @@ struct sisl_ctrl_map {
#define SISL_CTX_CAP_WRITE_CMD 0x0000000000000002ULL /* afu_rc 0x21 */
#define SISL_CTX_CAP_READ_CMD 0x0000000000000001ULL /* afu_rc 0x21 */
__be64 mbox_r;
__be64 lisn_pasid[2];
/* pasid _a arg must be ULL */
#define SISL_LISN_PASID(_a, _b) (((_a) << 32) | (_b))
__be64 lisn_ea[3];
};
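
A worked example of the packing macro: _a occupies the upper 32 bits, so
SISL_LISN_PASID(0x10UL, 0x10) evaluates to (0x10 << 32) | 0x10 =
0x0000001000000010. The "_a arg must be ULL" comment matters because shifting
a 32-bit int left by 32 is undefined behavior in C; the callers therefore pass
a u64 (reg) so the shift is performed in 64 bits.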
/* single copy global regs */
@ -415,6 +427,7 @@ struct sisl_global_regs {
#define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL
#define SISL_INTVER_CAP_LUN_PROVISION 0x080000000000ULL
#define SISL_INTVER_CAP_AFU_DEBUG 0x040000000000ULL
#define SISL_INTVER_CAP_OCXL_LISN 0x020000000000ULL
};
#define CXLFLASH_NUM_FC_PORTS_PER_BANK 2 /* fixed # of ports per bank */


@ -14,8 +14,9 @@
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/syscalls.h>
#include <misc/cxl.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@ -269,6 +270,7 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
int rc = 0;
struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
u64 val;
int i;
/* Unlock cap and restrict user to read/write cmds in translated mode */
readq_be(&ctrl_map->mbox_r);
@ -282,6 +284,19 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
goto out;
}
if (afu_is_ocxl_lisn(afu)) {
/* Set up the LISN effective address for each interrupt */
for (i = 0; i < ctxi->irqs; i++) {
val = cfg->ops->get_irq_objhndl(ctxi->ctx, i);
writeq_be(val, &ctrl_map->lisn_ea[i]);
}
/* Use primary HWQ PASID as identifier for all interrupts */
val = hwq->ctx_hndl;
writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]);
writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]);
}
/* Set up MMIO registers pointing to the RHT */
writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
@ -974,6 +989,10 @@ static int cxlflash_disk_detach(struct scsi_device *sdev,
* theoretically never occur), every call into this routine results
* in a complete freeing of a context.
*
* Detaching the LUN is typically an ioctl() operation and the underlying
* code assumes that ioctl_rwsem has been acquired as a reader. To support
* that design point, the semaphore is acquired and released around detach.
*
* Return: 0 on success
*/
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
@ -1012,9 +1031,11 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);
down_read(&cfg->ioctl_rwsem);
detach.context_id = ctxi->ctxid;
list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
up_read(&cfg->ioctl_rwsem);
out_release:
cfg->ops->fd_release(inode, file);
out:


@ -12,8 +12,9 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/syscalls.h>
#include <misc/cxl.h>
#include <asm/unaligned.h>
#include <asm/bitsperlong.h>


@ -1706,7 +1706,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
u32 reply_size = 0;
u32 __user *user_msg = arg;
u32 __user * user_reply = NULL;
void *sg_list[pHba->sg_tablesize];
void **sg_list = NULL;
u32 sg_offset = 0;
u32 sg_count = 0;
int sg_index = 0;
@ -1748,19 +1748,23 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
msg[2] = 0x40000000; // IOCTL context
msg[3] = adpt_ioctl_to_context(pHba, reply);
if (msg[3] == (u32)-1) {
kfree(reply);
return -EBUSY;
rcode = -EBUSY;
goto free;
}
memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
if (!sg_list) {
rcode = -ENOMEM;
goto free;
}
if(sg_offset) {
// TODO add 64 bit API
struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
if (sg_count > pHba->sg_tablesize){
printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
kfree (reply);
return -EINVAL;
rcode = -EINVAL;
goto free;
}
for(i = 0; i < sg_count; i++) {
@ -1879,7 +1883,6 @@ cleanup:
if (rcode != -ETIME && rcode != -EINTR) {
struct sg_simple_element *sg =
(struct sg_simple_element*) (msg +sg_offset);
kfree (reply);
while(sg_index) {
if(sg_list[--sg_index]) {
dma_free_coherent(&pHba->pDev->dev,
@ -1889,6 +1892,10 @@ cleanup:
}
}
}
free:
kfree(sg_list);
kfree(reply);
return rcode;
}


@ -1202,8 +1202,6 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
case ESAS2R_INIT_MSG_START:
case ESAS2R_INIT_MSG_REINIT:
{
struct timeval now;
do_gettimeofday(&now);
esas2r_hdebug("CFG init");
esas2r_build_cfg_req(a,
rq,
@ -1212,7 +1210,8 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
NULL);
ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
ci->sgl_page_size = cpu_to_le32(sgl_page_size);
ci->epoch_time = cpu_to_le32(now.tv_sec);
/* firmware interface overflows in y2106 */
ci->epoch_time = cpu_to_le32(ktime_get_real_seconds());
rq->flags |= RF_FAILURE_OK;
a->init_msg = ESAS2R_INIT_MSG_INIT;
break;


@ -1849,7 +1849,7 @@ int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
/* allocate a request */
rq = esas2r_alloc_request(a);
if (rq == NULL) {
esas2r_debug("esas2r_read_vda: out of requestss");
esas2r_debug("esas2r_read_vda: out of requests");
return -EBUSY;
}


@ -283,7 +283,7 @@ MODULE_PARM_DESC(num_requests,
int num_ae_requests = 4;
module_param(num_ae_requests, int, 0);
MODULE_PARM_DESC(num_ae_requests,
"Number of VDA asynchromous event requests. Default 4.");
"Number of VDA asynchronous event requests. Default 4.");
int cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN;
module_param(cmd_per_lun, int, 0);


@ -136,12 +136,14 @@ struct hisi_sas_phy {
struct hisi_sas_port *port;
struct asd_sas_phy sas_phy;
struct sas_identify identify;
struct completion *reset_completion;
spinlock_t lock;
u64 port_id; /* from hw */
u64 dev_sas_addr;
u64 frame_rcvd_size;
u8 frame_rcvd[32];
u8 phy_attached;
u8 reserved[3];
u8 in_reset;
u8 reserved[2];
u32 phy_type;
enum sas_linkrate minimum_linkrate;
enum sas_linkrate maximum_linkrate;
@ -162,7 +164,7 @@ struct hisi_sas_cq {
struct hisi_sas_dq {
struct hisi_hba *hisi_hba;
struct hisi_sas_slot *slot_prep;
struct list_head list;
spinlock_t lock;
int wr_point;
int id;
@ -174,15 +176,22 @@ struct hisi_sas_device {
struct completion *completion;
struct hisi_sas_dq *dq;
struct list_head list;
u64 attached_phy;
enum sas_device_type dev_type;
int device_id;
int sata_idx;
u8 dev_status;
};
struct hisi_sas_tmf_task {
int force_phy;
int phy_id;
u8 tmf;
u16 tag_of_task_to_be_managed;
};
struct hisi_sas_slot {
struct list_head entry;
struct list_head delivery;
struct sas_task *task;
struct hisi_sas_port *port;
u64 n_elem;
@ -192,17 +201,15 @@ struct hisi_sas_slot {
int cmplt_queue_slot;
int idx;
int abort;
int ready;
void *buf;
dma_addr_t buf_dma;
void *cmd_hdr;
dma_addr_t cmd_hdr_dma;
struct work_struct abort_slot;
struct timer_list internal_abort_timer;
};
struct hisi_sas_tmf_task {
u8 tmf;
u16 tag_of_task_to_be_managed;
bool is_internal;
struct hisi_sas_tmf_task *tmf;
};
struct hisi_sas_hw {
@ -215,14 +222,13 @@ struct hisi_sas_hw {
void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
int (*get_free_slot)(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq);
void (*start_delivery)(struct hisi_sas_dq *dq);
int (*prep_ssp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot, int is_tmf,
struct hisi_sas_tmf_task *tmf);
int (*prep_smp)(struct hisi_hba *hisi_hba,
void (*prep_ssp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
int (*prep_stp)(struct hisi_hba *hisi_hba,
void (*prep_smp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
int (*prep_abort)(struct hisi_hba *hisi_hba,
void (*prep_stp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
void (*prep_abort)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
int device_id, int abort_flag, int tag_to_abort);
int (*slot_complete)(struct hisi_hba *hisi_hba,
@ -245,8 +251,11 @@ struct hisi_sas_hw {
u32 (*get_phys_state)(struct hisi_hba *hisi_hba);
int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type,
u8 reg_index, u8 reg_count, u8 *write_data);
void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
int delay_ms, int timeout_ms);
int max_command_entries;
int complete_hdr_size;
struct scsi_host_template *sht;
};
struct hisi_hba {
@ -273,6 +282,8 @@ struct hisi_hba {
struct workqueue_struct *wq;
int slot_index_count;
int last_slot_index;
int last_dev_id;
unsigned long *slot_index_tags;
unsigned long reject_stp_links_msk;
@ -411,7 +422,7 @@ struct hisi_sas_command_table_ssp {
union {
struct {
struct ssp_command_iu task;
u32 prot[6];
u32 prot[7];
};
struct ssp_tmf_iu ssp_task;
struct xfer_rdy_iu xfer_rdy;
@ -437,10 +448,7 @@ struct hisi_sas_slot_buf_table {
};
extern struct scsi_transport_template *hisi_sas_stt;
extern struct scsi_host_template *hisi_sas_sht;
extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
extern void hisi_sas_init_add(struct hisi_hba *hisi_hba);
extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
extern void hisi_sas_free(struct hisi_hba *hisi_hba);
extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
@ -454,6 +462,11 @@ extern int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *ops);
extern int hisi_sas_remove(struct platform_device *pdev);
extern int hisi_sas_slave_configure(struct scsi_device *sdev);
extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern void hisi_sas_scan_start(struct Scsi_Host *shost);
extern struct device_attribute *host_attrs[];
extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);
extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy);
extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
struct sas_task *task,
@ -465,4 +478,5 @@ extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
enum hisi_sas_phy_event event);
extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max);
#endif

File diff suppressed because it is too large


@ -855,39 +855,12 @@ static enum sas_linkrate phy_get_max_linkrate_v1_hw(void)
static void phy_set_linkrate_v1_hw(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *r)
{
u32 prog_phy_link_rate =
hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int i;
enum sas_linkrate min, max;
u32 rate_mask = 0;
enum sas_linkrate max = r->maximum_linkrate;
u32 prog_phy_link_rate = 0x800;
if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
max = sas_phy->phy->maximum_linkrate;
min = r->minimum_linkrate;
} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
max = r->maximum_linkrate;
min = sas_phy->phy->minimum_linkrate;
} else
return;
sas_phy->phy->maximum_linkrate = max;
sas_phy->phy->minimum_linkrate = min;
max -= SAS_LINK_RATE_1_5_GBPS;
for (i = 0; i <= max; i++)
rate_mask |= 1 << (i * 2);
prog_phy_link_rate &= ~0xff;
prog_phy_link_rate |= rate_mask;
disable_phy_v1_hw(hisi_hba, phy_no);
msleep(100);
prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
prog_phy_link_rate);
start_phy_v1_hw(hisi_hba, phy_no);
prog_phy_link_rate);
}
static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
@ -921,37 +894,45 @@ get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
return -EAGAIN;
}
return 0;
dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
return w;
}
/* DQ lock must be taken here */
static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
int dlvry_queue = dq->slot_prep->dlvry_queue;
int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
struct hisi_sas_slot *s, *s1;
struct list_head *dq_list;
int dlvry_queue = dq->id;
int wp, count = 0;
dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
dq->wr_point);
dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
count++;
wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
list_del(&s->delivery);
}
if (!count)
return;
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
static void prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
struct hisi_sas_cmd_hdr *hdr,
struct scatterlist *scatter,
int n_elem)
{
struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
struct device *dev = hisi_hba->dev;
struct scatterlist *sg;
int i;
if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
n_elem);
return -EINVAL;
}
for_each_sg(scatter, sg, n_elem, i) {
struct hisi_sas_sge *entry = &sge_page->sge[i];
@ -964,48 +945,25 @@ static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
return 0;
}
static int prep_smp_v1_hw(struct hisi_hba *hisi_hba,
static void prep_smp_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct domain_device *device = task->dev;
struct device *dev = hisi_hba->dev;
struct hisi_sas_port *port = slot->port;
struct scatterlist *sg_req, *sg_resp;
struct scatterlist *sg_req;
struct hisi_sas_device *sas_dev = device->lldd_dev;
dma_addr_t req_dma_addr;
unsigned int req_len, resp_len;
int elem, rc;
unsigned int req_len;
/*
* DMA-map SMP request, response buffers
*/
/* req */
sg_req = &task->smp_task.smp_req;
elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
if (!elem)
return -ENOMEM;
req_len = sg_dma_len(sg_req);
req_dma_addr = sg_dma_address(sg_req);
/* resp */
sg_resp = &task->smp_task.smp_resp;
elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
if (!elem) {
rc = -ENOMEM;
goto err_out_req;
}
resp_len = sg_dma_len(sg_resp);
if ((req_len & 0x3) || (resp_len & 0x3)) {
rc = -EINVAL;
goto err_out_resp;
}
/* create header */
/* dw0 */
hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
@ -1025,21 +983,10 @@ static int prep_smp_v1_hw(struct hisi_hba *hisi_hba,
hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
return 0;
err_out_resp:
dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
DMA_FROM_DEVICE);
err_out_req:
dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
DMA_TO_DEVICE);
return rc;
}
static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot, int is_tmf,
struct hisi_sas_tmf_task *tmf)
static void prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
@ -1048,7 +995,8 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_port *port = slot->port;
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
int has_data = 0, rc, priority = is_tmf;
struct hisi_sas_tmf_task *tmf = slot->tmf;
int has_data = 0, priority = !!tmf;
u8 *buf_cmd, fburst = 0;
u32 dw1, dw2;
@ -1062,7 +1010,7 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
dw1 = 1 << CMD_HDR_VERIFY_DTL_OFF;
if (is_tmf) {
if (tmf) {
dw1 |= 3 << CMD_HDR_SSP_FRAME_TYPE_OFF;
} else {
switch (scsi_cmnd->sc_data_direction) {
@ -1083,7 +1031,7 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
dw1 |= sas_dev->device_id << CMD_HDR_DEVICE_ID_OFF;
hdr->dw1 = cpu_to_le32(dw1);
if (is_tmf) {
if (tmf) {
dw2 = ((sizeof(struct ssp_tmf_iu) +
sizeof(struct ssp_frame_hdr)+3)/4) <<
CMD_HDR_CFL_OFF;
@ -1097,12 +1045,9 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
if (has_data) {
rc = prep_prd_sge_v1_hw(hisi_hba, slot, hdr, task->scatter,
if (has_data)
prep_prd_sge_v1_hw(hisi_hba, slot, hdr, task->scatter,
slot->n_elem);
if (rc)
return rc;
}
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
@ -1117,7 +1062,7 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
hdr->dw2 = cpu_to_le32(dw2);
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
if (!is_tmf) {
if (!tmf) {
buf_cmd[9] = fburst | task->ssp_task.task_attr |
(task->ssp_task.task_prio << 3);
memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
@ -1136,8 +1081,6 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
break;
}
}
return 0;
}
/* by default, task resp is complete */
@ -1430,6 +1373,7 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
irqreturn_t res = IRQ_HANDLED;
unsigned long flags;
irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
if (!(irq_value & CHL_INT2_SL_PHY_ENA_MSK)) {
@ -1483,6 +1427,13 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
SAS_PROTOCOL_SMP;
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
spin_lock_irqsave(&phy->lock, flags);
if (phy->reset_completion) {
phy->in_reset = 0;
complete(phy->reset_completion);
}
spin_unlock_irqrestore(&phy->lock, flags);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
CHL_INT2_SL_PHY_ENA_MSK);
@ -1845,6 +1796,28 @@ static int hisi_sas_v1_init(struct hisi_hba *hisi_hba)
return 0;
}
static struct scsi_host_template sht_v1_hw = {
.name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
.slave_configure = hisi_sas_slave_configure,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
.can_queue = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = host_attrs,
};
static const struct hisi_sas_hw hisi_sas_v1_hw = {
.hw_init = hisi_sas_v1_init,
.setup_itct = setup_itct_v1_hw,
@ -1864,6 +1837,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.get_wideport_bitmap = get_wideport_bitmap_v1_hw,
.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr),
.sht = &sht_v1_hw,
};
static int hisi_sas_v1_probe(struct platform_device *pdev)


@ -144,6 +144,7 @@
#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 19
#define SAS_ECC_INTR_MSK 0x1ec
#define HGC_ERR_STAT_EN 0x238
#define CQE_SEND_CNT 0x248
#define DLVRY_Q_0_BASE_ADDR_LO 0x260
#define DLVRY_Q_0_BASE_ADDR_HI 0x264
#define DLVRY_Q_0_DEPTH 0x268
@ -295,6 +296,10 @@
#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
#define CMD_HDR_TLR_CTRL_OFF 6
#define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
#define CMD_HDR_PHY_ID_OFF 8
#define CMD_HDR_PHY_ID_MSK (0x1ff << CMD_HDR_PHY_ID_OFF)
#define CMD_HDR_FORCE_PHY_OFF 17
#define CMD_HDR_FORCE_PHY_MSK (0x1 << CMD_HDR_FORCE_PHY_OFF)
#define CMD_HDR_PORT_OFF 18
#define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
#define CMD_HDR_PRIORITY_OFF 27
@ -1216,7 +1221,22 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
}
for (i = 0; i < hisi_hba->n_phy; i++) {
hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855);
struct hisi_sas_phy *phy = &hisi_hba->phy[i];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
u32 prog_phy_link_rate = 0x800;
if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate <
SAS_LINK_RATE_1_5_GBPS)) {
prog_phy_link_rate = 0x855;
} else {
enum sas_linkrate max = sas_phy->phy->maximum_linkrate;
prog_phy_link_rate =
hisi_sas_get_prog_phy_linkrate_mask(max) |
0x800;
}
hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
prog_phy_link_rate);
hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, sas_phy_ctrl);
hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d);
hisi_sas_phy_write32(hisi_hba, i, SL_CONTROL, 0x0);
@ -1585,39 +1605,12 @@ static enum sas_linkrate phy_get_max_linkrate_v2_hw(void)
static void phy_set_linkrate_v2_hw(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *r)
{
u32 prog_phy_link_rate =
hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int i;
enum sas_linkrate min, max;
u32 rate_mask = 0;
enum sas_linkrate max = r->maximum_linkrate;
u32 prog_phy_link_rate = 0x800;
if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
max = sas_phy->phy->maximum_linkrate;
min = r->minimum_linkrate;
} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
max = r->maximum_linkrate;
min = sas_phy->phy->minimum_linkrate;
} else
return;
sas_phy->phy->maximum_linkrate = max;
sas_phy->phy->minimum_linkrate = min;
max -= SAS_LINK_RATE_1_5_GBPS;
for (i = 0; i <= max; i++)
rate_mask |= 1 << (i * 2);
prog_phy_link_rate &= ~0xff;
prog_phy_link_rate |= rate_mask;
disable_phy_v2_hw(hisi_hba, phy_no);
msleep(100);
prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
prog_phy_link_rate);
start_phy_v2_hw(hisi_hba, phy_no);
prog_phy_link_rate);
}
static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
@ -1658,42 +1651,50 @@ get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
r = hisi_sas_read32_relaxed(hisi_hba,
DLVRY_Q_0_RD_PTR + (queue * 0x14));
if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
dev_warn(dev, "full queue=%d r=%d w=%d\n",
queue, r, w);
return -EAGAIN;
}
return 0;
dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
return w;
}
/* DQ lock must be taken here */
static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
int dlvry_queue = dq->slot_prep->dlvry_queue;
int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
struct hisi_sas_slot *s, *s1;
struct list_head *dq_list;
int dlvry_queue = dq->id;
int wp, count = 0;
dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
dq->wr_point);
dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
count++;
wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
list_del(&s->delivery);
}
if (!count)
return;
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
static void prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
struct hisi_sas_cmd_hdr *hdr,
struct scatterlist *scatter,
int n_elem)
{
struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
struct device *dev = hisi_hba->dev;
struct scatterlist *sg;
int i;
if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
n_elem);
return -EINVAL;
}
for_each_sg(scatter, sg, n_elem, i) {
struct hisi_sas_sge *entry = &sge_page->sge[i];
@ -1706,47 +1707,24 @@ static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
return 0;
}
static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
static void prep_smp_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct domain_device *device = task->dev;
struct device *dev = hisi_hba->dev;
struct hisi_sas_port *port = slot->port;
struct scatterlist *sg_req, *sg_resp;
struct scatterlist *sg_req;
struct hisi_sas_device *sas_dev = device->lldd_dev;
dma_addr_t req_dma_addr;
unsigned int req_len, resp_len;
int elem, rc;
unsigned int req_len;
/*
* DMA-map SMP request, response buffers
*/
/* req */
sg_req = &task->smp_task.smp_req;
elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
if (!elem)
return -ENOMEM;
req_len = sg_dma_len(sg_req);
req_dma_addr = sg_dma_address(sg_req);
/* resp */
sg_resp = &task->smp_task.smp_resp;
elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
if (!elem) {
rc = -ENOMEM;
goto err_out_req;
}
resp_len = sg_dma_len(sg_resp);
if ((req_len & 0x3) || (resp_len & 0x3)) {
rc = -EINVAL;
goto err_out_resp;
}
req_len = sg_dma_len(&task->smp_task.smp_req);
/* create header */
/* dw0 */
@ -1768,21 +1746,10 @@ static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
return 0;
err_out_resp:
dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
DMA_FROM_DEVICE);
err_out_req:
dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
DMA_TO_DEVICE);
return rc;
}
static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot, int is_tmf,
struct hisi_sas_tmf_task *tmf)
static void prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
@ -1791,7 +1758,8 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_port *port = slot->port;
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
int has_data = 0, rc, priority = is_tmf;
struct hisi_sas_tmf_task *tmf = slot->tmf;
int has_data = 0, priority = !!tmf;
u8 *buf_cmd;
u32 dw1 = 0, dw2 = 0;
@ -1802,7 +1770,7 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
(1 << CMD_HDR_CMD_OFF)); /* ssp */
dw1 = 1 << CMD_HDR_VDTL_OFF;
if (is_tmf) {
if (tmf) {
dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
} else {
@ -1833,12 +1801,9 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
hdr->transfer_tags = cpu_to_le32(slot->idx);
if (has_data) {
rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
if (has_data)
prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
slot->n_elem);
if (rc)
return rc;
}
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
@ -1848,7 +1813,7 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
sizeof(struct ssp_frame_hdr);
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
if (!is_tmf) {
if (!tmf) {
buf_cmd[9] = task->ssp_task.task_attr |
(task->ssp_task.task_prio << 3);
memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
@ -1867,8 +1832,6 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
break;
}
}
return 0;
}
#define TRANS_TX_ERR 0
@ -2380,23 +2343,24 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
struct device *dev = hisi_hba->dev;
struct task_status_struct *ts;
struct domain_device *device;
struct sas_ha_struct *ha;
enum exec_status sts;
struct hisi_sas_complete_v2_hdr *complete_queue =
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v2_hdr *complete_hdr =
&complete_queue[slot->cmplt_queue_slot];
unsigned long flags;
int aborted;
bool is_internal = slot->is_internal;
if (unlikely(!task || !task->lldd_task || !task->dev))
return -EINVAL;
ts = &task->task_status;
device = task->dev;
ha = device->port->ha;
sas_dev = device->lldd_dev;
spin_lock_irqsave(&task->task_state_lock, flags);
aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
task->task_state_flags &=
~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
spin_unlock_irqrestore(&task->task_state_lock, flags);
@ -2404,15 +2368,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
memset(ts, 0, sizeof(*ts));
ts->resp = SAS_TASK_COMPLETE;
if (unlikely(aborted)) {
dev_dbg(dev, "slot_complete: task(%p) aborted\n", task);
ts->stat = SAS_ABORTED_TASK;
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
return ts->stat;
}
if (unlikely(!sas_dev)) {
dev_dbg(dev, "slot complete: port has no device\n");
ts->stat = SAS_PHY_DOWN;
@ -2459,10 +2414,10 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
slot_err_v2_hw(hisi_hba, task, slot, 2);
if (ts->stat != SAS_DATA_UNDERRUN)
dev_info(dev, "erroneous completion iptt=%d task=%p "
dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d "
"CQ hdr: 0x%x 0x%x 0x%x 0x%x "
"Error info: 0x%x 0x%x 0x%x 0x%x\n",
slot->idx, task,
slot->idx, task, sas_dev->device_id,
complete_hdr->dw0, complete_hdr->dw1,
complete_hdr->act, complete_hdr->dw3,
error_info[0], error_info[1],
@ -2523,13 +2478,27 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
dev_info(dev, "slot complete: task(%p) aborted\n", task);
return SAS_ABORTED_TASK;
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
sts = ts->stat;
if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
spin_lock_irqsave(&device->done_lock, flags);
if (test_bit(SAS_HA_FROZEN, &ha->state)) {
spin_unlock_irqrestore(&device->done_lock, flags);
dev_info(dev, "slot complete: task(%p) ignored\n ",
task);
return sts;
}
spin_unlock_irqrestore(&device->done_lock, flags);
}
if (task->task_done)
task->task_done(task);
@ -2537,7 +2506,7 @@ out:
return sts;
}
static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
@ -2547,8 +2516,9 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
struct hisi_sas_tmf_task *tmf = slot->tmf;
u8 *buf_cmd;
int has_data = 0, rc = 0, hdr_tag = 0;
int has_data = 0, hdr_tag = 0;
u32 dw1 = 0, dw2 = 0;
/* create header */
@ -2559,6 +2529,12 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
else
hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);
if (tmf && tmf->force_phy) {
hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK;
hdr->dw0 |= cpu_to_le32((1 << tmf->phy_id)
<< CMD_HDR_PHY_ID_OFF);
}
/* dw1 */
switch (task->data_dir) {
case DMA_TO_DEVICE:
@ -2596,12 +2572,9 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
/* dw3 */
hdr->transfer_tags = cpu_to_le32(slot->idx);
if (has_data) {
rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
if (has_data)
prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
slot->n_elem);
if (rc)
return rc;
}
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
@ -2613,8 +2586,6 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
/* fill in command FIS */
memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
return 0;
}
static void hisi_sas_internal_abort_quirk_timeout(struct timer_list *t)
@ -2651,7 +2622,7 @@ static void hisi_sas_internal_abort_quirk_timeout(struct timer_list *t)
}
}
static int prep_abort_v2_hw(struct hisi_hba *hisi_hba,
static void prep_abort_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
int device_id, int abort_flag, int tag_to_abort)
{
@ -2679,8 +2650,6 @@ static int prep_abort_v2_hw(struct hisi_hba *hisi_hba,
/* dw7 */
hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
hdr->transfer_tags = cpu_to_le32(slot->idx);
return 0;
}
static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
@ -2692,6 +2661,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
struct device *dev = hisi_hba->dev;
u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
unsigned long flags;
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
@ -2744,6 +2714,12 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
set_link_timer_quirk(hisi_hba);
}
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
spin_lock_irqsave(&phy->lock, flags);
if (phy->reset_completion) {
phy->in_reset = 0;
complete(phy->reset_completion);
}
spin_unlock_irqrestore(&phy->lock, flags);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@ -3151,14 +3127,12 @@ static void cq_tasklet_v2_hw(unsigned long val)
struct hisi_sas_complete_v2_hdr *complete_queue;
u32 rd_point = cq->rd_point, wr_point, dev_id;
int queue = cq->id;
struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
if (unlikely(hisi_hba->reject_stp_links_msk))
phys_try_accept_stp_links_v2_hw(hisi_hba);
complete_queue = hisi_hba->complete_hdr[queue];
spin_lock(&dq->lock);
wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
(0x14 * queue));
@ -3208,7 +3182,6 @@ static void cq_tasklet_v2_hw(unsigned long val)
/* update rd_point */
cq->rd_point = rd_point;
hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
spin_unlock(&dq->lock);
}
static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
@ -3235,6 +3208,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
irqreturn_t res = IRQ_HANDLED;
u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
unsigned long flags;
int phy_no, offset;
phy_no = sas_phy->id;
@ -3295,6 +3269,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
sas_phy->oob_mode = SATA_OOB_MODE;
/* Make up some unique SAS address */
attached_sas_addr[0] = 0x50;
attached_sas_addr[6] = hisi_hba->shost->host_no;
attached_sas_addr[7] = phy_no;
memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE);
memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis));
@ -3308,6 +3283,12 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
spin_lock_irqsave(&phy->lock, flags);
if (phy->reset_completion) {
phy->in_reset = 0;
complete(phy->reset_completion);
}
spin_unlock_irqrestore(&phy->lock, flags);
end:
hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk);
@ -3546,6 +3527,46 @@ static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type,
return 0;
}
static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
int delay_ms, int timeout_ms)
{
struct device *dev = hisi_hba->dev;
int entries, entries_old = 0, time;
for (time = 0; time < timeout_ms; time += delay_ms) {
entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT);
if (entries == entries_old)
break;
entries_old = entries;
msleep(delay_ms);
}
dev_dbg(dev, "wait commands complete %dms\n", time);
}
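
This helper polls CQE_SEND_CNT until the count stops changing between samples
(or timeout_ms elapses), i.e. until outstanding commands have quiesced. A
hedged sketch of how the core might invoke the callback ahead of a controller
reset (hypothetical call site, argument values illustrative):

	if (hisi_hba->hw->wait_cmds_complete_timeout)
		hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);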
static struct scsi_host_template sht_v2_hw = {
.name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
.slave_configure = hisi_sas_slave_configure,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
.can_queue = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = host_attrs,
};
static const struct hisi_sas_hw hisi_sas_v2_hw = {
.hw_init = hisi_sas_v2_init,
.setup_itct = setup_itct_v2_hw,
@ -3574,6 +3595,8 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.soft_reset = soft_reset_v2_hw,
.get_phys_state = get_phys_state_v2_hw,
.write_gpio = write_gpio_v2_hw,
.wait_cmds_complete_timeout = wait_cmds_complete_timeout_v2_hw,
.sht = &sht_v2_hw,
};
static int hisi_sas_v2_probe(struct platform_device *pdev)
@ -3598,9 +3621,6 @@ static int hisi_sas_v2_remove(struct platform_device *pdev)
struct sas_ha_struct *sha = platform_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
if (timer_pending(&hisi_hba->timer))
del_timer(&hisi_hba->timer);
hisi_sas_kill_tasklets(hisi_hba);
return hisi_sas_remove(pdev);


@ -92,6 +92,7 @@
#define SAS_ECC_INTR 0x1e8
#define SAS_ECC_INTR_MSK 0x1ec
#define HGC_ERR_STAT_EN 0x238
#define CQE_SEND_CNT 0x248
#define DLVRY_Q_0_BASE_ADDR_LO 0x260
#define DLVRY_Q_0_BASE_ADDR_HI 0x264
#define DLVRY_Q_0_DEPTH 0x268
@ -106,6 +107,11 @@
#define COMPL_Q_0_RD_PTR 0x4f0
#define AWQOS_AWCACHE_CFG 0xc84
#define ARQOS_ARCACHE_CFG 0xc88
#define HILINK_ERR_DFX 0xe04
#define SAS_GPIO_CFG_0 0x1000
#define SAS_GPIO_CFG_1 0x1004
#define SAS_GPIO_TX_0_1 0x1040
#define SAS_CFG_DRIVE_VLD 0x1070
/* phy registers requiring init */
#define PORT_BASE (0x2000)
@ -167,6 +173,7 @@
#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT2 (PORT_BASE + 0x1bc)
#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
#define CHL_INT2_RX_INVLD_DW_OFF 30
#define CHL_INT2_STP_LINK_TIMEOUT_OFF 31
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
@ -216,6 +223,9 @@
#define SAS_RAS_INTR1 (RAS_BASE + 0x04)
#define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08)
#define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c)
#define CFG_SAS_RAS_INTR_MASK (RAS_BASE + 0x1c)
#define SAS_RAS_INTR2 (RAS_BASE + 0x20)
#define SAS_RAS_INTR2_MASK (RAS_BASE + 0x24)
/* HW dma structures */
/* Delivery queue header */
@ -348,10 +358,11 @@ struct hisi_sas_err_record_v3 {
#define DIR_TO_DEVICE 2
#define DIR_RESERVED 3
#define CMD_IS_UNCONSTRAINT(cmd) \
((cmd == ATA_CMD_READ_LOG_EXT) || \
(cmd == ATA_CMD_READ_LOG_DMA_EXT) || \
(cmd == ATA_CMD_DEV_RESET))
#define FIS_CMD_IS_UNCONSTRAINED(fis) \
((fis.command == ATA_CMD_READ_LOG_EXT) || \
(fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \
((fis.command == ATA_CMD_DEV_RESET) && \
((fis.control & ATA_SRST) != 0)))
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
@ -390,8 +401,23 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
return readl(regs);
}
#define hisi_sas_read32_poll_timeout(off, val, cond, delay_us, \
timeout_us) \
({ \
void __iomem *regs = hisi_hba->regs + off; \
readl_poll_timeout(regs, val, cond, delay_us, timeout_us); \
})
#define hisi_sas_read32_poll_timeout_atomic(off, val, cond, delay_us, \
timeout_us) \
({ \
void __iomem *regs = hisi_hba->regs + off; \
readl_poll_timeout_atomic(regs, val, cond, delay_us, timeout_us);\
})
static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
{
struct pci_dev *pdev = hisi_hba->pci_dev;
int i;
/* Global registers init */
@ -409,7 +435,10 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
if (pdev->revision >= 0x21)
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7fff);
else
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
@ -422,13 +451,33 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
for (i = 0; i < hisi_hba->n_phy; i++) {
hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855);
struct hisi_sas_phy *phy = &hisi_hba->phy[i];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
u32 prog_phy_link_rate = 0x800;
if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate <
SAS_LINK_RATE_1_5_GBPS)) {
prog_phy_link_rate = 0x855;
} else {
enum sas_linkrate max = sas_phy->phy->maximum_linkrate;
prog_phy_link_rate =
hisi_sas_get_prog_phy_linkrate_mask(max) |
0x800;
}
hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
prog_phy_link_rate);
hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff);
if (pdev->revision >= 0x21)
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK,
0xffffffff);
else
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK,
0xff87ffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
@ -503,6 +552,16 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
/* RAS registers init */
hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
hisi_sas_write32(hisi_hba, SAS_RAS_INTR2_MASK, 0x0);
hisi_sas_write32(hisi_hba, CFG_SAS_RAS_INTR_MASK, 0x0);
/* LED registers init */
hisi_sas_write32(hisi_hba, SAS_CFG_DRIVE_VLD, 0x80000ff);
hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1, 0x80808080);
hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1 + 0x4, 0x80808080);
/* Configure blink generator rate A to 1Hz and B to 4Hz */
hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_1, 0x121700);
hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_0, 0x800000);
}
static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@ -654,8 +713,8 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
udelay(50);
/* Ensure axi bus idle */
ret = readl_poll_timeout(hisi_hba->regs + AXI_CFG, val, !val,
20000, 1000000);
ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val,
20000, 1000000);
if (ret) {
dev_err(dev, "axi bus is not idle, ret = %d!\n", ret);
return -EIO;
@ -794,42 +853,49 @@ get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
r = hisi_sas_read32_relaxed(hisi_hba,
DLVRY_Q_0_RD_PTR + (queue * 0x14));
if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
dev_warn(dev, "full queue=%d r=%d w=%d\n",
queue, r, w);
return -EAGAIN;
}
return 0;
dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
return w;
}
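The full-queue test above follows the standard ring-buffer convention of sacrificing one slot so that a full queue can be distinguished from an empty one. In isolation (a sketch, not driver code):
/* With read index r and write index w over n slots: */
static int ring_empty(int r, int w)       { return r == w; }
static int ring_full(int r, int w, int n) { return (w + 1) % n == r; }
/* Hence at most n - 1 commands can be in flight per delivery queue. */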
static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
int dlvry_queue = dq->slot_prep->dlvry_queue;
int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
struct hisi_sas_slot *s, *s1;
struct list_head *dq_list;
int dlvry_queue = dq->id;
int wp, count = 0;
dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
dq->wr_point);
dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
count++;
wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
list_del(&s->delivery);
}
if (!count)
return;
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
struct hisi_sas_cmd_hdr *hdr,
struct scatterlist *scatter,
int n_elem)
{
struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
struct device *dev = hisi_hba->dev;
struct scatterlist *sg;
int i;
if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
n_elem);
return -EINVAL;
}
for_each_sg(scatter, sg, n_elem, i) {
struct hisi_sas_sge *entry = &sge_page->sge[i];
@ -842,13 +908,10 @@ static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
return 0;
}
static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot, int is_tmf,
struct hisi_sas_tmf_task *tmf)
static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
@ -857,7 +920,8 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_port *port = slot->port;
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
int has_data = 0, rc, priority = is_tmf;
struct hisi_sas_tmf_task *tmf = slot->tmf;
int has_data = 0, priority = !!tmf;
u8 *buf_cmd;
u32 dw1 = 0, dw2 = 0;
@ -868,7 +932,7 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
(1 << CMD_HDR_CMD_OFF)); /* ssp */
dw1 = 1 << CMD_HDR_VDTL_OFF;
if (is_tmf) {
if (tmf) {
dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
} else {
@ -898,12 +962,9 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
hdr->dw2 = cpu_to_le32(dw2);
hdr->transfer_tags = cpu_to_le32(slot->idx);
if (has_data) {
rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
if (has_data)
prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
slot->n_elem);
if (rc)
return rc;
}
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
@ -913,7 +974,7 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
sizeof(struct ssp_frame_hdr);
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
if (!is_tmf) {
if (!tmf) {
buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
} else {
@ -930,48 +991,25 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
break;
}
}
return 0;
}
static int prep_smp_v3_hw(struct hisi_hba *hisi_hba,
static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct domain_device *device = task->dev;
struct device *dev = hisi_hba->dev;
struct hisi_sas_port *port = slot->port;
struct scatterlist *sg_req, *sg_resp;
struct scatterlist *sg_req;
struct hisi_sas_device *sas_dev = device->lldd_dev;
dma_addr_t req_dma_addr;
unsigned int req_len, resp_len;
int elem, rc;
unsigned int req_len;
/*
* DMA-map SMP request, response buffers
*/
/* req */
sg_req = &task->smp_task.smp_req;
elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
if (!elem)
return -ENOMEM;
req_len = sg_dma_len(sg_req);
req_dma_addr = sg_dma_address(sg_req);
/* resp */
sg_resp = &task->smp_task.smp_resp;
elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
if (!elem) {
rc = -ENOMEM;
goto err_out_req;
}
resp_len = sg_dma_len(sg_resp);
if ((req_len & 0x3) || (resp_len & 0x3)) {
rc = -EINVAL;
goto err_out_resp;
}
/* create header */
/* dw0 */
hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
@ -993,18 +1031,9 @@ static int prep_smp_v3_hw(struct hisi_hba *hisi_hba,
hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
return 0;
err_out_resp:
dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
DMA_FROM_DEVICE);
err_out_req:
dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
DMA_TO_DEVICE);
return rc;
}
static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
@ -1015,7 +1044,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
u8 *buf_cmd;
int has_data = 0, rc = 0, hdr_tag = 0;
int has_data = 0, hdr_tag = 0;
u32 dw1 = 0, dw2 = 0;
hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
@ -1046,7 +1075,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
<< CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
if (CMD_IS_UNCONSTRAINT(task->ata_task.fis.command))
if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis))
dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF;
hdr->dw1 = cpu_to_le32(dw1);
@ -1064,12 +1093,9 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
/* dw3 */
hdr->transfer_tags = cpu_to_le32(slot->idx);
if (has_data) {
rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
if (has_data)
prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
slot->n_elem);
if (rc)
return rc;
}
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
@ -1081,11 +1107,9 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
/* fill in command FIS */
memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
return 0;
}
static int prep_abort_v3_hw(struct hisi_hba *hisi_hba,
static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
int device_id, int abort_flag, int tag_to_abort)
{
@ -1110,7 +1134,6 @@ static int prep_abort_v3_hw(struct hisi_hba *hisi_hba,
hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
hdr->transfer_tags = cpu_to_le32(slot->idx);
return 0;
}
static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
@ -1120,6 +1143,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct device *dev = hisi_hba->dev;
unsigned long flags;
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
@ -1188,6 +1212,12 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
phy->phy_attached = 1;
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
res = IRQ_HANDLED;
spin_lock_irqsave(&phy->lock, flags);
if (phy->reset_completion) {
phy->in_reset = 0;
complete(phy->reset_completion);
}
spin_unlock_irqrestore(&phy->lock, flags);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_PHY_ENABLE_MSK);
@ -1301,14 +1331,10 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
{
struct hisi_hba *hisi_hba = p;
struct device *dev = hisi_hba->dev;
u32 ent_msk, ent_tmp, irq_msk;
struct pci_dev *pci_dev = hisi_hba->pci_dev;
u32 irq_msk;
int phy_no = 0;
ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
ent_tmp = ent_msk;
ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK;
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk);
irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
& 0xeeeeeeee;
@ -1319,6 +1345,13 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
CHL_INT1);
u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
CHL_INT2);
u32 irq_msk1 = hisi_sas_phy_read32(hisi_hba, phy_no,
CHL_INT1_MSK);
u32 irq_msk2 = hisi_sas_phy_read32(hisi_hba, phy_no,
CHL_INT2_MSK);
irq_value1 &= ~irq_msk1;
irq_value2 &= ~irq_msk2;
if ((irq_msk & (4 << (phy_no * 4))) &&
irq_value1) {
@ -1364,8 +1397,28 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
hisi_sas_phy_write32(hisi_hba, phy_no,
CHL_INT2, irq_value2);
}
if ((irq_value2 & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
(pci_dev->revision == 0x20)) {
u32 reg_value;
int rc;
rc = hisi_sas_read32_poll_timeout_atomic(
HILINK_ERR_DFX, reg_value,
!((reg_value >> 8) & BIT(phy_no)),
1000, 10000);
if (rc) {
disable_phy_v3_hw(hisi_hba, phy_no);
hisi_sas_phy_write32(hisi_hba, phy_no,
CHL_INT2,
BIT(CHL_INT2_RX_INVLD_DW_OFF));
hisi_sas_phy_read32(hisi_hba, phy_no,
ERR_CNT_INVLD_DW);
mdelay(1);
enable_phy_v3_hw(hisi_hba, phy_no);
}
}
}
if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
hisi_sas_phy_write32(hisi_hba, phy_no,
@ -1378,8 +1431,6 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
phy_no++;
}
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp);
return IRQ_HANDLED;
}
@ -1448,6 +1499,7 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00);
irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
irq_value &= ~irq_msk;
for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) {
const struct hisi_sas_hw_error *error = &fatal_axi_error[i];
@ -1549,37 +1601,30 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
struct device *dev = hisi_hba->dev;
struct task_status_struct *ts;
struct domain_device *device;
struct sas_ha_struct *ha;
enum exec_status sts;
struct hisi_sas_complete_v3_hdr *complete_queue =
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v3_hdr *complete_hdr =
&complete_queue[slot->cmplt_queue_slot];
int aborted;
unsigned long flags;
bool is_internal = slot->is_internal;
if (unlikely(!task || !task->lldd_task || !task->dev))
return -EINVAL;
ts = &task->task_status;
device = task->dev;
ha = device->port->ha;
sas_dev = device->lldd_dev;
spin_lock_irqsave(&task->task_state_lock, flags);
aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
task->task_state_flags &=
~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
spin_unlock_irqrestore(&task->task_state_lock, flags);
memset(ts, 0, sizeof(*ts));
ts->resp = SAS_TASK_COMPLETE;
if (unlikely(aborted)) {
dev_dbg(dev, "slot complete: task(%p) aborted\n", task);
ts->stat = SAS_ABORTED_TASK;
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
return ts->stat;
}
if (unlikely(!sas_dev)) {
dev_dbg(dev, "slot complete: port has no device\n");
@ -1619,10 +1664,10 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
slot_err_v3_hw(hisi_hba, task, slot);
if (ts->stat != SAS_DATA_UNDERRUN)
dev_info(dev, "erroneous completion iptt=%d task=%p "
dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d "
"CQ hdr: 0x%x 0x%x 0x%x 0x%x "
"Error info: 0x%x 0x%x 0x%x 0x%x\n",
slot->idx, task,
slot->idx, task, sas_dev->device_id,
complete_hdr->dw0, complete_hdr->dw1,
complete_hdr->act, complete_hdr->dw3,
error_info[0], error_info[1],
@ -1677,13 +1722,27 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
dev_info(dev, "slot complete: task(%p) aborted\n", task);
return SAS_ABORTED_TASK;
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
sts = ts->stat;
if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
spin_lock_irqsave(&device->done_lock, flags);
if (test_bit(SAS_HA_FROZEN, &ha->state)) {
spin_unlock_irqrestore(&device->done_lock, flags);
dev_info(dev, "slot complete: task(%p) ignored\n",
task);
return sts;
}
spin_unlock_irqrestore(&device->done_lock, flags);
}
if (task->task_done)
task->task_done(task);
@ -1699,25 +1758,27 @@ static void cq_tasklet_v3_hw(unsigned long val)
struct hisi_sas_complete_v3_hdr *complete_queue;
u32 rd_point = cq->rd_point, wr_point;
int queue = cq->id;
struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
complete_queue = hisi_hba->complete_hdr[queue];
spin_lock(&dq->lock);
wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
(0x14 * queue));
while (rd_point != wr_point) {
struct hisi_sas_complete_v3_hdr *complete_hdr;
struct device *dev = hisi_hba->dev;
int iptt;
complete_hdr = &complete_queue[rd_point];
iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
slot = &hisi_hba->slot_info[iptt];
slot->cmplt_queue_slot = rd_point;
slot->cmplt_queue = queue;
slot_complete_v3_hw(hisi_hba, slot);
if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) {
slot = &hisi_hba->slot_info[iptt];
slot->cmplt_queue_slot = rd_point;
slot->cmplt_queue = queue;
slot_complete_v3_hw(hisi_hba, slot);
} else
dev_err(dev, "IPTT %d is invalid, discard it.\n", iptt);
if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
rd_point = 0;
@ -1726,7 +1787,6 @@ static void cq_tasklet_v3_hw(unsigned long val)
/* update rd_point */
cq->rd_point = rd_point;
hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
spin_unlock(&dq->lock);
}
static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
@ -1839,39 +1899,12 @@ static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *r)
{
u32 prog_phy_link_rate =
hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int i;
enum sas_linkrate min, max;
u32 rate_mask = 0;
enum sas_linkrate max = r->maximum_linkrate;
u32 prog_phy_link_rate = 0x800;
if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
max = sas_phy->phy->maximum_linkrate;
min = r->minimum_linkrate;
} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
max = r->maximum_linkrate;
min = sas_phy->phy->minimum_linkrate;
} else
return;
sas_phy->phy->maximum_linkrate = max;
sas_phy->phy->minimum_linkrate = min;
max -= SAS_LINK_RATE_1_5_GBPS;
for (i = 0; i <= max; i++)
rate_mask |= 1 << (i * 2);
prog_phy_link_rate &= ~0xff;
prog_phy_link_rate |= rate_mask;
disable_phy_v3_hw(hisi_hba, phy_no);
msleep(100);
prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
prog_phy_link_rate);
start_phy_v3_hw(hisi_hba, phy_no);
prog_phy_link_rate);
}
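The open-coded rate-mask loop deleted by this hunk now lives in a common helper. Its effect, reconstructed from the removed lines (a sketch under that assumption; the real helper is in the shared hisi_sas core):
static u32 prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u32 rate_mask = 0;
	int i;
	/* One bit pair per supported generation, from 1.5G up to "max". */
	for (i = 0; i <= max - SAS_LINK_RATE_1_5_GBPS; i++)
		rate_mask |= 1 << (i * 2);
	return rate_mask;
}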
static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
@ -1948,8 +1981,9 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1);
/* wait until bus idle */
rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100);
rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
AM_CURR_TRANS_RETURN, status,
status == 0x3, 10, 100);
if (rc) {
dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
return rc;
@ -1960,6 +1994,75 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
return hw_init_v3_hw(hisi_hba);
}
static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type,
u8 reg_index, u8 reg_count, u8 *write_data)
{
struct device *dev = hisi_hba->dev;
u32 *data = (u32 *)write_data;
int i;
switch (reg_type) {
case SAS_GPIO_REG_TX:
if ((reg_index + reg_count) > ((hisi_hba->n_phy + 3) / 4)) {
dev_err(dev, "write gpio: invalid reg range[%d, %d]\n",
reg_index, reg_index + reg_count - 1);
return -EINVAL;
}
for (i = 0; i < reg_count; i++)
hisi_sas_write32(hisi_hba,
SAS_GPIO_TX_0_1 + (reg_index + i) * 4,
data[i]);
break;
default:
dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
reg_type);
return -EINVAL;
}
return 0;
}
static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
int delay_ms, int timeout_ms)
{
struct device *dev = hisi_hba->dev;
int entries, entries_old = 0, time;
for (time = 0; time < timeout_ms; time += delay_ms) {
entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT);
if (entries == entries_old)
break;
entries_old = entries;
msleep(delay_ms);
}
dev_dbg(dev, "wait commands complete %dms\n", time);
}
static struct scsi_host_template sht_v3_hw = {
.name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
.slave_configure = hisi_sas_slave_configure,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
.can_queue = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = host_attrs,
};
static const struct hisi_sas_hw hisi_sas_v3_hw = {
.hw_init = hisi_sas_v3_init,
.setup_itct = setup_itct_v3_hw,
@ -1985,6 +2088,8 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.soft_reset = soft_reset_v3_hw,
.get_phys_state = get_phys_state_v3_hw,
.get_events = phy_get_events_v3_hw,
.write_gpio = write_gpio_v3_hw,
.wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw,
};
static struct Scsi_Host *
@ -1994,7 +2099,7 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
struct hisi_hba *hisi_hba;
struct device *dev = &pdev->dev;
shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
shost = scsi_host_alloc(&sht_v3_hw, sizeof(*hisi_hba));
if (!shost) {
dev_err(dev, "shost alloc failed\n");
return NULL;
@ -2108,8 +2213,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sha->sas_port[i] = &hisi_hba->port[i].sas_port;
}
hisi_sas_init_add(hisi_hba);
rc = scsi_add_host(shost, dev);
if (rc)
goto err_out_ha;
@ -2161,6 +2264,9 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = sha->core.shost;
if (timer_pending(&hisi_hba->timer))
del_timer(&hisi_hba->timer);
sas_unregister_ha(sha);
sas_remove_host(sha->core.shost);
@ -2222,6 +2328,29 @@ static const struct hisi_sas_hw_error sas_ras_intr1_nfe[] = {
{ .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" },
};
static const struct hisi_sas_hw_error sas_ras_intr2_nfe[] = {
{ .irq_msk = BIT(0), .msg = "DMAC0_AXI_BUS_ERR" },
{ .irq_msk = BIT(1), .msg = "DMAC1_AXI_BUS_ERR" },
{ .irq_msk = BIT(2), .msg = "DMAC2_AXI_BUS_ERR" },
{ .irq_msk = BIT(3), .msg = "DMAC3_AXI_BUS_ERR" },
{ .irq_msk = BIT(4), .msg = "DMAC4_AXI_BUS_ERR" },
{ .irq_msk = BIT(5), .msg = "DMAC5_AXI_BUS_ERR" },
{ .irq_msk = BIT(6), .msg = "DMAC6_AXI_BUS_ERR" },
{ .irq_msk = BIT(7), .msg = "DMAC7_AXI_BUS_ERR" },
{ .irq_msk = BIT(8), .msg = "DMAC0_FIFO_OMIT_ERR" },
{ .irq_msk = BIT(9), .msg = "DMAC1_FIFO_OMIT_ERR" },
{ .irq_msk = BIT(10), .msg = "DMAC2_FIFO_OMIT_ERR" },
{ .irq_msk = BIT(11), .msg = "DMAC3_FIFO_OMIT_ERR" },
{ .irq_msk = BIT(12), .msg = "DMAC4_FIFO_OMIT_ERR" },
{ .irq_msk = BIT(13), .msg = "DMAC5_FIFO_OMIT_ERR" },
{ .irq_msk = BIT(14), .msg = "DMAC6_FIFO_OMIT_ERR" },
{ .irq_msk = BIT(15), .msg = "DMAC7_FIFO_OMIT_ERR" },
{ .irq_msk = BIT(16), .msg = "HGC_RLSE_SLOT_UNMATCH" },
{ .irq_msk = BIT(17), .msg = "HGC_LM_ADD_FCH_LIST_ERR" },
{ .irq_msk = BIT(18), .msg = "HGC_AXI_BUS_ERR" },
{ .irq_msk = BIT(19), .msg = "HGC_FIFO_OMIT_ERR" },
};
static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
@ -2252,6 +2381,17 @@ static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
}
hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value);
irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR2);
for (i = 0; i < ARRAY_SIZE(sas_ras_intr2_nfe); i++) {
ras_error = &sas_ras_intr2_nfe[i];
if (ras_error->irq_msk & irq_value) {
dev_warn(dev, "SAS_RAS_INTR2: %s(irq_value=0x%x) found.\n",
ras_error->msg, irq_value);
need_reset = true;
}
}
hisi_sas_write32(hisi_hba, SAS_RAS_INTR2, irq_value);
return need_reset;
}
@ -2307,7 +2447,6 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
u32 device_state, status;
int rc;
u32 reg_val;
unsigned long flags;
if (!pdev->pm_cap) {
dev_err(dev, "PCI PM not supported\n");
@ -2332,8 +2471,9 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
AM_CTRL_GLOBAL, reg_val);
/* wait until bus idle */
rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100);
rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
AM_CURR_TRANS_RETURN, status,
status == 0x3, 10, 100);
if (rc) {
dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
@ -2351,9 +2491,7 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
pci_disable_device(pdev);
pci_set_power_state(pdev, device_state);
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_release_tasks(hisi_hba);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
sas_suspend_ha(sha);
return 0;


@ -435,6 +435,8 @@ struct ipr_error_table_t ipr_error_table[] = {
"4080: IOA exceeded maximum operating temperature"},
{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
"4085: Service required"},
{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
"4086: SAS Adapter Hardware Configuration Error"},
{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
"3140: Device bus not ready to ready transition"},
{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,


@ -291,7 +291,7 @@ static void ips_freescb(ips_ha_t *, ips_scb_t *);
static void ips_setup_funclist(ips_ha_t *);
static void ips_statinit(ips_ha_t *);
static void ips_statinit_memio(ips_ha_t *);
static void ips_fix_ffdc_time(ips_ha_t *, ips_scb_t *, time_t);
static void ips_fix_ffdc_time(ips_ha_t *, ips_scb_t *, time64_t);
static void ips_ffdc_reset(ips_ha_t *, int);
static void ips_ffdc_time(ips_ha_t *);
static uint32_t ips_statupd_copperhead(ips_ha_t *);
@ -985,10 +985,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
/* FFDC */
if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) {
struct timeval tv;
do_gettimeofday(&tv);
ha->last_ffdc = tv.tv_sec;
ha->last_ffdc = ktime_get_real_seconds();
ha->reset_count++;
ips_ffdc_reset(ha, IPS_INTR_IORL);
}
@ -2392,7 +2389,6 @@ static int
ips_hainit(ips_ha_t * ha)
{
int i;
struct timeval tv;
METHOD_TRACE("ips_hainit", 1);
@ -2407,8 +2403,7 @@ ips_hainit(ips_ha_t * ha)
/* Send FFDC */
ha->reset_count = 1;
do_gettimeofday(&tv);
ha->last_ffdc = tv.tv_sec;
ha->last_ffdc = ktime_get_real_seconds();
ips_ffdc_reset(ha, IPS_INTR_IORL);
if (!ips_read_config(ha, IPS_INTR_IORL)) {
@ -2548,12 +2543,9 @@ ips_next(ips_ha_t * ha, int intr)
if ((ha->subsys->param[3] & 0x300000)
&& (ha->scb_activelist.count == 0)) {
struct timeval tv;
do_gettimeofday(&tv);
if (tv.tv_sec - ha->last_ffdc > IPS_SECS_8HOURS) {
ha->last_ffdc = tv.tv_sec;
time64_t now = ktime_get_real_seconds();
if (now - ha->last_ffdc > IPS_SECS_8HOURS) {
ha->last_ffdc = now;
ips_ffdc_time(ha);
}
}
@ -5988,59 +5980,21 @@ ips_ffdc_time(ips_ha_t * ha)
/* */
/****************************************************************************/
static void
ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time_t current_time)
ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time64_t current_time)
{
long days;
long rem;
int i;
int year;
int yleap;
int year_lengths[2] = { IPS_DAYS_NORMAL_YEAR, IPS_DAYS_LEAP_YEAR };
int month_lengths[12][2] = { {31, 31},
{28, 29},
{31, 31},
{30, 30},
{31, 31},
{30, 30},
{31, 31},
{31, 31},
{30, 30},
{31, 31},
{30, 30},
{31, 31}
};
struct tm tm;
METHOD_TRACE("ips_fix_ffdc_time", 1);
days = current_time / IPS_SECS_DAY;
rem = current_time % IPS_SECS_DAY;
time64_to_tm(current_time, 0, &tm);
scb->cmd.ffdc.hour = (rem / IPS_SECS_HOUR);
rem = rem % IPS_SECS_HOUR;
scb->cmd.ffdc.minute = (rem / IPS_SECS_MIN);
scb->cmd.ffdc.second = (rem % IPS_SECS_MIN);
year = IPS_EPOCH_YEAR;
while (days < 0 || days >= year_lengths[yleap = IPS_IS_LEAP_YEAR(year)]) {
int newy;
newy = year + (days / IPS_DAYS_NORMAL_YEAR);
if (days < 0)
--newy;
days -= (newy - year) * IPS_DAYS_NORMAL_YEAR +
IPS_NUM_LEAP_YEARS_THROUGH(newy - 1) -
IPS_NUM_LEAP_YEARS_THROUGH(year - 1);
year = newy;
}
scb->cmd.ffdc.yearH = year / 100;
scb->cmd.ffdc.yearL = year % 100;
for (i = 0; days >= month_lengths[i][yleap]; ++i)
days -= month_lengths[i][yleap];
scb->cmd.ffdc.month = i + 1;
scb->cmd.ffdc.day = days + 1;
scb->cmd.ffdc.hour = tm.tm_hour;
scb->cmd.ffdc.minute = tm.tm_min;
scb->cmd.ffdc.second = tm.tm_sec;
scb->cmd.ffdc.yearH = (tm.tm_year + 1900) / 100;
scb->cmd.ffdc.yearL = tm.tm_year % 100;
scb->cmd.ffdc.month = tm.tm_mon + 1;
scb->cmd.ffdc.day = tm.tm_mday;
}
/****************************************************************************


@ -402,16 +402,7 @@
#define IPS_BIOS_HEADER 0xC0
/* time oriented stuff */
#define IPS_IS_LEAP_YEAR(y) (((y % 4 == 0) && ((y % 100 != 0) || (y % 400 == 0))) ? 1 : 0)
#define IPS_NUM_LEAP_YEARS_THROUGH(y) ((y) / 4 - (y) / 100 + (y) / 400)
#define IPS_SECS_MIN 60
#define IPS_SECS_HOUR 3600
#define IPS_SECS_8HOURS 28800
#define IPS_SECS_DAY 86400
#define IPS_DAYS_NORMAL_YEAR 365
#define IPS_DAYS_LEAP_YEAR 366
#define IPS_EPOCH_YEAR 1970
/*
* Scsi_Host Template
@ -1054,7 +1045,7 @@ typedef struct ips_ha {
uint8_t active;
int ioctl_reset; /* IOCTL Requested Reset Flag */
uint16_t reset_count; /* number of resets */
time_t last_ffdc; /* last time we sent ffdc info*/
time64_t last_ffdc; /* last time we sent ffdc info*/
uint8_t slot_num; /* PCI Slot Number */
int ioctl_len; /* size of ioctl buffer */
dma_addr_t ioctl_busaddr; /* dma address of ioctl buffer*/


@ -433,9 +433,6 @@ static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
(u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED)))
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
if (u->in_connection_align_insertion_frequency < 3)
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
if ((u->in_connection_align_insertion_frequency < 3) ||
(u->align_insertion_frequency == 0) ||
(u->notify_enable_spin_up_insertion_frequency == 0))


@ -962,7 +962,6 @@ static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
if (conn->datadgst_en)
sdev->request_queue->backing_dev_info->capabilities
|= BDI_CAP_STABLE_WRITES;
blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
blk_queue_dma_alignment(sdev->request_queue, 0);
return 0;
}


@ -577,6 +577,11 @@ int sas_ata_init(struct domain_device *found_dev)
ata_sas_port_destroy(ap);
return rc;
}
rc = ata_sas_tport_add(found_dev->sata_dev.ata_host.dev, ap);
if (rc) {
ata_sas_port_destroy(ap);
return rc;
}
found_dev->sata_dev.ap = ap;
return 0;
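This pairs with the libsas hunk that follows: sas_free_device() now calls ata_sas_tport_delete() before ata_sas_port_destroy(), undoing the ata_sas_tport_add() performed here when the device is finally freed.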


@ -314,6 +314,7 @@ void sas_free_device(struct kref *kref)
kfree(dev->ex_dev.ex_phy);
if (dev_is_sata(dev) && dev->sata_dev.ap) {
ata_sas_tport_delete(dev->sata_dev.ap);
ata_sas_port_destroy(dev->sata_dev.ap);
dev->sata_dev.ap = NULL;
}


@ -64,8 +64,6 @@ struct lpfc_sli2_slim;
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
#define LPFC_VNAME_LEN 100 /* vport symbolic name length */
#define LPFC_TGTQ_INTERVAL 40000 /* Min amount of time between tgt
queue depth change in millisecs */
#define LPFC_TGTQ_RAMPUP_PCENT 5 /* Target queue rampup in percentage */
#define LPFC_MIN_TGT_QDEPTH 10
#define LPFC_MAX_TGT_QDEPTH 0xFFFF
@ -784,6 +782,7 @@ struct lpfc_hba {
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_embed_cmd;
uint32_t cfg_nvme_io_channel;
uint32_t cfg_nvmet_mrq_post;
uint32_t cfg_nvmet_mrq;
uint32_t cfg_enable_nvmet;
uint32_t cfg_nvme_enable_fb;
@ -922,12 +921,6 @@ struct lpfc_hba {
atomic_t fc4ScsiOutputRequests;
atomic_t fc4ScsiControlRequests;
atomic_t fc4ScsiIoCmpls;
atomic_t fc4NvmeInputRequests;
atomic_t fc4NvmeOutputRequests;
atomic_t fc4NvmeControlRequests;
atomic_t fc4NvmeIoCmpls;
atomic_t fc4NvmeLsRequests;
atomic_t fc4NvmeLsCmpls;
uint64_t bg_guard_err_cnt;
uint64_t bg_apptag_err_cnt;


@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Broadcom refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -149,10 +149,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_nvmet_tgtport *tgtp;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_rport *rport;
struct lpfc_nodelist *ndlp;
struct nvme_fc_remote_port *nrport;
uint64_t data1, data2, data3, tot;
struct lpfc_nvme_ctrl_stat *cstat;
uint64_t data1, data2, data3;
uint64_t totin, totout, tot;
char *statep;
int i;
int len = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
@ -293,6 +297,13 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
spin_lock_irq(shost->host_lock);
len += snprintf(buf + len, PAGE_SIZE - len,
"XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
phba->brd_no,
phba->sli4_hba.max_cfg_param.max_xri,
phba->sli4_hba.nvme_xri_max,
phba->sli4_hba.scsi_xri_max,
lpfc_sli4_get_els_iocb_cnt(phba));
/* Port state is only one of two values for now. */
if (localport->port_id)
@ -309,11 +320,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
localport->port_id, statep);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (!ndlp->nrport)
rport = lpfc_ndlp_get_nrport(ndlp);
if (!rport)
continue;
/* local short-hand pointer. */
nrport = ndlp->nrport->remoteport;
nrport = rport->remoteport;
if (!nrport)
continue;
/* Port state is only one of two values for now. */
switch (nrport->port_state) {
@ -364,11 +378,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
}
spin_unlock_irq(shost->host_lock);
if (!lport)
return len;
len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
len += snprintf(buf+len, PAGE_SIZE-len,
"LS: Xmt %010x Cmpl %010x Abort %08x\n",
atomic_read(&phba->fc4NvmeLsRequests),
atomic_read(&phba->fc4NvmeLsCmpls),
atomic_read(&lport->fc4NvmeLsRequests),
atomic_read(&lport->fc4NvmeLsCmpls),
atomic_read(&lport->xmt_ls_abort));
len += snprintf(buf + len, PAGE_SIZE - len,
@ -377,27 +394,32 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&lport->cmpl_ls_xb),
atomic_read(&lport->cmpl_ls_err));
tot = atomic_read(&phba->fc4NvmeIoCmpls);
data1 = atomic_read(&phba->fc4NvmeInputRequests);
data2 = atomic_read(&phba->fc4NvmeOutputRequests);
data3 = atomic_read(&phba->fc4NvmeControlRequests);
totin = 0;
totout = 0;
for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
cstat = &lport->cstat[i];
tot = atomic_read(&cstat->fc4NvmeIoCmpls);
totin += tot;
data1 = atomic_read(&cstat->fc4NvmeInputRequests);
data2 = atomic_read(&cstat->fc4NvmeOutputRequests);
data3 = atomic_read(&cstat->fc4NvmeControlRequests);
totout += (data1 + data2 + data3);
}
len += snprintf(buf+len, PAGE_SIZE-len,
"FCP: Rd %016llx Wr %016llx IO %016llx\n",
data1, data2, data3);
"Total FCP Cmpl %016llx Issue %016llx "
"OutIO %016llx\n",
totin, totout, totout - totin);
len += snprintf(buf+len, PAGE_SIZE-len,
" noxri %08x nondlp %08x qdepth %08x "
"wqerr %08x\n",
" abort %08x noxri %08x nondlp %08x qdepth %08x "
"wqerr %08x err %08x\n",
atomic_read(&lport->xmt_fcp_abort),
atomic_read(&lport->xmt_fcp_noxri),
atomic_read(&lport->xmt_fcp_bad_ndlp),
atomic_read(&lport->xmt_fcp_qdepth),
atomic_read(&lport->xmt_fcp_err),
atomic_read(&lport->xmt_fcp_wqerr));
len += snprintf(buf + len, PAGE_SIZE - len,
" Cmpl %016llx Outstanding %016llx Abort %08x\n",
tot, ((data1 + data2 + data3) - tot),
atomic_read(&lport->xmt_fcp_abort));
len += snprintf(buf + len, PAGE_SIZE - len,
"FCP CMPL: xb %08x Err %08x\n",
atomic_read(&lport->cmpl_fcp_xb),
@ -3280,6 +3302,9 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
{
struct Scsi_Host *shost;
struct lpfc_nodelist *ndlp;
#if (IS_ENABLED(CONFIG_NVME_FC))
struct lpfc_nvme_rport *rport;
#endif
shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
@ -3289,8 +3314,9 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
if (ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
#if (IS_ENABLED(CONFIG_NVME_FC))
if (ndlp->nrport)
nvme_fc_set_remoteport_devloss(ndlp->nrport->remoteport,
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
nvme_fc_set_remoteport_devloss(rport->remoteport,
vport->cfg_devloss_tmo);
#endif
}
@ -3413,6 +3439,15 @@ LPFC_ATTR_R(nvmet_mrq,
LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
"Specify number of RQ pairs for processing NVMET cmds");
/*
* lpfc_nvmet_mrq_post: Specify number of RQ buffer to initially post
* to each NVMET RQ. Range 64 to 2048, default is 512.
*/
LPFC_ATTR_R(nvmet_mrq_post,
LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST,
LPFC_NVMET_RQE_DEF_COUNT,
"Specify number of RQ buffers to initially post");
/*
* lpfc_enable_fc4_type: Defines what FC4 types are supported.
* Supported Values: 1 - register just FCP
@ -3469,8 +3504,49 @@ LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
# tgt_queue_depth: This parameter is used to limit the number of outstanding
# commands per target port. Value range is [10,65535]. Default value is 65535.
*/
LPFC_VPORT_ATTR_RW(tgt_queue_depth, 65535, 10, 65535,
"Max number of FCP commands we can queue to a specific target port");
static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH;
module_param(lpfc_tgt_queue_depth, uint, 0444);
MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth");
lpfc_vport_param_show(tgt_queue_depth);
lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH);
/**
* lpfc_tgt_queue_depth_set: Sets an attribute value.
* @vport: pointer to the vport structure.
* @val: integer attribute value.
*
* Description: Sets the parameter to the new value.
*
* Returns:
* zero on success
* -EINVAL if val is invalid
*/
static int
lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
return -EINVAL;
if (val == vport->cfg_tgt_queue_depth)
return 0;
spin_lock_irq(shost->host_lock);
vport->cfg_tgt_queue_depth = val;
/* Next loop thru nodelist and change cmd_qdepth */
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
spin_unlock_irq(shost->host_lock);
return 0;
}
lpfc_vport_param_store(tgt_queue_depth);
static DEVICE_ATTR_RW(lpfc_tgt_queue_depth);
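For context, DEVICE_ATTR_RW(lpfc_tgt_queue_depth) creates dev_attr_lpfc_tgt_queue_depth with 0644 permissions wired to lpfc_tgt_queue_depth_show() and lpfc_tgt_queue_depth_store(); the lpfc_vport_param_show/store invocations are expected to generate exactly those functions, with the store path funneling through lpfc_tgt_queue_depth_set() above, so a write to the sysfs attribute immediately rewrites cmd_qdepth on every node in the vport.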
/*
# hba_queue_depth: This parameter is used to limit the number of outstanding
@ -5302,6 +5378,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_suppress_rsp,
&dev_attr_lpfc_nvme_io_channel,
&dev_attr_lpfc_nvmet_mrq,
&dev_attr_lpfc_nvmet_mrq_post,
&dev_attr_lpfc_nvme_enable_fb,
&dev_attr_lpfc_nvmet_fb_size,
&dev_attr_lpfc_enable_bg,
@ -6352,6 +6429,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
/* Initialize first burst. Target vs Initiator are different. */
lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);

View File

@ -3621,7 +3621,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
bsg_reply->result = 0;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2937 SLI_CONFIG ext-buffer maibox command "
"2937 SLI_CONFIG ext-buffer mailbox command "
"(x%x/x%x) complete bsg job done, bsize:%d\n",
phba->mbox_ext_buf_ctx.nembType,
phba->mbox_ext_buf_ctx.mboxType, size);
@ -3632,7 +3632,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2938 SLI_CONFIG ext-buffer maibox "
"2938 SLI_CONFIG ext-buffer mailbox "
"command (x%x/x%x) failure, rc:x%x\n",
phba->mbox_ext_buf_ctx.nembType,
phba->mbox_ext_buf_ctx.mboxType, rc);
@ -3666,7 +3666,7 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2939 SLI_CONFIG ext-buffer rd maibox command "
"2939 SLI_CONFIG ext-buffer rd mailbox command "
"complete, ctxState:x%x, mbxStatus:x%x\n",
phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
@ -3706,7 +3706,7 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2940 SLI_CONFIG ext-buffer wr maibox command "
"2940 SLI_CONFIG ext-buffer wr mailbox command "
"complete, ctxState:x%x, mbxStatus:x%x\n",
phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
@ -3988,12 +3988,12 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2947 Issued SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
"mailbox command, rc:x%x\n", rc);
return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2948 Failed to issue SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
"mailbox command, rc:x%x\n", rc);
rc = -EPIPE;
job_error:
@ -4147,12 +4147,12 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2955 Issued SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
"mailbox command, rc:x%x\n", rc);
return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2956 Failed to issue SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
"mailbox command, rc:x%x\n", rc);
rc = -EPIPE;
goto job_error;
}
@ -4492,12 +4492,12 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2969 Issued SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
"mailbox command, rc:x%x\n", rc);
return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2970 Failed to issue SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
"mailbox command, rc:x%x\n", rc);
rc = -EPIPE;
goto job_error;
}


@ -471,6 +471,11 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
"Parse GID_FTrsp: did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag, vport->fc_flag);
/* Don't assume the rport is always the previous
* FC4 type.
*/
ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
/* By default, the driver expects to support FCP FC4 */
if (fc4_type == FC_TYPE_FCP)
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
@ -691,6 +696,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->fc_flag &= ~FC_RSCN_DEFERRED;
spin_unlock_irq(shost->host_lock);
/* This is a GID_FT completing so the gidft_inp counter was
* incremented before the GID_FT was issued to the wire.
*/
vport->gidft_inp--;
/*
* Skip processing the NS response
* Re-issue the NS cmd


@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Broadcom refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -544,7 +544,7 @@ static int
lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
{
int len = 0;
int cnt;
int i, iocnt, outio, cnt;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
@ -552,12 +552,15 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
struct nvme_fc_local_port *localport;
struct lpfc_nvmet_tgtport *tgtp;
struct nvme_fc_remote_port *nrport;
struct lpfc_nvme_rport *rport;
cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
outio = 0;
len += snprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
iocnt = 0;
if (!cnt) {
len += snprintf(buf+len, size-len,
"Missing Nodelist Entries\n");
@ -585,9 +588,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
break;
case NLP_STE_UNMAPPED_NODE:
statep = "UNMAP ";
iocnt = 1;
break;
case NLP_STE_MAPPED_NODE:
statep = "MAPPED";
iocnt = 1;
break;
case NLP_STE_NPR_NODE:
statep = "NPR ";
@ -614,8 +619,10 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
len += snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
if (ndlp->nlp_type & NLP_FC_NODE)
len += snprintf(buf+len, size-len, "FC_NODE ");
if (ndlp->nlp_type & NLP_FABRIC)
if (ndlp->nlp_type & NLP_FABRIC) {
len += snprintf(buf+len, size-len, "FABRIC ");
iocnt = 0;
}
if (ndlp->nlp_type & NLP_FCP_TARGET)
len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
ndlp->nlp_sid);
@ -632,10 +639,20 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
ndlp->nlp_usg_map);
len += snprintf(buf+len, size-len, "refcnt:%x",
kref_read(&ndlp->kref));
if (iocnt) {
i = atomic_read(&ndlp->cmd_pending);
len += snprintf(buf + len, size - len,
" OutIO:x%x Qdepth x%x",
i, ndlp->cmd_qdepth);
outio += i;
}
len += snprintf(buf+len, size-len, "\n");
}
spin_unlock_irq(shost->host_lock);
len += snprintf(buf + len, size - len,
"\nOutstanding IO x%x\n", outio);
if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
len += snprintf(buf + len, size - len,
@ -679,10 +696,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
len += snprintf(buf + len, size - len, "\tRport List:\n");
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
/* local short-hand pointer. */
if (!ndlp->nrport)
rport = lpfc_ndlp_get_nrport(ndlp);
if (!rport)
continue;
nrport = ndlp->nrport->remoteport;
nrport = rport->remoteport;
if (!nrport)
continue;
/* Port state is only one of two values for now. */
switch (nrport->port_state) {
@ -751,10 +771,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_ctrl_stat *cstat;
struct lpfc_nvme_lport *lport;
uint64_t tot, data1, data2, data3;
uint64_t data1, data2, data3;
uint64_t tot, totin, totout;
int cnt, i, maxch;
int len = 0;
int cnt;
if (phba->nvmet_support) {
if (!phba->targetport)
@ -880,27 +902,6 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return len;
len += snprintf(buf + len, size - len,
"\nNVME Lport Statistics\n");
len += snprintf(buf + len, size - len,
"LS: Xmt %016x Cmpl %016x\n",
atomic_read(&phba->fc4NvmeLsRequests),
atomic_read(&phba->fc4NvmeLsCmpls));
tot = atomic_read(&phba->fc4NvmeIoCmpls);
data1 = atomic_read(&phba->fc4NvmeInputRequests);
data2 = atomic_read(&phba->fc4NvmeOutputRequests);
data3 = atomic_read(&phba->fc4NvmeControlRequests);
len += snprintf(buf + len, size - len,
"FCP: Rd %016llx Wr %016llx IO %016llx\n",
data1, data2, data3);
len += snprintf(buf + len, size - len,
" Cmpl %016llx Outstanding %016llx\n",
tot, (data1 + data2 + data3) - tot);
localport = vport->localport;
if (!localport)
return len;
@ -908,6 +909,46 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
if (!lport)
return len;
len += snprintf(buf + len, size - len,
"\nNVME Lport Statistics\n");
len += snprintf(buf + len, size - len,
"LS: Xmt %016x Cmpl %016x\n",
atomic_read(&lport->fc4NvmeLsRequests),
atomic_read(&lport->fc4NvmeLsCmpls));
if (phba->cfg_nvme_io_channel < 32)
maxch = phba->cfg_nvme_io_channel;
else
maxch = 32;
totin = 0;
totout = 0;
for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
cstat = &lport->cstat[i];
tot = atomic_read(&cstat->fc4NvmeIoCmpls);
totin += tot;
data1 = atomic_read(&cstat->fc4NvmeInputRequests);
data2 = atomic_read(&cstat->fc4NvmeOutputRequests);
data3 = atomic_read(&cstat->fc4NvmeControlRequests);
totout += (data1 + data2 + data3);
/* Limit to 32, debugfs display buffer limitation */
if (i >= 32)
continue;
len += snprintf(buf + len, PAGE_SIZE - len,
"FCP (%d): Rd %016llx Wr %016llx "
"IO %016llx ",
i, data1, data2, data3);
len += snprintf(buf + len, PAGE_SIZE - len,
"Cmpl %016llx OutIO %016llx\n",
tot, ((data1 + data2 + data3) - tot));
}
len += snprintf(buf + len, PAGE_SIZE - len,
"Total FCP Cmpl %016llx Issue %016llx "
"OutIO %016llx\n",
totin, totout, totout - totin);
len += snprintf(buf + len, size - len,
"LS Xmt Err: Abrt %08x Err %08x "
"Cmpl Err: xb %08x Err %08x\n",
@ -918,11 +959,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
len += snprintf(buf + len, size - len,
"FCP Xmt Err: noxri %06x nondlp %06x "
"qdepth %06x wqerr %06x Abrt %06x\n",
"qdepth %06x wqerr %06x err %06x Abrt %06x\n",
atomic_read(&lport->xmt_fcp_noxri),
atomic_read(&lport->xmt_fcp_bad_ndlp),
atomic_read(&lport->xmt_fcp_qdepth),
atomic_read(&lport->xmt_fcp_wqerr),
atomic_read(&lport->xmt_fcp_err),
atomic_read(&lport->xmt_fcp_abort));
len += snprintf(buf + len, size - len,


@ -6268,7 +6268,6 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
* flush the RSCN. Otherwise, the outstanding requests
* need to complete.
*/
vport->gidft_inp = 0;
if (lpfc_issue_gidft(vport) > 0)
return 1;
} else {


@ -708,8 +708,7 @@ lpfc_work_done(struct lpfc_hba *phba)
HA_RXMASK));
}
}
if ((phba->sli_rev == LPFC_SLI_REV4) &&
(!list_empty(&pring->txq)))
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_drain_txq(phba);
/*
* Turn on Ring interrupts
@ -3876,10 +3875,6 @@ int
lpfc_issue_gidft(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
/* Good status, issue CT Request to NameServer */
if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||


@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Broadcom refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -566,6 +566,7 @@ struct lpfc_register {
/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
#define LPFC_SLI_INTF 0x0058
#define LPFC_SLI_ASIC_VER 0x009C
#define LPFC_CTL_PORT_SEM_OFFSET 0x400
#define lpfc_port_smphr_perr_SHIFT 31
@ -3912,6 +3913,7 @@ struct lpfc_acqe_link {
#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
#define LPFC_ASYNC_LINK_FAULT_LR_LRR 0x3
#define lpfc_acqe_logical_link_speed_SHIFT 16
#define lpfc_acqe_logical_link_speed_MASK 0x0000FFFF
#define lpfc_acqe_logical_link_speed_WORD word1
@ -4616,6 +4618,9 @@ union lpfc_wqe128 {
struct send_frame_wqe send_frame;
};
#define MAGIC_NUMER_G6 0xFEAA0003
#define MAGIC_NUMER_G7 0xFEAA0005
struct lpfc_grp_hdr {
uint32_t size;
uint32_t magic_number;


@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Broadcom refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -1266,6 +1266,9 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
uint64_t tot, data1, data2, data3;
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_register reg_data;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_ctrl_stat *cstat;
void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
vports = lpfc_create_vport_work_array(phba);
@ -1299,14 +1302,25 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
tot += atomic_read(&tgtp->xmt_fcp_release);
tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
} else {
tot = atomic_read(&phba->fc4NvmeIoCmpls);
data1 = atomic_read(
&phba->fc4NvmeInputRequests);
data2 = atomic_read(
&phba->fc4NvmeOutputRequests);
data3 = atomic_read(
&phba->fc4NvmeControlRequests);
tot = (data1 + data2 + data3) - tot;
localport = phba->pport->localport;
if (!localport || !localport->private)
goto skip_eqdelay;
lport = (struct lpfc_nvme_lport *)
localport->private;
tot = 0;
for (i = 0;
i < phba->cfg_nvme_io_channel; i++) {
cstat = &lport->cstat[i];
data1 = atomic_read(
&cstat->fc4NvmeInputRequests);
data2 = atomic_read(
&cstat->fc4NvmeOutputRequests);
data3 = atomic_read(
&cstat->fc4NvmeControlRequests);
tot += (data1 + data2 + data3);
tot -= atomic_read(
&cstat->fc4NvmeIoCmpls);
}
}
}
@ -4265,32 +4279,24 @@ lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
* @phba: pointer to lpfc hba data structure.
* @acqe_link: pointer to the async link completion queue entry.
*
* This routine is to parse the SLI4 link-attention link fault code and
* translate it into the base driver's read link attention mailbox command
* status.
*
* Return: Link-attention status in terms of base driver's coding.
* This routine is to parse the SLI4 link-attention link fault code.
**/
static uint16_t
static void
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
struct lpfc_acqe_link *acqe_link)
{
uint16_t latt_fault;
switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
case LPFC_ASYNC_LINK_FAULT_NONE:
case LPFC_ASYNC_LINK_FAULT_LOCAL:
case LPFC_ASYNC_LINK_FAULT_REMOTE:
latt_fault = 0;
case LPFC_ASYNC_LINK_FAULT_LR_LRR:
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0398 Invalid link fault code: x%x\n",
"0398 Unknown link fault code: x%x\n",
bf_get(lpfc_acqe_link_fault, acqe_link));
latt_fault = MBXERR_ERROR;
break;
}
return latt_fault;
}
/**
@ -4565,9 +4571,12 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
* the READ_TOPOLOGY completion routine to continue without actually
* sending the READ_TOPOLOGY mailbox command to the port.
*/
/* Parse and translate status field */
/* Initialize completion status */
mb = &pmb->u.mb;
mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
mb->mbxStatus = MBX_SUCCESS;
/* Parse port fault information field */
lpfc_sli4_parse_latt_fault(phba, acqe_link);
/* Parse and translate link attention fields */
la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
@ -4695,10 +4704,12 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
break;
}
/* Parse and translate status field */
/* Initialize completion status */
mb = &pmb->u.mb;
mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba,
(void *)acqe_fc);
mb->mbxStatus = MBX_SUCCESS;
/* Parse port fault information field */
lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
/* Parse and translate link attention fields */
la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
@ -5103,7 +5114,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
LOG_DISCOVERY,
"2772 Issue FCF rediscover mabilbox "
"2772 Issue FCF rediscover mailbox "
"command failed, fail through to FCF "
"dead event\n");
spin_lock_irq(&phba->hbalock);
@ -5195,7 +5206,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
LOG_DISCOVERY,
"2774 Issue FCF rediscover "
"mabilbox command failed, "
"mailbox command failed, "
"through to CVL event\n");
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
@ -5839,6 +5850,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
int fof_vectors = 0;
int extra;
uint64_t wwn;
u32 if_type;
u32 if_fam;
phba->sli4_hba.num_online_cpu = num_online_cpus();
phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
@ -6160,15 +6173,28 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/
rc = lpfc_get_sli4_parameters(phba, mboxq);
if (rc) {
if_type = bf_get(lpfc_sli_intf_if_type,
&phba->sli4_hba.sli_intf);
if_fam = bf_get(lpfc_sli_intf_sli_family,
&phba->sli4_hba.sli_intf);
if (phba->sli4_hba.extents_in_use &&
phba->sli4_hba.rpi_hdrs_in_use) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2999 Unsupported SLI4 Parameters "
"Extents and RPI headers enabled.\n");
if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
mempool_free(mboxq, phba->mbox_mem_pool);
rc = -EIO;
goto out_free_bsmbx;
}
}
if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
mempool_free(mboxq, phba->mbox_mem_pool);
rc = -EIO;
goto out_free_bsmbx;
}
mempool_free(mboxq, phba->mbox_mem_pool);
rc = -EIO;
goto out_free_bsmbx;
}
mempool_free(mboxq, phba->mbox_mem_pool);
@ -6406,8 +6432,11 @@ lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
return error;
}
/* workqueue for deferred irq use */
phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
/* The lpfc_wq workqueue for deferred irq use is only used for SLI4 */
if (phba->sli_rev == LPFC_SLI_REV4)
phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
else
phba->wq = NULL;
return 0;
}
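Since phba->wq is now NULL on SLI3 adapters, anything that queues deferred work must tolerate the missing workqueue. A minimal caller sketch (the work-item name is illustrative, not taken from this diff):

	if (phba->wq)
		queue_work(phba->wq, &evtp->work_evt);	/* evtp: hypothetical deferred-work holder */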
@ -6430,7 +6459,8 @@ lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
}
/* Stop kernel worker thread */
kthread_stop(phba->worker_thread);
if (phba->worker_thread)
kthread_stop(phba->worker_thread);
}
/**
@ -6895,12 +6925,6 @@ lpfc_create_shost(struct lpfc_hba *phba)
atomic_set(&phba->fc4ScsiOutputRequests, 0);
atomic_set(&phba->fc4ScsiControlRequests, 0);
atomic_set(&phba->fc4ScsiIoCmpls, 0);
atomic_set(&phba->fc4NvmeInputRequests, 0);
atomic_set(&phba->fc4NvmeOutputRequests, 0);
atomic_set(&phba->fc4NvmeControlRequests, 0);
atomic_set(&phba->fc4NvmeIoCmpls, 0);
atomic_set(&phba->fc4NvmeLsRequests, 0);
atomic_set(&phba->fc4NvmeLsCmpls, 0);
vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
if (!vport)
return -ENODEV;
@ -7781,6 +7805,40 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.max_wq,
phba->sli4_hba.max_cfg_param.max_rq);
/*
* Calculate NVME queue resources based on how
* many WQ/CQs are available.
*/
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
length = phba->sli4_hba.max_cfg_param.max_wq;
if (phba->sli4_hba.max_cfg_param.max_cq <
phba->sli4_hba.max_cfg_param.max_wq)
length = phba->sli4_hba.max_cfg_param.max_cq;
/*
* What's left after this can go toward NVME.
* The minus 6 accounts for ELS, NVME LS, MBOX,
* FOF, plus a couple extra. When configured for
* NVMET, FCP io channel WQs are not created.
*/
length -= 6;
if (!phba->nvmet_support)
length -= phba->cfg_fcp_io_channel;
if (phba->cfg_nvme_io_channel > length) {
lpfc_printf_log(
phba, KERN_ERR, LOG_SLI,
"2005 Reducing NVME IO channel to %d: "
"WQ %d CQ %d NVMEIO %d FCPIO %d\n",
length,
phba->sli4_hba.max_cfg_param.max_wq,
phba->sli4_hba.max_cfg_param.max_cq,
phba->cfg_nvme_io_channel,
phba->cfg_fcp_io_channel);
phba->cfg_nvme_io_channel = length;
}
}
}
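A worked example of the cap above (illustrative numbers only): with max_wq = 64, max_cq = 32, nvmet_support off, and cfg_fcp_io_channel = 4, length = min(64, 32) - 6 - 4 = 22, so cfg_nvme_io_channel is reduced from its configured value to 22.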
if (rc)
@ -10533,6 +10591,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct lpfc_pc_sli4_params *sli4_params;
uint32_t mbox_tmo;
int length;
bool exp_wqcq_pages = true;
struct lpfc_sli4_parameters *mbx_sli4_parameters;
/*
@ -10659,8 +10718,15 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->nvme_support, phba->nvme_embed_pbde,
phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_2) &&
(bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_FAMILY_LNCR_A0))
exp_wqcq_pages = false;
if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
(bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
exp_wqcq_pages &&
(sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
phba->enab_exp_wqcq_pages = 1;
else
@ -11322,7 +11388,11 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
const struct firmware *fw)
{
if (offset == ADD_STATUS_FW_NOT_SUPPORTED)
if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) ||
(phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
magic_number != MAGIC_NUMER_G6) ||
(phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
magic_number != MAGIC_NUMER_G7))
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3030 This firmware version is not supported on "
"this HBA model. Device:%x Magic:%x Type:%x "
@ -11719,6 +11789,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
lpfc_nvme_free(phba);
lpfc_free_iocb_list(phba);
lpfc_unset_driver_resource_phase2(phba);
lpfc_sli4_driver_resource_unset(phba);
/* Unmap adapter Control and Doorbell registers */

View File

@ -1936,31 +1936,14 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
goto out;
}
/* When the rport rejected the FCP PRLI as unsupported.
* This should only happen in Pt2Pt so an NVME PRLI
* should be outstanding still.
*/
if (npr && ndlp->nlp_flag & NLP_FCP_PRLI_RJT) {
/* Adjust the nlp_type accordingly if the PRLI failed */
if (npr)
ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
goto out_err;
}
if (nvpr)
ndlp->nlp_fc4_type &= ~NLP_FC4_NVME;
/* The LS Req had some error. Don't let this be a
* target.
*/
if ((ndlp->fc4_prli_sent == 1) &&
(ndlp->nlp_state == NLP_STE_PRLI_ISSUE) &&
(ndlp->nlp_type & (NLP_FCP_TARGET | NLP_FCP_INITIATOR)))
/* The FCP PRLI completed successfully but
* the NVME PRLI failed. Since they are sent in
* succession, allow the FCP to complete.
*/
goto out_err;
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
ndlp->nlp_type |= NLP_FCP_INITIATOR;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
return ndlp->nlp_state;
/* We can't set the DSM state till BOTH PRLIs complete */
goto out_err;
}
if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
@ -1999,6 +1982,12 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get_be32(prli_disc, nvpr))
ndlp->nlp_type |= NLP_NVME_DISCOVERY;
/* This node is an NVME target. Adjust the command
* queue depth on this node to not exceed the available
* xris.
*/
ndlp->cmd_qdepth = phba->sli4_hba.nvme_xri_max;
/*
* If prli_fba is set, the Target supports FirstBurst.
* If prli_fb_sz is 0, the FirstBurst size is unlimited,

View File

@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Broadcom refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -334,7 +334,14 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
"6146 remoteport delete of remoteport %p\n",
remoteport);
spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = NULL;
/* The register rebind might have occurred before the delete
* downcall. Guard against this race.
*/
if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
ndlp->nrport = NULL;
ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
}
spin_unlock_irq(&vport->phba->hbalock);
/* Remove original register reference. The host transport
@ -357,15 +364,19 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_dmabuf *buf_ptr;
struct lpfc_nodelist *ndlp;
atomic_inc(&vport->phba->fc4NvmeLsCmpls);
pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
if (status) {
if (vport->localport) {
lport = (struct lpfc_nvme_lport *)vport->localport->private;
if (bf_get(lpfc_wcqe_c_xb, wcqe))
atomic_inc(&lport->cmpl_ls_xb);
atomic_inc(&lport->cmpl_ls_err);
if (lport) {
atomic_inc(&lport->fc4NvmeLsCmpls);
if (status) {
if (bf_get(lpfc_wcqe_c_xb, wcqe))
atomic_inc(&lport->cmpl_ls_xb);
atomic_inc(&lport->cmpl_ls_err);
}
}
}
ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
@ -570,6 +581,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
if (unlikely(!lport) || unlikely(!rport))
return -EINVAL;
vport = lport->vport;
if (vport->load_flag & FC_UNLOADING)
@ -639,7 +653,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
&pnvme_lsreq->rspdma);
atomic_inc(&vport->phba->fc4NvmeLsRequests);
atomic_inc(&lport->fc4NvmeLsRequests);
/* Hardcode the wait to 30 seconds. Connections are failing otherwise.
* This code allows it all to work.
@ -690,6 +704,8 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_iocbq *wqe, *next_wqe;
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
if (unlikely(!lport))
return;
vport = lport->vport;
phba = vport->phba;
@ -949,28 +965,48 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_nodelist *ndlp;
struct lpfc_nvme_fcpreq_priv *freqpriv;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_ctrl_stat *cstat;
unsigned long flags;
uint32_t code, status;
uint32_t code, status, idx;
uint16_t cid, sqhd, data;
uint32_t *ptr;
/* Sanity check on return of outstanding command */
if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) {
if (!lpfc_ncmd) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NODE | LOG_NVME_IOERR,
"6071 Null lpfc_ncmd pointer. No "
"release, skip completion\n");
return;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
"6071 Completion pointers bad on wqe %p.\n",
wcqe);
"6066 Missing cmpl ptrs: lpfc_ncmd %p, "
"nvmeCmd %p nrport %p\n",
lpfc_ncmd, lpfc_ncmd->nvmeCmd,
lpfc_ncmd->nrport);
/* Release the lpfc_ncmd regardless of the missing elements. */
lpfc_release_nvme_buf(phba, lpfc_ncmd);
return;
}
atomic_inc(&phba->fc4NvmeIoCmpls);
nCmd = lpfc_ncmd->nvmeCmd;
rport = lpfc_ncmd->nrport;
status = bf_get(lpfc_wcqe_c_status, wcqe);
if (status) {
if (vport->localport) {
lport = (struct lpfc_nvme_lport *)vport->localport->private;
if (bf_get(lpfc_wcqe_c_xb, wcqe))
atomic_inc(&lport->cmpl_fcp_xb);
atomic_inc(&lport->cmpl_fcp_err);
if (lport) {
idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
cstat = &lport->cstat[idx];
atomic_inc(&cstat->fc4NvmeIoCmpls);
if (status) {
if (bf_get(lpfc_wcqe_c_xb, wcqe))
atomic_inc(&lport->cmpl_fcp_xb);
atomic_inc(&lport->cmpl_fcp_err);
}
}
}
lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
@ -1163,7 +1199,8 @@ out_err:
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
struct lpfc_nvme_buf *lpfc_ncmd,
struct lpfc_nodelist *pnode)
struct lpfc_nodelist *pnode,
struct lpfc_nvme_ctrl_stat *cstat)
{
struct lpfc_hba *phba = vport->phba;
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
@ -1201,7 +1238,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
} else {
wqe->fcp_iwrite.initial_xfer_len = 0;
}
atomic_inc(&phba->fc4NvmeOutputRequests);
atomic_inc(&cstat->fc4NvmeOutputRequests);
} else {
/* From the iread template, initialize words 7 - 11 */
memcpy(&wqe->words[7],
@ -1214,13 +1251,13 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Word 5 */
wqe->fcp_iread.rsrvd5 = 0;
atomic_inc(&phba->fc4NvmeInputRequests);
atomic_inc(&cstat->fc4NvmeInputRequests);
}
} else {
/* From the icmnd template, initialize words 4 - 11 */
memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
sizeof(uint32_t) * 8);
atomic_inc(&phba->fc4NvmeControlRequests);
atomic_inc(&cstat->fc4NvmeControlRequests);
}
/*
* Finish initializing those WQE fields that are independent
@ -1400,7 +1437,9 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
{
int ret = 0;
int expedite = 0;
int idx;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_ctrl_stat *cstat;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
struct lpfc_nodelist *ndlp;
@ -1425,9 +1464,10 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
if (unlikely(!hw_queue_handle)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6129 Fail Abort, NULL hw_queue_handle\n");
ret = -EINVAL;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6117 Fail IO, NULL hw_queue_handle\n");
atomic_inc(&lport->xmt_fcp_err);
ret = -EBUSY;
goto out_fail;
}
@ -1439,12 +1479,18 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
}
if (vport->load_flag & FC_UNLOADING) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6124 Fail IO, Driver unload\n");
atomic_inc(&lport->xmt_fcp_err);
ret = -ENODEV;
goto out_fail;
}
freqpriv = pnvme_fcreq->private;
if (unlikely(!freqpriv)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6158 Fail IO, NULL request data\n");
atomic_inc(&lport->xmt_fcp_err);
ret = -EINVAL;
goto out_fail;
}
@ -1462,32 +1508,26 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
*/
ndlp = rport->ndlp;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
"6053 rport %p, ndlp %p, DID x%06x "
"ndlp not ready.\n",
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
"6053 Fail IO, ndlp not ready: rport %p "
"ndlp %p, DID x%06x\n",
rport, ndlp, pnvme_rport->port_id);
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
if (!ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
"6066 Missing node for DID %x\n",
pnvme_rport->port_id);
atomic_inc(&lport->xmt_fcp_bad_ndlp);
ret = -ENODEV;
goto out_fail;
}
atomic_inc(&lport->xmt_fcp_err);
ret = -EBUSY;
goto out_fail;
}
/* The remote node has to be a mapped target or it's an error. */
if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
"6036 rport %p, DID x%06x not ready for "
"IO. State x%x, Type x%x\n",
rport, pnvme_rport->port_id,
ndlp->nlp_state, ndlp->nlp_type);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
"6036 Fail IO, DID x%06x not ready for "
"IO. State x%x, Type x%x Flg x%x\n",
pnvme_rport->port_id,
ndlp->nlp_state, ndlp->nlp_type,
ndlp->upcall_flags);
atomic_inc(&lport->xmt_fcp_bad_ndlp);
ret = -ENODEV;
ret = -EBUSY;
goto out_fail;
}
@ -1508,6 +1548,12 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
*/
if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
!expedite) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6174 Fail IO, ndlp qdepth exceeded: "
"idx %d DID %x pend %d qdepth %d\n",
lpfc_queue_info->index, ndlp->nlp_DID,
atomic_read(&ndlp->cmd_pending),
ndlp->cmd_qdepth);
atomic_inc(&lport->xmt_fcp_qdepth);
ret = -EBUSY;
goto out_fail;
@ -1517,8 +1563,9 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
if (lpfc_ncmd == NULL) {
atomic_inc(&lport->xmt_fcp_noxri);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6065 driver's buffer pool is empty, "
"IO failed\n");
"6065 Fail IO, driver buffer pool is empty: "
"idx %d DID %x\n",
lpfc_queue_info->index, ndlp->nlp_DID);
ret = -EBUSY;
goto out_fail;
}
@ -1543,15 +1590,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_ncmd->ndlp = ndlp;
lpfc_ncmd->start_time = jiffies;
lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
if (ret) {
ret = -ENOMEM;
goto out_free_nvme_buf;
}
atomic_inc(&ndlp->cmd_pending);
/*
* Issue the IO on the WQ indicated by index in the hw_queue_handle.
* This identifier was created in our hardware queue create callback
@ -1560,7 +1598,23 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
* index to use and that they have affinitized a CPU to this hardware
* queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
*/
lpfc_ncmd->cur_iocbq.hba_wqidx = lpfc_queue_info->index;
idx = lpfc_queue_info->index;
lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
cstat = &lport->cstat[idx];
lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
if (ret) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6175 Fail IO, Prep DMA: "
"idx %d DID %x\n",
lpfc_queue_info->index, ndlp->nlp_DID);
atomic_inc(&lport->xmt_fcp_err);
ret = -ENOMEM;
goto out_free_nvme_buf;
}
atomic_inc(&ndlp->cmd_pending);
lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
@ -1571,7 +1625,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
atomic_inc(&lport->xmt_fcp_wqerr);
atomic_dec(&ndlp->cmd_pending);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6113 FCP could not issue WQE err %x "
"6113 Fail IO, Could not issue WQE err %x "
"sid: x%x did: x%x oxid: x%x\n",
ret, vport->fc_myDID, ndlp->nlp_DID,
lpfc_ncmd->cur_iocbq.sli4_xritag);
@ -1605,11 +1659,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
out_free_nvme_buf:
if (lpfc_ncmd->nvmeCmd->sg_cnt) {
if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
atomic_dec(&phba->fc4NvmeOutputRequests);
atomic_dec(&cstat->fc4NvmeOutputRequests);
else
atomic_dec(&phba->fc4NvmeInputRequests);
atomic_dec(&cstat->fc4NvmeInputRequests);
} else
atomic_dec(&phba->fc4NvmeControlRequests);
atomic_dec(&cstat->fc4NvmeControlRequests);
lpfc_release_nvme_buf(phba, lpfc_ncmd);
out_fail:
return ret;
@ -2390,7 +2444,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
struct nvme_fc_port_info nfcp_info;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
int len;
struct lpfc_nvme_ctrl_stat *cstat;
int len, i;
/* Initialize this localport instance. The vport wwn usage ensures
* that NPIV is accounted for.
@ -2414,6 +2469,11 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) *
phba->cfg_nvme_io_channel), GFP_KERNEL);
if (!cstat)
return -ENOMEM;
/* localport is allocated from the stack, but the registration
* call allocates heap memory as well as the private area.
*/
@ -2436,11 +2496,13 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lport = (struct lpfc_nvme_lport *)localport->private;
vport->localport = localport;
lport->vport = vport;
lport->cstat = cstat;
vport->nvmei_support = 1;
atomic_set(&lport->xmt_fcp_noxri, 0);
atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
atomic_set(&lport->xmt_fcp_qdepth, 0);
atomic_set(&lport->xmt_fcp_err, 0);
atomic_set(&lport->xmt_fcp_wqerr, 0);
atomic_set(&lport->xmt_fcp_abort, 0);
atomic_set(&lport->xmt_ls_abort, 0);
@ -2449,6 +2511,16 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
atomic_set(&lport->cmpl_fcp_err, 0);
atomic_set(&lport->cmpl_ls_xb, 0);
atomic_set(&lport->cmpl_ls_err, 0);
atomic_set(&lport->fc4NvmeLsRequests, 0);
atomic_set(&lport->fc4NvmeLsCmpls, 0);
for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
cstat = &lport->cstat[i];
atomic_set(&cstat->fc4NvmeInputRequests, 0);
atomic_set(&cstat->fc4NvmeOutputRequests, 0);
atomic_set(&cstat->fc4NvmeControlRequests, 0);
atomic_set(&cstat->fc4NvmeIoCmpls, 0);
}
/* Don't post more new bufs if repost already recovered
* the nvme sgls.
@ -2458,6 +2530,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
phba->sli4_hba.nvme_xri_max);
vport->phba->total_nvme_bufs += len;
}
} else {
kfree(cstat);
}
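Lifetime note (a sketch of the flow in this hunk): cstat is allocated before nvme_fc_register_localport(); on success the lport takes ownership via lport->cstat, on failure it is freed in the else branch here, and the matching teardown free happens in lpfc_nvme_destroy_localport() after the unregister completes.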
return ret;
@ -2520,6 +2594,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
#if (IS_ENABLED(CONFIG_NVME_FC))
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_ctrl_stat *cstat;
int ret;
if (vport->nvmei_support == 0)
@ -2528,6 +2603,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
localport = vport->localport;
vport->localport = NULL;
lport = (struct lpfc_nvme_lport *)localport->private;
cstat = lport->cstat;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6011 Destroying NVME localport %p\n",
@ -2543,6 +2619,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
* indefinitely or succeeds
*/
lpfc_nvme_lport_unreg_wait(vport, lport);
kfree(cstat);
/* Regardless of the unregister upcall response, clear
* nvmei_support. All rports are unregistered and the
@ -2607,6 +2684,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_rport *rport;
struct lpfc_nvme_rport *oldrport;
struct nvme_fc_remote_port *remote_port;
struct nvme_fc_port_info rpinfo;
struct lpfc_nodelist *prev_ndlp;
@ -2639,7 +2717,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
if (!ndlp->nrport)
oldrport = lpfc_ndlp_get_nrport(ndlp);
if (!oldrport)
lpfc_nlp_get(ndlp);
ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
@ -2648,9 +2728,15 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* a resume of the existing rport. Else this is a
* new rport.
*/
/* Guard against an unregister/reregister
* race that leaves the WAIT flag set.
*/
spin_lock_irq(&vport->phba->hbalock);
ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
spin_unlock_irq(&vport->phba->hbalock);
rport = remote_port->private;
if (ndlp->nrport) {
if (ndlp->nrport == remote_port->private) {
if (oldrport) {
if (oldrport == remote_port->private) {
/* Same remoteport. Just reuse. */
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NVME_DISC,
@ -2674,11 +2760,20 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
*/
spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = NULL;
ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
spin_unlock_irq(&vport->phba->hbalock);
rport->ndlp = NULL;
rport->remoteport = NULL;
if (prev_ndlp)
lpfc_nlp_put(ndlp);
/* The reference is only removed if the previous NDLP is no
 * longer active. It might be just a swap, and removing the
 * reference would cause a premature cleanup.
*/
if (prev_ndlp && prev_ndlp != ndlp) {
if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
(!prev_ndlp->nrport))
lpfc_nlp_put(prev_ndlp);
}
}
/* Clean bind the rport to the ndlp. */
@ -2746,7 +2841,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!lport)
goto input_err;
rport = ndlp->nrport;
rport = lpfc_ndlp_get_nrport(ndlp);
if (!rport)
goto input_err;
@ -2767,6 +2862,15 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* The transport will update it.
*/
ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
/* Don't let the host nvme transport keep sending keep-alives
* on this remoteport. Vport is unloading, no recovery. The
* return value is ignored. The upcall is a courtesy to the
* transport.
*/
if (vport->load_flag & FC_UNLOADING)
(void)nvme_fc_set_remoteport_devloss(remoteport, 0);
ret = nvme_fc_unregister_remoteport(remoteport);
if (ret != 0) {
lpfc_nlp_put(ndlp);

View File

@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Broadcom refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -30,21 +30,36 @@
#define LPFC_NVME_FB_SHIFT 9
#define LPFC_NVME_MAX_FB (1 << 20) /* 1M */
#define lpfc_ndlp_get_nrport(ndlp) \
((!ndlp->nrport || (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG)) \
? NULL : ndlp->nrport)
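A usage sketch for the macro (mirroring the call site in lpfc_nvme_unregister_port above): a pending unregister makes the lookup read as NULL, so callers bail out instead of touching a half-torn-down rport:

	rport = lpfc_ndlp_get_nrport(ndlp);	/* NULL while NLP_WAIT_FOR_UNREG is set */
	if (!rport)
		goto input_err;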
struct lpfc_nvme_qhandle {
uint32_t index; /* WQ index to use */
uint32_t qidx; /* queue index passed to create */
uint32_t cpu_id; /* current cpu id at time of create */
};
struct lpfc_nvme_ctrl_stat {
atomic_t fc4NvmeInputRequests;
atomic_t fc4NvmeOutputRequests;
atomic_t fc4NvmeControlRequests;
atomic_t fc4NvmeIoCmpls;
};
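How the counter sets are indexed on the I/O path, per the hunks above (a minimal sketch): the hardware WQ index selects the counter block, so concurrent queues do not contend on one hot atomic:

	idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
	cstat = &lport->cstat[idx];
	atomic_inc(&cstat->fc4NvmeIoCmpls);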
/* Declare nvme-based local and remote port definitions. */
struct lpfc_nvme_lport {
struct lpfc_vport *vport;
struct completion lport_unreg_done;
/* Add stats counters here */
struct lpfc_nvme_ctrl_stat *cstat;
atomic_t fc4NvmeLsRequests;
atomic_t fc4NvmeLsCmpls;
atomic_t xmt_fcp_noxri;
atomic_t xmt_fcp_bad_ndlp;
atomic_t xmt_fcp_qdepth;
atomic_t xmt_fcp_wqerr;
atomic_t xmt_fcp_err;
atomic_t xmt_fcp_abort;
atomic_t xmt_ls_abort;
atomic_t xmt_ls_err;

View File

@ -22,8 +22,10 @@
********************************************************************/
#define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */
#define LPFC_NVMET_RQE_DEF_COUNT 512
#define LPFC_NVMET_SUCCESS_LEN 12
#define LPFC_NVMET_RQE_MIN_POST 128
#define LPFC_NVMET_RQE_DEF_POST 512
#define LPFC_NVMET_RQE_DEF_COUNT 2048
#define LPFC_NVMET_SUCCESS_LEN 12
#define LPFC_NVMET_MRQ_OFF 0xffff
#define LPFC_NVMET_MRQ_AUTO 0

View File

@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Broadcom refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -1021,7 +1021,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
if (lpfc_test_rrq_active(phba, ndlp,
lpfc_cmd->cur_iocbq.sli4_lxritag))
continue;
list_del(&lpfc_cmd->list);
list_del_init(&lpfc_cmd->list);
found = 1;
break;
}
@ -1036,7 +1036,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
if (lpfc_test_rrq_active(
phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
continue;
list_del(&lpfc_cmd->list);
list_del_init(&lpfc_cmd->list);
found = 1;
break;
}
@ -3983,9 +3983,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
#endif
if (pnode && NLP_CHK_NODE_ACT(pnode))
atomic_dec(&pnode->cmd_pending);
if (lpfc_cmd->status) {
if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
(lpfc_cmd->result & IOERR_DRVR_MASK))
@ -4125,6 +4122,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
spin_lock_irqsave(shost->host_lock, flags);
if (pnode && NLP_CHK_NODE_ACT(pnode)) {
atomic_dec(&pnode->cmd_pending);
if (pnode->cmd_qdepth >
atomic_read(&pnode->cmd_pending) &&
(atomic_read(&pnode->cmd_pending) >
@ -4138,16 +4136,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
spin_unlock_irqrestore(shost->host_lock, flags);
} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
if ((pnode->cmd_qdepth != vport->cfg_tgt_queue_depth) &&
time_after(jiffies, pnode->last_change_time +
msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
spin_lock_irqsave(shost->host_lock, flags);
pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
pnode->last_change_time = jiffies;
spin_unlock_irqrestore(shost->host_lock, flags);
}
atomic_dec(&pnode->cmd_pending);
}
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
spin_lock_irqsave(&phba->hbalock, flags);
@ -4591,6 +4581,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
ndlp->nlp_portname.u.wwn[7]);
goto out_tgt_busy;
}
atomic_inc(&ndlp->cmd_pending);
lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
if (lpfc_cmd == NULL) {
lpfc_rampdown_queue_depth(phba);
@ -4643,11 +4635,9 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
atomic_inc(&ndlp->cmd_pending);
err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
if (err) {
atomic_dec(&ndlp->cmd_pending);
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"3376 FCP could not issue IOCB err %x"
"FCP cmd x%x <%d/%llu> "
@ -4691,6 +4681,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
atomic_dec(&ndlp->cmd_pending);
return SCSI_MLQUEUE_HOST_BUSY;
out_tgt_busy:
@ -4725,7 +4716,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
int ret = SUCCESS, status = 0;
struct lpfc_sli_ring *pring_s4;
int ret_val;
unsigned long flags, iflags;
unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
status = fc_block_scsi_eh(cmnd);
@ -4825,16 +4816,16 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
abtsiocb->vport = vport;
if (phba->sli_rev == LPFC_SLI_REV4) {
pring_s4 = lpfc_sli4_calc_ring(phba, iocb);
pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocb);
if (pring_s4 == NULL) {
ret = FAILED;
goto out_unlock;
}
/* Note: both hbalock and ring_lock must be set here */
spin_lock_irqsave(&pring_s4->ring_lock, iflags);
spin_lock(&pring_s4->ring_lock);
ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
abtsiocb, 0);
spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
spin_unlock(&pring_s4->ring_lock);
} else {
ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
abtsiocb, 0);

View File

@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Broadcom refers to Broadcom Inc and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *

View File

@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Broadcom refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -96,6 +96,34 @@ lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
return &iocbq->iocb;
}
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
* lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
* @srcp: Source memory pointer.
* @destp: Destination memory pointer.
* @cnt: Number of words required to be copied.
* Must be a multiple of sizeof(uint64_t)
*
* This function is used for copying data between driver memory
* and the SLI WQ. This function also changes the endianness
* of each word if native endianness is different from SLI
* endianness. This function can be called with or without
* lock.
**/
void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
uint64_t *src = srcp;
uint64_t *dest = destp;
int i;
for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
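On 64-bit little-endian builds the new helper is a plain 64-bit word copy; everywhere else the macro falls back to lpfc_sli_pcimem_bcopy(), which byte-swaps each word. Caller sketch, as used in the WQ put path below:

	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);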
/**
* lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue
* @q: The Work Queue to operate on.
@ -137,7 +165,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
if (q->dpp_enable && q->phba->cfg_enable_dpp) {
/* write to DPP aperture taking advantage of Combined Writes */
tmp = (uint8_t *)temp_wqe;
@ -240,7 +268,7 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
/* If the host has not yet processed the next entry then we are done */
if (((q->host_index + 1) % q->entry_count) == q->hba_index)
return -ENOMEM;
lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
/* Save off the mailbox pointer for completion */
q->phba->mbox = (MAILBOX_t *)temp_mqe;
@ -663,8 +691,8 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
/* If the host has not yet processed the next entry then we are done */
if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
return -EBUSY;
lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
/* Update the host index to point to the next slot */
hq->host_index = ((hq_put_index + 1) % hq->entry_count);
@ -7199,7 +7227,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
lpfc_post_rq_buffer(
phba, phba->sli4_hba.nvmet_mrq_hdr[i],
phba->sli4_hba.nvmet_mrq_data[i],
LPFC_NVMET_RQE_DEF_COUNT, i);
phba->cfg_nvmet_mrq_post, i);
}
}
@ -8185,8 +8213,8 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
*/
mbx_cmnd = bf_get(lpfc_mqe_command, mb);
memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
sizeof(struct lpfc_mqe));
lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
sizeof(struct lpfc_mqe));
/* Post the high mailbox dma address to the port and wait for ready. */
dma_address = &phba->sli4_hba.bmbx.dma_address;
@ -8210,11 +8238,11 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* If so, update the mailbox status so that the upper layers
* can complete the request normally.
*/
lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
sizeof(struct lpfc_mqe));
lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
sizeof(struct lpfc_mqe));
mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
sizeof(struct lpfc_mcqe));
lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
sizeof(struct lpfc_mcqe));
mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
/*
* When the CQE status indicates a failure and the mailbox status
@ -11300,11 +11328,11 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
unsigned long iflags;
struct lpfc_sli_ring *pring_s4;
spin_lock_irq(&phba->hbalock);
spin_lock_irqsave(&phba->hbalock, iflags);
/* all I/Os are in process of being flushed */
if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
spin_unlock_irq(&phba->hbalock);
spin_unlock_irqrestore(&phba->hbalock, iflags);
return 0;
}
sum = 0;
@ -11366,14 +11394,14 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
if (phba->sli_rev == LPFC_SLI_REV4) {
pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
if (pring_s4 == NULL)
pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocbq);
if (!pring_s4)
continue;
/* Note: both hbalock and ring_lock must be set here */
spin_lock_irqsave(&pring_s4->ring_lock, iflags);
spin_lock(&pring_s4->ring_lock);
ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
abtsiocbq, 0);
spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
spin_unlock(&pring_s4->ring_lock);
} else {
ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
abtsiocbq, 0);
@ -11385,7 +11413,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
else
sum++;
}
spin_unlock_irq(&phba->hbalock);
spin_unlock_irqrestore(&phba->hbalock, iflags);
return sum;
}
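The locking change in this function, restated as a sketch: the outer hbalock is now taken with irqsave, so the nested ring_lock can be a plain spin_lock — interrupts are already disabled, and a nested irqsave would only shadow the saved flags:

	spin_lock_irqsave(&phba->hbalock, iflags);
	...
	spin_lock(&pring_s4->ring_lock);	/* nested; IRQs already off */
	ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, abtsiocbq, 0);
	spin_unlock(&pring_s4->ring_lock);
	...
	spin_unlock_irqrestore(&phba->hbalock, iflags);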
@ -12830,7 +12858,7 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
/* Move mbox data to caller's mailbox region, do endian swapping */
if (pmb->mbox_cmpl && mbox)
lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
/*
* For mcqe errors, conditionally move a modified error code to
@ -12913,7 +12941,7 @@ lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
bool workposted;
/* Copy the mailbox MCQE and convert endian order as needed */
lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
/* Invoke the proper event handling routine */
if (!bf_get(lpfc_trailer_async, &mcqe))
@ -12944,6 +12972,17 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
int txcmplq_cnt = 0;
int fcp_txcmplq_cnt = 0;
/* Check for response status */
if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
/* Log the error status */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0357 ELS CQE error: status=x%x: "
"CQE: %08x %08x %08x %08x\n",
bf_get(lpfc_wcqe_c_status, wcqe),
wcqe->word0, wcqe->total_data_placed,
wcqe->parameter, wcqe->word3);
}
/* Get an irspiocbq for later ELS response processing use */
irspiocbq = lpfc_sli_get_iocbq(phba);
if (!irspiocbq) {
@ -13173,7 +13212,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
bool workposted = false;
/* Copy the work queue CQE and convert endian order if needed */
lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
/* Check and process for different type of WCQE and dispatch */
switch (bf_get(lpfc_cqe_code, &cqevt)) {
@ -13364,14 +13403,12 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
phba->lpfc_rampdown_queue_depth(phba);
/* Log the error status */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0373 FCP complete error: status=x%x, "
"hw_status=x%x, total_data_specified=%d, "
"parameter=x%x, word3=x%x\n",
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0373 FCP CQE error: status=x%x: "
"CQE: %08x %08x %08x %08x\n",
bf_get(lpfc_wcqe_c_status, wcqe),
bf_get(lpfc_wcqe_c_hw_status, wcqe),
wcqe->total_data_placed, wcqe->parameter,
wcqe->word3);
wcqe->word0, wcqe->total_data_placed,
wcqe->parameter, wcqe->word3);
}
/* Look up the FCP command IOCB and create pseudo response IOCB */
@ -13581,7 +13618,7 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
bool workposted = false;
/* Copy the work queue CQE and convert endian order if needed */
lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
/* Check and process for different type of WCQE and dispatch */
switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
@ -19032,9 +19069,22 @@ lpfc_drain_txq(struct lpfc_hba *phba)
struct lpfc_sglq *sglq;
union lpfc_wqe128 wqe;
uint32_t txq_cnt = 0;
struct lpfc_queue *wq;
pring = lpfc_phba_elsring(phba);
if (unlikely(!pring))
if (phba->link_flag & LS_MDS_LOOPBACK) {
/* MDS WQEs are posted only to the first WQ */
wq = phba->sli4_hba.fcp_wq[0];
if (unlikely(!wq))
return 0;
pring = wq->pring;
} else {
wq = phba->sli4_hba.els_wq;
if (unlikely(!wq))
return 0;
pring = lpfc_phba_elsring(phba);
}
if (unlikely(!pring) || list_empty(&pring->txq))
return 0;
spin_lock_irqsave(&pring->ring_lock, iflags);
@ -19075,7 +19125,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
fail_msg = "to convert bpl to sgl";
else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
fail_msg = "to convert iocb to wqe";
else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
else if (lpfc_sli4_wq_put(wq, &wqe))
fail_msg = " - Wq is full";
else
lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

View File

@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Broadcom refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "12.0.0.1"
#define LPFC_DRIVER_VERSION "12.0.0.4"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */

View File

@ -4166,6 +4166,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
int irq, i, j;
int error = -ENODEV;
if (hba_count >= MAX_CONTROLLERS)
goto out;
if (pci_enable_device(pdev))
goto out;
pci_set_master(pdev);
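Rationale sketch (an assumption, not stated in the hunk): the legacy megaraid driver keeps fixed-size per-adapter bookkeeping indexed by hba_count, so refusing further probes once MAX_CONTROLLERS is reached prevents indexing past those arrays.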

View File

@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
#define MEGASAS_VERSION "07.704.04.00-rc1"
#define MEGASAS_RELDATE "December 7, 2017"
#define MEGASAS_VERSION "07.705.02.00-rc1"
#define MEGASAS_RELDATE "April 4, 2018"
/*
* Device IDs

View File

@ -92,7 +92,7 @@ MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disbale Default: enable(1)");
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
int rdpq_enable = 1;
module_param(rdpq_enable, int, S_IRUGO);
@ -2224,9 +2224,9 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
sizeof(struct MR_LD_VF_AFFILIATION_111));
else {
new_affiliation_111 =
pci_alloc_consistent(instance->pdev,
sizeof(struct MR_LD_VF_AFFILIATION_111),
&new_affiliation_111_h);
pci_zalloc_consistent(instance->pdev,
sizeof(struct MR_LD_VF_AFFILIATION_111),
&new_affiliation_111_h);
if (!new_affiliation_111) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
@ -2234,8 +2234,6 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
megasas_return_cmd(instance, cmd);
return -ENOMEM;
}
memset(new_affiliation_111, 0,
sizeof(struct MR_LD_VF_AFFILIATION_111));
}
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@ -2333,10 +2331,10 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
sizeof(struct MR_LD_VF_AFFILIATION));
else {
new_affiliation =
pci_alloc_consistent(instance->pdev,
(MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
&new_affiliation_h);
pci_zalloc_consistent(instance->pdev,
(MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
&new_affiliation_h);
if (!new_affiliation) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
@ -2344,8 +2342,6 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
megasas_return_cmd(instance, cmd);
return -ENOMEM;
}
memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION));
}
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@ -5636,16 +5632,15 @@ megasas_get_seq_num(struct megasas_instance *instance,
}
dcmd = &cmd->frame->dcmd;
el_info = pci_alloc_consistent(instance->pdev,
sizeof(struct megasas_evt_log_info),
&el_info_h);
el_info = pci_zalloc_consistent(instance->pdev,
sizeof(struct megasas_evt_log_info),
&el_info_h);
if (!el_info) {
megasas_return_cmd(instance, cmd);
return -ENOMEM;
}
memset(el_info, 0, sizeof(*el_info));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
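The conversion pattern repeated across these megaraid_sas hunks, as a generic sketch (buf, len, and buf_h are placeholders): pci_zalloc_consistent() allocates coherent DMA memory already zeroed, folding away the follow-up memset():

	buf = pci_zalloc_consistent(pdev, len, &buf_h);	/* allocates and zeroes */
	if (!buf)
		return -ENOMEM;
	/* previously: pci_alloc_consistent(pdev, len, &buf_h); then memset(buf, 0, len); */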

View File

@ -684,15 +684,14 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
MAX_MSIX_QUEUES_FUSION;
fusion->rdpq_virt = pci_alloc_consistent(instance->pdev, array_size,
&fusion->rdpq_phys);
fusion->rdpq_virt = pci_zalloc_consistent(instance->pdev, array_size,
&fusion->rdpq_phys);
if (!fusion->rdpq_virt) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
memset(fusion->rdpq_virt, 0, array_size);
msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
@ -2981,6 +2980,9 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value);
pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
} else {
if (os_timeout_value)
os_timeout_value++;
/* system pd Fast Path */
io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
timeout_limit = (scmd->device->type == TYPE_DISK) ?

View File

@ -9,7 +9,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
* mpi2.h Version: 02.00.48
* mpi2.h Version: 02.00.50
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@ -114,6 +114,8 @@
* 09-02-16 02.00.46 Bumped MPI2_HEADER_VERSION_UNIT.
* 11-23-16 02.00.47 Bumped MPI2_HEADER_VERSION_UNIT.
* 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT.
* 06-13-17 02.00.49 Bumped MPI2_HEADER_VERSION_UNIT.
* 09-29-17 02.00.50 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@ -152,8 +154,9 @@
MPI26_VERSION_MINOR)
#define MPI2_VERSION_02_06 (0x0206)
/*Unit and Dev versioning for this MPI header set */
#define MPI2_HEADER_VERSION_UNIT (0x30)
/* Unit and Dev versioning for this MPI header set */
#define MPI2_HEADER_VERSION_UNIT (0x32)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)

View File

@ -7,7 +7,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
* mpi2_cnfg.h Version: 02.00.40
* mpi2_cnfg.h Version: 02.00.42
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@ -219,6 +219,18 @@
* Added ChassisSlot field to SAS Enclosure Page 0.
* Added ChassisSlot Valid bit (bit 5) to the Flags field
* in SAS Enclosure Page 0.
* 06-13-17 02.00.41 Added MPI26_MFGPAGE_DEVID_SAS3816 and
* MPI26_MFGPAGE_DEVID_SAS3916 defines.
* Removed MPI26_MFGPAGE_DEVID_SAS4008 define.
* Added MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN define.
* Renamed PI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS to
* PI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN.
* Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS to
* MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK.
* 09-29-17 02.00.42 Added ControllerResetTO field to PCIe Device Page 2.
* Added NOIOB field to PCIe Device Page 2.
* Added MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN to
* the Capabilities field of PCIe Device Page 2.
* --------------------------------------------------------------------------
*/
@ -556,7 +568,8 @@ typedef struct _MPI2_CONFIG_REPLY {
#define MPI26_MFGPAGE_DEVID_SAS3616 (0x00D1)
#define MPI26_MFGPAGE_DEVID_SAS3708 (0x00D2)
#define MPI26_MFGPAGE_DEVID_SAS4008 (0x00A1)
#define MPI26_MFGPAGE_DEVID_SAS3816 (0x00A1)
#define MPI26_MFGPAGE_DEVID_SAS3916 (0x00A0)
/*Manufacturing Page 0 */
@ -3864,20 +3877,25 @@ typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_0 {
typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
U16 DevHandle; /*0x08 */
U16 Reserved1; /*0x0A */
U32 MaximumDataTransferSize;/*0x0C */
U8 ControllerResetTO; /* 0x0A */
U8 Reserved1; /* 0x0B */
U32 MaximumDataTransferSize; /*0x0C */
U32 Capabilities; /*0x10 */
U32 Reserved2; /*0x14 */
U16 NOIOB; /* 0x14 */
U16 Reserved2; /* 0x16 */
} MPI26_CONFIG_PAGE_PCIEDEV_2, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_2,
Mpi26PCIeDevicePage2_t, *pMpi26PCIeDevicePage2_t;
#define MPI26_PCIEDEVICE2_PAGEVERSION (0x00)
#define MPI26_PCIEDEVICE2_PAGEVERSION (0x01)
/*defines for PCIe Device Page 2 Capabilities field */
#define MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN (0x00000008)
#define MPI26_PCIEDEV2_CAP_SGL_FORMAT (0x00000004)
#define MPI26_PCIEDEV2_CAP_BIT_BUCKET_SUPPORT (0x00000002)
#define MPI26_PCIEDEV2_CAP_SGL_SUPPORT (0x00000001)
/* Defines for the NOIOB field */
#define MPI26_PCIEDEV2_NOIOB_UNSUPPORTED (0x0000)
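A consumer sketch for the new capability flag (the page variable name is assumed; config-page fields are little-endian in host memory):

	cap = le32_to_cpu(pcie_device_pg2.Capabilities);
	if (cap & MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN)
		/* honor the device's data-block alignment/granularity */;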
/****************************************************************************
* PCIe Link Config Pages (MPI v2.6 and later)

View File

@ -75,7 +75,7 @@
typedef struct _MPI2_SCSI_IO_CDB_EEDP32 {
U8 CDB[20]; /*0x00 */
U32 PrimaryReferenceTag; /*0x14 */
__be32 PrimaryReferenceTag; /*0x14 */
U16 PrimaryApplicationTag; /*0x18 */
U16 PrimaryApplicationTagMask; /*0x1A */
U32 TransferLength; /*0x1C */
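The U32-to-__be32 change documents the on-the-wire byte order of the reference tag; a seeding sketch (ref_tag is a placeholder for the caller's 32-bit tag value):

	eedp32->PrimaryReferenceTag = cpu_to_be32(ref_tag);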

View File

@ -7,7 +7,7 @@
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
* mpi2_ioc.h Version: 02.00.32
* mpi2_ioc.h Version: 02.00.34
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@ -167,6 +167,10 @@
* 02-02-17 02.00.32 Added MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP.
* Added MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT and related
* defines for the ReasonCode field.
* 06-13-17 02.00.33 Added MPI2_FW_DOWNLOAD_ITYPE_CPLD.
* 09-29-17 02.00.34 Added MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED
* to the ReasonCode field in PCIe Device Status Change
* Event Data.
* --------------------------------------------------------------------------
*/
@ -1182,6 +1186,7 @@ typedef struct _MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE {
#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E)
#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F)
#define MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE (0x10)
#define MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED (0x11)
/*PCIe Enumeration Event data (MPI v2.6 and later) */

View File

@ -87,7 +87,7 @@ MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disbale Default: enable(1)");
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0);
@ -297,12 +297,15 @@ static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
dma_addr_t chain_buffer_dma)
{
u16 index;
u16 index, j;
struct chain_tracker *ct;
for (index = 0; index < ioc->chain_depth; index++) {
if (ioc->chain_lookup[index].chain_buffer_dma ==
chain_buffer_dma)
return ioc->chain_lookup[index].chain_buffer;
for (index = 0; index < ioc->scsiio_depth; index++) {
for (j = 0; j < ioc->chains_needed_per_io; j++) {
ct = &ioc->chain_lookup[index].chains_per_smid[j];
if (ct && ct->chain_buffer_dma == chain_buffer_dma)
return ct->chain_buffer;
}
}
pr_info(MPT3SAS_FMT
"Provided chain_buffer_dma address is not in the lookup list\n",
@ -394,13 +397,14 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
buff_ptr_phys = buffer_iomem_phys;
WARN_ON(buff_ptr_phys > U32_MAX);
if (sgel->FlagsLength &
if (le32_to_cpu(sgel->FlagsLength) &
(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
is_write = 1;
for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
sgl_flags = (sgel->FlagsLength >> MPI2_SGE_FLAGS_SHIFT);
sgl_flags =
(le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);
switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
@ -411,7 +415,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
*/
sgel_next =
_base_get_chain_buffer_dma_to_chain_buffer(ioc,
sgel->Address);
le32_to_cpu(sgel->Address));
if (sgel_next == NULL)
return;
/*
@ -426,7 +430,8 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
dst_addr_phys = _base_get_chain_phys(ioc,
smid, sge_chain_count);
WARN_ON(dst_addr_phys > U32_MAX);
sgel->Address = (u32)dst_addr_phys;
sgel->Address =
cpu_to_le32(lower_32_bits(dst_addr_phys));
sgel = sgel_next;
sge_chain_count++;
break;
@ -435,22 +440,28 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
if (is_scsiio_req) {
_base_clone_to_sys_mem(buff_ptr,
sg_virt(sg_scmd),
(sgel->FlagsLength & 0x00ffffff));
(le32_to_cpu(sgel->FlagsLength) &
0x00ffffff));
/*
* FIXME: this relies on a zero
* PCI mem_offset.
*/
sgel->Address = (u32)buff_ptr_phys;
sgel->Address =
cpu_to_le32((u32)buff_ptr_phys);
} else {
_base_clone_to_sys_mem(buff_ptr,
ioc->config_vaddr,
(sgel->FlagsLength & 0x00ffffff));
sgel->Address = (u32)buff_ptr_phys;
(le32_to_cpu(sgel->FlagsLength) &
0x00ffffff));
sgel->Address =
cpu_to_le32((u32)buff_ptr_phys);
}
}
buff_ptr += (sgel->FlagsLength & 0x00ffffff);
buff_ptr_phys += (sgel->FlagsLength & 0x00ffffff);
if ((sgel->FlagsLength &
buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
0x00ffffff);
buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
0x00ffffff);
if ((le32_to_cpu(sgel->FlagsLength) &
(MPI2_SGE_FLAGS_END_OF_BUFFER
<< MPI2_SGE_FLAGS_SHIFT)))
goto eob_clone_chain;
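The recurring fix in this clone path, sketched generically: MPI SGE descriptors sit in little-endian host memory, so flags and lengths are extracted through le32_to_cpu() before masking, and addresses are written back with cpu_to_le32():

	len = le32_to_cpu(sgel->FlagsLength) & 0x00ffffff;		/* byte count */
	flags = le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT;
	sgel->Address = cpu_to_le32(lower_32_bits(dst_addr_phys));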
@ -1019,6 +1030,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
desc = "Cable Event";
break;
case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
desc = "SAS Device Discovery Error";
break;
case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
desc = "PCIE Device Status Change";
break;
@ -1433,7 +1447,7 @@ _base_interrupt(int irq, void *bus_id)
cpu_to_le32(reply);
if (ioc->is_mcpu_endpoint)
_base_clone_reply_to_sys_mem(ioc,
cpu_to_le32(reply),
reply,
ioc->reply_free_host_index);
writel(ioc->reply_free_host_index,
&ioc->chip->ReplyFreeHostIndex);
@ -1671,7 +1685,8 @@ _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
* @ioc: per adapter object
* @scmd: SCSI commands of the IO request
*
* Returns chain tracker(from ioc->free_chain_list)
 * Returns a chain tracker from the chain_lookup table, keyed by the
 * smid and the smid's chain_offset.
*/
static struct chain_tracker *
_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
@ -1679,20 +1694,15 @@ _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
{
struct chain_tracker *chain_req;
struct scsiio_tracker *st = scsi_cmd_priv(scmd);
unsigned long flags;
u16 smid = st->smid;
u8 chain_offset =
atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
if (list_empty(&ioc->free_chain_list)) {
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
dfailprintk(ioc, pr_warn(MPT3SAS_FMT
"chain buffers not available\n", ioc->name));
if (chain_offset == ioc->chains_needed_per_io)
return NULL;
}
chain_req = list_entry(ioc->free_chain_list.next,
struct chain_tracker, tracker_list);
list_del_init(&chain_req->tracker_list);
list_add_tail(&chain_req->tracker_list, &st->chain_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
return chain_req;
}
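A caller-side sketch (the return-value handling is illustrative): with chain buffers pre-carved per SMID, exhaustion now shows up as the per-SMID offset reaching chains_needed_per_io rather than as an empty free list:

	chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
	if (!chain_req)
		return -ENOMEM;	/* no chain slots left for this SMID */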
@ -3044,7 +3054,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
for (i = 0; i < ioc->combined_reply_index_count; i++) {
ioc->replyPostRegisterIndex[i] = (resource_size_t *)
((u8 *)&ioc->chip->Doorbell +
((u8 __force *)&ioc->chip->Doorbell +
MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
(i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
}
@ -3273,13 +3283,7 @@ void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
return;
st->cb_idx = 0xFF;
st->direct_io = 0;
if (!list_empty(&st->chain_list)) {
unsigned long flags;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
list_splice_init(&st->chain_list, &ioc->free_chain_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
}
atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
}
/**
@ -3339,7 +3343,7 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
spinlock_t *writeq_lock)
{
unsigned long flags;
__u64 data_out = cpu_to_le64(b);
__u64 data_out = b;
spin_lock_irqsave(writeq_lock, flags);
writel((u32)(data_out), addr);
@ -3362,7 +3366,7 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
writeq(cpu_to_le64(b), addr);
writeq(b, addr);
}
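/* Editorial note (sketch of the rationale): writeq()/writel() already
 * convert the value to the device's little-endian order, so the removed
 * cpu_to_le64(b) would have double-swapped the payload on big-endian
 * hosts.
 */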
#else
static inline void
@ -3389,7 +3393,7 @@ _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
_clone_sg_entries(ioc, (void *) mfp, smid);
mpi_req_iomem = (void *)ioc->chip +
mpi_req_iomem = (void __force *)ioc->chip +
MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
ioc->request_sz);
@ -3473,7 +3477,8 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
request_hdr = (MPI2RequestHeader_t *)mfp;
/* TBD 256 is offset within sys register. */
mpi_req_iomem = (void *)ioc->chip + MPI_FRAME_START_OFFSET
mpi_req_iomem = (void __force *)ioc->chip
+ MPI_FRAME_START_OFFSET
+ (smid * ioc->request_sz);
_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
ioc->request_sz);
@ -3542,7 +3547,7 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
_clone_sg_entries(ioc, (void *) mfp, smid);
/* TBD 256 is offset within sys register */
mpi_req_iomem = (void *)ioc->chip +
mpi_req_iomem = (void __force *)ioc->chip +
MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
ioc->request_sz);
@ -3822,6 +3827,105 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
}
}
/**
* _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
* version from FW Image Header.
* @ioc: per adapter object
*
* Returns 0 for success, non-zero for failure.
*/
static int
_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
{
Mpi2FWImageHeader_t *FWImgHdr;
Mpi25FWUploadRequest_t *mpi_request;
Mpi2FWUploadReply_t mpi_reply;
int r = 0;
void *fwpkg_data = NULL;
dma_addr_t fwpkg_data_dma;
u16 smid, ioc_status;
size_t data_length;
dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
ioc->name, __func__);
return -EAGAIN;
}
data_length = sizeof(Mpi2FWImageHeader_t);
fwpkg_data = pci_alloc_consistent(ioc->pdev, data_length,
&fwpkg_data_dma);
if (!fwpkg_data) {
pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
return -ENOMEM;
}
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
if (!smid) {
pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
r = -EAGAIN;
goto out;
}
ioc->base_cmds.status = MPT3_CMD_PENDING;
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
ioc->base_cmds.smid = smid;
memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
mpi_request->ImageSize = cpu_to_le32(data_length);
ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
data_length);
init_completion(&ioc->base_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
/* Wait for 15 seconds */
wait_for_completion_timeout(&ioc->base_cmds.done,
FW_IMG_HDR_READ_TIMEOUT*HZ);
pr_info(MPT3SAS_FMT "%s: complete\n",
ioc->name, __func__);
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
ioc->name, __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi25FWUploadRequest_t)/4);
r = -ETIME;
} else {
memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
memcpy(&mpi_reply, ioc->base_cmds.reply,
sizeof(Mpi2FWUploadReply_t));
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
if (FWImgHdr->PackageVersion.Word) {
pr_info(MPT3SAS_FMT "FW Package Version"
"(%02d.%02d.%02d.%02d)\n",
ioc->name,
FWImgHdr->PackageVersion.Struct.Major,
FWImgHdr->PackageVersion.Struct.Minor,
FWImgHdr->PackageVersion.Struct.Unit,
FWImgHdr->PackageVersion.Struct.Dev);
}
} else {
_debug_dump_mf(&mpi_reply,
sizeof(Mpi2FWUploadReply_t)/4);
}
}
}
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
out:
if (fwpkg_data)
pci_free_consistent(ioc->pdev, data_length, fwpkg_data,
fwpkg_data_dma);
return r;
}
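The function above follows the driver's usual fire-and-wait shape: mark the internal command pending, post the request, then block on a completion with a timeout and inspect the status flags afterwards. As a rough illustration only, here is a minimal userspace sketch of the same pattern with POSIX primitives standing in for the kernel completion; every name in it is hypothetical.

/* Minimal userspace sketch of the issue-and-wait pattern used above.
 * A pthread condition variable stands in for the kernel completion. */
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct cmd {
	pthread_mutex_t lock;
	pthread_cond_t done;
	bool complete;
};

/* Reply path: the interrupt handler side would call this. */
static void cmd_complete(struct cmd *c)
{
	pthread_mutex_lock(&c->lock);
	c->complete = true;
	pthread_cond_signal(&c->done);
	pthread_mutex_unlock(&c->lock);
}

/* Issue path: wait up to timeout_s seconds, then test the flag,
 * mirroring wait_for_completion_timeout() + MPT3_CMD_COMPLETE. */
static int cmd_wait(struct cmd *c, int timeout_s)
{
	struct timespec ts;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_s;
	pthread_mutex_lock(&c->lock);
	while (!c->complete && rc == 0)
		rc = pthread_cond_timedwait(&c->done, &c->lock, &ts);
	pthread_mutex_unlock(&c->lock);
	return c->complete ? 0 : -1;	/* -1 plays the role of -ETIME */
}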
/**
* _base_display_ioc_capabilities - Display IOC's capabilities.
* @ioc: per adapter object
@ -4038,6 +4142,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
Mpi2ConfigReply_t mpi_reply;
u32 iounit_pg1_flags;
ioc->nvme_abort_timeout = 30;
mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
if (ioc->ir_firmware)
mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
@ -4056,6 +4161,18 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
&ioc->manu_pg11);
}
if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
ioc->tm_custom_handling = 1;
else {
ioc->tm_custom_handling = 0;
if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
else if (ioc->manu_pg11.NVMeAbortTO >
NVME_TASK_ABORT_MAX_TIMEOUT)
ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
else
ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
}
mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
@ -4084,6 +4201,27 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
}
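The NVMe abort timeout handling above clamps the firmware-supplied NVMeAbortTO into the [6, 60] second window whenever custom task-management handling is disabled. A standalone sketch of just that clamping, with the real constants copied in and main() serving only as a demonstration:

#include <stdio.h>

#define NVME_TASK_ABORT_MIN_TIMEOUT 6
#define NVME_TASK_ABORT_MAX_TIMEOUT 60

static unsigned char clamp_nvme_abort_to(unsigned char fw_value)
{
	if (fw_value < NVME_TASK_ABORT_MIN_TIMEOUT)
		return NVME_TASK_ABORT_MIN_TIMEOUT;
	if (fw_value > NVME_TASK_ABORT_MAX_TIMEOUT)
		return NVME_TASK_ABORT_MAX_TIMEOUT;
	return fw_value;
}

int main(void)
{
	/* 0 and 200 are out of range and get clamped; 30 passes through. */
	printf("%u %u %u\n", clamp_nvme_abort_to(0),
	       clamp_nvme_abort_to(200), clamp_nvme_abort_to(30));
	return 0;
}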
/**
* mpt3sas_free_enclosure_list - release memory
* @ioc: per adapter object
*
* Free memory allocated during enclosure add.
*
* Return nothing.
*/
void
mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
{
struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
/* Free enclosure list */
list_for_each_entry_safe(enclosure_dev,
enclosure_dev_next, &ioc->enclosure_list, list) {
list_del(&enclosure_dev->list);
kfree(enclosure_dev);
}
}
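mpt3sas_free_enclosure_list() relies on list_for_each_entry_safe(), which caches the next node so the current one can be freed mid-walk. The same idea in a self-contained userspace form, using a plain singly linked list (illustrative names only):

#include <stdlib.h>

struct node {
	struct node *next;
};

static void free_all(struct node **head)
{
	struct node *cur = *head, *next;

	while (cur) {
		next = cur->next;	/* saved before cur is freed */
		free(cur);
		cur = next;
	}
	*head = NULL;
}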
/**
* _base_release_memory_pools - release memory
* @ioc: per adapter object
@ -4096,6 +4234,8 @@ static void
_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
int i = 0;
int j = 0;
struct chain_tracker *ct;
struct reply_post_struct *rps;
dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
@ -4153,7 +4293,14 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
} while (ioc->rdpq_array_enable &&
(++i < ioc->reply_queue_count));
if (ioc->reply_post_free_array &&
ioc->rdpq_array_enable) {
dma_pool_free(ioc->reply_post_free_array_dma_pool,
ioc->reply_post_free_array,
ioc->reply_post_free_array_dma);
ioc->reply_post_free_array = NULL;
}
dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
dma_pool_destroy(ioc->reply_post_free_dma_pool);
kfree(ioc->reply_post);
}
@ -4179,18 +4326,48 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
kfree(ioc->hpr_lookup);
kfree(ioc->internal_lookup);
if (ioc->chain_lookup) {
for (i = 0; i < ioc->chain_depth; i++) {
if (ioc->chain_lookup[i].chain_buffer)
dma_pool_free(ioc->chain_dma_pool,
ioc->chain_lookup[i].chain_buffer,
ioc->chain_lookup[i].chain_buffer_dma);
for (i = 0; i < ioc->scsiio_depth; i++) {
for (j = ioc->chains_per_prp_buffer;
j < ioc->chains_needed_per_io; j++) {
ct = &ioc->chain_lookup[i].chains_per_smid[j];
if (ct && ct->chain_buffer)
dma_pool_free(ioc->chain_dma_pool,
ct->chain_buffer,
ct->chain_buffer_dma);
}
kfree(ioc->chain_lookup[i].chains_per_smid);
}
dma_pool_destroy(ioc->chain_dma_pool);
free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
kfree(ioc->chain_lookup);
ioc->chain_lookup = NULL;
}
}
/**
* is_MSB_are_same - checks whether all reply queues in a set have
* the same upper 32 bits in their base memory address.
* @reply_pool_start_address: Base address of a reply queue set
* @pool_sz: Size of a single Reply Descriptor Post Queue pool
*
* Returns 1 if all reply queues in the set have the same upper
* 32 bits in their base memory address, else 0.
*/
static int
is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
{
long reply_pool_end_address;
reply_pool_end_address = reply_pool_start_address + pool_sz;
if (upper_32_bits(reply_pool_start_address) ==
upper_32_bits(reply_pool_end_address))
return 1;
else
return 0;
}
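The test reduces to comparing the upper 32 bits of the first and last addresses of the pool. A compilable standalone version, with upper_32_bits() open-coded as a shift:

#include <stdint.h>
#include <stdio.h>

static int same_4gb_region(uint64_t start, uint32_t pool_sz)
{
	return (start >> 32) == ((start + pool_sz) >> 32);
}

int main(void)
{
	/* Crosses the 4 GB boundary at 0x100000000: check fails (0). */
	printf("%d\n", same_4gb_region(0xFFFFF000ULL, 0x2000));
	/* Entirely below the boundary: check passes (1). */
	printf("%d\n", same_4gb_region(0x10000000ULL, 0x2000));
	return 0;
}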
/**
* _base_allocate_memory_pools - allocate start of day memory pools
* @ioc: per adapter object
@ -4203,12 +4380,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
struct mpt3sas_facts *facts;
u16 max_sge_elements;
u16 chains_needed_per_io;
u32 sz, total_sz, reply_post_free_sz;
u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
u32 retry_sz;
u16 max_request_credit, nvme_blocks_needed;
unsigned short sg_tablesize;
u16 sge_size;
int i;
int i, j;
struct chain_tracker *ct;
dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
@ -4489,37 +4667,23 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->name, ioc->request, ioc->scsiio_depth));
ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
sz = ioc->chain_depth * sizeof(struct chain_tracker);
ioc->chain_pages = get_order(sz);
ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
GFP_KERNEL, ioc->chain_pages);
sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
if (!ioc->chain_lookup) {
pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n",
ioc->name);
pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages "
"failed\n", ioc->name);
goto out;
}
ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
ioc->chain_segment_sz, 16, 0);
if (!ioc->chain_dma_pool) {
pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
ioc->name);
goto out;
}
for (i = 0; i < ioc->chain_depth; i++) {
ioc->chain_lookup[i].chain_buffer = dma_pool_alloc(
ioc->chain_dma_pool , GFP_KERNEL,
&ioc->chain_lookup[i].chain_buffer_dma);
if (!ioc->chain_lookup[i].chain_buffer) {
ioc->chain_depth = i;
goto chain_done;
sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
for (i = 0; i < ioc->scsiio_depth; i++) {
ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
if (!ioc->chain_lookup[i].chains_per_smid) {
pr_err(MPT3SAS_FMT "chain_lookup: "
" kzalloc failed\n", ioc->name);
goto out;
}
total_sz += ioc->chain_segment_sz;
}
chain_done:
dinitprintk(ioc, pr_info(MPT3SAS_FMT
"chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
((ioc->chain_depth * ioc->chain_segment_sz))/1024));
/* initialize hi-priority queue smid's */
ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
@ -4561,6 +4725,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
* be required for NVMe PRP's, only each set of NVMe blocks will be
* contiguous, so a new set is allocated for each possible I/O.
*/
ioc->chains_per_prp_buffer = 0;
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
nvme_blocks_needed =
(ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
@ -4583,6 +4748,11 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->name);
goto out;
}
ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
ioc->chains_needed_per_io);
for (i = 0; i < ioc->scsiio_depth; i++) {
ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
ioc->pcie_sgl_dma_pool, GFP_KERNEL,
@ -4593,13 +4763,55 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->name);
goto out;
}
for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
ct = &ioc->chain_lookup[i].chains_per_smid[j];
ct->chain_buffer =
ioc->pcie_sg_lookup[i].pcie_sgl +
(j * ioc->chain_segment_sz);
ct->chain_buffer_dma =
ioc->pcie_sg_lookup[i].pcie_sgl_dma +
(j * ioc->chain_segment_sz);
}
}
dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
"element_size(%d), pool_size(%d kB)\n", ioc->name,
ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
dinitprintk(ioc, pr_info(MPT3SAS_FMT "Number of chains can "
"fit in a PRP page(%d)\n", ioc->name,
ioc->chains_per_prp_buffer));
total_sz += sz * ioc->scsiio_depth;
}
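The loop above carves chains_per_prp_buffer chain buffers out of each already-allocated PRP page at fixed multiples of the segment size, so those chains cost no extra DMA allocations. In isolation the carving looks roughly like this (all names are illustrative):

#include <stddef.h>
#include <stdint.h>

struct tracker {
	void *buf;
	uintptr_t dma;	/* stand-in for dma_addr_t */
};

static void carve_chains(void *page, uintptr_t page_dma, size_t seg_sz,
			 struct tracker *t, int count)
{
	int j;

	/* Each chain sits at a fixed offset inside the one big page. */
	for (j = 0; j < count; j++) {
		t[j].buf = (char *)page + j * seg_sz;
		t[j].dma = page_dma + j * seg_sz;
	}
}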
ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
ioc->chain_segment_sz, 16, 0);
if (!ioc->chain_dma_pool) {
pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
ioc->name);
goto out;
}
for (i = 0; i < ioc->scsiio_depth; i++) {
for (j = ioc->chains_per_prp_buffer;
j < ioc->chains_needed_per_io; j++) {
ct = &ioc->chain_lookup[i].chains_per_smid[j];
ct->chain_buffer = dma_pool_alloc(
ioc->chain_dma_pool, GFP_KERNEL,
&ct->chain_buffer_dma);
if (!ct->chain_buffer) {
pr_err(MPT3SAS_FMT "chain_lookup: "
" pci_pool_alloc failed\n", ioc->name);
_base_release_memory_pools(ioc);
goto out;
}
}
total_sz += ioc->chain_segment_sz;
}
dinitprintk(ioc, pr_info(MPT3SAS_FMT
"chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
((ioc->chain_depth * ioc->chain_segment_sz))/1024));
/* sense buffers, 4 byte align */
sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
@ -4616,6 +4828,37 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->name);
goto out;
}
/* The sense buffer must reside within a single 4 GB region.
* The check below verifies this. On failure, a new DMA pool is
* created with an updated alignment, and the old allocation and
* pool are destroyed. The alignment is chosen so that the next
* allocation, if it succeeds, is guaranteed to satisfy the same
* 4 GB region requirement. The actual requirement is not alignment
* as such, but that the start and end of the DMA buffer share the
* same upper 32 bits of address.
*/
if (!is_MSB_are_same((long)ioc->sense, sz)) {
/* Release sense pool and reallocate */
dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
dma_pool_destroy(ioc->sense_dma_pool);
ioc->sense = NULL;
ioc->sense_dma_pool =
dma_pool_create("sense pool", &ioc->pdev->dev, sz,
roundup_pow_of_two(sz), 0);
if (!ioc->sense_dma_pool) {
pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
ioc->name);
goto out;
}
ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
&ioc->sense_dma);
if (!ioc->sense) {
pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
ioc->name);
goto out;
}
}
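The realignment works because an allocation of sz bytes that starts on a roundup_pow_of_two(sz) boundary can never straddle a larger power-of-two boundary such as 4 GB. A small self-check of that property (illustrative only):

#include <assert.h>
#include <stdint.h>

static uint64_t roundup_pow2(uint64_t v)
{
	uint64_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	uint64_t sz = 12288;			/* example pool size */
	uint64_t align = roundup_pow2(sz);	/* 16384 */
	uint64_t addr;

	/* Walk aligned start addresses across the 4 GB boundary: every
	 * sz-byte buffer stays within one 4 GB region. */
	for (addr = (1ULL << 32) - (1ULL << 20);
	     addr < (1ULL << 32) + (1ULL << 20); addr += align)
		assert((addr >> 32) == ((addr + sz - 1) >> 32));
	return 0;
}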
dinitprintk(ioc, pr_info(MPT3SAS_FMT
"sense pool(0x%p): depth(%d), element_size(%d), pool_size"
"(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
@ -4675,6 +4918,28 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->name, (unsigned long long)ioc->reply_free_dma));
total_sz += sz;
if (ioc->rdpq_array_enable) {
reply_post_free_array_sz = ioc->reply_queue_count *
sizeof(Mpi2IOCInitRDPQArrayEntry);
ioc->reply_post_free_array_dma_pool =
dma_pool_create("reply_post_free_array pool",
&ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
if (!ioc->reply_post_free_array_dma_pool) {
dinitprintk(ioc,
pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
"dma_pool_create failed\n", ioc->name));
goto out;
}
ioc->reply_post_free_array =
dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
GFP_KERNEL, &ioc->reply_post_free_array_dma);
if (!ioc->reply_post_free_array) {
dinitprintk(ioc,
pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
"dma_pool_alloc failed\n", ioc->name));
goto out;
}
}
ioc->config_page_sz = 512;
ioc->config_page = pci_alloc_consistent(ioc->pdev,
ioc->config_page_sz, &ioc->config_page_dma);
@ -5002,7 +5267,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
/* send message 32-bits at a time */
for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
writel((u32)(request[i]), &ioc->chip->Doorbell);
if ((_base_wait_for_doorbell_ack(ioc, 5)))
failed = 1;
}
@ -5023,7 +5288,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
}
/* read the first two 16-bits, it gives the total length of the reply */
reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
reply[0] = (u16)(readl(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
if ((_base_wait_for_doorbell_int(ioc, 5))) {
@ -5032,7 +5297,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
ioc->name, __LINE__);
return -EFAULT;
}
reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
reply[1] = (u16)(readl(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
@ -5046,7 +5311,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
if (i >= reply_bytes/2) /* overflow case */
readl(&ioc->chip->Doorbell);
else
reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
reply[i] = (u16)(readl(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
}
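readl() is normally defined to return the register value already converted to CPU byte order, which appears to be why these hunks drop the extra le16_to_cpu()/le32_to_cpu() in favor of plain casts. A stubbed sketch of assembling a reply 16 bits at a time, with arbitrary stand-in values:

#include <stdint.h>
#include <stdio.h>

#define DOORBELL_DATA_MASK 0x0000FFFFu

/* Stand-in for readl(&ioc->chip->Doorbell); already CPU byte order. */
static uint32_t read_doorbell(void)
{
	static const uint32_t words[] = { 0xDEAD0004, 0xBEEF0010 };
	static unsigned int i;

	return words[i++ % 2];
}

int main(void)
{
	uint16_t reply[2];
	unsigned int i;

	/* Only the low 16 bits of each doorbell word carry reply data. */
	for (i = 0; i < 2; i++)
		reply[i] = (uint16_t)(read_doorbell() & DOORBELL_DATA_MASK);
	printf("0x%04x 0x%04x\n", reply[0], reply[1]);	/* 0x0004 0x0010 */
	return 0;
}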
@ -5481,8 +5746,6 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
ktime_t current_time;
u16 ioc_status;
u32 reply_post_free_array_sz = 0;
Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
dma_addr_t reply_post_free_array_dma;
dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
@ -5516,23 +5779,14 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
if (ioc->rdpq_array_enable) {
reply_post_free_array_sz = ioc->reply_queue_count *
sizeof(Mpi2IOCInitRDPQArrayEntry);
reply_post_free_array = pci_alloc_consistent(ioc->pdev,
reply_post_free_array_sz, &reply_post_free_array_dma);
if (!reply_post_free_array) {
pr_err(MPT3SAS_FMT
"reply_post_free_array: pci_alloc_consistent failed\n",
ioc->name);
r = -ENOMEM;
goto out;
}
memset(reply_post_free_array, 0, reply_post_free_array_sz);
memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
for (i = 0; i < ioc->reply_queue_count; i++)
reply_post_free_array[i].RDPQBaseAddress =
ioc->reply_post_free_array[i].RDPQBaseAddress =
cpu_to_le64(
(u64)ioc->reply_post[i].reply_post_free_dma);
mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
mpi_request.ReplyDescriptorPostQueueAddress =
cpu_to_le64((u64)reply_post_free_array_dma);
cpu_to_le64((u64)ioc->reply_post_free_array_dma);
} else {
mpi_request.ReplyDescriptorPostQueueAddress =
cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
@ -5562,7 +5816,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
if (r != 0) {
pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
ioc->name, __func__, r);
goto out;
return r;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
@ -5572,11 +5826,6 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
r = -EIO;
}
out:
if (reply_post_free_array)
pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
reply_post_free_array,
reply_post_free_array_dma);
return r;
}
@ -6157,12 +6406,6 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
&ioc->internal_free_list);
}
/* chain pool */
INIT_LIST_HEAD(&ioc->free_chain_list);
for (i = 0; i < ioc->chain_depth; i++)
list_add_tail(&ioc->chain_lookup[i].tracker_list,
&ioc->free_chain_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
/* initialize Reply Free Queue */
@ -6172,7 +6415,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_free[i] = cpu_to_le32(reply_address);
if (ioc->is_mcpu_endpoint)
_base_clone_reply_to_sys_mem(ioc,
(__le32)reply_address, i);
reply_address, i);
}
/* initialize reply queues */
@ -6230,12 +6473,18 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
skip_init_reply_post_host_index:
_base_unmask_interrupts(ioc);
if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
r = _base_display_fwpkg_version(ioc);
if (r)
return r;
}
_base_static_config_pages(ioc);
r = _base_event_notification(ioc);
if (r)
return r;
_base_static_config_pages(ioc);
if (ioc->is_driver_loading) {
if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
@ -6492,6 +6741,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
if (ioc->is_gen35_ioc) {
_base_unmask_events(ioc,
@ -6558,6 +6808,7 @@ mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
mpt3sas_base_stop_watchdog(ioc);
mpt3sas_base_free_resources(ioc);
_base_release_memory_pools(ioc);
mpt3sas_free_enclosure_list(ioc);
pci_set_drvdata(ioc->pdev, NULL);
kfree(ioc->cpu_msix_table);
if (ioc->is_warpdrive)


@ -74,8 +74,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
#define MPT3SAS_DRIVER_VERSION "17.100.00.00"
#define MPT3SAS_MAJOR_VERSION 17
#define MPT3SAS_DRIVER_VERSION "25.100.00.00"
#define MPT3SAS_MAJOR_VERSION 25
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@ -138,6 +138,7 @@
#define MAX_CHAIN_ELEMT_SZ 16
#define DEFAULT_NUM_FWCHAIN_ELEMTS 8
#define FW_IMG_HDR_READ_TIMEOUT 15
/*
* NVMe defines
*/
@ -145,8 +146,12 @@
#define NVME_CMD_PRP1_OFFSET 24 /* PRP1 offset in NVMe cmd */
#define NVME_CMD_PRP2_OFFSET 32 /* PRP2 offset in NVMe cmd */
#define NVME_ERROR_RESPONSE_SIZE 16 /* Max NVME Error Response */
#define NVME_TASK_ABORT_MIN_TIMEOUT 6
#define NVME_TASK_ABORT_MAX_TIMEOUT 60
#define NVME_TASK_MNGT_CUSTOM_MASK (0x0010)
#define NVME_PRP_PAGE_SIZE 4096 /* Page size */
/*
* reset phases
*/
@ -362,7 +367,15 @@ struct Mpi2ManufacturingPage11_t {
u8 EEDPTagMode; /* 09h */
u8 Reserved3; /* 0Ah */
u8 Reserved4; /* 0Bh */
__le32 Reserved5[23]; /* 0Ch-60h*/
__le32 Reserved5[8]; /* 0Ch-2Bh */
u16 AddlFlags2; /* 2Ch */
u8 AddlFlags3; /* 2Eh */
u8 Reserved6; /* 2Fh */
__le32 Reserved7[7]; /* 30h - 4Bh */
u8 NVMeAbortTO; /* 4Ch */
u8 Reserved8; /* 4Dh */
u16 Reserved9; /* 4Eh */
__le32 Reserved10[4]; /* 50h-5Fh */
};
/**
@ -572,6 +585,7 @@ struct _pcie_device {
u8 enclosure_level;
u8 connector_name[4];
u8 *serial_number;
u8 reset_timeout;
struct kref refcount;
};
/**
@ -741,6 +755,17 @@ struct _sas_node {
struct list_head sas_port_list;
};
/**
* struct _enclosure_node - enclosure information
* @list: list of enclosures
* @pg0: enclosure page 0
*/
struct _enclosure_node {
struct list_head list;
Mpi2SasEnclosurePage0_t pg0;
};
/**
* enum reset_type - reset state
* @FORCE_BIG_HAMMER: issue diagnostic reset
@ -770,7 +795,11 @@ struct pcie_sg_list {
struct chain_tracker {
void *chain_buffer;
dma_addr_t chain_buffer_dma;
struct list_head tracker_list;
};
struct chain_lookup {
struct chain_tracker *chains_per_smid;
atomic_t chain_offset;
};
/**
@ -829,8 +858,8 @@ struct _sc_list {
*/
struct _event_ack_list {
struct list_head list;
u16 Event;
u32 EventContext;
U16 Event;
U32 EventContext;
};
/**
@ -1009,6 +1038,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @iounit_pg8: static iounit page 8
* @sas_hba: sas host object
* @sas_expander_list: expander object list
* @enclosure_list: enclosure object list
* @sas_node_lock:
* @sas_device_list: sas device object list
* @sas_device_init_list: sas device object list (used only at init time)
@ -1194,6 +1224,10 @@ struct MPT3SAS_ADAPTER {
void *event_log;
u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
u8 tm_custom_handling;
u8 nvme_abort_timeout;
/* static config pages */
struct mpt3sas_facts facts;
struct mpt3sas_port_facts *pfacts;
@ -1214,6 +1248,7 @@ struct MPT3SAS_ADAPTER {
/* sas hba, expander, and device list */
struct _sas_node sas_hba;
struct list_head sas_expander_list;
struct list_head enclosure_list;
spinlock_t sas_node_lock;
struct list_head sas_device_list;
struct list_head sas_device_init_list;
@ -1261,7 +1296,7 @@ struct MPT3SAS_ADAPTER {
u32 page_size;
/* chain */
struct chain_tracker *chain_lookup;
struct chain_lookup *chain_lookup;
struct list_head free_chain_list;
struct dma_pool *chain_dma_pool;
ulong chain_pages;
@ -1315,6 +1350,9 @@ struct MPT3SAS_ADAPTER {
u8 rdpq_array_enable;
u8 rdpq_array_enable_assigned;
struct dma_pool *reply_post_free_dma_pool;
struct dma_pool *reply_post_free_array_dma_pool;
Mpi2IOCInitRDPQArrayEntry *reply_post_free_array;
dma_addr_t reply_post_free_array_dma;
u8 reply_queue_count;
struct list_head reply_queue_list;
@ -1384,6 +1422,7 @@ int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc);
int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc);
int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
enum reset_type type);
@ -1451,10 +1490,11 @@ u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout);
int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method);
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout);
u64 lun, u8 type, u16 smid_task, u16 msix_task,
u8 timeout, u8 tr_method);
void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);


@ -297,7 +297,7 @@ mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
nvme_error_reply =
(Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply;
sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE,
le32_to_cpu(nvme_error_reply->ErrorResponseCount));
le16_to_cpu(nvme_error_reply->ErrorResponseCount));
sense_data = mpt3sas_base_get_sense_buffer(ioc, smid);
memcpy(ioc->ctl_cmds.sense, sense_data, sz);
}
@ -644,9 +644,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
MPI2RequestHeader_t *mpi_request = NULL, *request;
MPI2DefaultReply_t *mpi_reply;
Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
struct _pcie_device *pcie_device = NULL;
u32 ioc_state;
u16 smid;
unsigned long timeout;
u8 timeout;
u8 issue_reset;
u32 sz, sz_arg;
void *psge;
@ -659,6 +660,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
long ret;
u16 wait_state_count;
u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
u8 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
issue_reset = 0;
@ -803,12 +805,13 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
* Build the PRPs and set direction bits.
* Send the request.
*/
nvme_encap_request->ErrorResponseBaseAddress = ioc->sense_dma &
0xFFFFFFFF00000000;
nvme_encap_request->ErrorResponseBaseAddress =
cpu_to_le64(ioc->sense_dma & 0xFFFFFFFF00000000UL);
nvme_encap_request->ErrorResponseBaseAddress |=
(U64)mpt3sas_base_get_sense_buffer_dma(ioc, smid);
cpu_to_le64(le32_to_cpu(
mpt3sas_base_get_sense_buffer_dma(ioc, smid)));
nvme_encap_request->ErrorResponseAllocationLength =
NVME_ERROR_RESPONSE_SIZE;
cpu_to_le16(NVME_ERROR_RESPONSE_SIZE);
memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE);
ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
data_out_dma, data_out_sz, data_in_dma, data_in_sz);
@ -1073,14 +1076,26 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->name,
le16_to_cpu(mpi_request->FunctionDependent1));
mpt3sas_halt_firmware(ioc);
mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1), 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30);
pcie_device = mpt3sas_get_pdev_by_handle(ioc,
le16_to_cpu(mpi_request->FunctionDependent1));
if (pcie_device && (!ioc->tm_custom_handling))
mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, pcie_device->reset_timeout,
tr_method);
else
mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
} else
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
}
out:
if (pcie_device)
pcie_device_put(pcie_device);
/* free memory associated with sg buffers */
if (data_in)


@ -184,7 +184,7 @@ struct mpt3_ioctl_iocinfo {
/* number of event log entries */
#define MPT3SAS_CTL_EVENT_LOG_SIZE (50)
#define MPT3SAS_CTL_EVENT_LOG_SIZE (200)
/**
* struct mpt3_ioctl_eventquery - query event count and type


@ -157,8 +157,8 @@ MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
/* raid transport support */
struct raid_template *mpt3sas_raid_template;
struct raid_template *mpt2sas_raid_template;
static struct raid_template *mpt3sas_raid_template;
static struct raid_template *mpt2sas_raid_template;
/**
@ -1088,7 +1088,7 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
pcie_device->slot);
if (pcie_device->connector_name[0] != '\0')
pr_info(MPT3SAS_FMT
"removing enclosure level(0x%04x), connector name( %s)\n",
"removing enclosure level(0x%04x), connector name( %s)\n",
ioc->name, pcie_device->enclosure_level,
pcie_device->connector_name);
@ -1361,6 +1361,30 @@ mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
return r;
}
/**
* mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
* @ioc: per adapter object
* @handle: enclosure handle (assigned by firmware)
* Context: Calling function should acquire ioc->sas_device_lock
*
* This searches for an enclosure device based on its handle, then
* returns the enclosure object.
*/
static struct _enclosure_node *
mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
struct _enclosure_node *enclosure_dev, *r;
r = NULL;
list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
continue;
r = enclosure_dev;
goto out;
}
out:
return r;
}
/**
* mpt3sas_scsih_expander_find_by_sas_address - expander device search
* @ioc: per adapter object
@ -2608,6 +2632,7 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @smid_task: smid assigned to the task
* @msix_task: MSIX table index supplied by the OS
* @timeout: timeout in seconds
* @tr_method: Target Reset Method
* Context: user
*
* A generic API for sending task management requests to firmware.
@ -2618,8 +2643,8 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* Return SUCCESS or FAILED.
*/
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout)
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
@ -2665,8 +2690,8 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
}
dtmprintk(ioc, pr_info(MPT3SAS_FMT
"sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n",
ioc->name, handle, type, smid_task));
"sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
ioc->name, handle, type, smid_task, timeout, tr_method));
ioc->tm_cmds.status = MPT3_CMD_PENDING;
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
ioc->tm_cmds.smid = smid;
@ -2675,6 +2700,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = type;
mpi_request->MsgFlags = tr_method;
mpi_request->TaskMID = cpu_to_le16(smid_task);
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
@ -2721,13 +2747,14 @@ out:
}
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout)
u64 lun, u8 type, u16 smid_task, u16 msix_task,
u8 timeout, u8 tr_method)
{
int ret;
mutex_lock(&ioc->tm_cmds.mutex);
ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
msix_task, timeout);
msix_task, timeout, tr_method);
mutex_unlock(&ioc->tm_cmds.mutex);
return ret;
@ -2830,6 +2857,8 @@ scsih_abort(struct scsi_cmnd *scmd)
u16 handle;
int r;
u8 timeout = 30;
struct _pcie_device *pcie_device = NULL;
sdev_printk(KERN_INFO, scmd->device,
"attempting task abort! scmd(%p)\n", scmd);
_scsih_tm_display_info(ioc, scmd);
@ -2864,15 +2893,20 @@ scsih_abort(struct scsi_cmnd *scmd)
mpt3sas_halt_firmware(ioc);
handle = sas_device_priv_data->sas_target->handle;
pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
if (pcie_device && (!ioc->tm_custom_handling))
timeout = ioc->nvme_abort_timeout;
r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
st->smid, st->msix_io, 30);
st->smid, st->msix_io, timeout, 0);
/* Command must be cleared after abort */
if (r == SUCCESS && st->cb_idx != 0xFF)
r = FAILED;
out:
sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
if (pcie_device)
pcie_device_put(pcie_device);
return r;
}
@ -2888,7 +2922,10 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct _sas_device *sas_device = NULL;
struct _pcie_device *pcie_device = NULL;
u16 handle;
u8 tr_method = 0;
u8 tr_timeout = 30;
int r;
struct scsi_target *starget = scmd->device->sdev_target;
@ -2926,8 +2963,16 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
goto out;
}
pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
if (pcie_device && (!ioc->tm_custom_handling)) {
tr_timeout = pcie_device->reset_timeout;
tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
} else
tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, 30);
MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
tr_timeout, tr_method);
/* Check for busy commands after reset */
if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
r = FAILED;
@ -2937,6 +2982,8 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
if (sas_device)
sas_device_put(sas_device);
if (pcie_device)
pcie_device_put(pcie_device);
return r;
}
@ -2953,7 +3000,10 @@ scsih_target_reset(struct scsi_cmnd *scmd)
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct _sas_device *sas_device = NULL;
struct _pcie_device *pcie_device = NULL;
u16 handle;
u8 tr_method = 0;
u8 tr_timeout = 30;
int r;
struct scsi_target *starget = scmd->device->sdev_target;
struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
@ -2990,8 +3040,16 @@ scsih_target_reset(struct scsi_cmnd *scmd)
goto out;
}
pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
if (pcie_device && (!ioc->tm_custom_handling)) {
tr_timeout = pcie_device->reset_timeout;
tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
} else
tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30);
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
tr_timeout, tr_method);
/* Check for busy commands after reset */
if (r == SUCCESS && atomic_read(&starget->target_busy))
r = FAILED;
@ -3001,7 +3059,8 @@ scsih_target_reset(struct scsi_cmnd *scmd)
if (sas_device)
sas_device_put(sas_device);
if (pcie_device)
pcie_device_put(pcie_device);
return r;
}
@ -3535,6 +3594,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
unsigned long flags;
struct _tr_list *delayed_tr;
u32 ioc_state;
u8 tr_method = 0;
if (ioc->pci_error_recovery) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
@ -3577,6 +3637,11 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_address = pcie_device->wwid;
}
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
if (pcie_device && (!ioc->tm_custom_handling))
tr_method =
MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
else
tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
}
if (sas_target_priv_data) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
@ -3640,6 +3705,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
mpi_request->MsgFlags = tr_method;
set_bit(handle, ioc->device_remove_in_progress);
mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
@ -3680,11 +3746,7 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 ioc_state;
struct _sc_list *delayed_sc;
if (ioc->remove_host) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: host has been removed\n", __func__, ioc->name));
return 1;
} else if (ioc->pci_error_recovery) {
if (ioc->pci_error_recovery) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: host in pci error recovery\n", __func__,
ioc->name));
@ -3725,7 +3787,7 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
if (!delayed_sc)
return _scsih_check_for_pending_tm(ioc, smid);
INIT_LIST_HEAD(&delayed_sc->list);
delayed_sc->handle = mpi_request_tm->DevHandle;
delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"DELAYED:sc:handle(0x%04x), (open)\n",
@ -3806,8 +3868,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
u16 smid;
struct _tr_list *delayed_tr;
if (ioc->shost_recovery || ioc->remove_host ||
ioc->pci_error_recovery) {
if (ioc->pci_error_recovery) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: host reset in progress!\n",
__func__, ioc->name));
@ -3860,8 +3921,7 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
Mpi2SCSITaskManagementReply_t *mpi_reply =
mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (ioc->shost_recovery || ioc->remove_host ||
ioc->pci_error_recovery) {
if (ioc->shost_recovery || ioc->pci_error_recovery) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: host reset in progress!\n",
__func__, ioc->name));
@ -3903,8 +3963,8 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* Context - processed in interrupt context.
*/
static void
_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
u32 event_context)
_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
U32 event_context)
{
Mpi2EventAckRequest_t *ack_request;
int i = smid - ioc->internal_smid;
@ -3979,13 +4039,13 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
ioc->name, le16_to_cpu(handle), smid,
ioc->name, handle, smid,
ioc->tm_sas_control_cb_idx));
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
mpi_request->DevHandle = handle;
mpi_request->DevHandle = cpu_to_le16(handle);
mpt3sas_base_put_smid_default(ioc, smid);
}
@ -5618,10 +5678,10 @@ static int
_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
struct _sas_node *sas_expander;
struct _enclosure_node *enclosure_dev;
Mpi2ConfigReply_t mpi_reply;
Mpi2ExpanderPage0_t expander_pg0;
Mpi2ExpanderPage1_t expander_pg1;
Mpi2SasEnclosurePage0_t enclosure_pg0;
u32 ioc_status;
u16 parent_handle;
u64 sas_address, sas_address_parent = 0;
@ -5743,11 +5803,12 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
if (sas_expander->enclosure_handle) {
if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
&enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
sas_expander->enclosure_handle)))
enclosure_dev =
mpt3sas_scsih_enclosure_find_by_handle(ioc,
sas_expander->enclosure_handle);
if (enclosure_dev)
sas_expander->enclosure_logical_id =
le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
}
_scsih_expander_node_add(ioc, sas_expander);
@ -5890,52 +5951,6 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
return rc;
}
/**
* _scsih_get_enclosure_logicalid_chassis_slot - get device's
* EnclosureLogicalID and ChassisSlot information.
* @ioc: per adapter object
* @sas_device_pg0: SAS device page0
* @sas_device: per sas device object
*
* Returns nothing.
*/
static void
_scsih_get_enclosure_logicalid_chassis_slot(struct MPT3SAS_ADAPTER *ioc,
Mpi2SasDevicePage0_t *sas_device_pg0, struct _sas_device *sas_device)
{
Mpi2ConfigReply_t mpi_reply;
Mpi2SasEnclosurePage0_t enclosure_pg0;
if (!sas_device_pg0 || !sas_device)
return;
sas_device->enclosure_handle =
le16_to_cpu(sas_device_pg0->EnclosureHandle);
sas_device->is_chassis_slot_valid = 0;
if (!le16_to_cpu(sas_device_pg0->EnclosureHandle))
return;
if (mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
&enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
le16_to_cpu(sas_device_pg0->EnclosureHandle))) {
pr_err(MPT3SAS_FMT
"Enclosure Pg0 read failed for handle(0x%04x)\n",
ioc->name, le16_to_cpu(sas_device_pg0->EnclosureHandle));
return;
}
sas_device->enclosure_logical_id =
le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
if (le16_to_cpu(enclosure_pg0.Flags) &
MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
sas_device->is_chassis_slot_valid = 1;
sas_device->chassis_slot = enclosure_pg0.ChassisSlot;
}
}
/**
* _scsih_check_device - checking device responsiveness
* @ioc: per adapter object
@ -5953,6 +5968,7 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t mpi_reply;
Mpi2SasDevicePage0_t sas_device_pg0;
struct _sas_device *sas_device;
struct _enclosure_node *enclosure_dev = NULL;
u32 ioc_status;
unsigned long flags;
u64 sas_address;
@ -6007,8 +6023,21 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
sas_device->connector_name[0] = '\0';
}
_scsih_get_enclosure_logicalid_chassis_slot(ioc,
&sas_device_pg0, sas_device);
sas_device->enclosure_handle =
le16_to_cpu(sas_device_pg0.EnclosureHandle);
sas_device->is_chassis_slot_valid = 0;
enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
sas_device->enclosure_handle);
if (enclosure_dev) {
sas_device->enclosure_logical_id =
le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
if (le16_to_cpu(enclosure_dev->pg0.Flags) &
MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
sas_device->is_chassis_slot_valid = 1;
sas_device->chassis_slot =
enclosure_dev->pg0.ChassisSlot;
}
}
}
/* check if device is present */
@ -6055,12 +6084,11 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
{
Mpi2ConfigReply_t mpi_reply;
Mpi2SasDevicePage0_t sas_device_pg0;
Mpi2SasEnclosurePage0_t enclosure_pg0;
struct _sas_device *sas_device;
struct _enclosure_node *enclosure_dev = NULL;
u32 ioc_status;
u64 sas_address;
u32 device_info;
int encl_pg0_rc = -1;
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
@ -6106,12 +6134,12 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
}
if (sas_device_pg0.EnclosureHandle) {
encl_pg0_rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
&enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
sas_device_pg0.EnclosureHandle);
if (encl_pg0_rc)
pr_info(MPT3SAS_FMT
"Enclosure Pg0 read failed for handle(0x%04x)\n",
enclosure_dev =
mpt3sas_scsih_enclosure_find_by_handle(ioc,
le16_to_cpu(sas_device_pg0.EnclosureHandle));
if (enclosure_dev == NULL)
pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
"doesn't match with enclosure device!\n",
ioc->name, sas_device_pg0.EnclosureHandle);
}
@ -6152,18 +6180,16 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
sas_device->enclosure_level = 0;
sas_device->connector_name[0] = '\0';
}
/* get enclosure_logical_id & chassis_slot */
/* get enclosure_logical_id & chassis_slot*/
sas_device->is_chassis_slot_valid = 0;
if (encl_pg0_rc == 0) {
if (enclosure_dev) {
sas_device->enclosure_logical_id =
le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
if (le16_to_cpu(enclosure_pg0.Flags) &
le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
if (le16_to_cpu(enclosure_dev->pg0.Flags) &
MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
sas_device->is_chassis_slot_valid = 1;
sas_device->chassis_slot =
enclosure_pg0.ChassisSlot;
enclosure_dev->pg0.ChassisSlot;
}
}
@ -6845,8 +6871,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
Mpi26PCIeDevicePage0_t pcie_device_pg0;
Mpi26PCIeDevicePage2_t pcie_device_pg2;
Mpi2ConfigReply_t mpi_reply;
Mpi2SasEnclosurePage0_t enclosure_pg0;
struct _pcie_device *pcie_device;
struct _enclosure_node *enclosure_dev;
u32 pcie_device_type;
u32 ioc_status;
u64 wwid;
@ -6917,7 +6943,7 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if (pcie_device->enclosure_handle != 0)
pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
if (le16_to_cpu(pcie_device_pg0.Flags) &
if (le32_to_cpu(pcie_device_pg0.Flags) &
MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
memcpy(&pcie_device->connector_name[0],
@ -6928,13 +6954,14 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
/* get enclosure_logical_id */
if (pcie_device->enclosure_handle &&
!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
&enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
pcie_device->enclosure_handle)))
pcie_device->enclosure_logical_id =
le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
if (pcie_device->enclosure_handle) {
enclosure_dev =
mpt3sas_scsih_enclosure_find_by_handle(ioc,
pcie_device->enclosure_handle);
if (enclosure_dev)
pcie_device->enclosure_logical_id =
le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
}
/* TODO -- Add device name once FW supports it */
if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
&pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
@ -6953,6 +6980,11 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
pcie_device->nvme_mdts =
le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
if (pcie_device_pg2.ControllerResetTO)
pcie_device->reset_timeout =
pcie_device_pg2.ControllerResetTO;
else
pcie_device->reset_timeout = 30;
if (ioc->wait_for_discovery_to_complete)
_scsih_pcie_device_init_add(ioc, pcie_device);
@ -7205,6 +7237,9 @@ _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
reason_str = "internal async notification";
break;
case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
reason_str = "pcie hot reset failed";
break;
default:
reason_str = "unknown reason";
break;
@ -7320,10 +7355,60 @@ static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
Mpi2ConfigReply_t mpi_reply;
struct _enclosure_node *enclosure_dev = NULL;
Mpi2EventDataSasEnclDevStatusChange_t *event_data =
(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
int rc;
u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
(Mpi2EventDataSasEnclDevStatusChange_t *)
fw_event->event_data);
if (ioc->shost_recovery)
return;
if (enclosure_handle)
enclosure_dev =
mpt3sas_scsih_enclosure_find_by_handle(ioc,
enclosure_handle);
switch (event_data->ReasonCode) {
case MPI2_EVENT_SAS_ENCL_RC_ADDED:
if (!enclosure_dev) {
enclosure_dev =
kzalloc(sizeof(struct _enclosure_node),
GFP_KERNEL);
if (!enclosure_dev) {
pr_info(MPT3SAS_FMT
"failure at %s:%d/%s()!\n", ioc->name,
__FILE__, __LINE__, __func__);
return;
}
rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
&enclosure_dev->pg0,
MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
enclosure_handle);
if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK)) {
kfree(enclosure_dev);
return;
}
list_add_tail(&enclosure_dev->list,
&ioc->enclosure_list);
}
break;
case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
if (enclosure_dev) {
list_del(&enclosure_dev->list);
kfree(enclosure_dev);
}
break;
default:
break;
}
}
/**
@ -7409,7 +7494,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
st->msix_io, 30);
st->msix_io, 30, 0);
if (r == FAILED) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: FAILED when sending "
@ -7450,7 +7535,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
st->msix_io, 30);
st->msix_io, 30, 0);
if (r == FAILED || st->cb_idx != 0xFF) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
@ -7526,6 +7611,44 @@ _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
}
}
/**
* _scsih_sas_device_discovery_error_event - display SAS device discovery error
* events
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
*
* Return nothing.
*/
static void
_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
switch (event_data->ReasonCode) {
case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
"(handle:0x%04x, sas_address:0x%016llx,"
"physical_port:0x%02x) has failed",
ioc->name, le16_to_cpu(event_data->DevHandle),
(unsigned long long)le64_to_cpu(event_data->SASAddress),
event_data->PhysicalPort);
break;
case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
"(handle:0x%04x, sas_address:0x%016llx,"
"physical_port:0x%02x) has timed out",
ioc->name, le16_to_cpu(event_data->DevHandle),
(unsigned long long)le64_to_cpu(event_data->SASAddress),
event_data->PhysicalPort);
break;
default:
break;
}
}
/**
* _scsih_pcie_enumeration_event - handle enumeration events
* @ioc: per adapter object
@ -8360,12 +8483,23 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
struct scsi_target *starget;
struct _sas_device *sas_device = NULL;
struct _enclosure_node *enclosure_dev = NULL;
unsigned long flags;
if (sas_device_pg0->EnclosureHandle) {
enclosure_dev =
mpt3sas_scsih_enclosure_find_by_handle(ioc,
le16_to_cpu(sas_device_pg0->EnclosureHandle));
if (enclosure_dev == NULL)
pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
"doesn't match with enclosure device!\n",
ioc->name, sas_device_pg0->EnclosureHandle);
}
spin_lock_irqsave(&ioc->sas_device_lock, flags);
list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
if ((sas_device->sas_address == sas_device_pg0->SASAddress) &&
(sas_device->slot == sas_device_pg0->Slot)) {
if ((sas_device->sas_address == le64_to_cpu(
sas_device_pg0->SASAddress)) && (sas_device->slot ==
le16_to_cpu(sas_device_pg0->Slot))) {
sas_device->responding = 1;
starget = sas_device->starget;
if (starget && starget->hostdata) {
@ -8377,7 +8511,7 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
if (starget) {
starget_printk(KERN_INFO, starget,
"handle(0x%04x), sas_addr(0x%016llx)\n",
sas_device_pg0->DevHandle,
le16_to_cpu(sas_device_pg0->DevHandle),
(unsigned long long)
sas_device->sas_address);
@ -8389,7 +8523,7 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
sas_device->enclosure_logical_id,
sas_device->slot);
}
if (sas_device_pg0->Flags &
if (le16_to_cpu(sas_device_pg0->Flags) &
MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
sas_device->enclosure_level =
sas_device_pg0->EnclosureLevel;
@ -8400,17 +8534,30 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
sas_device->connector_name[0] = '\0';
}
_scsih_get_enclosure_logicalid_chassis_slot(ioc,
sas_device_pg0, sas_device);
sas_device->enclosure_handle =
le16_to_cpu(sas_device_pg0->EnclosureHandle);
sas_device->is_chassis_slot_valid = 0;
if (enclosure_dev) {
sas_device->enclosure_logical_id = le64_to_cpu(
enclosure_dev->pg0.EnclosureLogicalID);
if (le16_to_cpu(enclosure_dev->pg0.Flags) &
MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
sas_device->is_chassis_slot_valid = 1;
sas_device->chassis_slot =
enclosure_dev->pg0.ChassisSlot;
}
}
if (sas_device->handle == sas_device_pg0->DevHandle)
if (sas_device->handle == le16_to_cpu(
sas_device_pg0->DevHandle))
goto out;
pr_info("\thandle changed from(0x%04x)!!!\n",
sas_device->handle);
sas_device->handle = sas_device_pg0->DevHandle;
sas_device->handle = le16_to_cpu(
sas_device_pg0->DevHandle);
if (sas_target_priv_data)
sas_target_priv_data->handle =
sas_device_pg0->DevHandle;
le16_to_cpu(sas_device_pg0->DevHandle);
goto out;
}
}
@ -8418,6 +8565,52 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
/**
* _scsih_create_enclosure_list_after_reset - free the existing
* enclosure list and re-create it by scanning all Enclosure Page(0)s
* @ioc: per adapter object
*
* Return nothing.
*/
static void
_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
struct _enclosure_node *enclosure_dev;
Mpi2ConfigReply_t mpi_reply;
u16 enclosure_handle;
int rc;
/* Free existing enclosure list */
mpt3sas_free_enclosure_list(ioc);
/* Reconstruct the enclosure list after reset */
enclosure_handle = 0xFFFF;
do {
enclosure_dev =
kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
if (!enclosure_dev) {
pr_err(MPT3SAS_FMT
"failure at %s:%d/%s()!\n", ioc->name,
__FILE__, __LINE__, __func__);
return;
}
rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
&enclosure_dev->pg0,
MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
enclosure_handle);
if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK)) {
kfree(enclosure_dev);
return;
}
list_add_tail(&enclosure_dev->list,
&ioc->enclosure_list);
enclosure_handle =
le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
} while (1);
}
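The loop starts from the 0xFFFF wildcard handle and repeatedly asks the firmware for the enclosure page that follows the previous handle, stopping at the first non-success status. The shape of that walk with the config call stubbed out (hypothetical names, stub firmware knowing three enclosures):

#include <stdint.h>
#include <stdio.h>

struct encl_pg0 {
	uint16_t handle;
};

/* Stub for the GET_NEXT_HANDLE config read: handles 1..3 exist. */
static int get_encl_pg0_after(uint16_t prev, struct encl_pg0 *pg)
{
	uint16_t next = (prev == 0xFFFF) ? 1 : prev + 1;

	if (next > 3)
		return -1;	/* no further pages */
	pg->handle = next;
	return 0;
}

int main(void)
{
	struct encl_pg0 pg;
	uint16_t handle = 0xFFFF;	/* wildcard: "give me the first" */

	while (get_encl_pg0_after(handle, &pg) == 0) {
		printf("enclosure handle 0x%04x\n", pg.handle);
		handle = pg.handle;	/* continue from the last handle */
	}
	return 0;
}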
/**
* _scsih_search_responding_sas_devices -
* @ioc: per adapter object
@ -8449,15 +8642,10 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
break;
handle = sas_device_pg0.DevHandle =
le16_to_cpu(sas_device_pg0.DevHandle);
handle = le16_to_cpu(sas_device_pg0.DevHandle);
device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
if (!(_scsih_is_end_device(device_info)))
continue;
sas_device_pg0.SASAddress =
le64_to_cpu(sas_device_pg0.SASAddress);
sas_device_pg0.Slot = le16_to_cpu(sas_device_pg0.Slot);
sas_device_pg0.Flags = le16_to_cpu(sas_device_pg0.Flags);
_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
}
@ -8487,8 +8675,9 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
if ((pcie_device->wwid == pcie_device_pg0->WWID) &&
(pcie_device->slot == pcie_device_pg0->Slot)) {
if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
&& (pcie_device->slot == le16_to_cpu(
pcie_device_pg0->Slot))) {
pcie_device->responding = 1;
starget = pcie_device->starget;
if (starget && starget->hostdata) {
@ -8523,14 +8712,16 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
pcie_device->connector_name[0] = '\0';
}
if (pcie_device->handle == pcie_device_pg0->DevHandle)
if (pcie_device->handle == le16_to_cpu(
pcie_device_pg0->DevHandle))
goto out;
pr_info("\thandle changed from(0x%04x)!!!\n",
pcie_device->handle);
pcie_device->handle = pcie_device_pg0->DevHandle;
pcie_device->handle = le16_to_cpu(
pcie_device_pg0->DevHandle);
if (sas_target_priv_data)
sas_target_priv_data->handle =
pcie_device_pg0->DevHandle;
le16_to_cpu(pcie_device_pg0->DevHandle);
goto out;
}
}
@ -8579,10 +8770,6 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
if (!(_scsih_is_nvme_device(device_info)))
continue;
pcie_device_pg0.WWID = le64_to_cpu(pcie_device_pg0.WWID),
pcie_device_pg0.Slot = le16_to_cpu(pcie_device_pg0.Slot);
pcie_device_pg0.Flags = le32_to_cpu(pcie_device_pg0.Flags);
pcie_device_pg0.DevHandle = handle;
_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
}
out:
@ -8736,22 +8923,16 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
{
struct _sas_node *sas_expander = NULL;
unsigned long flags;
int i, encl_pg0_rc = -1;
Mpi2ConfigReply_t mpi_reply;
Mpi2SasEnclosurePage0_t enclosure_pg0;
int i;
struct _enclosure_node *enclosure_dev = NULL;
u16 handle = le16_to_cpu(expander_pg0->DevHandle);
u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
if (le16_to_cpu(expander_pg0->EnclosureHandle)) {
encl_pg0_rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
&enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
le16_to_cpu(expander_pg0->EnclosureHandle));
if (encl_pg0_rc)
pr_info(MPT3SAS_FMT
"Enclosure Pg0 read failed for handle(0x%04x)\n",
ioc->name,
le16_to_cpu(expander_pg0->EnclosureHandle));
}
if (enclosure_handle)
enclosure_dev =
mpt3sas_scsih_enclosure_find_by_handle(ioc,
enclosure_handle);
spin_lock_irqsave(&ioc->sas_node_lock, flags);
list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
@ -8759,12 +8940,12 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
continue;
sas_expander->responding = 1;
if (!encl_pg0_rc)
if (enclosure_dev) {
sas_expander->enclosure_logical_id =
le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
sas_expander->enclosure_handle =
le16_to_cpu(expander_pg0->EnclosureHandle);
le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
sas_expander->enclosure_handle =
le16_to_cpu(expander_pg0->EnclosureHandle);
}
if (sas_expander->handle == handle)
goto out;
@ -9286,6 +9467,7 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
!ioc->sas_hba.num_phys)) {
_scsih_prep_device_scan(ioc);
_scsih_create_enclosure_list_after_reset(ioc);
_scsih_search_responding_sas_devices(ioc);
_scsih_search_responding_pcie_devices(ioc);
_scsih_search_responding_raid_devices(ioc);
@ -9356,6 +9538,9 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
case MPI2_EVENT_SAS_DISCOVERY:
_scsih_sas_discovery_event(ioc, fw_event);
break;
case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
_scsih_sas_device_discovery_error_event(ioc, fw_event);
break;
case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
_scsih_sas_broadcast_primitive_event(ioc, fw_event);
break;
@ -9433,8 +9618,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
u16 sz;
Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
/* events turned off due to host reset or driver unloading */
if (ioc->remove_host || ioc->pci_error_recovery)
/* events turned off due to host reset */
if (ioc->pci_error_recovery)
return 1;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
@ -9540,6 +9725,7 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
case MPI2_EVENT_IR_OPERATION_STATUS:
case MPI2_EVENT_SAS_DISCOVERY:
case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
case MPI2_EVENT_IR_PHYSICAL_DISK:
case MPI2_EVENT_PCIE_ENUMERATION:
@ -10513,6 +10699,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&ioc->sas_device_list);
INIT_LIST_HEAD(&ioc->sas_device_init_list);
INIT_LIST_HEAD(&ioc->sas_expander_list);
INIT_LIST_HEAD(&ioc->enclosure_list);
INIT_LIST_HEAD(&ioc->pcie_device_list);
INIT_LIST_HEAD(&ioc->pcie_device_init_list);
INIT_LIST_HEAD(&ioc->fw_event_list);
@ -11100,10 +11287,10 @@ _mpt3sas_exit(void)
pr_info("mpt3sas version %s unloading\n",
MPT3SAS_DRIVER_VERSION);
pci_unregister_driver(&mpt3sas_driver);
mpt3sas_ctl_exit(hbas_to_enumerate);
pci_unregister_driver(&mpt3sas_driver);
scsih_exit();
}


@ -177,7 +177,8 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
if (mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
&pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
vol_pg0->PhysDisk[count].PhysDiskNum) ||
-pd_pg0.DevHandle == MPT3SAS_INVALID_DEVICE_HANDLE) {
+le16_to_cpu(pd_pg0.DevHandle) ==
+MPT3SAS_INVALID_DEVICE_HANDLE) {
pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is "
"disabled for the drive with handle(0x%04x) member"
"handle retrieval failed for member number=%d\n",


@ -2693,22 +2693,4 @@ static struct pci_driver mvumi_pci_driver = {
#endif
};
-/**
- * mvumi_init - Driver load entry point
- */
-static int __init mvumi_init(void)
-{
-return pci_register_driver(&mvumi_pci_driver);
-}
-/**
- * mvumi_exit - Driver unload entry point
- */
-static void __exit mvumi_exit(void)
-{
-pci_unregister_driver(&mvumi_pci_driver);
-}
-module_init(mvumi_init);
-module_exit(mvumi_exit);
+module_pci_driver(mvumi_pci_driver);
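
module_pci_driver() generates the registration boilerplate that the removed functions spelled out by hand. Roughly what the macro expands to for this driver, simplified from the module_driver() helper in the kernel headers:

static int __init mvumi_pci_driver_init(void)
{
	return pci_register_driver(&mvumi_pci_driver);
}
module_init(mvumi_pci_driver_init);

static void __exit mvumi_pci_driver_exit(void)
{
	pci_unregister_driver(&mvumi_pci_driver);
}
module_exit(mvumi_pci_driver_exit);
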


@ -1840,14 +1840,14 @@ int osd_req_decode_sense_full(struct osd_request *or,
case osd_sense_response_integrity_check:
{
struct osd_sense_response_integrity_check_descriptor
-*osricd = cur_descriptor;
-const unsigned len =
-sizeof(osricd->integrity_check_value);
-char key_dump[len*4 + 2]; /* 2nibbles+space+ASCII */
+*d = cur_descriptor;
+/* 2nibbles+space+ASCII */
+char dump[sizeof(d->integrity_check_value) * 4 + 2];
-hex_dump_to_buffer(osricd->integrity_check_value, len,
-32, 1, key_dump, sizeof(key_dump), true);
-OSD_SENSE_PRINT2("response_integrity [%s]\n", key_dump);
+hex_dump_to_buffer(d->integrity_check_value,
+sizeof(d->integrity_check_value),
+32, 1, dump, sizeof(dump), true);
+OSD_SENSE_PRINT2("response_integrity [%s]\n", dump);
}
case osd_sense_attribute_identification:
{

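The rewrite above belongs to the kernel-wide variable-length-array removal: len was only a runtime constant, so key_dump[len*4 + 2] was formally a VLA, while sizing straight from sizeof() yields a compile-time constant. The pattern in isolation, as a sketch with a stand-in descriptor layout:

struct desc {
	u8 integrity_check_value[32];	/* stand-in for the real member */
};

static void dump_icv(const struct desc *d)
{
	/* sizeof() on the member is a compile-time constant: no VLA */
	char dump[sizeof(d->integrity_check_value) * 4 + 2];

	hex_dump_to_buffer(d->integrity_check_value,
			   sizeof(d->integrity_check_value),
			   32, 1, dump, sizeof(dump), true);
	pr_debug("response_integrity [%s]\n", dump);
}
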

@ -3607,7 +3607,7 @@ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
default:
PM8001_MSG_DBG(pm8001_ha,
-pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_UNSORPORTED\n"));
+pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_SUPPORTED\n"));
break;
}
complete(pm8001_dev->dcompletion);


@ -1,5 +1,5 @@
/* QLogic FCoE Offload Driver
-* Copyright (c) 2016-2017 Cavium Inc.
+* Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of


@ -1,5 +1,5 @@
/* QLogic FCoE Offload Driver
-* Copyright (c) 2016-2017 Cavium Inc.
+* Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of


@ -1,5 +1,5 @@
/* QLogic FCoE Offload Driver
-* Copyright (c) 2016-2017 Cavium Inc.
+* Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of


@ -1,5 +1,5 @@
/* QLogic FCoE Offload Driver
-* Copyright (c) 2016-2017 Cavium Inc.
+* Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of


@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
-* Copyright (c) 2016-2017 Cavium Inc.
+* Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
@ -180,6 +180,7 @@ struct qedf_rport {
spinlock_t rport_lock;
#define QEDF_RPORT_SESSION_READY 1
#define QEDF_RPORT_UPLOADING_CONNECTION 2
#define QEDF_RPORT_IN_RESET 3
unsigned long flags;
unsigned long retry_delay_timestamp;
struct fc_rport *rport;
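
QEDF_RPORT_IN_RESET joins the other bit numbers used with atomic bitops on the flags word just below it. Typical use of such a flag around a reset window, as an illustration only (the real call sites are not part of this hunk):

/* entering the reset path for this remote port: */
set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);

/* elsewhere, e.g. on the command-issue path: */
if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags))
	return -EAGAIN;	/* back off until the reset settles */

/* once the reset completes: */
clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
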
@ -300,6 +301,7 @@ struct qedf_ctx {
#define QEDF_FALLBACK_VLAN 1002
#define QEDF_DEFAULT_PRIO 3
int vlan_id;
u8 prio;
struct qed_dev *cdev;
struct qed_dev_fcoe_info dev_info;
struct qed_int_info int_info;
@ -365,6 +367,7 @@ struct qedf_ctx {
#define QEDF_IO_WORK_MIN 64
mempool_t *io_mempool;
struct workqueue_struct *dpc_wq;
struct delayed_work grcdump_work;
u32 slow_sge_ios;
u32 fast_sge_ios;
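
The new grcdump_work member defers GRC register-dump collection to a workqueue, with qedf_wq_grcdump (declared further down in this header) as its handler. A sketch of the usual wiring; the probe-time and error-path call sites are assumptions, not lines from this diff:

/* at probe time, bind the handler declared in this header: */
INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);

/* from an error path, hand collection off to process context: */
schedule_delayed_work(&qedf->grcdump_work, 0);
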
@ -504,6 +507,7 @@ extern int qedf_send_flogi(struct qedf_ctx *qedf);
extern void qedf_get_protocol_tlv_data(void *dev, void *data);
extern void qedf_fp_io_handler(struct work_struct *work);
extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
extern void qedf_wq_grcdump(struct work_struct *work);
#define FCOE_WORD_TO_BYTE 4
#define QEDF_MAX_TASK_NUM 0xFFFF


@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
-* Copyright (c) 2016-2017 Cavium Inc.
+* Copyright (c) 2016-2018 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of

Some files were not shown because too many files have changed in this diff.