SCSI misc on 20240919

Updates to the usual drivers (ufs, smartpqi, NCR5380, mac_scsi, lpfc,
 mpi3mr).  There are no user visible core changes and a whole series of
 minor updates and fixes.  The largest core change is probably the
 simplification of the workqueue allocation path.
 
 Signed-off-by: James E.J. Bottomley <James.Bottomley@HansenPartnership.com>
 -----BEGIN PGP SIGNATURE-----
 
 iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCZuvd5yYcamFtZXMuYm90
 dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishV7dAQC+TSlv
 BeNm8W4yAFCXLCwnJh8rT6ZzuBsjsIHH1DPP3wD+IXuIOFf5gVRJGpCNJc/dI082
 /ehSrIdeJxwaNoOOt+Y=
 =SXZD
 -----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "Updates to the usual drivers (ufs, smartpqi, NCR5380, mac_scsi, lpfc,
  mpi3mr).

  There are no user visible core changes and a whole series of minor
  updates and fixes. The largest core change is probably the
  simplification of the workqueue allocation path"
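
The workqueue cleanup is mechanical: alloc_workqueue() and
alloc_ordered_workqueue() accept a printf-style format string plus
varargs, so the per-driver name buffers and snprintf() calls can go.
A minimal sketch of the before/after pattern (hypothetical "foo"
driver, not code from this series):

	#include <linux/workqueue.h>

	static struct workqueue_struct *foo_create_wq(int host_no)
	{
		/*
		 * Old pattern: a buffer existed only to carry the name.
		 *
		 *	char wq_name[20];
		 *	snprintf(wq_name, sizeof(wq_name), "foo_wq_%d", host_no);
		 *	wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name);
		 *
		 * New pattern: let alloc_workqueue() format the name itself.
		 */
		return alloc_workqueue("foo_wq_%d", WQ_MEM_RECLAIM, 1, host_no);
	}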

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (86 commits)
  scsi: smartpqi: update driver version to 2.1.30-031
  scsi: smartpqi: fix volume size updates
  scsi: smartpqi: fix rare system hang during LUN reset
  scsi: smartpqi: add new controller PCI IDs
  scsi: smartpqi: add counter for parity write stream requests
  scsi: smartpqi: correct stream detection
  scsi: smartpqi: Add fw log to kdump
  scsi: bnx2fc: Remove some unused fields in struct bnx2fc_rport
  scsi: qla2xxx: Remove the unused 'del_list_entry' field in struct fc_port
  scsi: ufs: core: Remove ufshcd_urgent_bkops()
  scsi: core: Remove obsoleted declaration for scsi_driverbyte_string()
  scsi: bnx2i: Remove unused declarations
  scsi: core: Simplify an alloc_workqueue() invocation
  scsi: ufs: Simplify alloc*_workqueue() invocation
  scsi: stex: Simplify an alloc_ordered_workqueue() invocation
  scsi: scsi_transport_fc: Simplify alloc_workqueue() invocations
  scsi: snic: Simplify alloc_workqueue() invocations
  scsi: qedi: Simplify an alloc_workqueue() invocation
  scsi: qedf: Simplify alloc_workqueue() invocations
  scsi: myrs: Simplify an alloc_ordered_workqueue() invocation
  ...
Linus Torvalds, 2024-09-19 11:28:51 +02:00
commit a1d1eb2f57
87 changed files with 1185 additions and 849 deletions


@@ -1532,3 +1532,30 @@ Contact: Bean Huo <beanhuo@micron.com>
 Description:
 		rtc_update_ms indicates how often the host should synchronize or update the
 		UFS RTC. If set to 0, this will disable UFS RTC periodic update.
+
+What:		/sys/devices/platform/.../ufshci_capabilities/version
+Date:		August 2024
+Contact:	Avri Altman <avri.altman@wdc.com>
+Description:
+		Host Capabilities register group: UFS version register.
+		Symbol - VER. This file shows the UFSHCD version.
+		Example: Version 3.12 would be represented as 0000_0312h.
+		The file is read only.
+
+What:		/sys/devices/platform/.../ufshci_capabilities/product_id
+Date:		August 2024
+Contact:	Avri Altman <avri.altman@wdc.com>
+Description:
+		Host Capabilities register group: product ID register.
+		Symbol - HCPID. This file shows the UFSHCD product id.
+		The content of this register is vendor specific.
+		The file is read only.
+
+What:		/sys/devices/platform/.../ufshci_capabilities/man_id
+Date:		August 2024
+Contact:	Avri Altman <avri.altman@wdc.com>
+Description:
+		Host Capabilities register group: manufacturer ID register.
+		Symbol - HCMID. This file shows the UFSHCD manufacturer id.
+		The Manufacturer ID is defined by JEDEC in JEDEC-JEP106.
+		The file is read only.
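
These are plain read-only sysfs attributes, so they can be read like
any other file. A small userspace sketch (the platform path segment is
hypothetical and varies by SoC):

	#include <stdio.h>

	int main(void)
	{
		/* Per the description above, UFSHCI 3.12 reads back as
		 * 0000_0312h; the exact textual formatting is up to the
		 * driver. */
		FILE *f = fopen("/sys/devices/platform/soc/ufshc/"
				"ufshci_capabilities/version", "r");
		char buf[32];

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("UFSHCI version: %s", buf);
		fclose(f);
		return 0;
	}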


@@ -799,6 +799,7 @@ void submit_bio_noacct(struct bio *bio)
 	switch (bio_op(bio)) {
 	case REQ_OP_READ:
+		break;
 	case REQ_OP_WRITE:
 		if (bio->bi_opf & REQ_ATOMIC) {
 			status = blk_validate_atomic_write_op_size(q, bio);
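
Without that break, REQ_OP_READ fell through into the REQ_OP_WRITE arm,
so a read bio carrying REQ_ATOMIC was run through the atomic-write size
validation meant for writes; the added break makes READ a no-op case.
Schematically (names simplified, not the actual block layer code):

	switch (op) {
	case OP_READ:
		break;		/* previously fell through into the write checks */
	case OP_WRITE:
		if (flags & FLAG_ATOMIC)
			status = validate_atomic_write(q, bio);
		break;
	}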


@@ -1018,14 +1018,6 @@ typedef struct _CONFIG_PAGE_IOC_2_RAID_VOL
 #define MPI_IOCPAGE2_FLAG_VOLUME_INACTIVE       (0x08)
 
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength at runtime.
- */
-#ifndef MPI_IOC_PAGE_2_RAID_VOLUME_MAX
-#define MPI_IOC_PAGE_2_RAID_VOLUME_MAX      (1)
-#endif
-
 typedef struct _CONFIG_PAGE_IOC_2
 {
     CONFIG_PAGE_HEADER          Header;             /* 00h */
@@ -1034,7 +1026,7 @@ typedef struct _CONFIG_PAGE_IOC_2
     U8                          MaxVolumes;         /* 09h */
     U8                          NumActivePhysDisks; /* 0Ah */
     U8                          MaxPhysDisks;       /* 0Bh */
-    CONFIG_PAGE_IOC_2_RAID_VOL  RaidVolume[MPI_IOC_PAGE_2_RAID_VOLUME_MAX];/* 0Ch */
+    CONFIG_PAGE_IOC_2_RAID_VOL  RaidVolume[] __counted_by(NumActiveVolumes); /* 0Ch */
 } CONFIG_PAGE_IOC_2, MPI_POINTER PTR_CONFIG_PAGE_IOC_2,
   IOCPage2_t, MPI_POINTER pIOCPage2_t;
@@ -1064,21 +1056,13 @@ typedef struct _IOC_3_PHYS_DISK
 } IOC_3_PHYS_DISK, MPI_POINTER PTR_IOC_3_PHYS_DISK,
   Ioc3PhysDisk_t, MPI_POINTER pIoc3PhysDisk_t;
 
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength at runtime.
- */
-#ifndef MPI_IOC_PAGE_3_PHYSDISK_MAX
-#define MPI_IOC_PAGE_3_PHYSDISK_MAX         (1)
-#endif
-
 typedef struct _CONFIG_PAGE_IOC_3
 {
     CONFIG_PAGE_HEADER          Header;       /* 00h */
     U8                          NumPhysDisks; /* 04h */
     U8                          Reserved1;    /* 05h */
     U16                         Reserved2;    /* 06h */
-    IOC_3_PHYS_DISK             PhysDisk[MPI_IOC_PAGE_3_PHYSDISK_MAX]; /* 08h */
+    IOC_3_PHYS_DISK             PhysDisk[] __counted_by(NumPhysDisks); /* 08h */
 } CONFIG_PAGE_IOC_3, MPI_POINTER PTR_CONFIG_PAGE_IOC_3,
   IOCPage3_t, MPI_POINTER pIOCPage3_t;
@@ -1093,21 +1077,13 @@ typedef struct _IOC_4_SEP
 } IOC_4_SEP, MPI_POINTER PTR_IOC_4_SEP,
   Ioc4Sep_t, MPI_POINTER pIoc4Sep_t;
 
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength at runtime.
- */
-#ifndef MPI_IOC_PAGE_4_SEP_MAX
-#define MPI_IOC_PAGE_4_SEP_MAX              (1)
-#endif
-
 typedef struct _CONFIG_PAGE_IOC_4
 {
     CONFIG_PAGE_HEADER          Header;    /* 00h */
     U8                          ActiveSEP; /* 04h */
     U8                          MaxSEP;    /* 05h */
     U16                         Reserved1; /* 06h */
-    IOC_4_SEP                   SEP[MPI_IOC_PAGE_4_SEP_MAX]; /* 08h */
+    IOC_4_SEP                   SEP[] __counted_by(ActiveSEP); /* 08h */
 } CONFIG_PAGE_IOC_4, MPI_POINTER PTR_CONFIG_PAGE_IOC_4,
   IOCPage4_t, MPI_POINTER pIOCPage4_t;
@@ -2295,14 +2271,6 @@ typedef struct _RAID_VOL0_SETTINGS
 #define MPI_RAID_HOT_SPARE_POOL_6               (0x40)
 #define MPI_RAID_HOT_SPARE_POOL_7               (0x80)
 
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength at runtime.
- */
-#ifndef MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX
-#define MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX    (1)
-#endif
-
 typedef struct _CONFIG_PAGE_RAID_VOL_0
 {
     CONFIG_PAGE_HEADER          Header;         /* 00h */
@@ -2321,7 +2289,7 @@ typedef struct _CONFIG_PAGE_RAID_VOL_0
     U8                          DataScrubRate;  /* 25h */
     U8                          ResyncRate;     /* 26h */
     U8                          InactiveStatus; /* 27h */
-    RAID_VOL0_PHYS_DISK         PhysDisk[MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX];/* 28h */
+    RAID_VOL0_PHYS_DISK         PhysDisk[] __counted_by(NumPhysDisks); /* 28h */
 } CONFIG_PAGE_RAID_VOL_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_0,
   RaidVolumePage0_t, MPI_POINTER pRaidVolumePage0_t;
@@ -2455,14 +2423,6 @@ typedef struct _RAID_PHYS_DISK1_PATH
 #define MPI_RAID_PHYSDISK1_FLAG_INVALID         (0x0001)
 
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength or NumPhysDiskPaths at runtime.
- */
-#ifndef MPI_RAID_PHYS_DISK1_PATH_MAX
-#define MPI_RAID_PHYS_DISK1_PATH_MAX        (1)
-#endif
-
 typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_1
 {
     CONFIG_PAGE_HEADER          Header;      /* 00h */
@@ -2470,7 +2430,7 @@ typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_1
     U8                          PhysDiskNum; /* 05h */
     U16                         Reserved2;   /* 06h */
     U32                         Reserved1;   /* 08h */
-    RAID_PHYS_DISK1_PATH        Path[MPI_RAID_PHYS_DISK1_PATH_MAX];/* 0Ch */
+    RAID_PHYS_DISK1_PATH        Path[] __counted_by(NumPhysDiskPaths);/* 0Ch */
 } CONFIG_PAGE_RAID_PHYS_DISK_1, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_1,
   RaidPhysDiskPage1_t, MPI_POINTER pRaidPhysDiskPage1_t;
@@ -2555,14 +2515,6 @@ typedef struct _MPI_SAS_IO_UNIT0_PHY_DATA
 } MPI_SAS_IO_UNIT0_PHY_DATA, MPI_POINTER PTR_MPI_SAS_IO_UNIT0_PHY_DATA,
   SasIOUnit0PhyData, MPI_POINTER pSasIOUnit0PhyData;
 
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength at runtime.
- */
-#ifndef MPI_SAS_IOUNIT0_PHY_MAX
-#define MPI_SAS_IOUNIT0_PHY_MAX             (1)
-#endif
-
 typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
 {
     CONFIG_EXTENDED_PAGE_HEADER Header;    /* 00h */
@@ -2571,7 +2523,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
     U8                          NumPhys;   /* 0Ch */
     U8                          Reserved2; /* 0Dh */
     U16                         Reserved3; /* 0Eh */
-    MPI_SAS_IO_UNIT0_PHY_DATA   PhyData[MPI_SAS_IOUNIT0_PHY_MAX]; /* 10h */
+    MPI_SAS_IO_UNIT0_PHY_DATA   PhyData[] __counted_by(NumPhys); /* 10h */
 } CONFIG_PAGE_SAS_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_0,
   SasIOUnitPage0_t, MPI_POINTER pSasIOUnitPage0_t;
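
The pattern in these hunks replaces the old one-element arrays with C99
flexible array members annotated with __counted_by(), so that
CONFIG_FORTIFY_SOURCE and UBSAN bounds checking can validate accesses
against the named counter field. A hedged sketch of allocating such a
page (simplified names, not driver code):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct phys_disk { u8 num; u8 bus; u8 id; u8 flags; };

	struct ioc_page3 {
		u8 num_phys_disks;
		struct phys_disk disks[] __counted_by(num_phys_disks);
	};

	static struct ioc_page3 *page3_alloc(u8 n)
	{
		/* struct_size() computes sizeof(*p) + n * sizeof(p->disks[0])
		 * with overflow checking. */
		struct ioc_page3 *p = kzalloc(struct_size(p, disks, n),
					      GFP_KERNEL);

		if (p)
			p->num_phys_disks = n;	/* set counter before indexing */
		return p;
	}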


@@ -1856,10 +1856,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	/* Initialize workqueue */
 	INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
 
-	snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
-		 "mpt_poll_%d", ioc->id);
-	ioc->reset_work_q = alloc_workqueue(ioc->reset_work_q_name,
-					    WQ_MEM_RECLAIM, 0);
+	ioc->reset_work_q =
+		alloc_workqueue("mpt_poll_%d", WQ_MEM_RECLAIM, 0, ioc->id);
 	if (!ioc->reset_work_q) {
 		printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
 		    ioc->name);
@@ -1986,9 +1984,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	INIT_LIST_HEAD(&ioc->fw_event_list);
 	spin_lock_init(&ioc->fw_event_lock);
-	snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
-	ioc->fw_event_q = alloc_workqueue(ioc->fw_event_q_name,
-					  WQ_MEM_RECLAIM, 0);
+	ioc->fw_event_q = alloc_workqueue("mpt/%d", WQ_MEM_RECLAIM, 0, ioc->id);
 	if (!ioc->fw_event_q) {
 		printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
 		    ioc->name);


@@ -729,7 +729,6 @@ typedef struct _MPT_ADAPTER
 	struct list_head	 fw_event_list;
 	spinlock_t		 fw_event_lock;
 	u8			 fw_events_off; /* if '1', then ignore events */
-	char			 fw_event_q_name[MPT_KOBJ_NAME_LEN];
 
 	struct mutex		 sas_discovery_mutex;
 	u8			 sas_discovery_runtime;
@@ -764,7 +763,6 @@ typedef struct _MPT_ADAPTER
 	u8			 fc_link_speed[2];
 	spinlock_t		 fc_rescan_work_lock;
 	struct work_struct	 fc_rescan_work;
-	char			 fc_rescan_work_q_name[MPT_KOBJ_NAME_LEN];
 	struct workqueue_struct *fc_rescan_work_q;
 
 	/* driver forced bus resets count */
@@ -778,7 +776,6 @@ typedef struct _MPT_ADAPTER
 	spinlock_t		 scsi_lookup_lock;
 	u64			 dma_mask;
 	u32			 broadcast_aen_busy;
-	char			 reset_work_q_name[MPT_KOBJ_NAME_LEN];
 	struct workqueue_struct *reset_work_q;
 	struct delayed_work	 fault_reset_work;


@@ -1349,11 +1349,8 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	/* initialize workqueue */
-	snprintf(ioc->fc_rescan_work_q_name, sizeof(ioc->fc_rescan_work_q_name),
-		 "mptfc_wq_%d", sh->host_no);
-	ioc->fc_rescan_work_q =
-		alloc_ordered_workqueue(ioc->fc_rescan_work_q_name,
-					WQ_MEM_RECLAIM);
+	ioc->fc_rescan_work_q = alloc_ordered_workqueue(
+		"mptfc_wq_%d", WQ_MEM_RECLAIM, sh->host_no);
 	if (!ioc->fc_rescan_work_q) {
 		error = -ENOMEM;
 		goto out_mptfc_host;


@@ -157,7 +157,6 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
 	}
 
 	ncmd->status = 0;
-	ncmd->message = 0;
 }
 
 static inline void advance_sg_buffer(struct NCR5380_cmd *ncmd)
@@ -199,7 +198,6 @@ static inline void set_resid_from_SCp(struct scsi_cmnd *cmd)
 * Polls the chip in a reasonably efficient manner waiting for an
 * event to occur. After a short quick poll we begin to yield the CPU
 * (if possible). In irq contexts the time-out is arbitrarily limited.
-* Callers may hold locks as long as they are held in irq mode.
 *
 * Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT.
 */
@@ -1228,24 +1226,15 @@ out:
 	return ret;
 }
 
-/*
- * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
- * unsigned char *phase, int *count, unsigned char **data)
- *
- * Purpose : transfers data in given phase using polled I/O
- *
- * Inputs : instance - instance of driver, *phase - pointer to
- * what phase is expected, *count - pointer to number of
- * bytes to transfer, **data - pointer to data pointer,
- * can_sleep - 1 or 0 when sleeping is permitted or not, respectively.
- *
- * Returns : -1 when different phase is entered without transferring
- * maximum number of bytes, 0 if all bytes are transferred or exit
- * is in same phase.
- *
- * Also, *phase, *count, *data are modified in place.
- *
- * XXX Note : handling for bus free may be useful.
+/**
+ * NCR5380_transfer_pio() - transfers data in given phase using polled I/O
+ * @instance: instance of driver
+ * @phase: pointer to what phase is expected
+ * @count: pointer to number of bytes to transfer
+ * @data: pointer to data pointer
+ * @can_sleep: 1 or 0 when sleeping is permitted or not, respectively
+ *
+ * Returns: void. *phase, *count, *data are modified in place.
 */
 
 /*
@@ -1254,9 +1243,9 @@ out:
 * counts, we will always do a pseudo DMA or DMA transfer.
 */
 
-static int NCR5380_transfer_pio(struct Scsi_Host *instance,
-				unsigned char *phase, int *count,
-				unsigned char **data, unsigned int can_sleep)
+static void NCR5380_transfer_pio(struct Scsi_Host *instance,
+				 unsigned char *phase, int *count,
+				 unsigned char **data, unsigned int can_sleep)
 {
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	unsigned char p = *phase, tmp;
@@ -1277,8 +1266,8 @@ static void NCR5380_transfer_pio(struct Scsi_Host *instance,
 		 * valid
 		 */
 
-		if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ,
-					  HZ * can_sleep) < 0)
+		if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ | SR_BSY,
+					  SR_REQ | SR_BSY, HZ * can_sleep) < 0)
 			break;
 
 		dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n");
@@ -1329,17 +1318,19 @@ static void NCR5380_transfer_pio(struct Scsi_Host *instance,
 		dsprintk(NDEBUG_HANDSHAKE, instance, "REQ negated, handshake complete\n");
 
-		/*
-		 * We have several special cases to consider during REQ/ACK handshaking :
-		 * 1. We were in MSGOUT phase, and we are on the last byte of the
-		 * message. ATN must be dropped as ACK is dropped.
-		 *
-		 * 2. We are in a MSGIN phase, and we are on the last byte of the
-		 * message. We must exit with ACK asserted, so that the calling
-		 * code may raise ATN before dropping ACK to reject the message.
-		 *
-		 * 3. ACK and ATN are clear and the target may proceed as normal.
-		 */
+		/*
+		 * We have several special cases to consider during REQ/ACK
+		 * handshaking:
+		 *
+		 * 1. We were in MSGOUT phase, and we are on the last byte of
+		 * the message. ATN must be dropped as ACK is dropped.
+		 *
+		 * 2. We are in MSGIN phase, and we are on the last byte of the
+		 * message. We must exit with ACK asserted, so that the calling
+		 * code may raise ATN before dropping ACK to reject the message.
+		 *
+		 * 3. ACK and ATN are clear & the target may proceed as normal.
+		 */
 		if (!(p == PHASE_MSGIN && c == 1)) {
 			if (p == PHASE_MSGOUT && c > 1)
 				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
@@ -1361,11 +1352,6 @@ static void NCR5380_transfer_pio(struct Scsi_Host *instance,
 		*phase = tmp & PHASE_MASK;
 	else
 		*phase = PHASE_UNKNOWN;
-
-	if (!c || (*phase == p))
-		return 0;
-	else
-		return -1;
 }
 
 /**
@@ -1485,6 +1471,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
 				unsigned char **data)
 {
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+	struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(hostdata->connected);
 	int c = *count;
 	unsigned char p = *phase;
 	unsigned char *d = *data;
@@ -1496,7 +1483,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
 		return -1;
 	}
 
-	NCR5380_to_ncmd(hostdata->connected)->phase = p;
+	ncmd->phase = p;
 
 	if (p & SR_IO) {
 		if (hostdata->read_overruns)
@@ -1574,79 +1561,80 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
 	/* The result is zero iff pseudo DMA send/receive was completed. */
 	hostdata->dma_len = c;
 
 	/*
 	 * A note regarding the DMA errata workarounds for early NMOS silicon.
 	 *
 	 * For DMA sends, we want to wait until the last byte has been
 	 * transferred out over the bus before we turn off DMA mode. Alas, there
 	 * seems to be no terribly good way of doing this on a 5380 under all
 	 * conditions. For non-scatter-gather operations, we can wait until REQ
 	 * and ACK both go false, or until a phase mismatch occurs. Gather-sends
 	 * are nastier, since the device will be expecting more data than we
-	 * are prepared to send it, and REQ will remain asserted. On a 53C8[01] we
-	 * could test Last Byte Sent to assure transfer (I imagine this is precisely
-	 * why this signal was added to the newer chips) but on the older 538[01]
-	 * this signal does not exist. The workaround for this lack is a watchdog;
-	 * we bail out of the wait-loop after a modest amount of wait-time if
-	 * the usual exit conditions are not met. Not a terribly clean or
-	 * correct solution :-%
-	 *
-	 * DMA receive is equally tricky due to a nasty characteristic of the NCR5380.
-	 * If the chip is in DMA receive mode, it will respond to a target's
-	 * REQ by latching the SCSI data into the INPUT DATA register and asserting
-	 * ACK, even if it has _already_ been notified by the DMA controller that
-	 * the current DMA transfer has completed! If the NCR5380 is then taken
-	 * out of DMA mode, this already-acknowledged byte is lost. This is
-	 * not a problem for "one DMA transfer per READ command", because
-	 * the situation will never arise... either all of the data is DMA'ed
-	 * properly, or the target switches to MESSAGE IN phase to signal a
-	 * disconnection (either operation bringing the DMA to a clean halt).
-	 * However, in order to handle scatter-receive, we must work around the
-	 * problem. The chosen fix is to DMA fewer bytes, then check for the
-	 * condition before taking the NCR5380 out of DMA mode. One or two extra
-	 * bytes are transferred via PIO as necessary to fill out the original
-	 * request.
-	 */
+	 * are prepared to send it, and REQ will remain asserted. On a 53C8[01]
+	 * we could test Last Byte Sent to assure transfer (I imagine this is
+	 * precisely why this signal was added to the newer chips) but on the
+	 * older 538[01] this signal does not exist. The workaround for this
+	 * lack is a watchdog; we bail out of the wait-loop after a modest
+	 * amount of wait-time if the usual exit conditions are not met.
+	 * Not a terribly clean or correct solution :-%
+	 *
+	 * DMA receive is equally tricky due to a nasty characteristic of the
+	 * NCR5380. If the chip is in DMA receive mode, it will respond to a
+	 * target's REQ by latching the SCSI data into the INPUT DATA register
+	 * and asserting ACK, even if it has _already_ been notified by the
+	 * DMA controller that the current DMA transfer has completed! If the
+	 * NCR5380 is then taken out of DMA mode, this already-acknowledged
+	 * byte is lost.
+	 *
+	 * This is not a problem for "one DMA transfer per READ
+	 * command", because the situation will never arise... either all of
+	 * the data is DMA'ed properly, or the target switches to MESSAGE IN
+	 * phase to signal a disconnection (either operation bringing the DMA
+	 * to a clean halt). However, in order to handle scatter-receive, we
+	 * must work around the problem. The chosen fix is to DMA fewer bytes,
+	 * then check for the condition before taking the NCR5380 out of DMA
+	 * mode. One or two extra bytes are transferred via PIO as necessary
+	 * to fill out the original request.
+	 */
 
-	if (hostdata->flags & FLAG_DMA_FIXUP) {
-		if (p & SR_IO) {
-			/*
-			 * The workaround was to transfer fewer bytes than we
-			 * intended to with the pseudo-DMA read function, wait for
-			 * the chip to latch the last byte, read it, and then disable
-			 * pseudo-DMA mode.
-			 *
-			 * After REQ is asserted, the NCR5380 asserts DRQ and ACK.
-			 * REQ is deasserted when ACK is asserted, and not reasserted
-			 * until ACK goes false. Since the NCR5380 won't lower ACK
-			 * until DACK is asserted, which won't happen unless we twiddle
-			 * the DMA port or we take the NCR5380 out of DMA mode, we
-			 * can guarantee that we won't handshake another extra
-			 * byte.
-			 */
-
-			if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
-						  BASR_DRQ, BASR_DRQ, 0) < 0) {
-				result = -1;
-				shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n");
-			}
-			if (NCR5380_poll_politely(hostdata, STATUS_REG,
-						  SR_REQ, 0, 0) < 0) {
-				result = -1;
-				shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n");
-			}
-			d[*count - 1] = NCR5380_read(INPUT_DATA_REG);
-		} else {
-			/*
-			 * Wait for the last byte to be sent. If REQ is being asserted for
-			 * the byte we're interested, we'll ACK it and it will go false.
-			 */
-			if (NCR5380_poll_politely2(hostdata,
-						   BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
-						   BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0) < 0) {
-				result = -1;
-				shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n");
-			}
-		}
-	}
+	if ((hostdata->flags & FLAG_DMA_FIXUP) &&
+	    (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) {
+		/*
+		 * The workaround was to transfer fewer bytes than we
+		 * intended to with the pseudo-DMA receive function, wait for
+		 * the chip to latch the last byte, read it, and then disable
+		 * DMA mode.
+		 *
+		 * After REQ is asserted, the NCR5380 asserts DRQ and ACK.
+		 * REQ is deasserted when ACK is asserted, and not reasserted
+		 * until ACK goes false. Since the NCR5380 won't lower ACK
+		 * until DACK is asserted, which won't happen unless we twiddle
+		 * the DMA port or we take the NCR5380 out of DMA mode, we
+		 * can guarantee that we won't handshake another extra
+		 * byte.
+		 *
+		 * If sending, wait for the last byte to be sent. If REQ is
+		 * being asserted for the byte we're interested, we'll ACK it
+		 * and it will go false.
+		 */
+		if (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
+					   BASR_DRQ, BASR_DRQ, 0)) {
+			if ((p & SR_IO) &&
+			    (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) {
+				if (!NCR5380_poll_politely(hostdata, STATUS_REG,
+							   SR_REQ, 0, 0)) {
+					d[c] = NCR5380_read(INPUT_DATA_REG);
+					--ncmd->this_residual;
+				} else {
+					result = -1;
+					scmd_printk(KERN_ERR, hostdata->connected,
+						    "PDMA fixup: !REQ timeout\n");
+				}
+			}
+		} else if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH) {
+			result = -1;
+			scmd_printk(KERN_ERR, hostdata->connected,
+				    "PDMA fixup: DRQ timeout\n");
+		}
+	}
@@ -1666,9 +1654,6 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
 * Side effects : SCSI things happen, the disconnected queue will be
 * modified if a command disconnects, *instance->connected will
 * change.
-*
-* XXX Note : we need to watch for bus free or a reset condition here
-* to recover from an unexpected bus free condition.
 */
 
 static void NCR5380_information_transfer(struct Scsi_Host *instance)
@@ -1807,9 +1792,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 					return;
 				case PHASE_MSGIN:
 					len = 1;
+					tmp = 0xff;
 					data = &tmp;
 					NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
-					ncmd->message = tmp;
+					if (tmp == 0xff)
+						break;
 
 					switch (tmp) {
 					case ABORT:
@@ -1996,6 +1983,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 					break;
 				case PHASE_STATIN:
 					len = 1;
+					tmp = ncmd->status;
 					data = &tmp;
 					NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
 					ncmd->status = tmp;
@@ -2005,9 +1993,20 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 				NCR5380_dprint(NDEBUG_ANY, instance);
 			} /* switch(phase) */
 		} else {
+			int err;
+
 			spin_unlock_irq(&hostdata->lock);
-			NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ);
+			err = NCR5380_poll_politely(hostdata, STATUS_REG,
+						    SR_REQ, SR_REQ, HZ);
 			spin_lock_irq(&hostdata->lock);
+			if (err < 0 && hostdata->connected &&
+			    !(NCR5380_read(STATUS_REG) & SR_BSY)) {
+				scmd_printk(KERN_ERR, hostdata->connected,
+					    "BSY signal lost\n");
+				do_reset(instance);
+				bus_reset_cleanup(instance);
+			}
 		}
 	}
 }
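
With NCR5380_transfer_pio() now returning void, callers that need a
success indication use the transferred data itself: the PHASE_MSGIN
hunk presets the buffer to an 0xff sentinel and treats an unchanged
buffer as "no byte was transferred". The caller-side convention, in
sketch form:

	unsigned char tmp = 0xff;	/* sentinel: overwritten on success */
	unsigned char *data = &tmp;
	int len = 1;

	NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
	if (tmp == 0xff)
		break;			/* no byte arrived; give up on MSGIN */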


@@ -3,10 +3,10 @@
 * NCR 5380 defines
 *
 * Copyright 1993, Drew Eckhardt
-*	Visionary Computing
-*	(Unix consulting and custom programming)
-*	drew@colorado.edu
-*	+1 (303) 666-5836
+* Visionary Computing
+* (Unix consulting and custom programming)
+* drew@colorado.edu
+* +1 (303) 666-5836
 *
 * For more information, please consult
 *
@@ -78,7 +78,7 @@
 #define ICR_DIFF_ENABLE		0x20	/* wo Set to enable diff. drivers */
 #define ICR_ASSERT_ACK		0x10	/* rw ini Set to assert ACK */
 #define ICR_ASSERT_BSY		0x08	/* rw Set to assert BSY */
-#define ICR_ASSERT_SEL 		0x04	/* rw Set to assert SEL */
+#define ICR_ASSERT_SEL		0x04	/* rw Set to assert SEL */
 #define ICR_ASSERT_ATN		0x02	/* rw Set to assert ATN */
 #define ICR_ASSERT_DATA		0x01	/* rw SCSI_DATA_REG is asserted */
@@ -135,7 +135,7 @@
 #define BASR_IRQ		0x10	/* ro mirror of IRQ pin */
 #define BASR_PHASE_MATCH	0x08	/* ro Set when MSG CD IO match TCR */
 #define BASR_BUSY_ERROR		0x04	/* ro Unexpected change to inactive state */
-#define BASR_ATN 		0x02	/* ro BUS status */
+#define BASR_ATN		0x02	/* ro BUS status */
 #define BASR_ACK		0x01	/* ro BUS status */
 
 /* Write any value to this register to start a DMA send */
@@ -170,7 +170,7 @@
 #define CSR_BASE CSR_53C80_INTR
 
 /* Note : PHASE_* macros are based on the values of the STATUS register */
-#define PHASE_MASK 	(SR_MSG | SR_CD | SR_IO)
+#define PHASE_MASK	(SR_MSG | SR_CD | SR_IO)
 
 #define PHASE_DATAOUT		0
 #define PHASE_DATAIN		SR_IO
@@ -231,7 +231,6 @@ struct NCR5380_cmd {
 	int this_residual;
 	struct scatterlist *buffer;
 	int status;
-	int message;
 	int phase;
 	struct list_head list;
 };
@@ -286,8 +285,9 @@ static const char *NCR5380_info(struct Scsi_Host *instance);
 static void NCR5380_reselect(struct Scsi_Host *instance);
 static bool NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
 static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
-static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data,
-				unsigned int can_sleep);
+static void NCR5380_transfer_pio(struct Scsi_Host *instance,
				 unsigned char *phase, int *count,
				 unsigned char **data, unsigned int can_sleep);
 static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
				  unsigned int, u8, u8,
				  unsigned int, u8, u8, unsigned long);


@@ -1267,7 +1267,7 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
 			return ret;
 		command = ContainerRawIo;
 		fibsize = sizeof(struct aac_raw_io) +
-			((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
+			(le32_to_cpu(readcmd->sg.count) * sizeof(struct sgentryraw));
 	}
 
 	BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
@@ -1302,7 +1302,7 @@ static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
 	if (ret < 0)
 		return ret;
 	fibsize = sizeof(struct aac_read64) +
-		((le32_to_cpu(readcmd->sg.count) - 1) *
+		(le32_to_cpu(readcmd->sg.count) *
		 sizeof (struct sgentry64));
 	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
@@ -1337,7 +1337,7 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
 	if (ret < 0)
 		return ret;
 	fibsize = sizeof(struct aac_read) +
-		((le32_to_cpu(readcmd->sg.count) - 1) *
+		(le32_to_cpu(readcmd->sg.count) *
		 sizeof (struct sgentry));
 	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
@@ -1401,7 +1401,7 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
 			return ret;
 		command = ContainerRawIo;
 		fibsize = sizeof(struct aac_raw_io) +
-			((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
+			(le32_to_cpu(writecmd->sg.count) * sizeof(struct sgentryraw));
 	}
 
 	BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
@@ -1436,7 +1436,7 @@ static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba,
 	if (ret < 0)
 		return ret;
 	fibsize = sizeof(struct aac_write64) +
-		((le32_to_cpu(writecmd->sg.count) - 1) *
+		(le32_to_cpu(writecmd->sg.count) *
		 sizeof (struct sgentry64));
 	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
@@ -1473,7 +1473,7 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
 	if (ret < 0)
 		return ret;
 	fibsize = sizeof(struct aac_write) +
-		((le32_to_cpu(writecmd->sg.count) - 1) *
+		(le32_to_cpu(writecmd->sg.count) *
		 sizeof (struct sgentry));
 	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
@@ -1592,9 +1592,9 @@ static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
 	/*
	 *	Build Scatter/Gather list
	 */
-	fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
+	fibsize = sizeof(struct aac_srb) +
 		((le32_to_cpu(srbcmd->sg.count) & 0xff) *
-		 sizeof (struct sgentry64));
+		 sizeof(struct sgentry64));
 	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
@@ -1624,7 +1624,7 @@ static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
	 *	Build Scatter/Gather list
	 */
 	fibsize = sizeof (struct aac_srb) +
-		(((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
+		((le32_to_cpu(srbcmd->sg.count) & 0xff) *
		 sizeof (struct sgentry));
 	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
@@ -1693,8 +1693,7 @@ static int aac_send_safw_bmic_cmd(struct aac_dev *dev,
 	fibptr->hw_fib_va->header.XferState &=
		~cpu_to_le32(FastResponseCapable);
 
-	fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
-		sizeof(struct sgentry64);
+	fibsize = sizeof(struct aac_srb) + sizeof(struct sgentry64);
 
 	/* allocate DMA buffer for response */
 	addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len,
@@ -1833,7 +1832,7 @@ static int aac_get_safw_ciss_luns(struct aac_dev *dev)
 	struct aac_ciss_phys_luns_resp *phys_luns;
 
 	datasize = sizeof(struct aac_ciss_phys_luns_resp) +
-		(AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
+		AAC_MAX_TARGETS * sizeof(struct _ciss_lun);
 	phys_luns = kmalloc(datasize, GFP_KERNEL);
 	if (phys_luns == NULL)
 		goto out;
@@ -2267,7 +2266,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
 		dev->a_ops.adapter_bounds = aac_bounds_32;
 		dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
			sizeof(struct aac_fibhdr) -
-			sizeof(struct aac_write) + sizeof(struct sgentry)) /
+			sizeof(struct aac_write)) /
				sizeof(struct sgentry);
 		if (dev->dac_support) {
 			dev->a_ops.adapter_read = aac_read_block64;
@@ -2278,8 +2277,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
 			dev->scsi_host_ptr->sg_tablesize =
				(dev->max_fib_size -
				sizeof(struct aac_fibhdr) -
-				sizeof(struct aac_write64) +
-				sizeof(struct sgentry64)) /
+				sizeof(struct aac_write64)) /
					sizeof(struct sgentry64);
 		} else {
 			dev->a_ops.adapter_read = aac_read_block;
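
All of the fibsize/datasize adjustments here follow from the header
change shown next: with a trailing "struct sgentry sg[1]" member,
sizeof() already included one entry, so the old formulas added
"(count - 1)" entries; with a flexible "sg[]" member, sizeof() includes
none and the count is used directly. In sketch form (simplified types;
struct_size() is the generic helper, not what aacraid open-codes):

	#include <linux/overflow.h>
	#include <linux/types.h>

	struct sgentry { __le32 addr; __le32 count; };

	struct sgmap {
		__le32 count;
		struct sgentry sg[];	/* was: struct sgentry sg[1]; */
	};

	/* old: sizeof(struct sgmap) + (n - 1) * sizeof(struct sgentry)
	 * new: sizeof(struct sgmap) +  n      * sizeof(struct sgentry)
	 * both describe a map holding n entries; the total is unchanged. */
	static size_t sgmap_bytes(const struct sgmap *map, size_t n)
	{
		return struct_size(map, sg, n);
	}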


@@ -322,7 +322,7 @@ struct aac_ciss_phys_luns_resp {
 		u8	level3[2];
 		u8	level2[2];
 		u8	node_ident[16];	/* phys. node identifier */
-	} lun[1];			/* List of phys. devices */
+	} lun[];			/* List of phys. devices */
 };
 
 /*
@@ -507,32 +507,27 @@ struct sge_ieee1212 {
 struct sgmap {
 	__le32			count;
-	struct sgentry		sg[1];
+	struct sgentry		sg[];
 };
 
 struct user_sgmap {
 	u32			count;
-	struct user_sgentry	sg[1];
+	struct user_sgentry	sg[];
 };
 
 struct sgmap64 {
 	__le32			count;
-	struct sgentry64	sg[1];
+	struct sgentry64	sg[];
 };
 
 struct user_sgmap64 {
 	u32			count;
-	struct user_sgentry64	sg[1];
+	struct user_sgentry64	sg[];
 };
 
 struct sgmapraw {
 	__le32			count;
-	struct sgentryraw	sg[1];
-};
-
-struct user_sgmapraw {
-	u32			count;
-	struct user_sgentryraw	sg[1];
+	struct sgentryraw	sg[];
 };
 
 struct creation_info
@@ -873,7 +868,7 @@ union aac_init
 			__le16	element_count;
 			__le16	comp_thresh;
 			__le16	unused;
-		} rrq[1];	/* up to 64 RRQ addresses */
+		} rrq[] __counted_by_le(rr_queue_count); /* up to 64 RRQ addresses */
 	} r8;
 };
@@ -2029,8 +2024,8 @@ struct aac_srb_reply
 };
 
 struct aac_srb_unit {
-	struct aac_srb srb;
 	struct aac_srb_reply srb_reply;
+	struct aac_srb srb;
 };
 
 /*
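
The aac_srb_unit member swap is forced by C's flexible-array rules:
struct aac_srb now ends in a flexible sg array, and a struct ending in
a flexible array member may only be embedded as the last member of a
containing struct. Illustration (hypothetical names):

	struct inner {
		int count;
		int data[];		/* flexible array member */
	};

	/* struct bad { struct inner in; int tail; };
	 *	-> diagnosed: the array would not be at the end. */

	struct ok {
		int tail;
		struct inner in;	/* accepted: stays last */
	};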


@@ -523,7 +523,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 		goto cleanup;
 	}
 
-	if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
+	if ((fibsize < sizeof(struct user_aac_srb)) ||
	    (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
 		rcode = -EINVAL;
 		goto cleanup;
@@ -561,7 +561,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 		rcode = -EINVAL;
 		goto cleanup;
 	}
-	actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
+	actual_fibsize = sizeof(struct aac_srb) +
 		((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
 	actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
		(sizeof(struct sgentry64) - sizeof(struct sgentry));


@@ -522,8 +522,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 	spin_lock_init(&dev->iq_lock);
 	dev->max_fib_size = sizeof(struct hw_fib);
 	dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
-		- sizeof(struct aac_fibhdr)
-		- sizeof(struct aac_write) + sizeof(struct sgentry))
+		- sizeof(struct aac_fibhdr) - sizeof(struct aac_write))
			/ sizeof(struct sgentry);
 	dev->comm_interface = AAC_COMM_PRODUCER;
 	dev->raw_io_interface = dev->raw_io_64 = 0;


@@ -2327,8 +2327,9 @@ static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
 	sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
 	sg64->sg[0].count = cpu_to_le32(datasize);
 
-	ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
-			FsaNormal, 1, 1, NULL, NULL);
+	ret = aac_fib_send(ScsiPortCommand64, fibptr,
+			   sizeof(struct aac_srb) + sizeof(struct sgentry),
+			   FsaNormal, 1, 1, NULL, NULL);
 
 	dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);


@@ -410,7 +410,7 @@ static void aac_src_start_adapter(struct aac_dev *dev)
			lower_32_bits(dev->init_pa),
			upper_32_bits(dev->init_pa),
			sizeof(struct _r8) +
-			(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
+			AAC_MAX_HRRQ * sizeof(struct _rrq),
			0, 0, 0, NULL, NULL, NULL, NULL, NULL);
 	} else {
 		init->r7.host_elapsed_seconds =


@@ -5528,7 +5528,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 	struct beiscsi_hba *phba = NULL;
 	struct be_eq_obj *pbe_eq;
 	unsigned int s_handle;
-	char wq_name[20];
 	int ret, i;
 
 	ret = beiscsi_enable_pci(pcidev);
@@ -5634,9 +5633,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 
 	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
 
-	snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq",
-		 phba->shost->host_no);
-	phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name);
+	phba->wq = alloc_workqueue("beiscsi_%02x_wq", WQ_MEM_RECLAIM, 1,
+				   phba->shost->host_no);
 	if (!phba->wq) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe-"


@@ -766,9 +766,8 @@ bfad_thread_workq(struct bfad_s *bfad)
 	struct bfad_im_s *im = bfad->im;
 
 	bfa_trc(bfad, 0);
-	snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d",
-		 bfad->inst_no);
-	im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
+	im->drv_workq = alloc_ordered_workqueue("bfad_wq_%d", WQ_MEM_RECLAIM,
+						bfad->inst_no);
 	if (!im->drv_workq)
 		return BFA_STATUS_FAILED;


@@ -134,7 +134,6 @@ struct bfad_fcp_binding {
 struct bfad_im_s {
 	struct bfad_s           *bfad;
 	struct workqueue_struct *drv_workq;
-	char                    drv_workq_name[KOBJ_NAME_LEN];
 	struct work_struct      aen_im_notify_work;
 };


@@ -358,18 +358,12 @@ struct bnx2fc_rport {
 	dma_addr_t lcq_dma;
 	u32 lcq_mem_size;
 
-	void *ofld_req[4];
-	dma_addr_t ofld_req_dma[4];
-	void *enbl_req;
-	dma_addr_t enbl_req_dma;
-
 	spinlock_t tgt_lock;
 	spinlock_t cq_lock;
 	atomic_t num_active_ios;
 	u32 flush_in_prog;
 	unsigned long timestamp;
 	unsigned long retry_delay_timestamp;
-	struct list_head free_task_list;
 	struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
 	struct list_head active_cmd_queue;
 	struct list_head els_queue;


@@ -2363,8 +2363,8 @@ static int _bnx2fc_create(struct net_device *netdev,
 	interface->vlan_id = vlan_id;
 	interface->tm_timeout = BNX2FC_TM_TIMEOUT;
 
-	interface->timer_work_queue =
-			create_singlethread_workqueue("bnx2fc_timer_wq");
+	interface->timer_work_queue = alloc_ordered_workqueue(
+		"%s", WQ_MEM_RECLAIM, "bnx2fc_timer_wq");
 	if (!interface->timer_work_queue) {
 		printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
 		rc = -EINVAL;


@@ -815,11 +815,6 @@ extern struct bnx2i_hba *get_adapter_list_head(void);
 struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
					  u16 iscsi_cid);
 
-int bnx2i_alloc_ep_pool(void);
-void bnx2i_release_ep_pool(void);
-struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
-struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
-
 struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
 
 struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
@@ -869,12 +864,6 @@ extern int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
 
 extern int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep);
 
-/* Debug related function prototypes */
-extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
-extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
-extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
-extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
-
 extern int bnx2i_percpu_io_thread(void *arg);
 extern int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
				       struct bnx2i_conn *bnx2i_conn,


@@ -822,7 +822,8 @@ static int __init rdac_init(void)
 	/*
	 * Create workqueue to handle mode selects for rdac
	 */
-	kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd");
+	kmpath_rdacd =
+		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "kmpath_rdacd");
 	if (!kmpath_rdacd) {
 		scsi_unregister_device_handler(&rdac_dh);
 		printk(KERN_ERR "kmpath_rdacd creation failed.\n");


@@ -1114,7 +1114,8 @@ int efct_scsi_tgt_new_device(struct efct *efct)
 	atomic_set(&efct->tgt_efct.watermark_hit, 0);
 	atomic_set(&efct->tgt_efct.initiator_count, 0);
 
-	lio_wq = create_singlethread_workqueue("efct_lio_worker");
+	lio_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+					 "efct_lio_worker");
 	if (!lio_wq) {
 		efc_log_err(efct, "workqueue create failed\n");
 		return -EIO;


@@ -705,9 +705,9 @@ efc_nport_vport_del(struct efc *efc, struct efc_domain *domain,
 	spin_lock_irqsave(&efc->lock, flags);
 	list_for_each_entry(nport, &domain->nport_list, list_entry) {
 		if (nport->wwpn == wwpn && nport->wwnn == wwnn) {
-			kref_put(&nport->ref, nport->release);
 			/* Shutdown this NPORT */
 			efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
+			kref_put(&nport->ref, nport->release);
 			break;
 		}
 	}
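
The reordering here is the standard refcounting rule: if kref_put()
drops the final reference, the release callback frees the object, so
the object must not be touched afterwards. Posting the shutdown event
before the put closes a potential use-after-free. Generic sketch
(hypothetical names):

	#include <linux/kref.h>

	struct nport_obj { struct kref ref; };

	static void nport_release(struct kref *ref) { /* frees object */ }
	static void post_shutdown_event(struct nport_obj *obj);

	static void shutdown_and_put(struct nport_obj *obj)
	{
		post_shutdown_event(obj);		/* last use of obj */
		kref_put(&obj->ref, nport_release);	/* may free obj */
	}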


@@ -929,7 +929,6 @@ struct esas2r_adapter {
 	struct list_head fw_event_list;
 	spinlock_t fw_event_lock;
 	u8 fw_events_off;	/* if '1', then ignore events */
-	char fw_event_q_name[ESAS2R_KOBJ_NAME_LEN];
 	/*
	 * intr_mode stores the interrupt mode currently being used by this
	 * adapter. it is based on the interrupt_mode module parameter, but


@@ -311,9 +311,8 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
 	sema_init(&a->nvram_semaphore, 1);
 
 	esas2r_fw_event_off(a);
-	snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d",
-		 a->index);
-	a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name);
+	a->fw_event_q =
+		alloc_ordered_workqueue("esas2r/%d", WQ_MEM_RECLAIM, a->index);
 
 	init_waitqueue_head(&a->buffered_ioctl_waiter);
 	init_waitqueue_head(&a->nvram_waiter);


@@ -45,12 +45,8 @@ MODULE_PARM_DESC(fcf_dev_loss_tmo,
 */
 #define fcoe_ctlr_id(x)			\
	((x)->id)
-#define fcoe_ctlr_work_q_name(x)	\
-	((x)->work_q_name)
 #define fcoe_ctlr_work_q(x)		\
	((x)->work_q)
-#define fcoe_ctlr_devloss_work_q_name(x)	\
-	((x)->devloss_work_q_name)
 #define fcoe_ctlr_devloss_work_q(x)	\
	((x)->devloss_work_q)
 #define fcoe_ctlr_mode(x)		\
@@ -797,18 +793,14 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
 
 	ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo;
 
-	snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name),
-		 "ctlr_wq_%d", ctlr->id);
-	ctlr->work_q = create_singlethread_workqueue(
-		ctlr->work_q_name);
+	ctlr->work_q = alloc_ordered_workqueue("ctlr_wq_%d", WQ_MEM_RECLAIM,
+					       ctlr->id);
 	if (!ctlr->work_q)
 		goto out_del;
 
-	snprintf(ctlr->devloss_work_q_name,
-		 sizeof(ctlr->devloss_work_q_name),
-		 "ctlr_dl_wq_%d", ctlr->id);
-	ctlr->devloss_work_q = create_singlethread_workqueue(
-		ctlr->devloss_work_q_name);
+	ctlr->devloss_work_q = alloc_ordered_workqueue("ctlr_dl_wq_%d",
+						       WQ_MEM_RECLAIM,
+						       ctlr->id);
 	if (!ctlr->devloss_work_q)
 		goto out_del_q;


@@ -1161,14 +1161,16 @@ static int __init fnic_init_module(void)
 		goto err_create_fnic_ioreq_slab;
 	}
 
-	fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
+	fnic_event_queue =
+		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_event_wq");
 	if (!fnic_event_queue) {
 		printk(KERN_ERR PFX "fnic work queue create failed\n");
 		err = -ENOMEM;
 		goto err_create_fnic_workq;
 	}
 
-	fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
+	fnic_fip_queue =
+		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_fip_q");
 	if (!fnic_fip_queue) {
 		printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
 		err = -ENOMEM;


@@ -2302,7 +2302,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
 
 	hisi_hba->last_slot_index = 0;
 
-	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
+	hisi_hba->wq =
+		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, dev_name(dev));
 	if (!hisi_hba->wq) {
 		dev_err(dev, "sas_alloc: failed to create workqueue\n");
 		goto err_out;


@@ -292,11 +292,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 	}
 
 	if (shost->transportt->create_work_queue) {
-		snprintf(shost->work_q_name, sizeof(shost->work_q_name),
-			 "scsi_wq_%d", shost->host_no);
-		shost->work_q = alloc_workqueue("%s",
-			WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
-			1, shost->work_q_name);
+		shost->work_q = alloc_workqueue(
+			"scsi_wq_%d",
+			WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
+			shost->host_no);
 
 		if (!shost->work_q) {
 			error = -EINVAL;


@@ -3425,7 +3425,6 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
 	struct scsi_info *vscsi;
 	int rc = 0;
 	long hrc = 0;
-	char wq_name[24];
 
 	vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
 	if (!vscsi) {
@@ -3536,8 +3535,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
 	init_completion(&vscsi->wait_idle);
 	init_completion(&vscsi->unconfig);
 
-	snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
-	vscsi->work_q = create_workqueue(wq_name);
+	vscsi->work_q = alloc_workqueue("ibmvscsis%s", WQ_MEM_RECLAIM, 1,
+					dev_name(&vdev->dev));
 	if (!vscsi->work_q) {
 		rc = -ENOMEM;
 		dev_err(&vscsi->dev, "create_workqueue failed\n");
dev_err(&vscsi->dev, "create_workqueue failed\n"); dev_err(&vscsi->dev, "create_workqueue failed\n");

View File

@@ -1030,7 +1030,7 @@ struct ipr_hostrcb_fabric_desc {
 #define IPR_PATH_FAILED			0x03
 
 	__be16 num_entries;
-	struct ipr_hostrcb_config_element elem[1];
+	struct ipr_hostrcb_config_element elem[];
 }__attribute__((packed, aligned (4)));
 
 struct ipr_hostrcb64_fabric_desc {
@@ -1044,7 +1044,7 @@ struct ipr_hostrcb64_fabric_desc {
 	u8 res_path[8];
 	u8 reserved3[6];
 	__be16 num_entries;
-	struct ipr_hostrcb64_config_element elem[1];
+	struct ipr_hostrcb64_config_element elem[];
 }__attribute__((packed, aligned (8)));
 
 #define for_each_hrrq(hrrq, ioa_cfg) \


@@ -2693,7 +2693,8 @@ int fc_setup_exch_mgr(void)
 	fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
 	fc_cpu_mask = (1 << fc_cpu_order) - 1;
 
-	fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
+	fc_exch_workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+						    "fc_exch_workqueue");
 	if (!fc_exch_workqueue)
 		goto err;
 	return 0;


@@ -2263,7 +2263,8 @@ struct fc4_prov fc_rport_t0_prov = {
 */
 int fc_setup_rport(void)
 {
-	rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
+	rport_event_queue =
+		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fc_rport_eq");
 	if (!rport_event_queue)
 		return -ENOMEM;
 	return 0;


@@ -122,12 +122,12 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
 
 	error = -ENOMEM;
 	snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
-	sas_ha->event_q = create_singlethread_workqueue(name);
+	sas_ha->event_q = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
 	if (!sas_ha->event_q)
 		goto Undo_ports;
 
 	snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev));
-	sas_ha->disco_q = create_singlethread_workqueue(name);
+	sas_ha->disco_q = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
 	if (!sas_ha->disco_q)
 		goto Undo_event_q;


@ -306,6 +306,14 @@ struct lpfc_stats {
 
 struct lpfc_hba;
 
+/* Data structure to keep withheld FLOGI_ACC information */
+struct lpfc_defer_flogi_acc {
+    bool flag;
+    u16 rx_id;
+    u16 ox_id;
+    struct lpfc_nodelist *ndlp;
+};
+
 #define LPFC_VMID_TIMER   300    /* timer interval in seconds */
@ -1430,9 +1438,7 @@ struct lpfc_hba {
     uint16_t vlan_id;
     struct list_head fcf_conn_rec_list;
 
-    bool defer_flogi_acc_flag;
-    uint16_t defer_flogi_acc_rx_id;
-    uint16_t defer_flogi_acc_ox_id;
+    struct lpfc_defer_flogi_acc defer_flogi_acc;
 
     spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
     struct list_head ct_ev_waiters;


@ -1099,8 +1099,10 @@ stop_rr_fcf_flogi:
              sp->cmn.priority_tagging, kref_read(&ndlp->kref));
 
         /* reinitialize the VMID datastructure before returning */
-        if (lpfc_is_vmid_enabled(phba))
+        if (lpfc_is_vmid_enabled(phba)) {
             lpfc_reinit_vmid(vport);
+            vport->vmid_flag = 0;
+        }
         if (sp->cmn.priority_tagging)
             vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
                               LPFC_VMID_TYPE_PRIO);
@ -1390,7 +1392,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
     phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
 
     /* Check for a deferred FLOGI ACC condition */
-    if (phba->defer_flogi_acc_flag) {
+    if (phba->defer_flogi_acc.flag) {
         /* lookup ndlp for received FLOGI */
         ndlp = lpfc_findnode_did(vport, 0);
         if (!ndlp)
@ -1404,34 +1406,38 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
         if (phba->sli_rev == LPFC_SLI_REV4) {
             bf_set(wqe_ctxt_tag,
                    &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
-                   phba->defer_flogi_acc_rx_id);
+                   phba->defer_flogi_acc.rx_id);
             bf_set(wqe_rcvoxid,
                    &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
-                   phba->defer_flogi_acc_ox_id);
+                   phba->defer_flogi_acc.ox_id);
         } else {
             icmd = &defer_flogi_acc.iocb;
-            icmd->ulpContext = phba->defer_flogi_acc_rx_id;
+            icmd->ulpContext = phba->defer_flogi_acc.rx_id;
             icmd->unsli3.rcvsli3.ox_id =
-                phba->defer_flogi_acc_ox_id;
+                phba->defer_flogi_acc.ox_id;
         }
 
         lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                  "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
                  " ox_id: x%x, hba_flag x%lx\n",
-                 phba->defer_flogi_acc_rx_id,
-                 phba->defer_flogi_acc_ox_id, phba->hba_flag);
+                 phba->defer_flogi_acc.rx_id,
+                 phba->defer_flogi_acc.ox_id, phba->hba_flag);
 
         /* Send deferred FLOGI ACC */
         lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
                  ndlp, NULL);
 
-        phba->defer_flogi_acc_flag = false;
-        vport->fc_myDID = did;
+        phba->defer_flogi_acc.flag = false;
 
-        /* Decrement ndlp reference count to indicate the node can be
-         * released when other references are removed.
+        /* Decrement the held ndlp that was incremented when the
+         * deferred flogi acc flag was set.
          */
-        lpfc_nlp_put(ndlp);
+        if (phba->defer_flogi_acc.ndlp) {
+            lpfc_nlp_put(phba->defer_flogi_acc.ndlp);
+            phba->defer_flogi_acc.ndlp = NULL;
+        }
+
+        vport->fc_myDID = did;
     }
 
     return 0;
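The refcounting rule behind this hunk and the lpfc_els_rcv_flogi() change
below: the reference taken with lpfc_nlp_get() when the deferred ACC is
armed must be dropped exactly once on whichever path disarms it (ACC
transmitted, link down, or teardown). A generic sketch of that arm/disarm
pairing, with acc standing in for phba->defer_flogi_acc:

    /* Arm: remember the node and pin it with a reference. */
    acc->ndlp = lpfc_nlp_get(ndlp);     /* returns NULL if node is dying */
    acc->flag = true;

    /* Disarm (any one path): drop the pinned reference exactly once. */
    if (acc->ndlp) {
            lpfc_nlp_put(acc->ndlp);
            acc->ndlp = NULL;
    }
    acc->flag = false;

Clearing acc->ndlp under the same conditions that clear the flag is what
keeps a second disarm path from double-dropping the reference.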
@ -5240,9 +5246,10 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
     /* ACC to LOGO completes to NPort <nlp_DID> */
     lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
              "0109 ACC to LOGO completes to NPort x%x refcnt %d "
-             "Data: x%x x%x x%x\n",
-             ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
-             ndlp->nlp_state, ndlp->nlp_rpi);
+             "last els x%x Data: x%x x%x x%x\n",
+             ndlp->nlp_DID, kref_read(&ndlp->kref),
+             ndlp->nlp_last_elscmd, ndlp->nlp_flag, ndlp->nlp_state,
+             ndlp->nlp_rpi);
 
     /* This clause allows the LOGO ACC to complete and free resources
      * for the Fabric Domain Controller. It does deliberately skip
@ -5254,18 +5261,22 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
         goto out;
 
     if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
-        /* If PLOGI is being retried, PLOGI completion will cleanup the
-         * node. The NLP_NPR_2B_DISC flag needs to be retained to make
-         * progress on nodes discovered from last RSCN.
-         */
-        if ((ndlp->nlp_flag & NLP_DELAY_TMO) &&
-            (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
-            goto out;
-
         if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
             lpfc_unreg_rpi(vport, ndlp);
+
+        /* If came from PRLO, then PRLO_ACC is done.
+         * Start rediscovery now.
+         */
+        if (ndlp->nlp_last_elscmd == ELS_CMD_PRLO) {
+            spin_lock_irq(&ndlp->lock);
+            ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+            spin_unlock_irq(&ndlp->lock);
+            ndlp->nlp_prev_state = ndlp->nlp_state;
+            lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+            lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+        }
     }
 out:
     /*
      * The driver received a LOGO from the rport and has ACK'd it.
@ -8454,9 +8465,9 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 
     /* Defer ACC response until AFTER we issue a FLOGI */
     if (!test_bit(HBA_FLOGI_ISSUED, &phba->hba_flag)) {
-        phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag,
-                             &wqe->xmit_els_rsp.wqe_com);
-        phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid,
-                             &wqe->xmit_els_rsp.wqe_com);
+        phba->defer_flogi_acc.rx_id = bf_get(wqe_ctxt_tag,
+                             &wqe->xmit_els_rsp.wqe_com);
+        phba->defer_flogi_acc.ox_id = bf_get(wqe_rcvoxid,
+                             &wqe->xmit_els_rsp.wqe_com);
 
         vport->fc_myDID = did;
@ -8464,11 +8475,17 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
         lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                  "3344 Deferring FLOGI ACC: rx_id: x%x,"
                  " ox_id: x%x, hba_flag x%lx\n",
-                 phba->defer_flogi_acc_rx_id,
-                 phba->defer_flogi_acc_ox_id, phba->hba_flag);
+                 phba->defer_flogi_acc.rx_id,
+                 phba->defer_flogi_acc.ox_id, phba->hba_flag);
 
-        phba->defer_flogi_acc_flag = true;
+        phba->defer_flogi_acc.flag = true;
+
+        /* This nlp_get is paired with nlp_puts that reset the
+         * defer_flogi_acc.flag back to false. We need to retain
+         * a kref on the ndlp until the deferred FLOGI ACC is
+         * processed or cancelled.
+         */
+        phba->defer_flogi_acc.ndlp = lpfc_nlp_get(ndlp);
 
         return 0;
     }
@ -10504,7 +10521,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
         lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
 
         /* retain node if our response is deferred */
-        if (phba->defer_flogi_acc_flag)
+        if (phba->defer_flogi_acc.flag)
             break;
         if (newnode)
             lpfc_disc_state_machine(vport, ndlp, NULL,
@ -10742,7 +10759,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
         rjt_exp = LSEXP_NOTHING_MORE;
 
         /* Unknown ELS command <elsCmd> received from NPORT <did> */
-        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+        lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                  "0115 Unknown ELS command x%x "
                  "received from NPORT x%x\n", cmd, did);
         if (newnode)


@ -175,7 +175,8 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
              ndlp->nlp_state, ndlp->fc4_xpt_flags);
 
     /* Don't schedule a worker thread event if the vport is going down. */
-    if (test_bit(FC_UNLOADING, &vport->load_flag)) {
+    if (test_bit(FC_UNLOADING, &vport->load_flag) ||
+        !test_bit(HBA_SETUP, &phba->hba_flag)) {
         spin_lock_irqsave(&ndlp->lock, iflags);
         ndlp->rport = NULL;
@ -1254,7 +1255,14 @@ lpfc_linkdown(struct lpfc_hba *phba)
     lpfc_scsi_dev_block(phba);
     offline = pci_channel_offline(phba->pcidev);
 
-    phba->defer_flogi_acc_flag = false;
+    /* Decrement the held ndlp if there is a deferred flogi acc */
+    if (phba->defer_flogi_acc.flag) {
+        if (phba->defer_flogi_acc.ndlp) {
+            lpfc_nlp_put(phba->defer_flogi_acc.ndlp);
+            phba->defer_flogi_acc.ndlp = NULL;
+        }
+    }
+    phba->defer_flogi_acc.flag = false;
 
     /* Clear external loopback plug detected flag */
     phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
@ -1376,7 +1384,7 @@ lpfc_linkup_port(struct lpfc_vport *vport)
         (vport != phba->pport))
         return;
 
-    if (phba->defer_flogi_acc_flag) {
+    if (phba->defer_flogi_acc.flag) {
         clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
         clear_bit(FC_RSCN_MODE, &vport->fc_flag);
         clear_bit(FC_NLP_MORE, &vport->fc_flag);


@ -2652,8 +2652,26 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
     /* flush the target */
     lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
 
-    /* Treat like rcv logo */
-    lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
+    /* Send PRLO_ACC */
+    spin_lock_irq(&ndlp->lock);
+    ndlp->nlp_flag |= NLP_LOGO_ACC;
+    spin_unlock_irq(&ndlp->lock);
+    lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
+
+    /* Save ELS_CMD_PRLO as the last elscmd and then set to NPR.
+     * lpfc_cmpl_els_logo_acc is expected to restart discovery.
+     */
+    ndlp->nlp_last_elscmd = ELS_CMD_PRLO;
+    ndlp->nlp_prev_state = ndlp->nlp_state;
+
+    lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY,
+             "3422 DID x%06x nflag x%x lastels x%x ref cnt %u\n",
+             ndlp->nlp_DID, ndlp->nlp_flag,
+             ndlp->nlp_last_elscmd,
+             kref_read(&ndlp->kref));
+    lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
     return ndlp->nlp_state;
 }


@ -5555,11 +5555,20 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
     iocb = &lpfc_cmd->cur_iocbq;
     if (phba->sli_rev == LPFC_SLI_REV4) {
-        pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
-        if (!pring_s4) {
+        /* if the io_wq & pring are gone, the port was reset. */
+        if (!phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq ||
+            !phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring) {
+            lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+                     "2877 SCSI Layer I/O Abort Request "
+                     "IO CMPL Status x%x ID %d LUN %llu "
+                     "HBA_SETUP %d\n", FAILED,
+                     cmnd->device->id,
+                     (u64)cmnd->device->lun,
+                     test_bit(HBA_SETUP, &phba->hba_flag));
             ret = FAILED;
             goto out_unlock_hba;
         }
+        pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
         spin_lock(&pring_s4->ring_lock);
     }
     /* the command is in process of being cancelled */


@ -4687,6 +4687,17 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
     /* Look on all the FCP Rings for the iotag */
     if (phba->sli_rev >= LPFC_SLI_REV4) {
         for (i = 0; i < phba->cfg_hdw_queue; i++) {
+            if (!phba->sli4_hba.hdwq ||
+                !phba->sli4_hba.hdwq[i].io_wq) {
+                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                        "7777 hdwq's deleted %lx "
+                        "%lx %x %x\n",
+                        phba->pport->load_flag,
+                        phba->hba_flag,
+                        phba->link_state,
+                        phba->sli.sli_flag);
+                return;
+            }
             pring = phba->sli4_hba.hdwq[i].io_wq->pring;
 
             spin_lock_irq(&pring->ring_lock);
@ -12473,8 +12484,6 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                     cmdiocb->iocb.ulpClass,
                     LPFC_WQE_CQ_ID_DEFAULT, ia, false);
 
-    abtsiocbp->vport = vport;
-
     /* ABTS WQE must go to the same WQ as the WQE to be aborted */
     abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
     if (cmdiocb->cmd_flag & LPFC_IO_FCP)


@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "14.4.0.3"
+#define LPFC_DRIVER_VERSION "14.4.0.4"
 #define LPFC_DRIVER_NAME    "lpfc"
 
 /* Used for SLI 2/3 */


@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
  * Broadcom refers to Broadcom Inc. and/or its subsidiaries.       *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@ -321,6 +321,5 @@ lpfc_reinit_vmid(struct lpfc_vport *vport)
     if (!hash_empty(vport->hash_table))
         hash_for_each_safe(vport->hash_table, bucket, tmp, cur, hnode)
             hash_del(&cur->hnode);
-    vport->vmid_flag = 0;
     write_unlock(&vport->vmid_lock);
 }


@ -102,11 +102,15 @@ __setup("mac5380=", mac_scsi_setup);
  * Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets
  * so bus errors are unavoidable.
  *
- * If a MOVE.B instruction faults, we assume that zero bytes were transferred
- * and simply retry. That assumption probably depends on target behaviour but
- * seems to hold up okay. The NOP provides synchronization: without it the
- * fault can sometimes occur after the program counter has moved past the
- * offending instruction. Post-increment addressing can't be used.
+ * If a MOVE.B instruction faults during a receive operation, we assume the
+ * target sent nothing and try again. That assumption probably depends on
+ * target firmware but it seems to hold up okay. If a fault happens during a
+ * send operation, the target may or may not have seen /ACK and got the byte.
+ * It's uncertain so the whole SCSI command gets retried.
+ *
+ * The NOP is needed for synchronization because the fault address in the
+ * exception stack frame may or may not be the instruction that actually
+ * caused the bus error. Post-increment addressing can't be used.
  */
 
 #define MOVE_BYTE(operands) \
@ -208,8 +212,6 @@ __setup("mac5380=", mac_scsi_setup);
     ".previous                     \n" \
     : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
 
-#define MAC_PDMA_DELAY    32
-
 static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n)
 {
     unsigned char *addr = start;
@ -245,22 +247,21 @@ static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n)
     if (n >= 1) {
         MOVE_BYTE("%0@,%3@");
         if (result)
-            goto out;
+            return -1;
     }
     if (n >= 1 && ((unsigned long)addr & 1)) {
         MOVE_BYTE("%0@,%3@");
         if (result)
-            goto out;
+            return -2;
     }
     while (n >= 32)
         MOVE_16_WORDS("%0@+,%3@");
     while (n >= 2)
         MOVE_WORD("%0@+,%3@");
     if (result)
-        return start - addr; /* Negated to indicate uncertain length */
+        return start - addr - 1; /* Negated to indicate uncertain length */
     if (n == 1)
         MOVE_BYTE("%0@,%3@");
-out:
     return addr - start;
 }
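The "- 1" is the substance of this hunk: with the old "return start - addr;",
a bus error before any byte moved returned 0, which the caller could not
tell apart from "no progress, poll and retry". Offsetting by one keeps every
fault strictly negative. The convention, as a sketch rather than the full
routine:

    /* Transfer helpers return:
     *   > 0   bytes moved
     *  == 0   no progress; caller polls DRQ and retries
     *   < 0   bus error, encoded as -(bytes_moved + 1), so a fault
     *         with zero progress yields -1 rather than 0
     */
    if (result)                         /* fault flagged by MOVE_* */
            return start - addr - 1;    /* negated, uncertain length */
    return addr - start;                /* clean byte count */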
@ -274,25 +275,56 @@ static inline void write_ctrl_reg(struct NCR5380_hostdata *hostdata, u32 value)
     out_be32(hostdata->io + (CTRL_REG << 4), value);
 }
 
+static inline int macscsi_wait_for_drq(struct NCR5380_hostdata *hostdata)
+{
+    unsigned int n = 1; /* effectively multiplies NCR5380_REG_POLL_TIME */
+    unsigned char basr;
+
+again:
+    basr = NCR5380_read(BUS_AND_STATUS_REG);
+
+    if (!(basr & BASR_PHASE_MATCH))
+        return 1;
+
+    if (basr & BASR_IRQ)
+        return -1;
+
+    if (basr & BASR_DRQ)
+        return 0;
+
+    if (n-- == 0) {
+        NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
+        dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
+             "%s: DRQ timeout\n", __func__);
+        return -1;
+    }
+
+    NCR5380_poll_politely2(hostdata,
+                   BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
+                   BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0);
+    goto again;
+}
+
 static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
                                 unsigned char *dst, int len)
 {
     u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
     unsigned char *d = dst;
-    int result = 0;
 
     hostdata->pdma_residual = len;
 
-    while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
-                      BASR_DRQ | BASR_PHASE_MATCH,
-                      BASR_DRQ | BASR_PHASE_MATCH, 0)) {
-        int bytes;
+    while (macscsi_wait_for_drq(hostdata) == 0) {
+        int bytes, chunk_bytes;
 
         if (macintosh_config->ident == MAC_MODEL_IIFX)
             write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE |
                            CTRL_INTERRUPTS_ENABLE);
 
-        bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512));
+        chunk_bytes = min(hostdata->pdma_residual, 512);
+        bytes = mac_pdma_recv(s, d, chunk_bytes);
+
+        if (macintosh_config->ident == MAC_MODEL_IIFX)
+            write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
 
         if (bytes > 0) {
             d += bytes;
@ -300,37 +332,25 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
         }
 
         if (hostdata->pdma_residual == 0)
-            goto out;
+            break;
 
-        if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
-                       BUS_AND_STATUS_REG, BASR_ACK,
-                       BASR_ACK, 0) < 0)
-            scmd_printk(KERN_DEBUG, hostdata->connected,
-                    "%s: !REQ and !ACK\n", __func__);
-        if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
-            goto out;
-
-        if (bytes == 0)
-            udelay(MAC_PDMA_DELAY);
-
-        if (bytes >= 0)
+        if (bytes > 0)
             continue;
 
-        dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
-             "%s: bus error (%d/%d)\n", __func__, d - dst, len);
         NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
-        result = -1;
-        goto out;
+        dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
+             "%s: bus error [%d/%d] (%d/%d)\n",
+             __func__, d - dst, len, bytes, chunk_bytes);
+
+        if (bytes == 0)
+            continue;
+
+        if (macscsi_wait_for_drq(hostdata) <= 0)
+            set_host_byte(hostdata->connected, DID_ERROR);
+        break;
     }
 
-    scmd_printk(KERN_ERR, hostdata->connected,
-            "%s: phase mismatch or !DRQ\n", __func__);
-    NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
-    result = -1;
-out:
-    if (macintosh_config->ident == MAC_MODEL_IIFX)
-        write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
-    return result;
+    return 0;
 }
 
 static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
@ -338,67 +358,47 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
 {
     unsigned char *s = src;
     u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
-    int result = 0;
 
     hostdata->pdma_residual = len;
 
-    while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
-                      BASR_DRQ | BASR_PHASE_MATCH,
-                      BASR_DRQ | BASR_PHASE_MATCH, 0)) {
-        int bytes;
+    while (macscsi_wait_for_drq(hostdata) == 0) {
+        int bytes, chunk_bytes;
 
         if (macintosh_config->ident == MAC_MODEL_IIFX)
             write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE |
                            CTRL_INTERRUPTS_ENABLE);
 
-        bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512));
+        chunk_bytes = min(hostdata->pdma_residual, 512);
+        bytes = mac_pdma_send(s, d, chunk_bytes);
+
+        if (macintosh_config->ident == MAC_MODEL_IIFX)
+            write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
 
         if (bytes > 0) {
             s += bytes;
             hostdata->pdma_residual -= bytes;
         }
 
-        if (hostdata->pdma_residual == 0) {
-            if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
-                          TCR_LAST_BYTE_SENT,
-                          TCR_LAST_BYTE_SENT,
-                          0) < 0) {
-                scmd_printk(KERN_ERR, hostdata->connected,
-                        "%s: Last Byte Sent timeout\n", __func__);
-                result = -1;
-            }
-            goto out;
-        }
+        if (hostdata->pdma_residual == 0)
+            break;
 
-        if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
-                       BUS_AND_STATUS_REG, BASR_ACK,
-                       BASR_ACK, 0) < 0)
-            scmd_printk(KERN_DEBUG, hostdata->connected,
-                    "%s: !REQ and !ACK\n", __func__);
-        if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
-            goto out;
-
-        if (bytes == 0)
-            udelay(MAC_PDMA_DELAY);
-
-        if (bytes >= 0)
+        if (bytes > 0)
             continue;
 
-        dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
-             "%s: bus error (%d/%d)\n", __func__, s - src, len);
         NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
-        result = -1;
-        goto out;
+        dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
+             "%s: bus error [%d/%d] (%d/%d)\n",
+             __func__, s - src, len, bytes, chunk_bytes);
+
+        if (bytes == 0)
+            continue;
+
+        if (macscsi_wait_for_drq(hostdata) <= 0)
+            set_host_byte(hostdata->connected, DID_ERROR);
+        break;
     }
 
-    scmd_printk(KERN_ERR, hostdata->connected,
-            "%s: phase mismatch or !DRQ\n", __func__);
-    NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
-    result = -1;
-out:
-    if (macintosh_config->ident == MAC_MODEL_IIFX)
-        write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
-    return result;
+    return 0;
 }
 
 static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
@ -432,7 +432,7 @@ static struct scsi_host_template mac_scsi_template = {
     .eh_host_reset_handler = macscsi_host_reset,
     .can_queue             = 16,
     .this_id               = 7,
-    .sg_tablesize          = 1,
+    .sg_tablesize          = SG_ALL,
     .cmd_per_lun           = 2,
     .dma_boundary          = PAGE_SIZE - 1,
     .cmd_size              = sizeof(struct NCR5380_cmd),
@ -470,6 +470,9 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
     if (setup_hostid >= 0)
         mac_scsi_template.this_id = setup_hostid & 7;
 
+    if (macintosh_config->ident == MAC_MODEL_IIFX)
+        mac_scsi_template.sg_tablesize = 1;
+
     instance = scsi_host_alloc(&mac_scsi_template,
                    sizeof(struct NCR5380_hostdata));
     if (!instance)
@ -491,6 +494,9 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
     host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0;
 
+    if (instance->sg_tablesize > 1)
+        host_flags |= FLAG_DMA_FIXUP;
+
     error = NCR5380_init(instance, host_flags | FLAG_LATE_DMA_SETUP);
     if (error)
         goto fail_init;


@ -814,12 +814,12 @@ struct MR_HOST_DEVICE_LIST {
     __le32 size;
     __le32 count;
     __le32 reserved[2];
-    struct MR_HOST_DEVICE_LIST_ENTRY host_device_list[1];
+    struct MR_HOST_DEVICE_LIST_ENTRY host_device_list[] __counted_by_le(count);
 } __packed;
 
 #define HOST_DEVICE_LIST_SZ (sizeof(struct MR_HOST_DEVICE_LIST) + \
                   (sizeof(struct MR_HOST_DEVICE_LIST_ENTRY) * \
-                  (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT - 1)))
+                  (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT)))
 
 /*
@ -2473,7 +2473,7 @@ struct MR_LD_VF_MAP {
     union MR_LD_REF ref;
     u8 ldVfCount;
     u8 reserved[6];
-    u8 policy[1];
+    u8 policy[];
 };
 
 struct MR_LD_VF_AFFILIATION {
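__counted_by() and the little-endian variant __counted_by_le() used here
tell the compiler which field holds the flexible array's element count, so
FORTIFY_SOURCE and UBSAN can bounds-check accesses at run time. A hedged
illustration with demo types in place of the megaraid ones:

    struct demo_list {
            __le32 count;
            struct demo_entry entries[] __counted_by_le(count);
    };

    /* The count must be assigned before the array is indexed, so the
     * instrumentation always sees a valid bound. */
    list->count = cpu_to_le32(n);
    for (i = 0; i < n; i++)
            init_entry(&list->entries[i]);  /* hypothetical helper */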


@ -1988,8 +1988,8 @@ megasas_fusion_start_watchdog(struct megasas_instance *instance)
          sizeof(instance->fault_handler_work_q_name),
          "poll_megasas%d_status", instance->host->host_no);
 
-    instance->fw_fault_work_q =
-        create_singlethread_workqueue(instance->fault_handler_work_q_name);
+    instance->fw_fault_work_q = alloc_ordered_workqueue(
+        "%s", WQ_MEM_RECLAIM, instance->fault_handler_work_q_name);
     if (!instance->fw_fault_work_q) {
         dev_err(&instance->pdev->dev, "Failed from %s %d\n",
             __func__, __LINE__);


@ -1565,16 +1565,13 @@ struct mpi3_sas_io_unit0_phy_data {
     __le32             reserved10;
 };
 
-#ifndef MPI3_SAS_IO_UNIT0_PHY_MAX
-#define MPI3_SAS_IO_UNIT0_PHY_MAX           (1)
-#endif
-
 struct mpi3_sas_io_unit_page0 {
     struct mpi3_config_page_header         header;
     __le32                                 reserved08;
     u8                                     num_phys;
     u8                                     init_status;
     __le16                                 reserved0e;
-    struct mpi3_sas_io_unit0_phy_data      phy_data[MPI3_SAS_IO_UNIT0_PHY_MAX];
+    struct mpi3_sas_io_unit0_phy_data      phy_data[];
 };
 
 #define MPI3_SASIOUNIT0_PAGEVERSION         (0x00)
@ -1606,9 +1603,6 @@ struct mpi3_sas_io_unit1_phy_data {
     __le32             reserved08;
 };
 
-#ifndef MPI3_SAS_IO_UNIT1_PHY_MAX
-#define MPI3_SAS_IO_UNIT1_PHY_MAX           (1)
-#endif
-
 struct mpi3_sas_io_unit_page1 {
     struct mpi3_config_page_header         header;
     __le16                                 control_flags;
@ -1618,7 +1612,7 @@ struct mpi3_sas_io_unit_page1 {
     u8                                     num_phys;
     u8                                     sata_max_q_depth;
     __le16                                 reserved12;
-    struct mpi3_sas_io_unit1_phy_data      phy_data[MPI3_SAS_IO_UNIT1_PHY_MAX];
+    struct mpi3_sas_io_unit1_phy_data      phy_data[];
 };
 
 #define MPI3_SASIOUNIT1_PAGEVERSION           (0x00)


@ -453,9 +453,6 @@ struct mpi3_event_data_sas_notify_primitive {
 #define MPI3_EVENT_NOTIFY_PRIMITIVE_POWER_LOSS_EXPECTED    (0x02)
 #define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED1              (0x03)
 #define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED2              (0x04)
-#ifndef MPI3_EVENT_SAS_TOPO_PHY_COUNT
-#define MPI3_EVENT_SAS_TOPO_PHY_COUNT           (1)
-#endif
 struct mpi3_event_sas_topo_phy_entry {
     __le16             attached_dev_handle;
     u8                 link_rate;
@ -496,7 +493,7 @@ struct mpi3_event_data_sas_topology_change_list {
     u8                 start_phy_num;
     u8                 exp_status;
     u8                 io_unit_port;
-    struct mpi3_event_sas_topo_phy_entry   phy_entry[MPI3_EVENT_SAS_TOPO_PHY_COUNT];
+    struct mpi3_event_sas_topo_phy_entry   phy_entry[] __counted_by(num_entries);
 };
 
 #define MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER              (0x00)
@ -545,9 +542,6 @@ struct mpi3_event_data_pcie_enumeration {
 #define MPI3_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED     (0x40000000)
 #define MPI3_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED      (0x20000000)
 #define MPI3_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED     (0x10000000)
-#ifndef MPI3_EVENT_PCIE_TOPO_PORT_COUNT
-#define MPI3_EVENT_PCIE_TOPO_PORT_COUNT         (1)
-#endif
 struct mpi3_event_pcie_topo_port_entry {
     __le16             attached_dev_handle;
     u8                 port_status;
@ -588,7 +582,7 @@ struct mpi3_event_data_pcie_topology_change_list {
     u8                 switch_status;
     u8                 io_unit_port;
     __le32             reserved0c;
-    struct mpi3_event_pcie_topo_port_entry  port_entry[MPI3_EVENT_PCIE_TOPO_PORT_COUNT];
+    struct mpi3_event_pcie_topo_port_entry  port_entry[] __counted_by(num_entries);
 };
 
 #define MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH          (0x00)


@ -57,8 +57,8 @@ extern struct list_head mrioc_list;
 extern int prot_mask;
 extern atomic64_t event_counter;
 
-#define MPI3MR_DRIVER_VERSION    "8.9.1.0.51"
-#define MPI3MR_DRIVER_RELDATE    "29-May-2024"
+#define MPI3MR_DRIVER_VERSION    "8.10.0.5.50"
+#define MPI3MR_DRIVER_RELDATE    "08-Aug-2024"
 
 #define MPI3MR_DRIVER_NAME       "mpi3mr"
 #define MPI3MR_DRIVER_LICENSE    "GPL"
@ -213,6 +213,7 @@ extern atomic64_t event_counter;
 #define MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_INDEX    0
 #define MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_DATA     1
 
+#define MPI3MR_THRESHOLD_REPLY_COUNT    100
 
 /* SGE Flag definition */
 #define MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST \
@ -1059,7 +1060,6 @@ struct scmd_priv {
  * @sbq_lock: Sense buffer queue lock
  * @sbq_host_index: Sense buffer queuehost index
  * @event_masks: Event mask bitmap
- * @fwevt_worker_name: Firmware event worker thread name
  * @fwevt_worker_thread: Firmware event worker thread
  * @fwevt_lock: Firmware event lock
 * @fwevt_list: Firmware event list
@ -1240,7 +1240,6 @@ struct mpi3mr_ioc {
     u32 sbq_host_index;
     u32 event_masks[MPI3_EVENT_NOTIFY_EVENTMASK_WORDS];
 
-    char fwevt_worker_name[MPI3MR_NAME_LENGTH];
     struct workqueue_struct *fwevt_worker_thread;
     spinlock_t fwevt_lock;
     struct list_head fwevt_list;


@ -345,6 +345,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
 {
     u16 reply_desc_type, host_tag = 0;
     u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
+    u16 masked_ioc_status = MPI3_IOCSTATUS_SUCCESS;
     u32 ioc_loginfo = 0, sense_count = 0;
     struct mpi3_status_reply_descriptor *status_desc;
     struct mpi3_address_reply_descriptor *addr_desc;
@ -366,8 +367,8 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
         if (ioc_status &
             MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
             ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
-        ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
-        mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
+        masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
+        mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
         break;
     case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
         addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
@ -380,7 +381,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
         if (ioc_status &
             MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
             ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
-        ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+        masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
         if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
             scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
             sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
@ -393,7 +394,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
                     sshdr.asc, sshdr.ascq);
             }
         }
-        mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
+        mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
         break;
     case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
         success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
@ -408,7 +409,10 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
     if (cmdptr->state & MPI3MR_CMD_PENDING) {
         cmdptr->state |= MPI3MR_CMD_COMPLETE;
         cmdptr->ioc_loginfo = ioc_loginfo;
-        cmdptr->ioc_status = ioc_status;
+        if (host_tag == MPI3MR_HOSTTAG_BSG_CMDS)
+            cmdptr->ioc_status = ioc_status;
+        else
+            cmdptr->ioc_status = masked_ioc_status;
         cmdptr->state &= ~MPI3MR_CMD_PENDING;
         if (def_reply) {
             cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
@ -439,6 +443,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
     u32 admin_reply_ci = mrioc->admin_reply_ci;
     u32 num_admin_replies = 0;
     u64 reply_dma = 0;
+    u16 threshold_comps = 0;
     struct mpi3_default_reply_descriptor *reply_desc;
 
     if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
@ -462,6 +467,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
         if (reply_dma)
             mpi3mr_repost_reply_buf(mrioc, reply_dma);
         num_admin_replies++;
+        threshold_comps++;
         if (++admin_reply_ci == mrioc->num_admin_replies) {
             admin_reply_ci = 0;
             exp_phase ^= 1;
@ -472,6 +478,11 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
         if ((le16_to_cpu(reply_desc->reply_flags) &
             MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
             break;
+        if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
+            writel(admin_reply_ci,
+                &mrioc->sysif_regs->admin_reply_queue_ci);
+            threshold_comps = 0;
+        }
     } while (1);
 
     writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
@ -525,7 +536,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
     u32 num_op_reply = 0;
     u64 reply_dma = 0;
     struct mpi3_default_reply_descriptor *reply_desc;
-    u16 req_q_idx = 0, reply_qidx;
+    u16 req_q_idx = 0, reply_qidx, threshold_comps = 0;
 
     reply_qidx = op_reply_q->qid - 1;
@ -556,6 +567,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
         if (reply_dma)
             mpi3mr_repost_reply_buf(mrioc, reply_dma);
         num_op_reply++;
+        threshold_comps++;
 
         if (++reply_ci == op_reply_q->num_replies) {
             reply_ci = 0;
@ -577,13 +589,19 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
             break;
         }
 #endif
+        if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
+            writel(reply_ci,
+                &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
+            atomic_sub(threshold_comps, &op_reply_q->pend_ios);
+            threshold_comps = 0;
+        }
     } while (1);
 
     writel(reply_ci,
         &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
     op_reply_q->ci = reply_ci;
     op_reply_q->ephase = exp_phase;
-
+    atomic_sub(threshold_comps, &op_reply_q->pend_ios);
     atomic_dec(&op_reply_q->in_use);
 
     return num_op_reply;
 }
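The MPI3MR_THRESHOLD_REPLY_COUNT logic above bounds how stale the consumer
index can get during a long completion burst: every 100 replies the driver
publishes the CI register mid-loop instead of waiting for loop exit, so the
controller can reclaim reply descriptors sooner. The general shape of the
pattern, as a sketch with placeholder names (ci_reg, process_one_reply):

    u32 batched = 0;

    do {
            process_one_reply();               /* advances reply_ci */
            if (++batched == THRESHOLD) {
                    writel(reply_ci, ci_reg);  /* free slots early */
                    batched = 0;
            }
    } while (more_replies_pending());

    writel(reply_ci, ci_reg);                  /* cover the remainder */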
@ -2742,8 +2760,8 @@ void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
     snprintf(mrioc->watchdog_work_q_name,
         sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
         mrioc->id);
-    mrioc->watchdog_work_q =
-        create_singlethread_workqueue(mrioc->watchdog_work_q_name);
+    mrioc->watchdog_work_q = alloc_ordered_workqueue(
+        "%s", WQ_MEM_RECLAIM, mrioc->watchdog_work_q_name);
     if (!mrioc->watchdog_work_q) {
         ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
         return;


@ -5317,10 +5317,8 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
     else
         scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
 
-    snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
-        "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
     mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
-        mrioc->fwevt_worker_name, 0);
+        "%s%d_fwevt_wrkr", 0, mrioc->driver_name, mrioc->id);
     if (!mrioc->fwevt_worker_thread) {
         ioc_err(mrioc, "failure at %s:%d/%s()!\n",
             __FILE__, __LINE__, __func__);


@ -846,8 +846,8 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
     snprintf(ioc->fault_reset_work_q_name,
         sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
         ioc->driver_name, ioc->id);
-    ioc->fault_reset_work_q =
-        create_singlethread_workqueue(ioc->fault_reset_work_q_name);
+    ioc->fault_reset_work_q = alloc_ordered_workqueue(
+        "%s", WQ_MEM_RECLAIM, ioc->fault_reset_work_q_name);
     if (!ioc->fault_reset_work_q) {
         ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
         return;


@ -1162,8 +1162,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
 * @fault_reset_work_q_name: fw fault work queue
 * @fault_reset_work_q: ""
 * @fault_reset_work: ""
-* @firmware_event_name: fw event work queue
-* @firmware_event_thread: ""
+* @firmware_event_thread: fw event work queue
 * @fw_event_lock:
 * @fw_event_list: list of fw events
 * @current_evet: current processing firmware event
@ -1351,7 +1350,6 @@ struct MPT3SAS_ADAPTER {
     struct delayed_work fault_reset_work;
 
     /* fw event handler */
-    char firmware_event_name[20];
     struct workqueue_struct *firmware_event_thread;
     spinlock_t fw_event_lock;
     struct list_head fw_event_list;


@ -12301,10 +12301,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
 
     /* event thread */
-    snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
-        "fw_event_%s%d", ioc->driver_name, ioc->id);
     ioc->firmware_event_thread = alloc_ordered_workqueue(
-        ioc->firmware_event_name, 0);
+        "fw_event_%s%d", 0, ioc->driver_name, ioc->id);
     if (!ioc->firmware_event_thread) {
         ioc_err(ioc, "failure at %s:%d/%s()!\n",
             __FILE__, __LINE__, __func__);


@ -112,9 +112,8 @@ static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
         return false;
     }
 
-    snprintf(cb->work_q_name, sizeof(cb->work_q_name),
-         "myrb_wq_%d", cb->host->host_no);
-    cb->work_q = create_singlethread_workqueue(cb->work_q_name);
+    cb->work_q = alloc_ordered_workqueue("myrb_wq_%d", WQ_MEM_RECLAIM,
+                         cb->host->host_no);
     if (!cb->work_q) {
         dma_pool_destroy(cb->dcdb_pool);
         cb->dcdb_pool = NULL;


@ -712,7 +712,6 @@ struct myrb_hba {
     struct Scsi_Host *host;
 
     struct workqueue_struct *work_q;
-    char work_q_name[20];
     struct delayed_work monitor_work;
     unsigned long primary_monitor_time;
     unsigned long secondary_monitor_time;


@ -2206,9 +2206,8 @@ static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
         return false;
     }
 
-    snprintf(cs->work_q_name, sizeof(cs->work_q_name),
-         "myrs_wq_%d", shost->host_no);
-    cs->work_q = create_singlethread_workqueue(cs->work_q_name);
+    cs->work_q = alloc_ordered_workqueue("myrs_wq_%d", WQ_MEM_RECLAIM,
+                         shost->host_no);
     if (!cs->work_q) {
         dma_pool_destroy(cs->dcdb_pool);
         cs->dcdb_pool = NULL;


@ -904,7 +904,6 @@ struct myrs_hba {
     bool disable_enc_msg;
 
     struct workqueue_struct *work_q;
-    char work_q_name[20];
     struct delayed_work monitor_work;
     unsigned long primary_monitor_time;
     unsigned long secondary_monitor_time;


@ -3372,9 +3372,8 @@ retry_probe:
     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
         qedf->io_mempool);
 
-    sprintf(host_buf, "qedf_%u_link",
-        qedf->lport->host->host_no);
-    qedf->link_update_wq = create_workqueue(host_buf);
+    qedf->link_update_wq = alloc_workqueue("qedf_%u_link", WQ_MEM_RECLAIM,
+                           1, qedf->lport->host->host_no);
     INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
     INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
     INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
@ -3584,9 +3583,8 @@ retry_probe:
     ether_addr_copy(params.ll2_mac_address, qedf->mac);
 
     /* Start LL2 processing thread */
-    snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no);
-    qedf->ll2_recv_wq =
-        create_workqueue(host_buf);
+    qedf->ll2_recv_wq = alloc_workqueue("qedf_%d_ll2", WQ_MEM_RECLAIM, 1,
+                        host->host_no);
     if (!qedf->ll2_recv_wq) {
         QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n");
         rc = -ENOMEM;
@ -3627,9 +3625,8 @@ retry_probe:
         }
     }
 
-    sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no);
-    qedf->timer_work_queue =
-        create_workqueue(host_buf);
+    qedf->timer_work_queue = alloc_workqueue("qedf_%u_timer",
+                         WQ_MEM_RECLAIM, 1, qedf->lport->host->host_no);
     if (!qedf->timer_work_queue) {
         QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
               "workqueue.\n");
@ -3641,7 +3638,8 @@ retry_probe:
     if (mode != QEDF_MODE_RECOVERY) {
         sprintf(host_buf, "qedf_%u_dpc",
             qedf->lport->host->host_no);
-        qedf->dpc_wq = create_workqueue(host_buf);
+        qedf->dpc_wq =
+            alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, host_buf);
     }
     INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
@ -4182,7 +4180,7 @@ static int __init qedf_init(void)
         goto err3;
     }
 
-    qedf_io_wq = create_workqueue("qedf_io_wq");
+    qedf_io_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, "qedf_io_wq");
     if (!qedf_io_wq) {
         QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
         goto err4;


@ -2767,7 +2767,8 @@ retry_probe:
         }
 
         sprintf(host_buf, "host_%d", qedi->shost->host_no);
-        qedi->tmf_thread = create_singlethread_workqueue(host_buf);
+        qedi->tmf_thread =
+            alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, host_buf);
         if (!qedi->tmf_thread) {
             QEDI_ERR(&qedi->dbg_ctx,
                  "Unable to start tmf thread!\n");
@ -2775,8 +2776,9 @@ retry_probe:
             goto free_cid_que;
         }
 
-        sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no);
-        qedi->offload_thread = create_workqueue(host_buf);
+        qedi->offload_thread = alloc_workqueue("qedi_ofld%d",
+                               WQ_MEM_RECLAIM,
+                               1, qedi->shost->host_no);
         if (!qedi->offload_thread) {
             QEDI_ERR(&qedi->dbg_ctx,
                  "Unable to start offload thread!\n");


@ -2621,7 +2621,6 @@ typedef struct fc_port {
     struct kref sess_kref;
     struct qla_tgt *tgt;
     unsigned long expires;
-    struct list_head del_list_entry;
     struct work_struct free_work;
     struct work_struct reg_work;
     uint64_t jiffies_at_registration;


@ -3501,11 +3501,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
     if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
         sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
-        ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
+        ha->dpc_lp_wq =
+            alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, wq_name);
         INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);
 
         sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
-        ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
+        ha->dpc_hp_wq =
+            alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, wq_name);
         INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
         INIT_WORK(&ha->idc_state_handler,
             qla83xx_idc_state_handler_work);


@ -8806,7 +8806,7 @@ skip_retry_init:
     DEBUG2(printk("scsi: %s: Starting kernel thread for "
               "qla4xxx_dpc\n", __func__));
     sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
-    ha->dpc_thread = create_singlethread_workqueue(buf);
+    ha->dpc_thread = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, buf);
     if (!ha->dpc_thread) {
         ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
         ret = -ENODEV;


@ -441,18 +441,13 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
     fc_host->next_vport_number = 0;
     fc_host->npiv_vports_inuse = 0;
 
-    snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name),
-         "fc_wq_%d", shost->host_no);
-    fc_host->work_q = alloc_workqueue("%s", 0, 0, fc_host->work_q_name);
+    fc_host->work_q = alloc_workqueue("fc_wq_%d", 0, 0, shost->host_no);
     if (!fc_host->work_q)
         return -ENOMEM;
 
     fc_host->dev_loss_tmo = fc_dev_loss_tmo;
-    snprintf(fc_host->devloss_work_q_name,
-         sizeof(fc_host->devloss_work_q_name),
-         "fc_dl_%d", shost->host_no);
-    fc_host->devloss_work_q = alloc_workqueue("%s", 0, 0,
-                    fc_host->devloss_work_q_name);
+    fc_host->devloss_work_q = alloc_workqueue("fc_dl_%d", 0, 0,
+                          shost->host_no);
     if (!fc_host->devloss_work_q) {
         destroy_workqueue(fc_host->work_q);
         fc_host->work_q = NULL;


@ -1382,7 +1382,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
     if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
         ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
                      protect | fua, dld);
-    } else if (rq->cmd_flags & REQ_ATOMIC && write) {
+    } else if (rq->cmd_flags & REQ_ATOMIC) {
         ret = sd_setup_atomic_cmnd(cmd, lba, nr_blocks,
                 sdkp->use_atomic_write_boundary,
                 protect | fua);


@ -505,7 +505,7 @@ struct pqi_vendor_general_request {
             __le64 buffer_address;
             __le32 buffer_length;
             u8     reserved[40];
-        } ofa_memory_allocation;
+        } host_memory_allocation;
     } data;
 };
@ -517,21 +517,30 @@ struct pqi_vendor_general_response {
     u8 reserved[2];
 };
 
 #define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE      0
-#define PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE       1
+#define PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE        1
+#define PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE   2
 
 #define PQI_OFA_VERSION             1
 #define PQI_OFA_SIGNATURE           "OFA_QRM"
-#define PQI_OFA_MAX_SG_DESCRIPTORS  64
+#define PQI_CTRL_LOG_VERSION        1
+#define PQI_CTRL_LOG_SIGNATURE      "FW_DATA"
+#define PQI_HOST_MAX_SG_DESCRIPTORS 64
 
-struct pqi_ofa_memory {
-    __le64 signature;    /* "OFA_QRM" */
+struct pqi_host_memory {
+    __le64 signature;    /* "OFA_QRM", "FW_DATA", etc. */
     __le16 version;      /* version of this struct (1 = 1st version) */
     u8     reserved[62];
     __le32 bytes_allocated;    /* total allocated memory in bytes */
     __le16 num_memory_descriptors;
     u8     reserved1[2];
-    struct pqi_sg_descriptor sg_descriptor[PQI_OFA_MAX_SG_DESCRIPTORS];
+    struct pqi_sg_descriptor sg_descriptor[PQI_HOST_MAX_SG_DESCRIPTORS];
+};
+
+struct pqi_host_memory_descriptor {
+    struct pqi_host_memory *host_memory;
+    dma_addr_t host_memory_dma_handle;
+    void **host_chunk_virt_address;
 };
 
 struct pqi_aio_error_info {
@ -867,7 +876,8 @@ struct pqi_config_table_firmware_features {
 #define PQI_FIRMWARE_FEATURE_FW_TRIAGE                  17
 #define PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5    18
 #define PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT   21
-#define PQI_FIRMWARE_FEATURE_MAXIMUM                    21
+#define PQI_FIRMWARE_FEATURE_CTRL_LOGGING               22
+#define PQI_FIRMWARE_FEATURE_MAXIMUM                    22
 
 struct pqi_config_table_debug {
     struct pqi_config_table_section_header header;
@ -1096,6 +1106,11 @@ struct pqi_tmf_work {
     u8 scsi_opcode;
 };
 
+struct pqi_raid_io_stats {
+    u64 raid_bypass_cnt;
+    u64 write_stream_cnt;
+};
+
 struct pqi_scsi_dev {
     int devtype;        /* as reported by INQUIRY command */
     u8  device_type;    /* as reported by */
@ -1158,7 +1173,7 @@ struct pqi_scsi_dev {
     struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
     atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
-    unsigned int raid_bypass_cnt;
+    struct pqi_raid_io_stats __percpu *raid_io_stats;
 
     struct pqi_tmf_work tmf_work[PQI_MAX_LUNS_PER_DEVICE];
 };
@ -1357,6 +1372,7 @@ struct pqi_ctrl_info {
     u8 firmware_triage_supported : 1;
     u8 rpl_extended_format_4_5_supported : 1;
     u8 multi_lun_device_supported : 1;
+    u8 ctrl_logging_supported : 1;
     u8 enable_r1_writes : 1;
     u8 enable_r5_writes : 1;
     u8 enable_r6_writes : 1;
@ -1398,13 +1414,12 @@ struct pqi_ctrl_info {
     wait_queue_head_t block_requests_wait;
 
     struct mutex ofa_mutex;
-    struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
-    dma_addr_t pqi_ofa_mem_dma_handle;
-    void **pqi_ofa_chunk_virt_addr;
     struct work_struct ofa_memory_alloc_work;
     struct work_struct ofa_quiesce_work;
     u32 ofa_bytes_requested;
     u16 ofa_cancel_reason;
+    struct pqi_host_memory_descriptor ofa_memory;
+    struct pqi_host_memory_descriptor ctrl_log_memory;
     enum pqi_ctrl_removal_state ctrl_removal_state;
 };


@ -33,11 +33,11 @@
 #define BUILD_TIMESTAMP
 #endif
 
-#define DRIVER_VERSION     "2.1.26-030"
+#define DRIVER_VERSION     "2.1.30-031"
 #define DRIVER_MAJOR       2
 #define DRIVER_MINOR       1
-#define DRIVER_RELEASE     26
-#define DRIVER_REVISION    30
+#define DRIVER_RELEASE     30
+#define DRIVER_REVISION    31
 
 #define DRIVER_NAME        "Microchip SmartPQI Driver (v" \
                DRIVER_VERSION BUILD_TIMESTAMP ")"
@ -92,9 +92,9 @@ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info); static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info); static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs); static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info); static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info); static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info); static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs); struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info); static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
@ -1508,6 +1508,12 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
if (rc) if (rc)
goto error; goto error;
device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats);
if (!device->raid_io_stats) {
rc = -ENOMEM;
goto error;
}
device->raid_map = raid_map; device->raid_map = raid_map;
return 0; return 0;
@ -2099,6 +2105,10 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
/* To prevent this from being freed later. */ /* To prevent this from being freed later. */
new_device->raid_map = NULL; new_device->raid_map = NULL;
} }
if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) {
existing_device->raid_io_stats = new_device->raid_io_stats;
new_device->raid_io_stats = NULL;
}
existing_device->raid_bypass_configured = new_device->raid_bypass_configured; existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled; existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
} }
@ -2121,6 +2131,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
static inline void pqi_free_device(struct pqi_scsi_dev *device) static inline void pqi_free_device(struct pqi_scsi_dev *device)
{ {
if (device) { if (device) {
free_percpu(device->raid_io_stats);
kfree(device->raid_map); kfree(device->raid_map);
kfree(device); kfree(device);
} }
@ -2292,17 +2303,23 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
* queue depth, device size. * queue depth, device size.
*/ */
list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
/*
* Check for queue depth change.
*/
if (device->sdev && device->queue_depth != device->advertised_queue_depth) { if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
device->advertised_queue_depth = device->queue_depth; device->advertised_queue_depth = device->queue_depth;
scsi_change_queue_depth(device->sdev, device->advertised_queue_depth); scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); }
if (pqi_volume_rescan_needed(device)) { spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device->rescan = false; /*
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); * Check for changes in the device, such as size.
scsi_rescan_device(device->sdev); */
} else { if (pqi_volume_rescan_needed(device)) {
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); device->rescan = false;
} spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
scsi_rescan_device(device->sdev);
} else {
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
} }
} }
@ -2354,14 +2371,6 @@ static inline void pqi_mask_device(u8 *scsi3addr)
scsi3addr[3] |= 0xc0; scsi3addr[3] |= 0xc0;
} }
static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
{
if (pqi_is_logical_device(device))
return false;
return (device->path_map & (device->path_map - 1)) != 0;
}
static inline bool pqi_expose_device(struct pqi_scsi_dev *device) static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{ {
return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
@ -3244,6 +3253,20 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
sense_data_length); sense_data_length);
} }
if (pqi_cmd_priv(scmd)->this_residual &&
!pqi_is_logical_device(scmd->device->hostdata) &&
scsi_status == SAM_STAT_CHECK_CONDITION &&
host_byte == DID_OK &&
sense_data_length &&
scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) &&
sshdr.sense_key == ILLEGAL_REQUEST &&
sshdr.asc == 0x26 &&
sshdr.ascq == 0x0) {
host_byte = DID_NO_CONNECT;
pqi_take_device_offline(scmd->device, "AIO");
scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1);
}
scmd->result = scsi_status; scmd->result = scsi_status;
set_host_byte(scmd, host_byte); set_host_byte(scmd, host_byte);
} }
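A note on the hunk above: it keys off decoded sense data rather than raw bytes. scsi_normalize_sense() converts both fixed-format and descriptor-format sense buffers into a struct scsi_sense_hdr, after which key/ASC/ASCQ compare directly. A minimal sketch of that decode step (illustrative only; the helper name is hypothetical, not a driver symbol):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

/*
 * Return true if the sense data says ILLEGAL REQUEST with ASC 0x26
 * (invalid field), the condition the hunk above uses to retire the
 * AIO path for a device.
 */
static bool sense_is_invalid_field(const u8 *sense, int sense_len)
{
	struct scsi_sense_hdr sshdr;

	/* handles fixed (0x70/0x71) and descriptor (0x72/0x73) formats */
	if (!scsi_normalize_sense(sense, sense_len, &sshdr))
		return false;

	return sshdr.sense_key == ILLEGAL_REQUEST &&
	       sshdr.asc == 0x26 && sshdr.ascq == 0x0;
}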
@@ -3258,14 +3281,12 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
 	int residual_count;
 	int xfer_count;
 	bool device_offline;
-	struct pqi_scsi_dev *device;

 	scmd = io_request->scmd;
 	error_info = io_request->error_info;
 	host_byte = DID_OK;
 	sense_data_length = 0;
 	device_offline = false;
-	device = scmd->device->hostdata;

 	switch (error_info->service_response) {
 	case PQI_AIO_SERV_RESPONSE_COMPLETE:

@@ -3290,14 +3311,8 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
 			break;
 		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
 			pqi_aio_path_disabled(io_request);
-			if (pqi_is_multipath_device(device)) {
-				pqi_device_remove_start(device);
-				host_byte = DID_NO_CONNECT;
-				scsi_status = SAM_STAT_CHECK_CONDITION;
-			} else {
-				scsi_status = SAM_STAT_GOOD;
-				io_request->status = -EAGAIN;
-			}
+			scsi_status = SAM_STAT_GOOD;
+			io_request->status = -EAGAIN;
 			break;
 		case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
 		case PQI_AIO_STATUS_INVALID_DEVICE:

@@ -3625,7 +3640,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
 		ctrl_info->pqi_mode_enabled = false;
 		pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
 		rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
-		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
 		pqi_ctrl_ofa_done(ctrl_info);
 		dev_info(&ctrl_info->pci_dev->dev,
 				"Online Firmware Activation: %s\n",

@@ -3636,7 +3651,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
 				"Online Firmware Activation ABORTED\n");
 		if (ctrl_info->soft_reset_handshake_supported)
 			pqi_clear_soft_reset_status(ctrl_info);
-		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
 		pqi_ctrl_ofa_done(ctrl_info);
 		pqi_ofa_ctrl_unquiesce(ctrl_info);
 		break;

@@ -3646,7 +3661,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
 		dev_err(&ctrl_info->pci_dev->dev,
 			"unexpected Online Firmware Activation reset status: 0x%x\n",
 			reset_status);
-		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
 		pqi_ctrl_ofa_done(ctrl_info);
 		pqi_ofa_ctrl_unquiesce(ctrl_info);
 		pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);

@@ -3661,8 +3676,8 @@ static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
 	ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);

 	pqi_ctrl_ofa_start(ctrl_info);
-	pqi_ofa_setup_host_buffer(ctrl_info);
-	pqi_ofa_host_memory_update(ctrl_info);
+	pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested);
+	pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE);
 }

 static void pqi_ofa_quiesce_worker(struct work_struct *work)

@@ -3702,7 +3717,7 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
 		dev_info(&ctrl_info->pci_dev->dev,
 			"received Online Firmware Activation cancel request: reason: %u\n",
 			ctrl_info->ofa_cancel_reason);
-		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
 		pqi_ctrl_ofa_done(ctrl_info);
 		break;
 	default:

@@ -5933,7 +5948,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
 	int rc;
 	struct pqi_scsi_dev *device;
 	struct pqi_stream_data *pqi_stream_data;
-	struct pqi_scsi_dev_raid_map_data rmd;
+	struct pqi_scsi_dev_raid_map_data rmd = { 0 };

 	if (!ctrl_info->enable_stream_detection)
 		return false;

@@ -5975,6 +5990,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
 			pqi_stream_data->next_lba = rmd.first_block +
 				rmd.block_cnt;
 			pqi_stream_data->last_accessed = jiffies;
+			per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++;
 			return true;
 		}

@@ -6025,7 +6041,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 	ctrl_info = shost_to_hba(shost);

-	if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
+	if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) {
 		set_host_byte(scmd, DID_NO_CONNECT);
 		pqi_scsi_done(scmd);
 		return 0;

@@ -6053,7 +6069,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
 			if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
 				raid_bypassed = true;
-				device->raid_bypass_cnt++;
+				per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++;
 			}
 		}
 		if (!raid_bypassed)

@@ -6190,14 +6206,12 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
 				continue;

 			scsi_device = scmd->device->hostdata;
-			if (scsi_device != device)
-				continue;
-
-			if ((u8)scmd->device->lun != lun)
-				continue;

 			list_del(&io_request->request_list_entry);
-			set_host_byte(scmd, DID_RESET);
+			if (scsi_device == device && (u8)scmd->device->lun == lun)
+				set_host_byte(scmd, DID_RESET);
+			else
+				set_host_byte(scmd, DID_REQUEUE);
 			pqi_free_io_request(io_request);
 			scsi_dma_unmap(scmd);
 			pqi_scsi_done(scmd);

@@ -7350,7 +7364,8 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
 	struct scsi_device *sdev;
 	struct pqi_scsi_dev *device;
 	unsigned long flags;
-	unsigned int raid_bypass_cnt;
+	u64 raid_bypass_cnt;
+	int cpu;

 	sdev = to_scsi_device(dev);
 	ctrl_info = shost_to_hba(sdev->host);

@@ -7366,11 +7381,17 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
 		return -ENODEV;
 	}

-	raid_bypass_cnt = device->raid_bypass_cnt;
+	raid_bypass_cnt = 0;
+	if (device->raid_io_stats) {
+		for_each_online_cpu(cpu) {
+			raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt;
+		}
+	}
+
 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

-	return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
+	return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt);
 }

 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,

@@ -7452,6 +7473,43 @@ static ssize_t pqi_numa_node_show(struct device *dev,
 	return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
 }

+static ssize_t pqi_write_stream_cnt_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct scsi_device *sdev;
+	struct pqi_scsi_dev *device;
+	unsigned long flags;
+	u64 write_stream_cnt;
+	int cpu;
+
+	sdev = to_scsi_device(dev);
+	ctrl_info = shost_to_hba(sdev->host);
+
+	if (pqi_ctrl_offline(ctrl_info))
+		return -ENODEV;
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	device = sdev->hostdata;
+	if (!device) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+		return -ENODEV;
+	}
+
+	write_stream_cnt = 0;
+	if (device->raid_io_stats) {
+		for_each_online_cpu(cpu) {
+			write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt;
+		}
+	}
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt);
+}
+
 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);

@@ -7462,6 +7520,7 @@ static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
 static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
 	pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
+static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL);

 static struct attribute *pqi_sdev_attrs[] = {
 	&dev_attr_lunid.attr,

@@ -7473,6 +7532,7 @@ static struct attribute *pqi_sdev_attrs[] = {
 	&dev_attr_raid_bypass_cnt.attr,
 	&dev_attr_sas_ncq_prio_enable.attr,
 	&dev_attr_numa_node.attr,
+	&dev_attr_write_stream_cnt.attr,
 	NULL
 };
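The raid_io_stats hunks above all follow the standard per-CPU counter pattern: the submission path bumps its own CPU's slot with no lock or atomic, and the sysfs readers fold the slots into a total. A self-contained sketch of that pattern (names here are illustrative, not the driver's):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

struct io_counters {
	u64 bypass_cnt;
};

static struct io_counters __percpu *counters;

/* hot path: each CPU touches only its own slot, so no synchronization */
static void note_bypass(void)
{
	per_cpu_ptr(counters, smp_processor_id())->bypass_cnt++;
}

/* slow path (sysfs read): fold every online CPU's slot into one total */
static u64 total_bypasses(void)
{
	u64 total = 0;
	int cpu;

	for_each_online_cpu(cpu)
		total += per_cpu_ptr(counters, cpu)->bypass_cnt;
	return total;
}

alloc_percpu() hands back zeroed storage and free_percpu() releases it, mirroring pqi_get_raid_map() and pqi_free_device() above. One accepted trade-off: for_each_online_cpu() skips CPUs that have gone offline, so the sum can under-report after a hot-unplug, which is tolerable for a read-only statistic.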
@@ -7863,6 +7923,9 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
 	case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
 		ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
 		break;
+	case PQI_FIRMWARE_FEATURE_CTRL_LOGGING:
+		ctrl_info->ctrl_logging_supported = firmware_feature->enabled;
+		break;
 	}

 	pqi_firmware_feature_status(ctrl_info, firmware_feature);

@@ -7968,6 +8031,11 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
 		.feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
 		.feature_status = pqi_ctrl_update_feature_flags,
 	},
+	{
+		.feature_name = "Controller Data Logging",
+		.feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING,
+		.feature_status = pqi_ctrl_update_feature_flags,
+	},
 };

 static void pqi_process_firmware_features(

@@ -8070,6 +8138,7 @@ static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
 	ctrl_info->firmware_triage_supported = false;
 	ctrl_info->rpl_extended_format_4_5_supported = false;
 	ctrl_info->multi_lun_device_supported = false;
+	ctrl_info->ctrl_logging_supported = false;
 }

 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)

@@ -8210,6 +8279,9 @@ static void pqi_perform_lockup_action(void)
 	}
 }

+#define PQI_CTRL_LOG_TOTAL_SIZE	(4 * 1024 * 1024)
+#define PQI_CTRL_LOG_MIN_SIZE	(PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS)
+
 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 {
 	int rc;

@@ -8221,6 +8293,12 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 			if (rc)
 				return rc;
 		}
+		if (sis_is_ctrl_logging_supported(ctrl_info)) {
+			sis_notify_kdump(ctrl_info);
+			rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
+			if (rc)
+				return rc;
+		}
 		sis_soft_reset(ctrl_info);
 		ssleep(PQI_POST_RESET_DELAY_SECS);
 	} else {

@@ -8402,6 +8480,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 	if (rc)
 		return rc;

+	if (ctrl_info->ctrl_logging_supported && !reset_devices) {
+		pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
+		pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
+	}
+
 	rc = pqi_get_ctrl_product_details(ctrl_info);
 	if (rc) {
 		dev_err(&ctrl_info->pci_dev->dev,

@@ -8586,8 +8669,22 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
 		return rc;
 	}

-	if (pqi_ofa_in_progress(ctrl_info))
+	if (pqi_ofa_in_progress(ctrl_info)) {
 		pqi_ctrl_unblock_scan(ctrl_info);
+		if (ctrl_info->ctrl_logging_supported) {
+			if (!ctrl_info->ctrl_log_memory.host_memory)
+				pqi_host_setup_buffer(ctrl_info,
+					&ctrl_info->ctrl_log_memory,
+					PQI_CTRL_LOG_TOTAL_SIZE,
+					PQI_CTRL_LOG_MIN_SIZE);
+			pqi_host_memory_update(ctrl_info,
+				&ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
+		} else {
+			if (ctrl_info->ctrl_log_memory.host_memory)
+				pqi_host_free_buffer(ctrl_info,
+					&ctrl_info->ctrl_log_memory);
+		}
+	}

 	pqi_scan_scsi_devices(ctrl_info);

@@ -8777,6 +8874,7 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
 		pqi_fail_all_outstanding_requests(ctrl_info);
 		ctrl_info->pqi_mode_enabled = false;
 	}
+	pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory);
 	pqi_unregister_scsi(ctrl_info);
 	if (ctrl_info->pqi_mode_enabled)
 		pqi_revert_to_sis_mode(ctrl_info);

@@ -8802,170 +8900,6 @@ static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
 	pqi_ctrl_unblock_scan(ctrl_info);
 }
-static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
-{
-	int i;
-	u32 sg_count;
-	struct device *dev;
-	struct pqi_ofa_memory *ofap;
-	struct pqi_sg_descriptor *mem_descriptor;
-	dma_addr_t dma_handle;
-
-	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
-	sg_count = DIV_ROUND_UP(total_size, chunk_size);
-	if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
-		goto out;
-
-	ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
-	if (!ctrl_info->pqi_ofa_chunk_virt_addr)
-		goto out;
-
-	dev = &ctrl_info->pci_dev->dev;
-
-	for (i = 0; i < sg_count; i++) {
-		ctrl_info->pqi_ofa_chunk_virt_addr[i] =
-			dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
-		if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
-			goto out_free_chunks;
-		mem_descriptor = &ofap->sg_descriptor[i];
-		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
-		put_unaligned_le32(chunk_size, &mem_descriptor->length);
-	}
-
-	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
-	put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
-	put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
-
-	return 0;
-
-out_free_chunks:
-	while (--i >= 0) {
-		mem_descriptor = &ofap->sg_descriptor[i];
-		dma_free_coherent(dev, chunk_size,
-			ctrl_info->pqi_ofa_chunk_virt_addr[i],
-			get_unaligned_le64(&mem_descriptor->address));
-	}
-	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
-
-out:
-	return -ENOMEM;
-}
-
-static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
-{
-	u32 total_size;
-	u32 chunk_size;
-	u32 min_chunk_size;
-
-	if (ctrl_info->ofa_bytes_requested == 0)
-		return 0;
-
-	total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
-	min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
-	min_chunk_size = PAGE_ALIGN(min_chunk_size);
-
-	for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
-		if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
-			return 0;
-		chunk_size /= 2;
-		chunk_size = PAGE_ALIGN(chunk_size);
-	}
-
-	return -ENOMEM;
-}
-
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
-{
-	struct device *dev;
-	struct pqi_ofa_memory *ofap;
-
-	dev = &ctrl_info->pci_dev->dev;
-
-	ofap = dma_alloc_coherent(dev, sizeof(*ofap),
-		&ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
-	if (!ofap)
-		return;
-
-	ctrl_info->pqi_ofa_mem_virt_addr = ofap;
-
-	if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
-		dev_err(dev,
-			"failed to allocate host buffer for Online Firmware Activation\n");
-		dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
-		ctrl_info->pqi_ofa_mem_virt_addr = NULL;
-		return;
-	}
-
-	put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
-	memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
-}
-
-static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
-{
-	unsigned int i;
-	struct device *dev;
-	struct pqi_ofa_memory *ofap;
-	struct pqi_sg_descriptor *mem_descriptor;
-	unsigned int num_memory_descriptors;
-
-	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-	if (!ofap)
-		return;
-
-	dev = &ctrl_info->pci_dev->dev;
-
-	if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
-		goto out;
-
-	mem_descriptor = ofap->sg_descriptor;
-	num_memory_descriptors =
-		get_unaligned_le16(&ofap->num_memory_descriptors);
-
-	for (i = 0; i < num_memory_descriptors; i++) {
-		dma_free_coherent(dev,
-			get_unaligned_le32(&mem_descriptor[i].length),
-			ctrl_info->pqi_ofa_chunk_virt_addr[i],
-			get_unaligned_le64(&mem_descriptor[i].address));
-	}
-	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
-
-out:
-	dma_free_coherent(dev, sizeof(*ofap), ofap,
-		ctrl_info->pqi_ofa_mem_dma_handle);
-	ctrl_info->pqi_ofa_mem_virt_addr = NULL;
-}
-
-static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
-{
-	u32 buffer_length;
-	struct pqi_vendor_general_request request;
-	struct pqi_ofa_memory *ofap;
-
-	memset(&request, 0, sizeof(request));
-
-	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
-	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
-		&request.header.iu_length);
-	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
-		&request.function_code);
-
-	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
-	if (ofap) {
-		buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
-			get_unaligned_le16(&ofap->num_memory_descriptors) *
-			sizeof(struct pqi_sg_descriptor);
-
-		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
-			&request.data.ofa_memory_allocation.buffer_address);
-		put_unaligned_le32(buffer_length,
-			&request.data.ofa_memory_allocation.buffer_length);
-	}
-
-	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
-}
-
 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
 {
 	ssleep(delay_secs);

@@ -8973,6 +8907,180 @@ static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
 	return pqi_ctrl_init_resume(ctrl_info);
 }
+static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor,
+	u32 total_size, u32 chunk_size)
+{
+	int i;
+	u32 sg_count;
+	struct device *dev;
+	struct pqi_host_memory *host_memory;
+	struct pqi_sg_descriptor *mem_descriptor;
+	dma_addr_t dma_handle;
+
+	sg_count = DIV_ROUND_UP(total_size, chunk_size);
+	if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS)
+		goto out;
+
+	host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL);
+	if (!host_memory_descriptor->host_chunk_virt_address)
+		goto out;
+
+	dev = &ctrl_info->pci_dev->dev;
+	host_memory = host_memory_descriptor->host_memory;
+
+	for (i = 0; i < sg_count; i++) {
+		host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
+		if (!host_memory_descriptor->host_chunk_virt_address[i])
+			goto out_free_chunks;
+		mem_descriptor = &host_memory->sg_descriptor[i];
+		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
+		put_unaligned_le32(chunk_size, &mem_descriptor->length);
+	}
+
+	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
+	put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors);
+	put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated);
+
+	return 0;
+
+out_free_chunks:
+	while (--i >= 0) {
+		mem_descriptor = &host_memory->sg_descriptor[i];
+		dma_free_coherent(dev, chunk_size,
+			host_memory_descriptor->host_chunk_virt_address[i],
+			get_unaligned_le64(&mem_descriptor->address));
+	}
+	kfree(host_memory_descriptor->host_chunk_virt_address);
+
+out:
+	return -ENOMEM;
+}
+
+static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor,
+	u32 total_required_size, u32 min_required_size)
+{
+	u32 chunk_size;
+	u32 min_chunk_size;
+
+	if (total_required_size == 0 || min_required_size == 0)
+		return 0;
+
+	total_required_size = PAGE_ALIGN(total_required_size);
+	min_required_size = PAGE_ALIGN(min_required_size);
+	min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS);
+	min_chunk_size = PAGE_ALIGN(min_chunk_size);
+
+	while (total_required_size >= min_required_size) {
+		for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) {
+			if (pqi_host_alloc_mem(ctrl_info,
+				host_memory_descriptor, total_required_size,
+				chunk_size) == 0)
+				return 0;
+			chunk_size /= 2;
+			chunk_size = PAGE_ALIGN(chunk_size);
+		}
+		total_required_size /= 2;
+		total_required_size = PAGE_ALIGN(total_required_size);
+	}
+
+	return -ENOMEM;
+}
+
+static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor,
+	u32 total_size, u32 min_size)
+{
+	struct device *dev;
+	struct pqi_host_memory *host_memory;
+
+	dev = &ctrl_info->pci_dev->dev;
+
+	host_memory = dma_alloc_coherent(dev, sizeof(*host_memory),
+		&host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL);
+	if (!host_memory)
+		return;
+
+	host_memory_descriptor->host_memory = host_memory;
+
+	if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor,
+		total_size, min_size) < 0) {
+		dev_err(dev, "failed to allocate firmware usable host buffer\n");
+		dma_free_coherent(dev, sizeof(*host_memory), host_memory,
+			host_memory_descriptor->host_memory_dma_handle);
+		host_memory_descriptor->host_memory = NULL;
+		return;
+	}
+}
+
+static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor)
+{
+	unsigned int i;
+	struct device *dev;
+	struct pqi_host_memory *host_memory;
+	struct pqi_sg_descriptor *mem_descriptor;
+	unsigned int num_memory_descriptors;
+
+	host_memory = host_memory_descriptor->host_memory;
+	if (!host_memory)
+		return;
+
+	dev = &ctrl_info->pci_dev->dev;
+
+	if (get_unaligned_le32(&host_memory->bytes_allocated) == 0)
+		goto out;
+
+	mem_descriptor = host_memory->sg_descriptor;
+	num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors);
+
+	for (i = 0; i < num_memory_descriptors; i++) {
+		dma_free_coherent(dev,
+			get_unaligned_le32(&mem_descriptor[i].length),
+			host_memory_descriptor->host_chunk_virt_address[i],
+			get_unaligned_le64(&mem_descriptor[i].address));
+	}
+	kfree(host_memory_descriptor->host_chunk_virt_address);
+
+out:
+	dma_free_coherent(dev, sizeof(*host_memory), host_memory,
+		host_memory_descriptor->host_memory_dma_handle);
+	host_memory_descriptor->host_memory = NULL;
+}
+
+static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor,
+	u16 function_code)
+{
+	u32 buffer_length;
+	struct pqi_vendor_general_request request;
+	struct pqi_host_memory *host_memory;
+
+	memset(&request, 0, sizeof(request));
+
+	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
+	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
+	put_unaligned_le16(function_code, &request.function_code);
+
+	host_memory = host_memory_descriptor->host_memory;
+
+	if (host_memory) {
+		buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor);
+		put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address);
+		put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length);
+
+		if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) {
+			put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version);
+			memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature));
+		} else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) {
+			put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version);
+			memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature));
+		}
+	}
+
+	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
+}
+
 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
 	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
 	.status = SAM_STAT_CHECK_CONDITION,
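pqi_host_alloc_buffer() above implements a two-level back-off: it first tries to satisfy total_required_size with the largest possible chunk, halves the chunk size down to the per-SG-descriptor minimum, and only then halves the total itself until min_required_size can no longer be met. The same strategy reduced to a compact stand-alone sketch (try_alloc(), the 4 KiB constant, and the stall guards are stand-ins and additions of this sketch, not driver code; the driver in practice relies on a page-sized coherent allocation essentially always succeeding):

#include <stdbool.h>
#include <stdint.h>

#define ALIGN_4K(x)      (((x) + 4095u) & ~4095u)  /* stand-in for PAGE_ALIGN() */
#define MAX_DESCRIPTORS  16u                       /* stand-in for PQI_HOST_MAX_SG_DESCRIPTORS */

bool try_alloc(uint32_t total, uint32_t chunk);    /* stand-in for pqi_host_alloc_mem() */

/* Try the full size with big chunks first, then smaller chunks,
 * then a smaller total, until min_total can no longer be met. */
int alloc_with_backoff(uint32_t total, uint32_t min_total)
{
	uint32_t chunk, next, min_chunk;

	total = ALIGN_4K(total);
	min_total = ALIGN_4K(min_total);
	min_chunk = ALIGN_4K((total + MAX_DESCRIPTORS - 1) / MAX_DESCRIPTORS);

	while (total >= min_total) {
		for (chunk = total; chunk >= min_chunk; chunk = next) {
			if (try_alloc(total, chunk))
				return 0;
			next = ALIGN_4K(chunk / 2);
			if (next == chunk)	/* guard: rounding up can stall the halving */
				break;
		}
		next = ALIGN_4K(total / 2);
		if (next == total)		/* same guard for the outer loop */
			break;
		total = next;
	}
	return -1;	/* the driver returns -ENOMEM here */
}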
@@ -9444,6 +9552,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x152d, 0x8a37)
 	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0x0462)
+	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x193d, 0x1104)

@@ -9472,6 +9584,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x193d, 0x110b)
 	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0x1110)
+	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x193d, 0x8460)

@@ -9480,6 +9596,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x193d, 0x8461)
 	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0x8462)
+	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x193d, 0xc460)

@@ -9588,6 +9708,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x1bd4, 0x0089)
 	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x00a1)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1f3a, 0x0104)
+	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x19e5, 0xd227)

@@ -10180,6 +10308,110 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x1137, 0x02fa)
 	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1137, 0x02fe)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1137, 0x02ff)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1137, 0x0300)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0045)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0046)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0047)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0048)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x004a)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x004b)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x004c)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x004f)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0051)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0052)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0053)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0054)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x006b)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x006c)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x006d)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x006f)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0070)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0071)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0072)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0086)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0087)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0088)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0089)
+	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x1e93, 0x1000)

@@ -10264,6 +10496,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x1f51, 0x1045)
 	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x00a3)
+	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_ANY_ID, PCI_ANY_ID)


@@ -29,6 +29,7 @@
 #define SIS_ENABLE_INTX				0x80
 #define SIS_SOFT_RESET				0x100
 #define SIS_CMD_READY				0x200
+#define SIS_NOTIFY_KDUMP			0x400
 #define SIS_TRIGGER_SHUTDOWN			0x800000
 #define SIS_PQI_RESET_QUIESCE			0x1000000

@@ -52,6 +53,8 @@
 #define SIS_BASE_STRUCT_ALIGNMENT		16

 #define SIS_CTRL_KERNEL_FW_TRIAGE		0x3
+#define SIS_CTRL_KERNEL_CTRL_LOGGING		0x4
+#define SIS_CTRL_KERNEL_CTRL_LOGGING_STATUS	0x18
 #define SIS_CTRL_KERNEL_UP			0x80
 #define SIS_CTRL_KERNEL_PANIC			0x100
 #define SIS_CTRL_READY_TIMEOUT_SECS		180

@@ -65,6 +68,13 @@ enum sis_fw_triage_status {
 	FW_TRIAGE_COMPLETED
 };

+enum sis_ctrl_logging_status {
+	CTRL_LOGGING_NOT_STARTED = 0,
+	CTRL_LOGGING_STARTED,
+	CTRL_LOGGING_COND_INVALID,
+	CTRL_LOGGING_COMPLETED
+};
+
 #pragma pack(1)

 /* for use with SIS_CMD_INIT_BASE_STRUCT_ADDRESS command */

@@ -442,6 +452,21 @@ static inline enum sis_fw_triage_status
 		SIS_CTRL_KERNEL_FW_TRIAGE));
 }

+bool sis_is_ctrl_logging_supported(struct pqi_ctrl_info *ctrl_info)
+{
+	return readl(&ctrl_info->registers->sis_firmware_status) & SIS_CTRL_KERNEL_CTRL_LOGGING;
+}
+
+void sis_notify_kdump(struct pqi_ctrl_info *ctrl_info)
+{
+	sis_set_doorbell_bit(ctrl_info, SIS_NOTIFY_KDUMP);
+}
+
+static inline enum sis_ctrl_logging_status sis_read_ctrl_logging_status(struct pqi_ctrl_info *ctrl_info)
+{
+	return ((enum sis_ctrl_logging_status)((readl(&ctrl_info->registers->sis_firmware_status) & SIS_CTRL_KERNEL_CTRL_LOGGING_STATUS) >> 3));
+}
+
 void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
 {
 	writel(SIS_SOFT_RESET,

@@ -484,6 +509,41 @@ int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info)
 	return rc;
 }

+#define SIS_CTRL_LOGGING_STATUS_TIMEOUT_SECS		180
+#define SIS_CTRL_LOGGING_STATUS_POLL_INTERVAL_SECS	1
+
+int sis_wait_for_ctrl_logging_completion(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+	enum sis_ctrl_logging_status status;
+	unsigned long timeout;
+
+	timeout = (SIS_CTRL_LOGGING_STATUS_TIMEOUT_SECS * HZ) + jiffies;
+	while (1) {
+		status = sis_read_ctrl_logging_status(ctrl_info);
+		if (status == CTRL_LOGGING_COND_INVALID) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"controller data logging condition invalid\n");
+			rc = -EINVAL;
+			break;
+		} else if (status == CTRL_LOGGING_COMPLETED) {
+			rc = 0;
+			break;
+		}
+
+		if (time_after(jiffies, timeout)) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"timed out waiting for controller data logging status\n");
+			rc = -ETIMEDOUT;
+			break;
+		}
+
+		ssleep(SIS_CTRL_LOGGING_STATUS_POLL_INTERVAL_SECS);
+	}
+
+	return rc;
+}
+
 void sis_verify_structures(void)
 {
 	BUILD_BUG_ON(offsetof(struct sis_base_struct,
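sis_wait_for_ctrl_logging_completion() above is an instance of the usual jiffies-based poll loop: compute an absolute deadline once, re-read the status each second, and let time_after() do the wraparound-safe comparison. Reduced to its skeleton (check_done() is a hypothetical callback, not a driver symbol):

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int poll_until_done(bool (*check_done)(void), unsigned int timeout_secs)
{
	/* absolute deadline; time_after() copes with jiffies wraparound */
	unsigned long timeout = jiffies + timeout_secs * HZ;

	while (!check_done()) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		ssleep(1);	/* coarse 1 s poll, as in the helper above */
	}
	return 0;
}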


@@ -31,6 +31,9 @@ u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
 void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
 u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info);
 int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info);
+bool sis_is_ctrl_logging_supported(struct pqi_ctrl_info *ctrl_info);
+void sis_notify_kdump(struct pqi_ctrl_info *ctrl_info);
+int sis_wait_for_ctrl_logging_completion(struct pqi_ctrl_info *ctrl_info);

 extern unsigned int sis_ctrl_ready_timeout_secs;


@@ -300,9 +300,8 @@ snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev)
 	}

 	SNIC_BUG_ON(shost->work_q != NULL);
-	snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d",
-		 shost->host_no);
-	shost->work_q = create_singlethread_workqueue(shost->work_q_name);
+	shost->work_q = alloc_ordered_workqueue("scsi_wq_%d", WQ_MEM_RECLAIM,
+						shost->host_no);

 	if (!shost->work_q) {
 		SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n");

@@ -873,7 +872,7 @@ snic_global_data_init(void)
 	snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;

 	len = sizeof(struct snic_host_req);
-	cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
+	cachep = kmem_cache_create("snic_req_tm", len, SNIC_SG_DESC_ALIGN,
 				   SLAB_HWCACHE_ALIGN, NULL);
 	if (!cachep) {
 		SNIC_ERR("Failed to create snic tm req slab\n");

@@ -884,7 +883,8 @@ snic_global_data_init(void)
 	snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;

 	/* snic_event queue */
-	snic_glob->event_q = create_singlethread_workqueue("snic_event_wq");
+	snic_glob->event_q =
+		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "snic_event_wq");
 	if (!snic_glob->event_q) {
 		SNIC_ERR("snic event queue create failed\n");
 		ret = -ENOMEM;
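The snic hunks above are part of the series-wide workqueue cleanup: create_singlethread_workqueue() is a legacy wrapper, and alloc_ordered_workqueue() accepts a printf-style name, which removes the need for a separately snprintf'd name buffer. The shape of the conversion, as a sketch:

#include <linux/workqueue.h>

static struct workqueue_struct *make_host_wq(unsigned int host_no)
{
	/*
	 * Ordered queue: at most one work item executes at a time, which
	 * preserves the single-thread semantics callers relied on.
	 * WQ_MEM_RECLAIM attaches a rescuer thread so the queue keeps
	 * making progress under memory pressure; anything sitting in the
	 * I/O completion path wants this flag.
	 */
	return alloc_ordered_workqueue("scsi_wq_%u", WQ_MEM_RECLAIM, host_no);
}

The same pattern repeats below for stex, vmw_pvscsi, and the two UFS queues; only the name format and extra flags (WQ_HIGHPRI for clock gating) differ.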


@@ -334,7 +334,6 @@ struct st_hba {
 	struct st_ccb *wait_ccb;
 	__le32 *scratch;

-	char work_q_name[20];
 	struct workqueue_struct *work_q;
 	struct work_struct reset_work;
 	wait_queue_head_t reset_waitq;

@@ -1795,9 +1794,8 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	hba->pdev = pdev;
 	init_waitqueue_head(&hba->reset_waitq);

-	snprintf(hba->work_q_name, sizeof(hba->work_q_name),
-		 "stex_wq_%d", host->host_no);
-	hba->work_q = create_singlethread_workqueue(hba->work_q_name);
+	hba->work_q = alloc_ordered_workqueue("stex_wq_%d", WQ_MEM_RECLAIM,
+					      host->host_no);
 	if (!hba->work_q) {
 		printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n",
 			pci_name(pdev));


@@ -304,7 +304,7 @@ static int sun3scsi_dma_setup(struct NCR5380_hostdata *hostdata,
 	sun3_udc_write(UDC_INT_ENABLE, UDC_CSR);
 #endif

 	return count;
 }


@@ -1137,7 +1137,8 @@ static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
 	snprintf(name, sizeof(name),
 		 "vmw_pvscsi_wq_%u", adapter->host->host_no);

-	adapter->workqueue = create_singlethread_workqueue(name);
+	adapter->workqueue =
+		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
 	if (!adapter->workqueue) {
 		printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
 		return 0;


@@ -15,7 +15,6 @@ struct kref;
 struct sockaddr_storage;

 extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *);
-extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int);
 extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
 extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
 extern void iscsit_del_tiqn(struct iscsi_tiqn *);

@@ -35,7 +34,6 @@ extern void iscsit_set_unsolicited_dataout(struct iscsit_cmd *);
 extern int iscsit_logout_closesession(struct iscsit_cmd *, struct iscsit_conn *);
 extern int iscsit_logout_closeconnection(struct iscsit_cmd *, struct iscsit_conn *);
 extern int iscsit_logout_removeconnforrecovery(struct iscsit_cmd *, struct iscsit_conn *);
-extern int iscsit_send_async_msg(struct iscsit_conn *, u16, u8, u8);
 extern int iscsit_build_r2ts_for_cmd(struct iscsit_conn *, struct iscsit_cmd *, bool recovery);
 extern void iscsit_thread_get_cpumask(struct iscsit_conn *);
 extern int iscsi_target_tx_thread(void *);


@@ -24,6 +24,5 @@ extern int iscsit_start_kthreads(struct iscsit_conn *);
 extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsit_conn *, u8);
 extern void iscsi_target_login_sess_out(struct iscsit_conn *, bool, bool);
 extern int iscsi_target_login_thread(void *);
-extern void iscsi_handle_login_thread_timeout(struct timer_list *t);

 #endif   /*** ISCSI_TARGET_LOGIN_H ***/


@@ -15,8 +15,6 @@ extern int extract_param(const char *, const char *, unsigned int, char *,
 			 unsigned char *);
 extern int iscsi_target_check_login_request(struct iscsit_conn *,
 			struct iscsi_login *);
-extern int iscsi_target_get_initial_payload(struct iscsit_conn *,
-			struct iscsi_login *);
 extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsit_conn *,
 			struct iscsi_login *);
 extern int iscsi_target_start_negotiation(


@@ -24,12 +24,7 @@ extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
 			int);
 extern int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *);
 extern int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *, int);
-extern struct iscsi_node_acl *iscsit_tpg_add_initiator_node_acl(
-			struct iscsi_portal_group *, const char *, u32);
-extern void iscsit_tpg_del_initiator_node_acl(struct iscsi_portal_group *,
-			struct se_node_acl *);
 extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsit_session *);
-extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
 extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
 extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
 			struct sockaddr_storage *, struct iscsi_tpg_np *,


@@ -17,7 +17,6 @@ extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsit_cmd *, u32, u32);
 extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsit_cmd *);
 extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsit_cmd *);
 extern void iscsit_free_r2ts_from_list(struct iscsit_cmd *);
-extern struct iscsit_cmd *iscsit_alloc_cmd(struct iscsit_conn *, gfp_t);
 extern struct iscsit_cmd *iscsit_allocate_cmd(struct iscsit_conn *, int);
 extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsit_cmd *, u32);
 extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsit_cmd *);

@@ -34,7 +33,6 @@ extern void iscsit_add_cmd_to_immediate_queue(struct iscsit_cmd *, struct iscsit_conn *, u8);
 extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsit_conn *);
 extern int iscsit_add_cmd_to_response_queue(struct iscsit_cmd *, struct iscsit_conn *, u8);
 extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsit_conn *);
-extern void iscsit_remove_cmd_from_tx_queues(struct iscsit_cmd *, struct iscsit_conn *);
 extern bool iscsit_conn_all_queues_empty(struct iscsit_conn *);
 extern void iscsit_free_queue_reqs_for_conn(struct iscsit_conn *);
 extern void iscsit_release_cmd(struct iscsit_cmd *);

@@ -64,9 +62,6 @@ extern int iscsit_send_tx_data(struct iscsit_cmd *, struct iscsit_conn *, int);
 extern int iscsit_fe_sendpage_sg(struct iscsit_cmd *, struct iscsit_conn *);
 extern int iscsit_tx_login_rsp(struct iscsit_conn *, u8, u8);
 extern void iscsit_print_session_params(struct iscsit_session *);
-extern int iscsit_print_dev_to_proc(char *, char **, off_t, int);
-extern int iscsit_print_sessions_to_proc(char *, char **, off_t, int);
-extern int iscsit_print_tpg_to_proc(char *, char **, off_t, int);
 extern int rx_data(struct iscsit_conn *, struct kvec *, int, int);
 extern int tx_data(struct iscsit_conn *, struct kvec *, int, int);
 extern void iscsit_collect_login_stats(struct iscsit_conn *, u8, u8);


@@ -198,6 +198,24 @@ static u32 ufshcd_us_to_ahit(unsigned int timer)
 	       FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
 }

+static int ufshcd_read_hci_reg(struct ufs_hba *hba, u32 *val, unsigned int reg)
+{
+	down(&hba->host_sem);
+	if (!ufshcd_is_user_access_allowed(hba)) {
+		up(&hba->host_sem);
+		return -EBUSY;
+	}
+
+	ufshcd_rpm_get_sync(hba);
+	ufshcd_hold(hba);
+	*val = ufshcd_readl(hba, reg);
+	ufshcd_release(hba);
+	ufshcd_rpm_put_sync(hba);
+
+	up(&hba->host_sem);
+	return 0;
+}
+
 static ssize_t auto_hibern8_show(struct device *dev,
 				 struct device_attribute *attr, char *buf)
 {

@@ -208,23 +226,11 @@ static ssize_t auto_hibern8_show(struct device *dev,
 	if (!ufshcd_is_auto_hibern8_supported(hba))
 		return -EOPNOTSUPP;

-	down(&hba->host_sem);
-	if (!ufshcd_is_user_access_allowed(hba)) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	pm_runtime_get_sync(hba->dev);
-	ufshcd_hold(hba);
-	ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
-	ufshcd_release(hba);
-	pm_runtime_put_sync(hba->dev);
-
-	ret = sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
-
-out:
-	up(&hba->host_sem);
-	return ret;
+	ret = ufshcd_read_hci_reg(hba, &ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
+	if (ret)
+		return ret;
+
+	return sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
 }

 static ssize_t auto_hibern8_store(struct device *dev,
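ufshcd_read_hci_reg() above centralizes everything a sysfs handler needs around a single MMIO read: host_sem serializes against user-access gating, the runtime-PM get/put pair keeps the controller powered, and ufshcd_hold()/ufshcd_release() keep the clocks on. A typical caller then collapses to a few lines, as the new attribute handlers further down show; schematically (example_show is a hypothetical attribute handler, not one added by this patch):

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 val;
	int ret;

	ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_PID);
	if (ret)
		return ret;	/* -EBUSY while user access is blocked */

	return sysfs_emit(buf, "0x%x\n", val);
}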
@@ -519,6 +525,58 @@ static const struct attribute_group ufs_sysfs_capabilities_group = {
 	.attrs = ufs_sysfs_capabilities_attrs,
 };

+static ssize_t version_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "0x%x\n", hba->ufs_version);
+}
+
+static ssize_t product_id_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	int ret;
+	u32 val;
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_PID);
+	if (ret)
+		return ret;
+
+	return sysfs_emit(buf, "0x%x\n", val);
+}
+
+static ssize_t man_id_show(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	int ret;
+	u32 val;
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_MID);
+	if (ret)
+		return ret;
+
+	return sysfs_emit(buf, "0x%x\n", val);
+}
+
+static DEVICE_ATTR_RO(version);
+static DEVICE_ATTR_RO(product_id);
+static DEVICE_ATTR_RO(man_id);
+
+static struct attribute *ufs_sysfs_ufshci_cap_attrs[] = {
+	&dev_attr_version.attr,
+	&dev_attr_product_id.attr,
+	&dev_attr_man_id.attr,
+	NULL
+};
+
+static const struct attribute_group ufs_sysfs_ufshci_group = {
+	.name = "ufshci_capabilities",
+	.attrs = ufs_sysfs_ufshci_cap_attrs,
+};
+
 static ssize_t monitor_enable_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {

@@ -1502,6 +1560,7 @@ static const struct attribute_group ufs_sysfs_attributes_group = {
 static const struct attribute_group *ufs_sysfs_groups[] = {
 	&ufs_sysfs_default_group,
 	&ufs_sysfs_capabilities_group,
+	&ufs_sysfs_ufshci_group,
 	&ufs_sysfs_monitor_group,
 	&ufs_sysfs_power_info_group,
 	&ufs_sysfs_device_descriptor_group,


@@ -9,6 +9,7 @@
 #if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_UFS_H

+#include <ufs/ufs.h>
 #include <linux/tracepoint.h>

 #define str_opcode(opcode)	\

@@ -395,5 +396,10 @@ TRACE_EVENT(ufshcd_exception_event,

 #endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */

+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/ufs/core
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE ufs_trace
+
 /* This part must be outside protection */
 #include <trace/define_trace.h>


@ -39,7 +39,7 @@
#include <asm/unaligned.h> #include <asm/unaligned.h>
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include <trace/events/ufs.h> #include "ufs_trace.h"
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\ UTP_TASK_REQ_COMPL |\
@ -51,8 +51,10 @@
/* UIC command timeout, unit: ms */ /* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT 500 enum {
UIC_CMD_TIMEOUT_DEFAULT = 500,
UIC_CMD_TIMEOUT_MAX = 2000,
};
/* NOP OUT retries waiting for NOP IN response */ /* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10 #define NOP_OUT_RETRIES 10
/* Timeout after 50 msecs if NOP OUT hangs without response */ /* Timeout after 50 msecs if NOP OUT hangs without response */
@@ -116,6 +118,23 @@ static bool is_mcq_supported(struct ufs_hba *hba)
module_param(use_mcq_mode, bool, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");

+static unsigned int uic_cmd_timeout = UIC_CMD_TIMEOUT_DEFAULT;
+
+static int uic_cmd_timeout_set(const char *val, const struct kernel_param *kp)
+{
+        return param_set_uint_minmax(val, kp, UIC_CMD_TIMEOUT_DEFAULT,
+                                     UIC_CMD_TIMEOUT_MAX);
+}
+
+static const struct kernel_param_ops uic_cmd_timeout_ops = {
+        .set = uic_cmd_timeout_set,
+        .get = param_get_uint,
+};
+
+module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644);
+MODULE_PARM_DESC(uic_cmd_timeout,
+                 "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 2 seconds inclusively");
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
        ({                                                              \
                int _ret;                                               \
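Since the parameter is 0644 and validated by param_set_uint_minmax(), it can be retuned at runtime and out-of-range writes are rejected. A hypothetical userspace sketch; the "ufshcd_core" module directory name is an assumption:

/* Hypothetical sketch, not part of the patch: retune the UIC command
 * timeout at runtime. The module directory name is an assumption. */
#include <stdio.h>

int main(void)
{
        const char *p = "/sys/module/ufshcd_core/parameters/uic_cmd_timeout";
        FILE *f = fopen(p, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* 1500 ms is inside the accepted 500-2000 ms window; a value
         * such as 3000 would be rejected by param_set_uint_minmax(). */
        fprintf(f, "1500\n");
        if (fclose(f) == EOF)
                perror("write");
        return 0;
}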
@@ -1785,8 +1804,6 @@ static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
{
-        char wq_name[sizeof("ufs_clkscaling_00")];
-
        if (!ufshcd_is_clkscaling_supported(hba))
                return;
@@ -1798,9 +1815,8 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
        INIT_WORK(&hba->clk_scaling.resume_work,
                  ufshcd_clk_scaling_resume_work);

-        snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
-                 hba->host->host_no);
-        hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+        hba->clk_scaling.workq = alloc_ordered_workqueue(
+                "ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no);

        hba->clk_scaling.is_initialized = true;
}
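This hunk is the shape of the series-wide workqueue cleanup called out in the pull message: alloc_ordered_workqueue() accepts a printf-style format, so the on-stack name buffer and the snprintf() step disappear. A minimal sketch of the pattern, with illustrative names:

#include <linux/workqueue.h>

/* Illustrative only: the format string plus varargs replace the old
 * snprintf()-into-a-stack-buffer naming dance. */
static struct workqueue_struct *example_create_wq(int host_no)
{
        return alloc_ordered_workqueue("example_wq_%d", WQ_MEM_RECLAIM,
                                       host_no);
}

Note that create_singlethread_workqueue() implied WQ_MEM_RECLAIM, which is why the flag now appears explicitly at the converted call sites.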
@@ -2124,8 +2140,6 @@ static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
-        char wq_name[sizeof("ufs_clk_gating_00")];
-
        if (!ufshcd_is_clkgating_allowed(hba))
                return;
@@ -2135,10 +2149,9 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
        INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
        INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

-        snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
-                 hba->host->host_no);
-        hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
-                                        WQ_MEM_RECLAIM | WQ_HIGHPRI);
+        hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
+                "ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
+                hba->host->host_no);

        ufshcd_init_clk_gating_sysfs(hba);
@@ -2452,7 +2465,7 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
        u32 val;
        int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
-                                    500, UIC_CMD_TIMEOUT * 1000, false, hba,
+                                    500, uic_cmd_timeout * 1000, false, hba,
                                    REG_CONTROLLER_STATUS);

        return ret == 0;
}
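Worth spelling out, since the macro's argument order is easy to misread: read_poll_timeout() takes the poll interval and the total budget in microseconds, hence the "* 1000" converting the millisecond module parameter. An illustrative restatement of the call above:

#include <linux/iopoll.h>

/* Illustrative only: poll REG_CONTROLLER_STATUS every 500 us until the
 * ready bit is set or uic_cmd_timeout milliseconds have elapsed;
 * returns 0 on success, -ETIMEDOUT otherwise. */
static int demo_poll_uic_ready(struct ufs_hba *hba)
{
        u32 val;

        return read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
                                 500, uic_cmd_timeout * 1000, false,
                                 hba, REG_CONTROLLER_STATUS);
}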
@@ -2512,7 +2525,7 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
        lockdep_assert_held(&hba->uic_cmd_mutex);

        if (wait_for_completion_timeout(&uic_cmd->done,
-                                        msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+                                        msecs_to_jiffies(uic_cmd_timeout))) {
                ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
        } else {
                ret = -ETIMEDOUT;
@@ -4285,7 +4298,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
        }

        if (!wait_for_completion_timeout(hba->uic_async_done,
-                                         msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+                                         msecs_to_jiffies(uic_cmd_timeout))) {
                dev_err(hba->dev,
                        "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
                        cmd->command, cmd->argument3);
@@ -5876,12 +5889,11 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
/**
 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
 * @hba: per-adapter instance
- * @status: bkops_status value
 *
 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
 * flag in the device to permit background operations if the device
- * bkops_status is greater than or equal to "status" argument passed to
- * this function, disable otherwise.
+ * bkops_status is greater than or equal to the "hba->urgent_bkops_lvl",
+ * disable otherwise.
 *
 * Return: 0 for success, non-zero in case of failure.
 *
@@ -5889,11 +5901,11 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
 * to know whether auto bkops is enabled or disabled after this function
 * returns control to it.
 */
-static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
-                             enum bkops_status status)
+static int ufshcd_bkops_ctrl(struct ufs_hba *hba)
{
-        int err;
+        enum bkops_status status = hba->urgent_bkops_lvl;
        u32 curr_status = 0;
+        int err;

        err = ufshcd_get_bkops_status(hba, &curr_status);
        if (err) {
@@ -5915,23 +5927,6 @@ out:
        return err;
}

-/**
- * ufshcd_urgent_bkops - handle urgent bkops exception event
- * @hba: per-adapter instance
- *
- * Enable fBackgroundOpsEn flag in the device to permit background
- * operations.
- *
- * If BKOPs is enabled, this function returns 0, 1 if the bkops in not enabled
- * and negative error value for any other failure.
- *
- * Return: 0 upon success; < 0 upon failure.
- */
-static int ufshcd_urgent_bkops(struct ufs_hba *hba)
-{
-        return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
-}
-
static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
        return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -9692,7 +9687,7 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                 * allow background operations if bkops status shows
                 * that performance might be impacted.
                 */
-                ret = ufshcd_urgent_bkops(hba);
+                ret = ufshcd_bkops_ctrl(hba);
                if (ret) {
                        /*
                         * If return err in suspend flow, IO will hang.
@@ -9881,7 +9876,7 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
         * If BKOPs operations are urgently needed at this moment then
         * keep auto-bkops enabled or else disable it.
         */
-        ufshcd_urgent_bkops(hba);
+        ufshcd_bkops_ctrl(hba);

        if (hba->ee_usr_mask)
                ufshcd_write_ee_control(hba);
@@ -10395,7 +10390,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        int err;
        struct Scsi_Host *host = hba->host;
        struct device *dev = hba->dev;
-        char eh_wq_name[sizeof("ufs_eh_wq_00")];

        /*
         * dev_set_drvdata() must be called before any callbacks are registered
@@ -10462,9 +10456,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        hba->max_pwr_info.is_valid = false;

        /* Initialize work queues */
-        snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
-                 hba->host->host_no);
-        hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
+        hba->eh_wq = alloc_ordered_workqueue("ufs_eh_wq_%d", WQ_MEM_RECLAIM,
+                                             hba->host->host_no);
        if (!hba->eh_wq) {
                dev_err(hba->dev, "%s: failed to create eh workqueue\n",
                        __func__);


@@ -31,8 +31,7 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
        const char *name;
        u32 *clkfreq = NULL;
        struct ufs_clk_info *clki;
-        int len = 0;
-        size_t sz = 0;
+        ssize_t sz = 0;

        if (!np)
                goto out;
@@ -50,15 +49,12 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
        if (cnt <= 0)
                goto out;

-        if (!of_get_property(np, "freq-table-hz", &len)) {
+        sz = of_property_count_u32_elems(np, "freq-table-hz");
+        if (sz <= 0) {
                dev_info(dev, "freq-table-hz property not specified\n");
                goto out;
        }

-        if (len <= 0)
-                goto out;
-
-        sz = len / sizeof(*clkfreq);
        if (sz != 2 * cnt) {
                dev_err(dev, "%s len mismatch\n", "freq-table-hz");
                ret = -EINVAL;
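of_property_count_u32_elems() folds the presence check and the element count into one call, replacing the of_get_property() byte-length arithmetic. A minimal sketch of the count-then-read idiom for the same <min max>-pairs layout; the helper name is illustrative:

#include <linux/of.h>

/* Illustrative helper: count the u32 cells of "freq-table-hz" and read
 * them into a caller-provided table, expecting one <min max> pair per
 * clock as the check above does. */
static int demo_read_freq_table(struct device_node *np, int clk_cnt,
                                u32 *table, int table_len)
{
        int sz = of_property_count_u32_elems(np, "freq-table-hz");

        if (sz <= 0 || sz != 2 * clk_cnt || sz > table_len)
                return -EINVAL;

        return of_property_read_u32_array(np, "freq-table-hz", table, sz);
}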
@@ -272,10 +268,10 @@ static int ufshcd_parse_operating_points(struct ufs_hba *hba)
        const char **clk_names;
        int cnt, i, ret;

-        if (!of_find_property(np, "operating-points-v2", NULL))
+        if (!of_property_present(np, "operating-points-v2"))
                return 0;

-        if (of_find_property(np, "freq-table-hz", NULL)) {
+        if (of_property_present(np, "freq-table-hz")) {
                dev_err(dev, "%s: operating-points and freq-table-hz are incompatible\n",
                        __func__);
                return -EINVAL;


@@ -50,9 +50,7 @@ struct fcoe_ctlr_device {
        struct fcoe_sysfs_function_template *f;

        struct list_head                fcfs;
-        char                            work_q_name[20];
        struct workqueue_struct         *work_q;
-        char                            devloss_work_q_name[20];
        struct workqueue_struct         *devloss_work_q;
        struct mutex                    lock;


@@ -24,7 +24,6 @@ extern const char *scsi_extd_sense_format(unsigned char, unsigned char,
                                          const char **);
extern const char *scsi_mlreturn_string(int);
extern const char *scsi_hostbyte_string(int);
-extern const char *scsi_driverbyte_string(int);
#else
static inline bool
scsi_opcode_sa_name(int cmd, int sa,
@@ -76,12 +75,6 @@ scsi_hostbyte_string(int result)
        return NULL;
}

-static inline const char *
-scsi_driverbyte_string(int result)
-{
-        return NULL;
-}
-
#endif

#endif /* _SCSI_SCSI_DBG_H */


@@ -677,7 +677,6 @@ struct Scsi_Host {
        /*
         * Optional work queue to be utilized by the transport
         */
-        char work_q_name[20];
        struct workqueue_struct *work_q;

        /*


@@ -575,9 +575,7 @@ struct fc_host_attrs {
        u16 npiv_vports_inuse;

        /* work queues for rport state manipulation */
-        char work_q_name[20];
        struct workqueue_struct *work_q;
-        char devloss_work_q_name[20];
        struct workqueue_struct *devloss_work_q;

        /* bsg support */
@@ -654,12 +652,8 @@ struct fc_host_attrs {
        (((struct fc_host_attrs *)(x)->shost_data)->next_vport_number)
#define fc_host_npiv_vports_inuse(x) \
        (((struct fc_host_attrs *)(x)->shost_data)->npiv_vports_inuse)
-#define fc_host_work_q_name(x) \
-        (((struct fc_host_attrs *)(x)->shost_data)->work_q_name)
#define fc_host_work_q(x) \
        (((struct fc_host_attrs *)(x)->shost_data)->work_q)
-#define fc_host_devloss_work_q_name(x) \
-        (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q_name)
#define fc_host_devloss_work_q(x) \
        (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q)
#define fc_host_dev_loss_tmo(x) \


@@ -597,7 +597,7 @@ struct ufs_dev_info {
};

/*
- * This enum is used in string mapping in include/trace/events/ufs.h.
+ * This enum is used in string mapping in ufs_trace.h.
 */
enum ufs_trace_str_t {
        UFS_CMD_SEND, UFS_CMD_COMP, UFS_DEV_COMP,
@@ -607,7 +607,7 @@ enum ufs_trace_str_t {

/*
 * Transaction Specific Fields (TSF) type in the UPIU package, this enum is
- * used in include/trace/events/ufs.h for UFS command trace.
+ * used in ufs_trace.h for UFS command trace.
 */
enum ufs_trace_tsf_t {
        UFS_TSF_CDB, UFS_TSF_OSF, UFS_TSF_TM_INPUT, UFS_TSF_TM_OUTPUT


@@ -25,8 +25,9 @@ enum {
        REG_CONTROLLER_CAPABILITIES             = 0x00,
        REG_MCQCAP                              = 0x04,
        REG_UFS_VERSION                         = 0x08,
-        REG_CONTROLLER_DEV_ID                   = 0x10,
-        REG_CONTROLLER_PROD_ID                  = 0x14,
+        REG_EXT_CONTROLLER_CAPABILITIES         = 0x0C,
+        REG_CONTROLLER_PID                      = 0x10,
+        REG_CONTROLLER_MID                      = 0x14,
        REG_AUTO_HIBERNATE_IDLE_TIMER           = 0x18,
        REG_INTERRUPT_STATUS                    = 0x20,
        REG_INTERRUPT_ENABLE                    = 0x24,
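The renamed offsets track the UFSHCI host-capability register symbols (HCPID at 0x10, HCMID at 0x14) that the new sysfs attributes expose. A hedged sketch of reading them over MMIO; the function name is illustrative, while ufshcd_readl() is the driver's existing accessor:

/* Illustrative only: dump the product- and manufacturer-ID registers
 * via the driver's plain 32-bit MMIO read. */
static void demo_dump_hci_ids(struct ufs_hba *hba)
{
        u32 pid = ufshcd_readl(hba, REG_CONTROLLER_PID);
        u32 mid = ufshcd_readl(hba, REG_CONTROLLER_MID);

        dev_info(hba->dev, "HCPID=0x%x HCMID=0x%x\n", pid, mid);
}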