Merge patch series "smartpqi updates"

Don Brace <don.brace@microchip.com> says:

These patches are based on Martin Petersen's 6.11/scsi-queue tree:

  https://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git 6.11/scsi-queue

The notable functional changes to smartpqi are multipath failover
handling and improved accuracy of our RAID bypass counter.

For multipath we are:

    Reverting commit 94a68c8143 ("scsi: smartpqi: Quickly propagate
    path failures to SCSI midlayer") because, under certain rare
    conditions involving encryption-enabled devices, a false path
    failure was reported to the SML, causing multipath to fail over
    to the other path.

    Improving the errors the driver returns to the SML by checking
    the error codes reported by the firmware and passing the correct
    ASC/ASCQ codes back to the SML (see the sketch after this list).
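
    As a rough illustration of that second point: when the firmware
    reports an unusable path, the driver can fail the command at the
    transport level and hand the midlayer well-defined sense data.
    The helper below is illustrative, not the driver's actual
    function; the sense values (HARDWARE ERROR, ASC 0x3e, ASCQ 0x01)
    mirror the AIO error-handling hunk further down.

	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_proto.h>

	/* Illustrative sketch only, not the driver's exact code. */
	static void example_fail_aio_path(struct scsi_cmnd *scmd)
	{
		/* Fail the command at the transport level. */
		set_host_byte(scmd, DID_NO_CONNECT);

		/* Supply fixed-format sense data: HARDWARE ERROR,
		 * ASC 0x3e / ASCQ 0x01 ("logical unit failure").
		 */
		scsi_build_sense_buffer(0, scmd->sense_buffer,
					HARDWARE_ERROR, 0x3e, 0x1);
	}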

The other two patches add PCI IDs for new controllers and bump the
driver version.
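
The RAID bypass counter changes below follow the standard kernel
per-CPU counter pattern. Here is a minimal sketch of that pattern
with illustrative names (the patches themselves use per_cpu_ptr()
with smp_processor_id(); this_cpu_inc() is the common shorthand):

	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	struct example_dev {
		u64 __percpu *bypass_cnt;	/* one 64-bit slot per CPU */
	};

	static int example_alloc(struct example_dev *d)
	{
		d->bypass_cnt = alloc_percpu(u64);	/* zeroed on allocation */
		return d->bypass_cnt ? 0 : -ENOMEM;
	}

	/* Hot path: touch only this CPU's slot, so submitters on
	 * different CPUs never contend on a shared cache line.
	 */
	static void example_inc(struct example_dev *d)
	{
		this_cpu_inc(*d->bypass_cnt);
	}

	/* Slow path (a sysfs read): fold the per-CPU slots into a total. */
	static u64 example_sum(struct example_dev *d)
	{
		u64 sum = 0;
		int cpu;

		for_each_online_cpu(cpu)
			sum += *per_cpu_ptr(d->bypass_cnt, cpu);
		return sum;
	}

	static void example_free(struct example_dev *d)
	{
		free_percpu(d->bypass_cnt);	/* NULL-safe */
	}

The read side pays for the summation, which is acceptable for a
sysfs attribute; in exchange the I/O submission path never bounces
a shared counter cache line between CPUs.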

Link: https://lore.kernel.org/r/20240711194704.982400-1-don.brace@microchip.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 5f36bd89a9
Committer: Martin K. Petersen, 2024-08-02 22:10:04 -04:00
2 changed files with 151 additions and 27 deletions

--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1158,7 +1158,7 @@ struct pqi_scsi_dev {
 	struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
 	atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
-	unsigned int raid_bypass_cnt;
+	u64 __percpu *raid_bypass_cnt;
 	struct pqi_tmf_work tmf_work[PQI_MAX_LUNS_PER_DEVICE];
 };

--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
 #define BUILD_TIMESTAMP
 #endif
 
-#define DRIVER_VERSION		"2.1.26-030"
+#define DRIVER_VERSION		"2.1.28-025"
 #define DRIVER_MAJOR		2
 #define DRIVER_MINOR		1
-#define DRIVER_RELEASE		26
-#define DRIVER_REVISION		30
+#define DRIVER_RELEASE		28
+#define DRIVER_REVISION		25
 
 #define DRIVER_NAME		"Microchip SmartPQI Driver (v" \
 	DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -1508,6 +1508,12 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
 	if (rc)
 		goto error;
 
+	device->raid_bypass_cnt = alloc_percpu(u64);
+	if (!device->raid_bypass_cnt) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
 	device->raid_map = raid_map;
 
 	return 0;
@@ -2099,6 +2105,10 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
 		/* To prevent this from being freed later. */
 		new_device->raid_map = NULL;
 	}
+	if (new_device->raid_bypass_enabled && existing_device->raid_bypass_cnt == NULL) {
+		existing_device->raid_bypass_cnt = new_device->raid_bypass_cnt;
+		new_device->raid_bypass_cnt = NULL;
+	}
 	existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
 	existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
 }
@@ -2121,6 +2131,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
 static inline void pqi_free_device(struct pqi_scsi_dev *device)
 {
 	if (device) {
+		free_percpu(device->raid_bypass_cnt);
 		kfree(device->raid_map);
 		kfree(device);
 	}
@ -2354,14 +2365,6 @@ static inline void pqi_mask_device(u8 *scsi3addr)
scsi3addr[3] |= 0xc0;
}
static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
{
if (pqi_is_logical_device(device))
return false;
return (device->path_map & (device->path_map - 1)) != 0;
}
static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
@@ -3244,6 +3247,20 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
 			sense_data_length);
 	}
 
+	if (pqi_cmd_priv(scmd)->this_residual &&
+		!pqi_is_logical_device(scmd->device->hostdata) &&
+		scsi_status == SAM_STAT_CHECK_CONDITION &&
+		host_byte == DID_OK &&
+		sense_data_length &&
+		scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) &&
+		sshdr.sense_key == ILLEGAL_REQUEST &&
+		sshdr.asc == 0x26 &&
+		sshdr.ascq == 0x0) {
+		host_byte = DID_NO_CONNECT;
+		pqi_take_device_offline(scmd->device, "AIO");
+		scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1);
+	}
+
 	scmd->result = scsi_status;
 	set_host_byte(scmd, host_byte);
 }
@@ -3258,14 +3275,12 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
 	int residual_count;
 	int xfer_count;
 	bool device_offline;
-	struct pqi_scsi_dev *device;
 
 	scmd = io_request->scmd;
 	error_info = io_request->error_info;
 	host_byte = DID_OK;
 	sense_data_length = 0;
 	device_offline = false;
-	device = scmd->device->hostdata;
 
 	switch (error_info->service_response) {
 	case PQI_AIO_SERV_RESPONSE_COMPLETE:
@ -3290,14 +3305,8 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
break;
case PQI_AIO_STATUS_AIO_PATH_DISABLED:
pqi_aio_path_disabled(io_request);
if (pqi_is_multipath_device(device)) {
pqi_device_remove_start(device);
host_byte = DID_NO_CONNECT;
scsi_status = SAM_STAT_CHECK_CONDITION;
} else {
scsi_status = SAM_STAT_GOOD;
io_request->status = -EAGAIN;
}
scsi_status = SAM_STAT_GOOD;
io_request->status = -EAGAIN;
break;
case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
case PQI_AIO_STATUS_INVALID_DEVICE:
@@ -6007,6 +6016,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
 	u16 hw_queue;
 	struct pqi_queue_group *queue_group;
 	bool raid_bypassed;
+	u64 *raid_bypass_cnt;
 	u8 lun;
 
 	scmd->host_scribble = PQI_NO_COMPLETION;
@@ -6025,7 +6035,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
 	ctrl_info = shost_to_hba(shost);
 
-	if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
+	if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) {
 		set_host_byte(scmd, DID_NO_CONNECT);
 		pqi_scsi_done(scmd);
 		return 0;
 	}
@@ -6053,7 +6063,8 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
 			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
 			if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
 				raid_bypassed = true;
-				device->raid_bypass_cnt++;
+				raid_bypass_cnt = per_cpu_ptr(device->raid_bypass_cnt, smp_processor_id());
+				(*raid_bypass_cnt)++;
 			}
 		}
 		if (!raid_bypassed)
@@ -7350,7 +7361,9 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
 	struct scsi_device *sdev;
 	struct pqi_scsi_dev *device;
 	unsigned long flags;
-	unsigned int raid_bypass_cnt;
+	u64 raid_bypass_cnt;
+	int cpu;
+	u64 *per_cpu_bypass_cnt_ptr;
 
 	sdev = to_scsi_device(dev);
 	ctrl_info = shost_to_hba(sdev->host);
@@ -7366,11 +7379,18 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
 		return -ENODEV;
 	}
 
-	raid_bypass_cnt = device->raid_bypass_cnt;
+	raid_bypass_cnt = 0;
+	if (device->raid_bypass_cnt) {
+		for_each_online_cpu(cpu) {
+			per_cpu_bypass_cnt_ptr = per_cpu_ptr(device->raid_bypass_cnt, cpu);
+			raid_bypass_cnt += *per_cpu_bypass_cnt_ptr;
+		}
+	}
+
 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 
-	return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
+	return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt);
 }
 
 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
@@ -9472,6 +9492,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x193d, 0x110b)
 	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0x1110)
+	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x193d, 0x8460)
 	},
@ -9588,6 +9612,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1bd4, 0x0089)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1ff9, 0x00a1)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1f3a, 0x0104)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x19e5, 0xd227)
@@ -10180,6 +10212,98 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1137, 0x02fa)
 	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0045)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0046)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0047)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0048)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x004a)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x004b)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x004c)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x004f)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0051)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0052)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0053)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0054)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x006b)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x006c)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x006d)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x006f)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0070)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0071)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0072)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0086)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0087)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0088)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x0089)
+	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1e93, 0x1000)