Merge patch series "smartpqi updates"

Don Brace <don.brace@microchip.com> says:

These patches are based on Martin Petersen's 6.12/scsi-queue tree
  https://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
  6.12/scsi-queue

There are two functional changes:
    smartpqi-add-fw-log-to-kdump
    smartpqi-add-counter-for-parity-write-stream-requests

There are three minor bug fixes:
    smartpqi-fix-stream-detection
    smartpqi-fix-rare-system-hang-during-LUN-reset
    smartpqi-fix-volume-size-updates

The other two patches add PCI-IDs for new controllers and change the
driver version.

This set of changes consists of:
* smartpqi-add-fw-log-to-kdump

  During a kdump, the driver tells the controller to copy its logging
  information into pre-allocated host buffers so that it can be
  analyzed later.

  This is a "feature" driven capability and is backward compatible
  with existing controller FW.

  This patch renames the OFA (Online Firmware Activation) ofa_* buffer
  prefixes to host_memory_*, so there are not many actual functional
  changes to smartpqi_init.c; those are mainly in determining the
  memory allocation size.
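
  The shared state lives in a small descriptor that both the OFA and
  controller-log buffers now use (taken from the smartpqi.h diff
  below):

      struct pqi_host_memory_descriptor {
              struct pqi_host_memory *host_memory;  /* signature/version page + SG list */
              dma_addr_t host_memory_dma_handle;    /* DMA address of that page */
              void **host_chunk_virt_address;       /* kernel addresses of the data chunks */
      };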

  We added a function to notify the controller to copy debug data into
  host memory before continuing kdump.

  Most of the functional changes are in smartpqi_sis.c where the
  actual handshaking is done.
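
  As a rough sketch of the kdump-time handshake, with names from this
  series and error handling trimmed: the driver checks a SIS
  firmware-status bit, rings the SIS_NOTIFY_KDUMP doorbell, and polls
  until the controller reports the copy complete (or 180 seconds pass)
  before issuing the soft reset:

      /* abbreviated from pqi_ctrl_init() in the diff below */
      if (sis_is_ctrl_logging_supported(ctrl_info)) {
              sis_notify_kdump(ctrl_info);  /* ring SIS_NOTIFY_KDUMP doorbell */
              rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
              if (rc)
                      return rc;            /* invalid condition or timeout */
      }
      sis_soft_reset(ctrl_info);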

* smartpqi-fix-stream-detection

  Correct some false write-stream detections. The data structure used
  to check for write streams was not initialized to all zeros, causing
  occasional false detections. The driver sends streamed requests down
  to the RAID engine instead of using AIO bypass for some extra
  performance (a potential full-stripe write versus a
  read-modify-write).

  False detections have not caused any data corruption.  Found by
  internal testing. No known externally reported bugs.
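
  The fix itself is a one-line zero initialization of the on-stack
  raid-map scratch structure, visible in the smartpqi_init.c diff
  below:

      /* stale stack contents previously leaked into the stream check */
      struct pqi_scsi_dev_raid_map_data rmd = { 0 };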

* smartpqi-add-counter-for-parity-write-stream-requests

  Adding counters for RAID bypass and write streams. These two
  counters are related because write stream detection is only
  performed when an I/O request is eligible for bypass (AIO).

  The bypass counter (raid_bypass_cnt) was moved into a common
  __percpu structure (pqi_raid_io_stats), and the write stream counter
  (write_stream_cnt) has been added to that same structure.

  These are __percpu counters for performance. We added a sysfs entry
  to show the write stream count; the RAID bypass counter sysfs entry
  already exists.

  This is useful for checking streaming writes: the write_stream_cnt
  sysfs entry can be monitored for changes during AIO-eligible write
  operations.
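
  For reference, the per-CPU pattern in play here is the standard
  kernel percpu API: each CPU increments its own slot without locking,
  and the sysfs show routines fold all slots into one total. A minimal
  sketch, condensed from the diff below:

      /* per-device allocation */
      device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats);

      /* lock-free increment on the submitting CPU */
      per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++;

      /* sysfs read: sum every CPU's slot */
      write_stream_cnt = 0;
      for_each_online_cpu(cpu)
              write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt;

  The attribute is registered alongside the existing per-device sysfs
  entries, so it should appear as write_stream_cnt under the SCSI
  device's sysfs directory (e.g.
  /sys/class/scsi_device/<h:c:t:l>/device/, path assumed) and prints
  the running total in hex.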

* smartpqi-add-new-controller-PCI-IDs

  Adding support for new controller HW.  No functional changes.

* smartpqi-fix-rare-system-hang-during-LUN-reset

  We found a rare race condition that can occur during a LUN reset. We
  were not emptying our internal queue completely.

  There have been some rare cases where our internal request queue
  held requests for multiple LUNs and a reset came in for one of those
  LUNs. The driver waits for this internal queue to empty, but we were
  only clearing out the requests for the LUN being reset, so the queue
  never emptied, causing a hang.

  The Fix (sketched in code below):

     For all requests in the driver's internal request queue:

        Complete queued requests for the device undergoing the reset
        with DID_RESET.

        Complete all other queued requests with DID_REQUEUE.
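
  A condensed sketch of the new drain loop (the surrounding iteration
  over queue groups and paths is abbreviated; see
  pqi_fail_io_queued_for_device() in the diff below):

      list_del(&io_request->request_list_entry);
      if (scsi_device == device && (u8)scmd->device->lun == lun)
              set_host_byte(scmd, DID_RESET);    /* LUN being reset */
      else
              set_host_byte(scmd, DID_REQUEUE);  /* other LUNs: retried later */
      pqi_free_io_request(io_request);
      scsi_dma_unmap(scmd);
      pqi_scsi_done(scmd);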

  Found by internal testing. No known externally reported bugs.

* smartpqi-fix-volume-size-updates

  The current code only checks for a size change if there is also a
  queue depth change. We are separating the queue depth check from the
  size change check.

  Found by internal testing. No known bugs were filed.
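
  In code terms (locking elided; the full change is in
  pqi_update_device_list() in the diff below), the rescan check now
  runs for every device instead of only inside the queue-depth branch:

      /* queue depth change: adjust the midlayer queue depth */
      if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
              device->advertised_queue_depth = device->queue_depth;
              scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
      }
      /* independently check for changes in the device, such as size */
      if (pqi_volume_rescan_needed(device)) {
              device->rescan = false;
              scsi_rescan_device(device->sdev);
      }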

* smartpqi-update-version-to-2.1.30-031

  No functional changes.

Link: https://lore.kernel.org/r/20240827185501.692804-1-don.brace@microchip.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit cff06a799d
Martin K. Petersen <martin.petersen@oracle.com>
2024-08-28 22:16:33 -04:00
4 files changed, 407 insertions(+), 217 deletions(-)

--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -505,7 +505,7 @@ struct pqi_vendor_general_request {
 			__le64	buffer_address;
 			__le32	buffer_length;
 			u8	reserved[40];
-		} ofa_memory_allocation;
+		} host_memory_allocation;
 	} data;
 };
@@ -517,21 +517,30 @@ struct pqi_vendor_general_response {
 	u8	reserved[2];
 };
 
 #define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE	0
-#define PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE	1
+#define PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE	1
+#define PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE	2
 
 #define PQI_OFA_VERSION			1
 #define PQI_OFA_SIGNATURE		"OFA_QRM"
-#define PQI_OFA_MAX_SG_DESCRIPTORS	64
+#define PQI_CTRL_LOG_VERSION		1
+#define PQI_CTRL_LOG_SIGNATURE		"FW_DATA"
+#define PQI_HOST_MAX_SG_DESCRIPTORS	64
 
-struct pqi_ofa_memory {
-	__le64	signature;	/* "OFA_QRM" */
+struct pqi_host_memory {
+	__le64	signature;	/* "OFA_QRM", "FW_DATA", etc. */
 	__le16	version;	/* version of this struct (1 = 1st version) */
 	u8	reserved[62];
 	__le32	bytes_allocated;	/* total allocated memory in bytes */
 	__le16	num_memory_descriptors;
 	u8	reserved1[2];
-	struct pqi_sg_descriptor sg_descriptor[PQI_OFA_MAX_SG_DESCRIPTORS];
+	struct pqi_sg_descriptor sg_descriptor[PQI_HOST_MAX_SG_DESCRIPTORS];
+};
+
+struct pqi_host_memory_descriptor {
+	struct pqi_host_memory *host_memory;
+	dma_addr_t host_memory_dma_handle;
+	void **host_chunk_virt_address;
 };
 
 struct pqi_aio_error_info {
@@ -867,7 +876,8 @@ struct pqi_config_table_firmware_features {
 #define PQI_FIRMWARE_FEATURE_FW_TRIAGE			17
 #define PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5	18
 #define PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT	21
-#define PQI_FIRMWARE_FEATURE_MAXIMUM			21
+#define PQI_FIRMWARE_FEATURE_CTRL_LOGGING		22
+#define PQI_FIRMWARE_FEATURE_MAXIMUM			22
 
 struct pqi_config_table_debug {
 	struct pqi_config_table_section_header header;
@@ -1096,6 +1106,11 @@ struct pqi_tmf_work {
 	u8	scsi_opcode;
 };
 
+struct pqi_raid_io_stats {
+	u64	raid_bypass_cnt;
+	u64	write_stream_cnt;
+};
+
 struct pqi_scsi_dev {
 	int	devtype;		/* as reported by INQUIRY command */
 	u8	device_type;		/* as reported by */
@@ -1158,7 +1173,7 @@ struct pqi_scsi_dev {
 	struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
 	atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
-	u64 __percpu *raid_bypass_cnt;
+	struct pqi_raid_io_stats __percpu *raid_io_stats;
 	struct pqi_tmf_work tmf_work[PQI_MAX_LUNS_PER_DEVICE];
 };
@@ -1357,6 +1372,7 @@ struct pqi_ctrl_info {
 	u8 firmware_triage_supported : 1;
 	u8 rpl_extended_format_4_5_supported : 1;
 	u8 multi_lun_device_supported : 1;
+	u8 ctrl_logging_supported : 1;
 	u8 enable_r1_writes : 1;
 	u8 enable_r5_writes : 1;
 	u8 enable_r6_writes : 1;
@@ -1398,13 +1414,12 @@ struct pqi_ctrl_info {
 	wait_queue_head_t block_requests_wait;
 
 	struct mutex ofa_mutex;
-	struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
-	dma_addr_t pqi_ofa_mem_dma_handle;
-	void **pqi_ofa_chunk_virt_addr;
 	struct work_struct ofa_memory_alloc_work;
 	struct work_struct ofa_quiesce_work;
 	u32 ofa_bytes_requested;
 	u16 ofa_cancel_reason;
+	struct pqi_host_memory_descriptor ofa_memory;
+	struct pqi_host_memory_descriptor ctrl_log_memory;
 	enum pqi_ctrl_removal_state ctrl_removal_state;
 };

--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
 #define BUILD_TIMESTAMP
 #endif
 
-#define DRIVER_VERSION		"2.1.28-025"
+#define DRIVER_VERSION		"2.1.30-031"
 #define DRIVER_MAJOR		2
 #define DRIVER_MINOR		1
-#define DRIVER_RELEASE		28
-#define DRIVER_REVISION		25
+#define DRIVER_RELEASE		30
+#define DRIVER_REVISION		31
 
 #define DRIVER_NAME		"Microchip SmartPQI Driver (v" \
 			DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -92,9 +92,9 @@ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
-static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
-static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
+static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size);
+static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor);
+static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code);
 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
@@ -1508,8 +1508,8 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
 	if (rc)
 		goto error;
 
-	device->raid_bypass_cnt = alloc_percpu(u64);
-	if (!device->raid_bypass_cnt) {
+	device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats);
+	if (!device->raid_io_stats) {
 		rc = -ENOMEM;
 		goto error;
 	}
@@ -2105,9 +2105,9 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
 		/* To prevent this from being freed later. */
 		new_device->raid_map = NULL;
 	}
-	if (new_device->raid_bypass_enabled && existing_device->raid_bypass_cnt == NULL) {
-		existing_device->raid_bypass_cnt = new_device->raid_bypass_cnt;
-		new_device->raid_bypass_cnt = NULL;
+	if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) {
+		existing_device->raid_io_stats = new_device->raid_io_stats;
+		new_device->raid_io_stats = NULL;
 	}
 	existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
 	existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
@@ -2131,7 +2131,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
 static inline void pqi_free_device(struct pqi_scsi_dev *device)
 {
 	if (device) {
-		free_percpu(device->raid_bypass_cnt);
+		free_percpu(device->raid_io_stats);
 		kfree(device->raid_map);
 		kfree(device);
 	}
@@ -2303,17 +2303,23 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
 	 * queue depth, device size.
 	 */
 	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+		/*
+		 * Check for queue depth change.
+		 */
 		if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
 			device->advertised_queue_depth = device->queue_depth;
 			scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
-			spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
-			if (pqi_volume_rescan_needed(device)) {
-				device->rescan = false;
-				spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
-				scsi_rescan_device(device->sdev);
-			} else {
-				spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
-			}
+		}
+		spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+		/*
+		 * Check for changes in the device, such as size.
+		 */
+		if (pqi_volume_rescan_needed(device)) {
+			device->rescan = false;
+			spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+			scsi_rescan_device(device->sdev);
+		} else {
+			spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 		}
 	}
@@ -3634,7 +3640,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
 		ctrl_info->pqi_mode_enabled = false;
 		pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
 		rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
-		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
 		pqi_ctrl_ofa_done(ctrl_info);
 		dev_info(&ctrl_info->pci_dev->dev,
 				"Online Firmware Activation: %s\n",
@@ -3645,7 +3651,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
 			"Online Firmware Activation ABORTED\n");
 		if (ctrl_info->soft_reset_handshake_supported)
 			pqi_clear_soft_reset_status(ctrl_info);
-		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
 		pqi_ctrl_ofa_done(ctrl_info);
 		pqi_ofa_ctrl_unquiesce(ctrl_info);
 		break;
@@ -3655,7 +3661,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
 		dev_err(&ctrl_info->pci_dev->dev,
 			"unexpected Online Firmware Activation reset status: 0x%x\n",
 			reset_status);
-		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
 		pqi_ctrl_ofa_done(ctrl_info);
 		pqi_ofa_ctrl_unquiesce(ctrl_info);
 		pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
@@ -3670,8 +3676,8 @@ static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
 	ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
 
 	pqi_ctrl_ofa_start(ctrl_info);
-	pqi_ofa_setup_host_buffer(ctrl_info);
-	pqi_ofa_host_memory_update(ctrl_info);
+	pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested);
+	pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE);
 }
 
 static void pqi_ofa_quiesce_worker(struct work_struct *work)
@@ -3711,7 +3717,7 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
 		dev_info(&ctrl_info->pci_dev->dev,
 			"received Online Firmware Activation cancel request: reason: %u\n",
 			ctrl_info->ofa_cancel_reason);
-		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
 		pqi_ctrl_ofa_done(ctrl_info);
 		break;
 	default:
@@ -5942,7 +5948,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
 	int rc;
 	struct pqi_scsi_dev *device;
 	struct pqi_stream_data *pqi_stream_data;
-	struct pqi_scsi_dev_raid_map_data rmd;
+	struct pqi_scsi_dev_raid_map_data rmd = { 0 };
 
 	if (!ctrl_info->enable_stream_detection)
 		return false;
@@ -5984,6 +5990,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
 			pqi_stream_data->next_lba = rmd.first_block +
 				rmd.block_cnt;
 			pqi_stream_data->last_accessed = jiffies;
+			per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++;
 			return true;
 		}
@@ -6016,7 +6023,6 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
 	u16 hw_queue;
 	struct pqi_queue_group *queue_group;
 	bool raid_bypassed;
-	u64 *raid_bypass_cnt;
 	u8 lun;
 
 	scmd->host_scribble = PQI_NO_COMPLETION;
@@ -6063,8 +6069,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
 			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
 			if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
 				raid_bypassed = true;
-				raid_bypass_cnt = per_cpu_ptr(device->raid_bypass_cnt, smp_processor_id());
-				(*raid_bypass_cnt)++;
+				per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++;
 			}
 		}
 		if (!raid_bypassed)
@@ -6201,14 +6206,12 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
 					continue;
 
 				scsi_device = scmd->device->hostdata;
-				if (scsi_device != device)
-					continue;
-
-				if ((u8)scmd->device->lun != lun)
-					continue;
 
 				list_del(&io_request->request_list_entry);
-				set_host_byte(scmd, DID_RESET);
+				if (scsi_device == device && (u8)scmd->device->lun == lun)
+					set_host_byte(scmd, DID_RESET);
+				else
+					set_host_byte(scmd, DID_REQUEUE);
 				pqi_free_io_request(io_request);
 				scsi_dma_unmap(scmd);
 				pqi_scsi_done(scmd);
@@ -7363,7 +7366,6 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
 	unsigned long flags;
 	u64 raid_bypass_cnt;
 	int cpu;
-	u64 *per_cpu_bypass_cnt_ptr;
 
 	sdev = to_scsi_device(dev);
 	ctrl_info = shost_to_hba(sdev->host);
@@ -7381,10 +7383,9 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
 	raid_bypass_cnt = 0;
 
-	if (device->raid_bypass_cnt) {
+	if (device->raid_io_stats) {
 		for_each_online_cpu(cpu) {
-			per_cpu_bypass_cnt_ptr = per_cpu_ptr(device->raid_bypass_cnt, cpu);
-			raid_bypass_cnt += *per_cpu_bypass_cnt_ptr;
+			raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt;
 		}
 	}
@@ -7472,6 +7473,43 @@ static ssize_t pqi_numa_node_show(struct device *dev,
 	return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
 }
 
+static ssize_t pqi_write_stream_cnt_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct scsi_device *sdev;
+	struct pqi_scsi_dev *device;
+	unsigned long flags;
+	u64 write_stream_cnt;
+	int cpu;
+
+	sdev = to_scsi_device(dev);
+	ctrl_info = shost_to_hba(sdev->host);
+
+	if (pqi_ctrl_offline(ctrl_info))
+		return -ENODEV;
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	device = sdev->hostdata;
+	if (!device) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+		return -ENODEV;
+	}
+
+	write_stream_cnt = 0;
+
+	if (device->raid_io_stats) {
+		for_each_online_cpu(cpu) {
+			write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt;
+		}
+	}
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt);
+}
+
 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
@@ -7482,6 +7520,7 @@ static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
 static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
 	pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
+static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL);
 
 static struct attribute *pqi_sdev_attrs[] = {
 	&dev_attr_lunid.attr,
@@ -7493,6 +7532,7 @@ static struct attribute *pqi_sdev_attrs[] = {
 	&dev_attr_raid_bypass_cnt.attr,
 	&dev_attr_sas_ncq_prio_enable.attr,
 	&dev_attr_numa_node.attr,
+	&dev_attr_write_stream_cnt.attr,
 	NULL
 };
@@ -7883,6 +7923,9 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
 	case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
 		ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
 		break;
+	case PQI_FIRMWARE_FEATURE_CTRL_LOGGING:
+		ctrl_info->ctrl_logging_supported = firmware_feature->enabled;
+		break;
 	}
 
 	pqi_firmware_feature_status(ctrl_info, firmware_feature);
@@ -7988,6 +8031,11 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
 		.feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
 		.feature_status = pqi_ctrl_update_feature_flags,
 	},
+	{
+		.feature_name = "Controller Data Logging",
+		.feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING,
+		.feature_status = pqi_ctrl_update_feature_flags,
+	},
 };
 
 static void pqi_process_firmware_features(
@@ -8090,6 +8138,7 @@ static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
 	ctrl_info->firmware_triage_supported = false;
 	ctrl_info->rpl_extended_format_4_5_supported = false;
 	ctrl_info->multi_lun_device_supported = false;
+	ctrl_info->ctrl_logging_supported = false;
 }
 
 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
@@ -8230,6 +8279,9 @@ static void pqi_perform_lockup_action(void)
 	}
 }
 
+#define PQI_CTRL_LOG_TOTAL_SIZE	(4 * 1024 * 1024)
+#define PQI_CTRL_LOG_MIN_SIZE	(PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS)
+
 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 {
 	int rc;
@@ -8241,6 +8293,12 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 			if (rc)
 				return rc;
 		}
+		if (sis_is_ctrl_logging_supported(ctrl_info)) {
+			sis_notify_kdump(ctrl_info);
+			rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
+			if (rc)
+				return rc;
+		}
 		sis_soft_reset(ctrl_info);
 		ssleep(PQI_POST_RESET_DELAY_SECS);
 	} else {
@@ -8422,6 +8480,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 	if (rc)
 		return rc;
 
+	if (ctrl_info->ctrl_logging_supported && !reset_devices) {
+		pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
+		pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
+	}
+
 	rc = pqi_get_ctrl_product_details(ctrl_info);
 	if (rc) {
 		dev_err(&ctrl_info->pci_dev->dev,
@@ -8606,8 +8669,22 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
 			return rc;
 	}
 
-	if (pqi_ofa_in_progress(ctrl_info))
+	if (pqi_ofa_in_progress(ctrl_info)) {
 		pqi_ctrl_unblock_scan(ctrl_info);
+		if (ctrl_info->ctrl_logging_supported) {
+			if (!ctrl_info->ctrl_log_memory.host_memory)
+				pqi_host_setup_buffer(ctrl_info,
+					&ctrl_info->ctrl_log_memory,
+					PQI_CTRL_LOG_TOTAL_SIZE,
+					PQI_CTRL_LOG_MIN_SIZE);
+			pqi_host_memory_update(ctrl_info,
+				&ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
+		} else {
+			if (ctrl_info->ctrl_log_memory.host_memory)
+				pqi_host_free_buffer(ctrl_info,
+					&ctrl_info->ctrl_log_memory);
+		}
+	}
 
 	pqi_scan_scsi_devices(ctrl_info);
@@ -8797,6 +8874,7 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
 		pqi_fail_all_outstanding_requests(ctrl_info);
 		ctrl_info->pqi_mode_enabled = false;
 	}
+	pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory);
 	pqi_unregister_scsi(ctrl_info);
 	if (ctrl_info->pqi_mode_enabled)
 		pqi_revert_to_sis_mode(ctrl_info);
@@ -8822,170 +8900,6 @@ static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
 	pqi_ctrl_unblock_scan(ctrl_info);
 }
 
-static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
-{
-	int i;
-	u32 sg_count;
-	struct device *dev;
-	struct pqi_ofa_memory *ofap;
-	struct pqi_sg_descriptor *mem_descriptor;
-	dma_addr_t dma_handle;
-
-	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
-	sg_count = DIV_ROUND_UP(total_size, chunk_size);
-	if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
-		goto out;
-
-	ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
-	if (!ctrl_info->pqi_ofa_chunk_virt_addr)
-		goto out;
-
-	dev = &ctrl_info->pci_dev->dev;
-
-	for (i = 0; i < sg_count; i++) {
-		ctrl_info->pqi_ofa_chunk_virt_addr[i] =
-			dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
-		if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
-			goto out_free_chunks;
-		mem_descriptor = &ofap->sg_descriptor[i];
-		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
-		put_unaligned_le32(chunk_size, &mem_descriptor->length);
-	}
-
-	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
-	put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
-	put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
-
-	return 0;
-
-out_free_chunks:
-	while (--i >= 0) {
-		mem_descriptor = &ofap->sg_descriptor[i];
-		dma_free_coherent(dev, chunk_size,
-			ctrl_info->pqi_ofa_chunk_virt_addr[i],
-			get_unaligned_le64(&mem_descriptor->address));
-	}
-	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
-
-out:
-	return -ENOMEM;
-}
-
-static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
-{
-	u32 total_size;
-	u32 chunk_size;
-	u32 min_chunk_size;
-
-	if (ctrl_info->ofa_bytes_requested == 0)
-		return 0;
-
-	total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
-	min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
-	min_chunk_size = PAGE_ALIGN(min_chunk_size);
-
-	for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
-		if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
-			return 0;
-		chunk_size /= 2;
-		chunk_size = PAGE_ALIGN(chunk_size);
-	}
-
-	return -ENOMEM;
-}
-
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
-{
-	struct device *dev;
-	struct pqi_ofa_memory *ofap;
-
-	dev = &ctrl_info->pci_dev->dev;
-
-	ofap = dma_alloc_coherent(dev, sizeof(*ofap),
-		&ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
-	if (!ofap)
-		return;
-
-	ctrl_info->pqi_ofa_mem_virt_addr = ofap;
-
-	if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
-		dev_err(dev,
-			"failed to allocate host buffer for Online Firmware Activation\n");
-		dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
-		ctrl_info->pqi_ofa_mem_virt_addr = NULL;
-		return;
-	}
-
-	put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
-	memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
-}
-
-static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
-{
-	unsigned int i;
-	struct device *dev;
-	struct pqi_ofa_memory *ofap;
-	struct pqi_sg_descriptor *mem_descriptor;
-	unsigned int num_memory_descriptors;
-
-	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-	if (!ofap)
-		return;
-
-	dev = &ctrl_info->pci_dev->dev;
-
-	if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
-		goto out;
-
-	mem_descriptor = ofap->sg_descriptor;
-	num_memory_descriptors =
-		get_unaligned_le16(&ofap->num_memory_descriptors);
-
-	for (i = 0; i < num_memory_descriptors; i++) {
-		dma_free_coherent(dev,
-			get_unaligned_le32(&mem_descriptor[i].length),
-			ctrl_info->pqi_ofa_chunk_virt_addr[i],
-			get_unaligned_le64(&mem_descriptor[i].address));
-	}
-	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
-
-out:
-	dma_free_coherent(dev, sizeof(*ofap), ofap,
-		ctrl_info->pqi_ofa_mem_dma_handle);
-	ctrl_info->pqi_ofa_mem_virt_addr = NULL;
-}
-
-static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
-{
-	u32 buffer_length;
-	struct pqi_vendor_general_request request;
-	struct pqi_ofa_memory *ofap;
-
-	memset(&request, 0, sizeof(request));
-
-	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
-	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
-		&request.header.iu_length);
-	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
-		&request.function_code);
-
-	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
-	if (ofap) {
-		buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
-			get_unaligned_le16(&ofap->num_memory_descriptors) *
-			sizeof(struct pqi_sg_descriptor);
-
-		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
-			&request.data.ofa_memory_allocation.buffer_address);
-		put_unaligned_le32(buffer_length,
-			&request.data.ofa_memory_allocation.buffer_length);
-	}
-
-	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
-}
-
 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
 {
 	ssleep(delay_secs);
@@ -8993,6 +8907,180 @@ static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int de
 	return pqi_ctrl_init_resume(ctrl_info);
 }
 
+static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor,
+	u32 total_size, u32 chunk_size)
+{
+	int i;
+	u32 sg_count;
+	struct device *dev;
+	struct pqi_host_memory *host_memory;
+	struct pqi_sg_descriptor *mem_descriptor;
+	dma_addr_t dma_handle;
+
+	sg_count = DIV_ROUND_UP(total_size, chunk_size);
+	if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS)
+		goto out;
+
+	host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL);
+	if (!host_memory_descriptor->host_chunk_virt_address)
+		goto out;
+
+	dev = &ctrl_info->pci_dev->dev;
+	host_memory = host_memory_descriptor->host_memory;
+
+	for (i = 0; i < sg_count; i++) {
+		host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
+		if (!host_memory_descriptor->host_chunk_virt_address[i])
+			goto out_free_chunks;
+		mem_descriptor = &host_memory->sg_descriptor[i];
+		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
+		put_unaligned_le32(chunk_size, &mem_descriptor->length);
+	}
+
+	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
+	put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors);
+	put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated);
+
+	return 0;
+
+out_free_chunks:
+	while (--i >= 0) {
+		mem_descriptor = &host_memory->sg_descriptor[i];
+		dma_free_coherent(dev, chunk_size,
+			host_memory_descriptor->host_chunk_virt_address[i],
+			get_unaligned_le64(&mem_descriptor->address));
+	}
+	kfree(host_memory_descriptor->host_chunk_virt_address);
+
+out:
+	return -ENOMEM;
+}
+
+static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor,
+	u32 total_required_size, u32 min_required_size)
+{
+	u32 chunk_size;
+	u32 min_chunk_size;
+
+	if (total_required_size == 0 || min_required_size == 0)
+		return 0;
+
+	total_required_size = PAGE_ALIGN(total_required_size);
+	min_required_size = PAGE_ALIGN(min_required_size);
+	min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS);
+	min_chunk_size = PAGE_ALIGN(min_chunk_size);
+
+	while (total_required_size >= min_required_size) {
+		for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) {
+			if (pqi_host_alloc_mem(ctrl_info,
+				host_memory_descriptor, total_required_size,
+				chunk_size) == 0)
+				return 0;
+			chunk_size /= 2;
+			chunk_size = PAGE_ALIGN(chunk_size);
+		}
+		total_required_size /= 2;
+		total_required_size = PAGE_ALIGN(total_required_size);
+	}
+
+	return -ENOMEM;
+}
+
+static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor,
+	u32 total_size, u32 min_size)
+{
+	struct device *dev;
+	struct pqi_host_memory *host_memory;
+
+	dev = &ctrl_info->pci_dev->dev;
+
+	host_memory = dma_alloc_coherent(dev, sizeof(*host_memory),
+		&host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL);
+	if (!host_memory)
+		return;
+
+	host_memory_descriptor->host_memory = host_memory;
+
+	if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor,
+		total_size, min_size) < 0) {
+		dev_err(dev, "failed to allocate firmware usable host buffer\n");
+		dma_free_coherent(dev, sizeof(*host_memory), host_memory,
+			host_memory_descriptor->host_memory_dma_handle);
+		host_memory_descriptor->host_memory = NULL;
+		return;
+	}
+}
+
+static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor)
+{
+	unsigned int i;
+	struct device *dev;
+	struct pqi_host_memory *host_memory;
+	struct pqi_sg_descriptor *mem_descriptor;
+	unsigned int num_memory_descriptors;
+
+	host_memory = host_memory_descriptor->host_memory;
+	if (!host_memory)
+		return;
+
+	dev = &ctrl_info->pci_dev->dev;
+
+	if (get_unaligned_le32(&host_memory->bytes_allocated) == 0)
+		goto out;
+
+	mem_descriptor = host_memory->sg_descriptor;
+	num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors);
+
+	for (i = 0; i < num_memory_descriptors; i++) {
+		dma_free_coherent(dev,
+			get_unaligned_le32(&mem_descriptor[i].length),
+			host_memory_descriptor->host_chunk_virt_address[i],
+			get_unaligned_le64(&mem_descriptor[i].address));
+	}
+	kfree(host_memory_descriptor->host_chunk_virt_address);
+
+out:
+	dma_free_coherent(dev, sizeof(*host_memory), host_memory,
+		host_memory_descriptor->host_memory_dma_handle);
+	host_memory_descriptor->host_memory = NULL;
+}
+
+static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_host_memory_descriptor *host_memory_descriptor,
+	u16 function_code)
+{
+	u32 buffer_length;
+	struct pqi_vendor_general_request request;
+	struct pqi_host_memory *host_memory;
+
+	memset(&request, 0, sizeof(request));
+
+	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
+	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
+	put_unaligned_le16(function_code, &request.function_code);
+
+	host_memory = host_memory_descriptor->host_memory;
+
+	if (host_memory) {
+		buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor);
+		put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address);
+		put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length);
+
+		if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) {
+			put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version);
+			memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature));
+		} else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) {
+			put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version);
+			memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature));
+		}
+	}
+
+	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
+}
+
 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
 	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
 	.status = SAM_STAT_CHECK_CONDITION,
@@ -9464,6 +9552,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x152d, 0x8a37)
 	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0x0462)
+	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x193d, 0x1104)
@@ -9504,6 +9596,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x193d, 0x8461)
 	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0x8462)
+	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x193d, 0xc460)
@@ -10212,6 +10308,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1137, 0x02fa)
 	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1137, 0x02fe)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1137, 0x02ff)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1137, 0x0300)
+	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0045)
@@ -10388,6 +10496,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1045)
 	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ff9, 0x00a3)
+	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_ANY_ID, PCI_ANY_ID)

--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -29,6 +29,7 @@
 #define SIS_ENABLE_INTX				0x80
 #define SIS_SOFT_RESET				0x100
 #define SIS_CMD_READY				0x200
+#define SIS_NOTIFY_KDUMP			0x400
 #define SIS_TRIGGER_SHUTDOWN			0x800000
 #define SIS_PQI_RESET_QUIESCE			0x1000000
@@ -52,6 +53,8 @@
 #define SIS_BASE_STRUCT_ALIGNMENT		16
 
 #define SIS_CTRL_KERNEL_FW_TRIAGE		0x3
+#define SIS_CTRL_KERNEL_CTRL_LOGGING		0x4
+#define SIS_CTRL_KERNEL_CTRL_LOGGING_STATUS	0x18
 #define SIS_CTRL_KERNEL_UP			0x80
 #define SIS_CTRL_KERNEL_PANIC			0x100
 #define SIS_CTRL_READY_TIMEOUT_SECS		180
@@ -65,6 +68,13 @@ enum sis_fw_triage_status {
 	FW_TRIAGE_COMPLETED
 };
 
+enum sis_ctrl_logging_status {
+	CTRL_LOGGING_NOT_STARTED = 0,
+	CTRL_LOGGING_STARTED,
+	CTRL_LOGGING_COND_INVALID,
+	CTRL_LOGGING_COMPLETED
+};
+
 #pragma pack(1)
 
 /* for use with SIS_CMD_INIT_BASE_STRUCT_ADDRESS command */
@@ -442,6 +452,21 @@ static inline enum sis_fw_triage_status
 		SIS_CTRL_KERNEL_FW_TRIAGE));
 }
 
+bool sis_is_ctrl_logging_supported(struct pqi_ctrl_info *ctrl_info)
+{
+	return readl(&ctrl_info->registers->sis_firmware_status) & SIS_CTRL_KERNEL_CTRL_LOGGING;
+}
+
+void sis_notify_kdump(struct pqi_ctrl_info *ctrl_info)
+{
+	sis_set_doorbell_bit(ctrl_info, SIS_NOTIFY_KDUMP);
+}
+
+static inline enum sis_ctrl_logging_status sis_read_ctrl_logging_status(struct pqi_ctrl_info *ctrl_info)
+{
+	return ((enum sis_ctrl_logging_status)((readl(&ctrl_info->registers->sis_firmware_status) & SIS_CTRL_KERNEL_CTRL_LOGGING_STATUS) >> 3));
+}
+
 void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
 {
 	writel(SIS_SOFT_RESET,
@@ -484,6 +509,41 @@ int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info)
 	return rc;
 }
 
+#define SIS_CTRL_LOGGING_STATUS_TIMEOUT_SECS		180
+#define SIS_CTRL_LOGGING_STATUS_POLL_INTERVAL_SECS	1
+
+int sis_wait_for_ctrl_logging_completion(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+	enum sis_ctrl_logging_status status;
+	unsigned long timeout;
+
+	timeout = (SIS_CTRL_LOGGING_STATUS_TIMEOUT_SECS * HZ) + jiffies;
+	while (1) {
+		status = sis_read_ctrl_logging_status(ctrl_info);
+		if (status == CTRL_LOGGING_COND_INVALID) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"controller data logging condition invalid\n");
+			rc = -EINVAL;
+			break;
+		} else if (status == CTRL_LOGGING_COMPLETED) {
+			rc = 0;
+			break;
+		}
+
+		if (time_after(jiffies, timeout)) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"timed out waiting for controller data logging status\n");
+			rc = -ETIMEDOUT;
+			break;
+		}
+
+		ssleep(SIS_CTRL_LOGGING_STATUS_POLL_INTERVAL_SECS);
+	}
+
+	return rc;
+}
+
 void sis_verify_structures(void)
 {
 	BUILD_BUG_ON(offsetof(struct sis_base_struct,

--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -31,6 +31,9 @@ u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
 void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
 u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info);
 int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info);
+bool sis_is_ctrl_logging_supported(struct pqi_ctrl_info *ctrl_info);
+void sis_notify_kdump(struct pqi_ctrl_info *ctrl_info);
+int sis_wait_for_ctrl_logging_completion(struct pqi_ctrl_info *ctrl_info);
 
 extern unsigned int sis_ctrl_ready_timeout_secs;