Merge branch 'mkp-fixes' into fixes
commit be9e2f775f
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9411,8 +9411,10 @@ F:	include/scsi/sg.h
 
 SCSI SUBSYSTEM
 M:	"James E.J. Bottomley" <JBottomley@odin.com>
-L:	linux-scsi@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
+M:	"Martin K. Petersen" <martin.petersen@oracle.com>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
+L:	linux-scsi@vger.kernel.org
 S:	Maintained
 F:	drivers/scsi/
 F:	include/scsi/
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -91,7 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->virt_boundary_mask = 0;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
-	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+	lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors =
+			BLK_SAFE_MAX_SECTORS;
 	lim->chunk_sectors = 0;
 	lim->max_write_same_sectors = 0;
 	lim->max_discard_sectors = 0;
@@ -127,6 +128,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
 	lim->max_hw_sectors = UINT_MAX;
 	lim->max_segment_size = UINT_MAX;
 	lim->max_sectors = UINT_MAX;
+	lim->max_dev_sectors = UINT_MAX;
 	lim->max_write_same_sectors = UINT_MAX;
 }
 EXPORT_SYMBOL(blk_set_stacking_limits);
@@ -214,8 +216,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
- * @limits: the queue limits
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q: the request queue for the device
  * @max_hw_sectors: max hardware sectors in the usual 512b unit
  *
  * Description:
@@ -224,13 +226,19 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    the device driver based upon the capabilities of the I/O
  *    controller.
  *
+ *    max_dev_sectors is a hard limit imposed by the storage device for
+ *    READ/WRITE requests. It is set by the disk driver.
+ *
  *    max_sectors is a soft limit imposed by the block layer for
  *    filesystem type requests. This value can be overridden on a
  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
  *    The soft limit can not exceed max_hw_sectors.
  **/
-void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
 {
+	struct queue_limits *limits = &q->limits;
+	unsigned int max_sectors;
+
 	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
 		printk(KERN_INFO "%s: set to minimum %d\n",
@@ -238,22 +246,9 @@ void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_
 	}
 
 	limits->max_hw_sectors = max_hw_sectors;
-	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
-				    BLK_DEF_MAX_SECTORS);
-}
-EXPORT_SYMBOL(blk_limits_max_hw_sectors);
-
-/**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q: the request queue for the device
- * @max_hw_sectors: max hardware sectors in the usual 512b unit
- *
- * Description:
- *    See description for blk_limits_max_hw_sectors().
- **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
-{
-	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
+	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
+	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
+	limits->max_sectors = max_sectors;
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
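Reviewer's note on the rewrite above: the merged helper now derives the soft limit from three inputs, with min_not_zero() treating an unset (zero) device limit as "no cap". Below is a minimal userspace sketch of the same arithmetic; min_not_zero() is reimplemented by hand and the BLK_DEF_MAX_SECTORS value is illustrative, not taken from this diff.

#include <stdio.h>

/* Userspace sketch (not kernel code): min_not_zero() picks the smaller
 * of two values while treating 0 as "no limit set". */
static unsigned int min_not_zero(unsigned int a, unsigned int b)
{
        return a == 0 ? b : (b == 0 ? a : (a < b ? a : b));
}

#define BLK_DEF_MAX_SECTORS 2560u       /* illustrative value */

int main(void)
{
        unsigned int max_hw_sectors = 8192, max_dev_sectors = 0;
        unsigned int max_sectors;

        /* Mirrors the new blk_queue_max_hw_sectors() logic: the device
         * limit only caps max_sectors when the driver actually set it. */
        max_sectors = min_not_zero(max_hw_sectors, max_dev_sectors);
        max_sectors = max_sectors < BLK_DEF_MAX_SECTORS ?
                      max_sectors : BLK_DEF_MAX_SECTORS;
        printf("max_sectors = %u\n", max_sectors);      /* 2560 */
        return 0;
}

With max_dev_sectors left at 0 the result matches the old behaviour; a driver-supplied device limit now caps the computation before the block-layer default is applied.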
@@ -527,6 +522,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
 	t->max_write_same_sectors = min(t->max_write_same_sectors,
 					b->max_write_same_sectors);
 	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -205,6 +205,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 	if (ret < 0)
 		return ret;
 
+	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
+					 q->limits.max_dev_sectors >> 1);
+
 	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
 		return -EINVAL;
 
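A note on the ">> 1" above: queue limits are kept in 512-byte sectors while the sysfs file is expressed in kilobytes, so halving a sector count yields KB. A tiny sketch of the conversion (the value is made up):

#include <stdio.h>

int main(void)
{
        /* Illustrative only: 524280 sectors of 512 bytes = 262140 KB. */
        unsigned long max_dev_sectors = 524280;
        unsigned long max_dev_kb = max_dev_sectors >> 1;

        printf("%lu sectors -> %lu KB\n", max_dev_sectors, max_dev_kb);
        return 0;
}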
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -364,6 +364,7 @@ config SCSI_HPSA
 	tristate "HP Smart Array SCSI driver"
 	depends on PCI && SCSI
 	select CHECK_SIGNATURE
+	select SCSI_SAS_ATTRS
 	help
 	  This driver supports HP Smart Array Controllers (circa 2009).
 	  It is a SCSI alternative to the cciss driver, which is a block
@@ -499,6 +500,7 @@ config SCSI_ADVANSYS
 	tristate "AdvanSys SCSI support"
 	depends on SCSI
 	depends on ISA || EISA || PCI
+	depends on ISA_DMA_API || !ISA
 	help
 	  This is a driver for all SCSI host adapters manufactured by
 	  AdvanSys. It is documented in the kernel source in
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -7803,7 +7803,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
 		return ASC_BUSY;
 	}
 	scsiqp->sense_addr = cpu_to_le32(sense_addr);
-	scsiqp->sense_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
+	scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE;
 
 	/* Build ADV_SCSI_REQ_Q */
 
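The dropped cpu_to_le32() matters on big-endian builds: assuming sense_len is a single-byte field (which the removal of the conversion implies), byte-swapping a 32-bit value before the narrowing store leaves the wrong byte behind. On little-endian CPUs cpu_to_le32() is a no-op, which is why the bug only bit big-endian machines. A userspace sketch with the swap written out by hand:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: what cpu_to_le32() does on a big-endian CPU. */
static uint32_t cpu_to_le32_be(uint32_t x)
{
        return ((x & 0xff) << 24) | ((x & 0xff00) << 8) |
               ((x & 0xff0000) >> 8) | (x >> 24);
}

int main(void)
{
        uint8_t sense_len;      /* assume a single-byte field */
        uint32_t v = 96;        /* e.g. SCSI_SENSE_BUFFERSIZE */

        sense_len = (uint8_t)cpu_to_le32_be(v); /* truncates to 0 on BE */
        printf("big-endian: %u\n", sense_len);  /* 0, not 96 */

        sense_len = (uint8_t)v;                 /* the fixed assignment */
        printf("fixed: %u\n", sense_len);       /* 96 */
        return 0;
}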
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -333,6 +333,17 @@ static void scsi_host_dev_release(struct device *dev)
 		kfree(queuedata);
 	}
 
+	if (shost->shost_state == SHOST_CREATED) {
+		/*
+		 * Free the shost_dev device name here if scsi_host_alloc()
+		 * and scsi_host_put() have been called but neither
+		 * scsi_host_add() nor scsi_host_remove() has been called.
+		 * This avoids that the memory allocated for the shost_dev
+		 * name is leaked.
+		 */
+		kfree(dev_name(&shost->shost_dev));
+	}
+
 	scsi_destroy_command_freelist(shost);
 	if (shost_use_blk_mq(shost)) {
 		if (shost->tag_set.tags)
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -8671,7 +8671,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
 	if ((rc != 0) || (c->err_info->CommandStatus != 0))
 		goto errout;
 
-	if (*options && HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
+	if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
 		goto out;
 
 errout:
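The one-character fix above swaps logical AND for bitwise AND: with "&&", any nonzero *options made the test true regardless of whether the specific flag was set. A standalone sketch (the flag value here is illustrative; the real constant lives in the hpsa headers):

#include <stdio.h>

#define HPSA_DIAG_OPTS_DISABLE_RLD_CACHING 0x40000000u  /* illustrative value */

int main(void)
{
        unsigned int options = 0x1;     /* some other bit set, flag NOT set */

        /* Logical AND: any nonzero options makes the test true. */
        printf("&&: %d\n", options && HPSA_DIAG_OPTS_DISABLE_RLD_CACHING); /* 1 */

        /* Bitwise AND: true only when the specific bit is set. */
        printf("&:  %d\n", !!(options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)); /* 0 */
        return 0;
}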
--- a/drivers/scsi/mpt3sas/Kconfig
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -71,3 +71,12 @@ config SCSI_MPT3SAS_MAX_SGE
 	MAX_PHYS_SEGMENTS in most kernels. However in SuSE kernels this
 	can be 256. However, it may decreased down to 16. Decreasing this
 	parameter will reduce memory requirements on a per controller instance.
+
+config SCSI_MPT2SAS
+	tristate "Legacy MPT2SAS config option"
+	default n
+	select SCSI_MPT3SAS
+	depends on PCI && SCSI
+	---help---
+	Dummy config option for backwards compatiblity: configure the MPT3SAS
+	driver instead.
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3905,8 +3905,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 	 * We do not expose raid functionality to upper layer for warpdrive.
 	 */
 	if (!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev)
-	    && (sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
-	    scmd->cmd_len != 32)
+	    && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
 		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
 
 	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -758,7 +758,7 @@ mvs_store_interrupt_coalescing(struct device *cdev,
 			struct device_attribute *attr,
 			const char *buffer, size_t size)
 {
-	int val = 0;
+	unsigned int val = 0;
 	struct mvs_info *mvi = NULL;
 	struct Scsi_Host *shost = class_to_shost(cdev);
 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
@@ -766,7 +766,7 @@ mvs_store_interrupt_coalescing(struct device *cdev,
 	if (buffer == NULL)
 		return size;
 
-	if (sscanf(buffer, "%d", &val) != 1)
+	if (sscanf(buffer, "%u", &val) != 1)
 		return -EINVAL;
 
 	if (val >= 0x10000) {
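Why the signedness change matters: parsing "-1" with "%d" into a signed val slips straight past the "val >= 0x10000" range check below it. A userspace sketch:

#include <stdio.h>

int main(void)
{
        int sval;
        unsigned int uval;

        /* Signed: "-1" parses fine and then sails under the range
         * check, since -1 < 0x10000. */
        sscanf("-1", "%d", &sval);
        printf("signed: %d, rejected: %d\n", sval, sval >= 0x10000);   /* -1, 0 */

        /* "%u" with an unsigned variable keeps the types consistent;
         * the wrapped value is huge and the range check catches it. */
        sscanf("-1", "%u", &uval);
        printf("unsigned: %u, rejected: %d\n", uval, uval >= 0x10000); /* 1 */
        return 0;
}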
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -433,7 +433,7 @@ qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
 	if (off_in < QLA82XX_PCI_CRBSPACE)
 		return -1;
 
-	*off_out = (void __iomem *)(off_in - QLA82XX_PCI_CRBSPACE);
+	off_in -= QLA82XX_PCI_CRBSPACE;
 
 	/* Try direct map */
 	m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)];
@@ -443,6 +443,7 @@ qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
 		return 0;
 	}
 	/* Not in direct map, use crb window */
+	*off_out = (void __iomem *)off_in;
 	return 1;
 }
 
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -465,8 +465,9 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
 	    0} },
 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
 	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */
-	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
+	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
+	     0, 0, 0, 0, 0, 0} },
 	{1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
 	    vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
 		      0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
@@ -477,8 +478,8 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
 	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
 	     0} },
 /* 20 */
-	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */
-	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
+	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
 	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -701,9 +701,12 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
 	 * strings.
 	 */
 	if (sdev->inquiry_len < 36) {
-		sdev_printk(KERN_INFO, sdev,
-			    "scsi scan: INQUIRY result too short (%d),"
-			    " using 36\n", sdev->inquiry_len);
+		if (!sdev->host->short_inquiry) {
+			shost_printk(KERN_INFO, sdev->host,
+				     "scsi scan: INQUIRY result too short (%d),"
+				     " using 36\n", sdev->inquiry_len);
+			sdev->host->short_inquiry = 1;
+		}
 		sdev->inquiry_len = 36;
 	}
 
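The new short_inquiry bit (added to struct Scsi_Host in the last hunk of this series) turns a per-LUN warning into a per-host print-once, so a host with many LUNs logs the message a single time. A minimal sketch of the pattern:

#include <stdio.h>

/* Sketch only: one flag per host, mirroring the logic above. */
struct host { unsigned int short_inquiry : 1; };

static void probe_lun(struct host *h, int inquiry_len)
{
        if (inquiry_len < 36) {
                if (!h->short_inquiry) {
                        printf("INQUIRY result too short (%d), using 36\n",
                               inquiry_len);
                        h->short_inquiry = 1;
                }
                inquiry_len = 36;       /* clamp as the real code does */
        }
}

int main(void)
{
        struct host h = { 0 };

        for (int lun = 0; lun < 8; lun++)
                probe_lun(&h, 5);       /* message printed only once */
        return 0;
}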
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1102,6 +1102,14 @@ void __scsi_remove_device(struct scsi_device *sdev)
 {
 	struct device *dev = &sdev->sdev_gendev;
 
+	/*
+	 * This cleanup path is not reentrant and while it is impossible
+	 * to get a new reference with scsi_device_get() someone can still
+	 * hold a previously acquired one.
+	 */
+	if (sdev->sdev_state == SDEV_DEL)
+		return;
+
 	if (sdev->is_visible) {
 		if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
 			return;
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -638,11 +638,24 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 	unsigned int max_blocks = 0;
 
 	q->limits.discard_zeroes_data = 0;
-	q->limits.discard_alignment = sdkp->unmap_alignment *
-		logical_block_size;
-	q->limits.discard_granularity =
-		max(sdkp->physical_block_size,
-		    sdkp->unmap_granularity * logical_block_size);
+
+	/*
+	 * When LBPRZ is reported, discard alignment and granularity
+	 * must be fixed to the logical block size. Otherwise the block
+	 * layer will drop misaligned portions of the request which can
+	 * lead to data corruption. If LBPRZ is not set, we honor the
+	 * device preference.
+	 */
+	if (sdkp->lbprz) {
+		q->limits.discard_alignment = 0;
+		q->limits.discard_granularity = 1;
+	} else {
+		q->limits.discard_alignment = sdkp->unmap_alignment *
+			logical_block_size;
+		q->limits.discard_granularity =
+			max(sdkp->physical_block_size,
+			    sdkp->unmap_granularity * logical_block_size);
+	}
 
 	sdkp->provisioning_mode = mode;
 
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (sdkp->capacity > 0xffffffff) {
|
if (sdkp->capacity > 0xffffffff)
|
||||||
sdp->use_16_for_rw = 1;
|
sdp->use_16_for_rw = 1;
|
||||||
sdkp->max_xfer_blocks = SD_MAX_XFER_BLOCKS;
|
|
||||||
} else
|
|
||||||
sdkp->max_xfer_blocks = SD_DEF_XFER_BLOCKS;
|
|
||||||
|
|
||||||
/* Rescale capacity to 512-byte units */
|
/* Rescale capacity to 512-byte units */
|
||||||
if (sector_size == 4096)
|
if (sector_size == 4096)
|
||||||
@@ -2642,7 +2652,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 {
 	unsigned int sector_sz = sdkp->device->sector_size;
 	const int vpd_len = 64;
-	u32 max_xfer_length;
 	unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
 
 	if (!buffer ||
@@ -2650,14 +2659,11 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 	    scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
 		goto out;
 
-	max_xfer_length = get_unaligned_be32(&buffer[8]);
-	if (max_xfer_length)
-		sdkp->max_xfer_blocks = max_xfer_length;
-
 	blk_queue_io_min(sdkp->disk->queue,
 			 get_unaligned_be16(&buffer[6]) * sector_sz);
-	blk_queue_io_opt(sdkp->disk->queue,
-			 get_unaligned_be32(&buffer[12]) * sector_sz);
+
+	sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
+	sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
 
 	if (buffer[3] == 0x3c) {
 		unsigned int lba_count, desc_count;
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
|
||||||
|
{
|
||||||
|
return blocks << (ilog2(sdev->sector_size) - 9);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* sd_revalidate_disk - called the first time a new disk is seen,
|
* sd_revalidate_disk - called the first time a new disk is seen,
|
||||||
* performs disk spin up, read_capacity, etc.
|
* performs disk spin up, read_capacity, etc.
|
||||||
@ -2815,8 +2826,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
|
|||||||
{
|
{
|
||||||
struct scsi_disk *sdkp = scsi_disk(disk);
|
struct scsi_disk *sdkp = scsi_disk(disk);
|
||||||
struct scsi_device *sdp = sdkp->device;
|
struct scsi_device *sdp = sdkp->device;
|
||||||
|
struct request_queue *q = sdkp->disk->queue;
|
||||||
unsigned char *buffer;
|
unsigned char *buffer;
|
||||||
unsigned int max_xfer;
|
unsigned int dev_max, rw_max;
|
||||||
|
|
||||||
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
|
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
|
||||||
"sd_revalidate_disk\n"));
|
"sd_revalidate_disk\n"));
|
||||||
@ -2864,11 +2876,26 @@ static int sd_revalidate_disk(struct gendisk *disk)
|
|||||||
*/
|
*/
|
||||||
sd_set_flush_flag(sdkp);
|
sd_set_flush_flag(sdkp);
|
||||||
|
|
||||||
max_xfer = sdkp->max_xfer_blocks;
|
/* Initial block count limit based on CDB TRANSFER LENGTH field size. */
|
||||||
max_xfer <<= ilog2(sdp->sector_size) - 9;
|
dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
|
||||||
|
|
||||||
sdkp->disk->queue->limits.max_sectors =
|
/* Some devices report a maximum block count for READ/WRITE requests. */
|
||||||
min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
|
dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
|
||||||
|
q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Use the device's preferred I/O size for reads and writes
|
||||||
|
* unless the reported value is unreasonably large (or garbage).
|
||||||
|
*/
|
||||||
|
if (sdkp->opt_xfer_blocks && sdkp->opt_xfer_blocks <= dev_max &&
|
||||||
|
sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS)
|
||||||
|
rw_max = q->limits.io_opt =
|
||||||
|
logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
|
||||||
|
else
|
||||||
|
rw_max = BLK_DEF_MAX_SECTORS;
|
||||||
|
|
||||||
|
/* Combine with controller limits */
|
||||||
|
q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
|
||||||
|
|
||||||
set_capacity(disk, sdkp->capacity);
|
set_capacity(disk, sdkp->capacity);
|
||||||
sd_config_write_same(sdkp);
|
sd_config_write_same(sdkp);
|
||||||
|
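Putting the pieces together, a sketch of the new limit derivation with made-up numbers. 0xffff mirrors SD_DEF_XFER_BLOCKS (the 10-byte CDB length field); the device and controller limits are illustrative, and a 512-byte-block device is assumed so that blocks and 512-byte sectors coincide.

#include <stdio.h>

static unsigned int min_not_zero(unsigned int a, unsigned int b)
{
        return a == 0 ? b : (b == 0 ? a : (a < b ? a : b));
}

int main(void)
{
        unsigned int dev_max = 0xffff;          /* 10-byte CDB field size */
        unsigned int max_xfer_blocks = 128;     /* VPD MAXIMUM TRANSFER LENGTH */
        unsigned int opt_xfer_blocks = 64;      /* VPD OPTIMAL TRANSFER LENGTH */
        unsigned int max_hw_sectors = 2048;     /* controller limit */
        unsigned int blk_def_max_sectors = 2560;
        unsigned int rw_max, max_sectors;

        /* Hard cap: CDB field size, then the device-reported maximum. */
        dev_max = min_not_zero(dev_max, max_xfer_blocks);       /* 128 */

        /* Prefer the device's optimal size when it looks sane. */
        if (opt_xfer_blocks && opt_xfer_blocks <= dev_max &&
            opt_xfer_blocks <= 0xffff)
                rw_max = opt_xfer_blocks;                       /* 64 */
        else
                rw_max = blk_def_max_sectors;

        /* Combine with the controller limit. */
        max_sectors = rw_max < max_hw_sectors ? rw_max : max_hw_sectors;
        printf("max_dev_sectors=%u max_sectors=%u\n", dev_max, max_sectors);
        return 0;
}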
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -67,6 +67,7 @@ struct scsi_disk {
 	atomic_t	openers;
 	sector_t	capacity;	/* size in 512-byte sectors */
 	u32		max_xfer_blocks;
+	u32		opt_xfer_blocks;
 	u32		max_ws_blocks;
 	u32		max_unmap_blocks;
 	u32		unmap_granularity;
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4083,6 +4083,7 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
 	}
 	cdev->owner = THIS_MODULE;
 	cdev->ops = &st_fops;
+	STm->cdevs[rew] = cdev;
 
 	error = cdev_add(cdev, cdev_devno, 1);
 	if (error) {
@@ -4091,7 +4092,6 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
 		pr_err("st%d: Device not attached.\n", dev_num);
 		goto out_free;
 	}
-	STm->cdevs[rew] = cdev;
 
 	i = mode << (4 - ST_NBR_MODE_BITS);
 	snprintf(name, 10, "%s%s%s", rew ? "n" : "",
@@ -4110,8 +4110,9 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
 	return 0;
 out_free:
 	cdev_del(STm->cdevs[rew]);
-	STm->cdevs[rew] = NULL;
 out:
+	STm->cdevs[rew] = NULL;
+	STm->devs[rew] = NULL;
 	return error;
 }
 
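The three st hunks are one fix: record the cdev pointer before cdev_add(), so every failure path can unwind through the same labels and find the object it must release. A shape-only sketch of the pattern, with stub functions standing in for the real cdev API (not the st driver's code):

#include <stdio.h>
#include <stdlib.h>

struct cdev { int registered; };

/* Stubs standing in for cdev_add()/cdev_del(); failure is simulated. */
static int cdev_add_stub(struct cdev *c) { (void)c; return -1; }
static void cdev_del_stub(struct cdev *c) { free(c); }

static int create_one(struct cdev **slot)
{
        struct cdev *c = calloc(1, sizeof(*c));
        int error;

        if (!c) {
                error = -12;    /* -ENOMEM */
                goto out;
        }
        *slot = c;              /* record before registering ... */
        error = cdev_add_stub(c);
        if (error)
                goto out_free;  /* ... so this path can find it */
        return 0;

out_free:
        cdev_del_stub(*slot);
out:
        *slot = NULL;           /* single place to clear the slot */
        return error;
}

int main(void)
{
        struct cdev *slot = NULL;

        printf("create_one: %d, slot=%p\n", create_one(&slot), (void *)slot);
        return 0;
}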
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -254,6 +254,7 @@ struct queue_limits {
 	unsigned long		virt_boundary_mask;
 
 	unsigned int		max_hw_sectors;
+	unsigned int		max_dev_sectors;
 	unsigned int		chunk_sectors;
 	unsigned int		max_sectors;
 	unsigned int		max_segment_size;
@@ -958,7 +959,6 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -668,6 +668,9 @@ struct Scsi_Host {
 	unsigned use_blk_mq:1;
 	unsigned use_cmd_list:1;
 
+	/* Host responded with short (<36 bytes) INQUIRY result */
+	unsigned short_inquiry:1;
+
 	/*
 	 * Optional work queue to be utilized by the transport
 	 */