forked from Minki/linux
target: Update QUEUE ALGORITHM MODIFIER control page default
This patch adds the default 'Unrestricted reordering allowed' for SCSI control mode page QUEUE ALGORITHM MODIFIER on a per se_device basis in target_modesense_control() following spc4r23. This includes a new emulate_rest_reord configfs attribute that currently (only) accepts zero to signal 'Unrestricted reordering allowed' in control mode page usage by the backend target device. Reported-by: Roland Dreier <roland@purestorage.com> Signed-off-by: Nicholas Bellinger <nab@risingtidesystems.com>
This commit is contained in:
parent
1d20bb6147
commit
5de619a31d
@ -774,6 +774,35 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
|
||||
p[0] = 0x0a;
|
||||
p[1] = 0x0a;
|
||||
p[2] = 2;
|
||||
/*
|
||||
* From spc4r23, 7.4.7 Control mode page
|
||||
*
|
||||
* The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
|
||||
* restrictions on the algorithm used for reordering commands
|
||||
* having the SIMPLE task attribute (see SAM-4).
|
||||
*
|
||||
* Table 368 -- QUEUE ALGORITHM MODIFIER field
|
||||
* Code Description
|
||||
* 0h Restricted reordering
|
||||
* 1h Unrestricted reordering allowed
|
||||
* 2h to 7h Reserved
|
||||
* 8h to Fh Vendor specific
|
||||
*
|
||||
* A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
|
||||
* the device server shall order the processing sequence of commands
|
||||
* having the SIMPLE task attribute such that data integrity is maintained
|
||||
* for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
|
||||
* requests is halted at any time, the final value of all data observable
|
||||
* on the medium shall be the same as if all the commands had been processed
|
||||
* with the ORDERED task attribute).
|
||||
*
|
||||
* A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
|
||||
* device server may reorder the processing sequence of commands having the
|
||||
* SIMPLE task attribute in any manner. Any data integrity exposures related to
|
||||
* command sequence order shall be explicitly handled by the application client
|
||||
	 * through the selection of appropriate commands and task attributes.
|
||||
*/
|
||||
p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
|
||||
/*
|
||||
* From spc4r17, section 7.4.6 Control mode Page
|
||||
*
|
||||
|
@ -701,6 +701,9 @@ SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
|
||||
DEF_DEV_ATTRIB(is_nonrot);
|
||||
SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(emulate_rest_reord);
|
||||
SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB_RO(hw_block_size);
|
||||
SE_DEV_ATTR_RO(hw_block_size);
|
||||
|
||||
@ -750,6 +753,7 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
|
||||
&target_core_dev_attrib_emulate_tpws.attr,
|
||||
&target_core_dev_attrib_enforce_pr_isids.attr,
|
||||
&target_core_dev_attrib_is_nonrot.attr,
|
||||
&target_core_dev_attrib_emulate_rest_reord.attr,
|
||||
&target_core_dev_attrib_hw_block_size.attr,
|
||||
&target_core_dev_attrib_block_size.attr,
|
||||
&target_core_dev_attrib_hw_max_sectors.attr,
|
||||
|
@ -857,6 +857,7 @@ void se_dev_set_default_attribs(
|
||||
dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
|
||||
dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
|
||||
dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
|
||||
dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
|
||||
/*
|
||||
* The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
|
||||
* iblock_create_virtdevice() from struct queue_limits values
|
||||
@ -1128,11 +1129,23 @@ int se_dev_set_is_nonrot(struct se_device *dev, int flag)
|
||||
return -EINVAL;
|
||||
}
|
||||
dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
|
||||
printk(KERN_INFO "dev[%p]: SE Device is_nonrot bit: %d\n",
|
||||
pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
|
||||
dev, flag);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
|
||||
{
|
||||
if (flag != 0) {
|
||||
printk(KERN_ERR "dev[%p]: SE Device emulatation of restricted"
|
||||
" reordering not implemented\n", dev);
|
||||
return -ENOSYS;
|
||||
}
|
||||
dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
|
||||
pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Note, this can only be called on unexported SE Device Object.
|
||||
*/
|
||||
|
@ -662,6 +662,7 @@ struct se_dev_attrib {
|
||||
int emulate_alua;
|
||||
int enforce_pr_isids;
|
||||
int is_nonrot;
|
||||
int emulate_rest_reord;
|
||||
u32 hw_block_size;
|
||||
u32 block_size;
|
||||
u32 hw_max_sectors;
|
||||
|
@ -40,6 +40,7 @@ extern int se_dev_set_emulate_tpu(struct se_device *, int);
|
||||
extern int se_dev_set_emulate_tpws(struct se_device *, int);
|
||||
extern int se_dev_set_enforce_pr_isids(struct se_device *, int);
|
||||
extern int se_dev_set_is_nonrot(struct se_device *, int);
|
||||
extern int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
|
||||
extern int se_dev_set_queue_depth(struct se_device *, u32);
|
||||
extern int se_dev_set_max_sectors(struct se_device *, u32);
|
||||
extern int se_dev_set_optimal_sectors(struct se_device *, u32);
|
||||
|
@ -103,6 +103,8 @@
|
||||
#define DA_STATUS_MAX_SECTORS_MAX 8192
|
||||
/* By default don't report non-rotating (solid state) medium */
|
||||
#define DA_IS_NONROT 0
|
||||
/* Queue Algorithm Modifier default for restricted reordering in control mode page */
|
||||
#define DA_EMULATE_REST_REORD 0
|
||||
|
||||
#define SE_MODE_PAGE_BUF 512
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user