scsi: sd: Remove WRITE_SAME support
There are no more end-users of REQ_OP_WRITE_SAME left, so we can start
deleting it.

Link: https://lore.kernel.org/r/20220209082828.2629273-5-hch@lst.de
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit e383e16e84
parent ebd0473763
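For context on "no more end-users": block-layer callers that need to zero a range now go through the write-zeroes path rather than WRITE SAME. A minimal sketch of that usage follows; it is not part of this commit, and the device, range, and flag choice are assumptions for illustration.

/*
 * Illustrative only: zeroing a block range without REQ_OP_WRITE_SAME.
 * blkdev_issue_zeroout() issues REQ_OP_WRITE_ZEROES when the device
 * supports it and otherwise falls back to writing zero pages.
 */
#include <linux/blkdev.h>

static int zero_example_range(struct block_device *bdev)
{
	sector_t start = 0;		/* assumed start sector */
	sector_t nr_sects = 2048;	/* assumed length: 1 MiB in 512-byte sectors */

	/* BLKDEV_ZERO_NOUNMAP: keep the blocks allocated while zeroing. */
	return blkdev_issue_zeroout(bdev, start, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOUNMAP);
}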
drivers/scsi/sd.c

@@ -1035,13 +1035,13 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
 	 * Reporting a maximum number of blocks that is not aligned
 	 * on the device physical size would cause a large write same
 	 * request to be split into physically unaligned chunks by
-	 * __blkdev_issue_write_zeroes() and __blkdev_issue_write_same()
-	 * even if the caller of these functions took care to align the
-	 * large request. So make sure the maximum reported is aligned
-	 * to the device physical block size. This is only an optional
-	 * optimization for regular disks, but this is mandatory to
-	 * avoid failure of large write same requests directed at
-	 * sequential write required zones of host-managed ZBC disks.
+	 * __blkdev_issue_write_zeroes() even if the caller of this
+	 * functions took care to align the large request. So make sure
+	 * the maximum reported is aligned to the device physical block
+	 * size. This is only an optional optimization for regular
+	 * disks, but this is mandatory to avoid failure of large write
+	 * same requests directed at sequential write required zones of
+	 * host-managed ZBC disks.
 	 */
 	sdkp->max_ws_blocks =
 		round_down(sdkp->max_ws_blocks,
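The reworded comment keeps the alignment rule for the remaining write-zeroes path: the reported maximum must be a whole number of physical blocks. A minimal sketch of that arithmetic, using assumed block sizes and an assumed device limit rather than values from this commit:

/*
 * Illustrative arithmetic only (assumed values): with 512-byte logical
 * and 4096-byte physical blocks there are 8 logical blocks per physical
 * block, so the reported maximum is rounded down to a multiple of 8.
 */
#include <linux/math.h>		/* round_down() */

static unsigned int aligned_max_blocks_example(void)
{
	unsigned int logical_block_size = 512;
	unsigned int physical_block_size = 4096;
	unsigned int max_blocks = 0xffffff;	/* assumed device-reported limit */

	/* 0xffffff rounds down to 0xfffff8 (a multiple of 8). */
	return round_down(max_blocks, physical_block_size / logical_block_size);
}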
@@ -1050,68 +1050,10 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
 	}
 
 out:
-	blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
-					 (logical_block_size >> 9));
 	blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
 					 (logical_block_size >> 9));
 }
 
-/**
- * sd_setup_write_same_cmnd - write the same data to multiple blocks
- * @cmd: command to prepare
- *
- * Will set up either WRITE SAME(10) or WRITE SAME(16) depending on
- * the preference indicated by the target device.
- **/
-static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
-{
-	struct request *rq = scsi_cmd_to_rq(cmd);
-	struct scsi_device *sdp = cmd->device;
-	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
-	struct bio *bio = rq->bio;
-	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
-	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
-	blk_status_t ret;
-
-	if (sdkp->device->no_write_same)
-		return BLK_STS_TARGET;
-
-	BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
-
-	rq->timeout = SD_WRITE_SAME_TIMEOUT;
-
-	if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) {
-		cmd->cmd_len = 16;
-		cmd->cmnd[0] = WRITE_SAME_16;
-		put_unaligned_be64(lba, &cmd->cmnd[2]);
-		put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
-	} else {
-		cmd->cmd_len = 10;
-		cmd->cmnd[0] = WRITE_SAME;
-		put_unaligned_be32(lba, &cmd->cmnd[2]);
-		put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
-	}
-
-	cmd->transfersize = sdp->sector_size;
-	cmd->allowed = sdkp->max_retries;
-
-	/*
-	 * For WRITE SAME the data transferred via the DATA OUT buffer is
-	 * different from the amount of data actually written to the target.
-	 *
-	 * We set up __data_len to the amount of data transferred via the
-	 * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
-	 * to transfer a single sector of data first, but then reset it to
-	 * the amount of data to be written right after so that the I/O path
-	 * knows how much to actually write.
-	 */
-	rq->__data_len = sdp->sector_size;
-	ret = scsi_alloc_sgtables(cmd);
-	rq->__data_len = blk_rq_bytes(rq);
-
-	return ret;
-}
-
 static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
 {
 	struct request *rq = scsi_cmd_to_rq(cmd);
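For reference, the deleted helper above built the WRITE SAME(10) and WRITE SAME(16) CDBs by hand: an opcode byte, a big-endian LBA, and a big-endian block count. The standalone sketch below reproduces that byte layout with hypothetical put_be*() helpers standing in for the kernel's put_unaligned_be*(); it is an illustration, not kernel code.

/*
 * Illustration of the CDB layout the removed helper produced.
 * WRITE SAME(16): opcode 0x93, 64-bit LBA at bytes 2-9, 32-bit block
 * count at bytes 10-13, all big-endian.
 */
#include <stdint.h>
#include <string.h>

#define EX_WRITE_SAME_16	0x93	/* SBC opcode, same value as the kernel's WRITE_SAME_16 */

static void put_be64(uint8_t *p, uint64_t v)
{
	for (int i = 0; i < 8; i++)
		p[i] = v >> (56 - 8 * i);
}

static void put_be32(uint8_t *p, uint32_t v)
{
	for (int i = 0; i < 4; i++)
		p[i] = v >> (24 - 8 * i);
}

static void build_write_same_16(uint8_t cdb[16], uint64_t lba, uint32_t nr_blocks)
{
	memset(cdb, 0, 16);
	cdb[0] = EX_WRITE_SAME_16;
	put_be64(&cdb[2], lba);
	put_be32(&cdb[10], nr_blocks);
}

The removed comment also explains why rq->__data_len was temporarily set to one logical block: only a single block of data travels in the DATA OUT buffer, even though the command describes a much larger write.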
@@ -1344,8 +1286,6 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
 		}
 	case REQ_OP_WRITE_ZEROES:
 		return sd_setup_write_zeroes_cmnd(cmd);
-	case REQ_OP_WRITE_SAME:
-		return sd_setup_write_same_cmnd(cmd);
 	case REQ_OP_FLUSH:
 		return sd_setup_flush_cmnd(cmd);
 	case REQ_OP_READ:
@@ -2040,7 +1980,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 	switch (req_op(req)) {
 	case REQ_OP_DISCARD:
 	case REQ_OP_WRITE_ZEROES:
-	case REQ_OP_WRITE_SAME:
 	case REQ_OP_ZONE_RESET:
 	case REQ_OP_ZONE_RESET_ALL:
 	case REQ_OP_ZONE_OPEN:
drivers/scsi/sd_zbc.c

@@ -423,7 +423,6 @@ static bool sd_zbc_need_zone_wp_update(struct request *rq)
 		return true;
 	case REQ_OP_WRITE:
 	case REQ_OP_WRITE_ZEROES:
-	case REQ_OP_WRITE_SAME:
 		return blk_rq_zone_is_seq(rq);
 	default:
 		return false;
@@ -477,7 +476,6 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
 		rq->__sector += sdkp->zones_wp_offset[zno];
 		fallthrough;
 	case REQ_OP_WRITE_ZEROES:
-	case REQ_OP_WRITE_SAME:
 	case REQ_OP_WRITE:
 		if (sdkp->zones_wp_offset[zno] < sd_zbc_zone_sectors(sdkp))
 			sdkp->zones_wp_offset[zno] +=
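The sd_zbc.c hunks drop REQ_OP_WRITE_SAME from the emulated zone write pointer accounting; ordinary writes and write-zeroes to sequential-write-required zones still advance the cached offset. A simplified, self-contained sketch of that bookkeeping, with an assumed zone size and structure names that are not the kernel's:

/*
 * Simplified sketch (assumed types and zone size, not the kernel's
 * implementation): a successfully completed write advances the cached
 * write pointer offset of its zone, capped at the zone size.
 */
#include <stdint.h>

#define EX_ZONE_SECTORS	(256u * 2048)	/* assumed 256 MiB zone in 512-byte sectors */

struct ex_zone_state {
	uint32_t wp_offset;	/* sectors written since the zone start */
};

static void ex_account_write(struct ex_zone_state *zone, uint32_t good_sectors)
{
	if (zone->wp_offset >= EX_ZONE_SECTORS)
		return;			/* zone already full */
	zone->wp_offset += good_sectors;
	if (zone->wp_offset > EX_ZONE_SECTORS)
		zone->wp_offset = EX_ZONE_SECTORS;
}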