scsi: block: Remove REQ_OP_WRITE_SAME support
No more users of REQ_OP_WRITE_SAME or drivers implementing it are left,
so remove the infrastructure.

[mkp: fold in and tweak sysfs reporting fix]

Link: https://lore.kernel.org/r/20220209082828.2629273-8-hch@lst.de
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 73bd66d9c8 (parent a773187e37)
block/blk-core.c
@@ -122,7 +122,6 @@ static const char *const blk_op_name[] = {
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
-	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
@@ -734,10 +733,6 @@ noinline_for_stack bool submit_bio_checks(struct bio *bio)
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
-	case REQ_OP_WRITE_SAME:
-		if (!q->limits.max_write_same_sectors)
-			goto not_supported;
-		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
@@ -933,13 +928,7 @@ void submit_bio(struct bio *bio)
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
-		unsigned int count;
-
-		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
-			count = queue_logical_block_size(
-					bdev_get_queue(bio->bi_bdev)) >> 9;
-		else
-			count = bio_sectors(bio);
+		unsigned int count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
block/blk-lib.c
@@ -151,99 +151,6 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL(blkdev_issue_discard);

-/**
- * __blkdev_issue_write_same - generate number of bios with same page
- * @bdev:	target blockdev
- * @sector:	start sector
- * @nr_sects:	number of sectors to write
- * @gfp_mask:	memory allocation flags (for bio_alloc)
- * @page:	page containing data to write
- * @biop:	pointer to anchor bio
- *
- * Description:
- *  Generate and issue number of bios(REQ_OP_WRITE_SAME) with same page.
- */
-static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
-		struct bio **biop)
-{
-	struct request_queue *q = bdev_get_queue(bdev);
-	unsigned int max_write_same_sectors;
-	struct bio *bio = *biop;
-	sector_t bs_mask;
-
-	if (!q)
-		return -ENXIO;
-
-	if (bdev_read_only(bdev))
-		return -EPERM;
-
-	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
-	if ((sector | nr_sects) & bs_mask)
-		return -EINVAL;
-
-	if (!bdev_write_same(bdev))
-		return -EOPNOTSUPP;
-
-	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
-	max_write_same_sectors = bio_allowed_max_sectors(q);
-
-	while (nr_sects) {
-		bio = blk_next_bio(bio, 1, gfp_mask);
-		bio->bi_iter.bi_sector = sector;
-		bio_set_dev(bio, bdev);
-		bio->bi_vcnt = 1;
-		bio->bi_io_vec->bv_page = page;
-		bio->bi_io_vec->bv_offset = 0;
-		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
-		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);
-
-		if (nr_sects > max_write_same_sectors) {
-			bio->bi_iter.bi_size = max_write_same_sectors << 9;
-			nr_sects -= max_write_same_sectors;
-			sector += max_write_same_sectors;
-		} else {
-			bio->bi_iter.bi_size = nr_sects << 9;
-			nr_sects = 0;
-		}
-		cond_resched();
-	}
-
-	*biop = bio;
-	return 0;
-}
-
-/**
- * blkdev_issue_write_same - queue a write same operation
- * @bdev:	target blockdev
- * @sector:	start sector
- * @nr_sects:	number of sectors to write
- * @gfp_mask:	memory allocation flags (for bio_alloc)
- * @page:	page containing data
- *
- * Description:
- *    Issue a write same request for the sectors in question.
- */
-int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask,
-		struct page *page)
-{
-	struct bio *bio = NULL;
-	struct blk_plug plug;
-	int ret;
-
-	blk_start_plug(&plug);
-	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
-			&bio);
-	if (ret == 0 && bio) {
-		ret = submit_bio_wait(bio);
-		bio_put(bio);
-	}
-	blk_finish_plug(&plug);
-	return ret;
-}
-EXPORT_SYMBOL(blkdev_issue_write_same);
-
 static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
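Note: with blkdev_issue_write_same() gone, the remaining helper for clearing a range from kernel code is blkdev_issue_zeroout(), which prefers REQ_OP_WRITE_ZEROES and can fall back to plain writes. A minimal conversion sketch for a caller that only ever wrote a zero page (example_zero_range() is hypothetical; blkdev_issue_zeroout() and BLKDEV_ZERO_NOFALLBACK are the real API):

#include <linux/blkdev.h>

/* Hypothetical caller, e.g. a target zeroing a range of its backing device. */
static int example_zero_range(struct block_device *bdev, sector_t sector,
			      sector_t nr_sects)
{
	/*
	 * Before this commit:
	 *   blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
	 *			     ZERO_PAGE(0));
	 *
	 * After: let the block layer pick REQ_OP_WRITE_ZEROES or a plain
	 * write fallback; pass BLKDEV_ZERO_NOFALLBACK as the last argument
	 * to get -EOPNOTSUPP instead of the fallback.
	 */
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, 0);
}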
block/blk-merge.c
@@ -152,22 +152,6 @@ static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
 }

-static struct bio *blk_bio_write_same_split(struct request_queue *q,
-		struct bio *bio,
-		struct bio_set *bs,
-		unsigned *nsegs)
-{
-	*nsegs = 1;
-
-	if (!q->limits.max_write_same_sectors)
-		return NULL;
-
-	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
-		return NULL;
-
-	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
-}
-
 /*
  * Return the maximum number of sectors from the start of a bio that may be
  * submitted as a single request to a block device. If enough sectors remain,
@@ -351,10 +335,6 @@ void __blk_queue_split(struct request_queue *q, struct bio **bio,
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
-	case REQ_OP_WRITE_SAME:
-		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
-				nr_segs);
-		break;
	default:
		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
		break;
@@ -416,8 +396,6 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
		return 1;
	case REQ_OP_WRITE_ZEROES:
		return 0;
-	case REQ_OP_WRITE_SAME:
-		return 1;
	}

	rq_for_each_bvec(bv, rq, iter)
@@ -555,8 +533,6 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq,

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
-	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
-		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

@@ -757,13 +733,6 @@ static enum elv_merge blk_try_req_merge(struct request *req,
	return ELEVATOR_NO_MERGE;
 }

-static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
-{
-	if (bio_page(a) == bio_page(b) && bio_offset(a) == bio_offset(b))
-		return true;
-	return false;
-}
-
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
  * For mq with scheduling, the appropriate queue wide lock should be held.
@@ -780,10 +749,6 @@ static struct request *attempt_merge(struct request_queue *q,
	if (rq_data_dir(req) != rq_data_dir(next))
		return NULL;

-	if (req_op(req) == REQ_OP_WRITE_SAME &&
-	    !blk_write_same_mergeable(req->bio, next->bio))
-		return NULL;
-
	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
@@ -912,11 +877,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
	if (!bio_crypt_rq_ctx_compatible(rq, bio))
		return false;

-	/* must be using the same buffer */
-	if (req_op(rq) == REQ_OP_WRITE_SAME &&
-	    !blk_write_same_mergeable(rq->bio, bio))
-		return false;
-
	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
block/blk-settings.c
@@ -42,7 +42,6 @@ void blk_set_default_limits(struct queue_limits *lim)
	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	lim->max_dev_sectors = 0;
	lim->chunk_sectors = 0;
-	lim->max_write_same_sectors = 0;
	lim->max_write_zeroes_sectors = 0;
	lim->max_zone_append_sectors = 0;
	lim->max_discard_sectors = 0;
@@ -79,7 +78,6 @@ void blk_set_stacking_limits(struct queue_limits *lim)
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
-	lim->max_write_same_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
 }
@@ -178,18 +176,6 @@ void blk_queue_max_discard_sectors(struct request_queue *q,
 }
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);

-/**
- * blk_queue_max_write_same_sectors - set max sectors for a single write same
- * @q:  the request queue for the device
- * @max_write_same_sectors: maximum number of sectors to write per command
- **/
-void blk_queue_max_write_same_sectors(struct request_queue *q,
-				      unsigned int max_write_same_sectors)
-{
-	q->limits.max_write_same_sectors = max_write_same_sectors;
-}
-EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
-
 /**
  * blk_queue_max_write_zeroes_sectors - set max sectors for a single
  *                                      write zeroes
@@ -519,8 +505,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
-	t->max_write_same_sectors = min(t->max_write_same_sectors,
-					b->max_write_same_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
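Note: the removed limit was stacked with min() exactly like max_write_zeroes_sectors still is. A sketch of how a hypothetical stacking driver ends up with the smaller of its members' limits, using the real blk_set_stacking_limits()/blk_stack_limits() helpers:

#include <linux/blkdev.h>

/* Hypothetical: combine the limits of two member devices into a target. */
static void example_stack_limits(struct queue_limits *t,
				 struct queue_limits *b1,
				 struct queue_limits *b2)
{
	blk_set_stacking_limits(t);	/* start from UINT_MAX "no limit" */
	blk_stack_limits(t, b1, 0);	/* min()s each field in turn */
	blk_stack_limits(t, b2, 0);
	/*
	 * t->max_write_zeroes_sectors now holds the smaller of the two
	 * members' limits; max_write_same_sectors was stacked the same
	 * way before this commit.
	 */
}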
block/blk-sysfs.c
@@ -214,8 +214,7 @@ static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)

 static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
 {
-	return sprintf(page, "%llu\n",
-		(unsigned long long)q->limits.max_write_same_sectors << 9);
+	return queue_var_show(0, page);
 }

 static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
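Note: the sysfs attribute itself is kept so existing tooling does not break; it now always reports 0 bytes (this is the sysfs reporting fix folded in by mkp). A small userspace sketch of reading it (the device name "sda" is an example):

#include <stdio.h>

int main(void)
{
	/* Path for an example disk; substitute the real device name. */
	FILE *f = fopen("/sys/block/sda/queue/write_same_max_bytes", "r");
	unsigned long long bytes;

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &bytes) == 1)
		printf("write_same_max_bytes = %llu\n", bytes); /* now 0 */
	fclose(f);
	return 0;
}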
block/blk-zoned.c
@@ -65,7 +65,6 @@ bool blk_req_needs_zone_write_lock(struct request *rq)

	switch (req_op(rq)) {
	case REQ_OP_WRITE_ZEROES:
-	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE:
		return blk_rq_zone_is_seq(rq);
	default:
block/blk.h
@@ -286,7 +286,6 @@ static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
-	case REQ_OP_WRITE_SAME:
		return true; /* non-trivial splitting decisions */
	default:
		break;
block/bounce.c
@@ -181,9 +181,6 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		break;
-	case REQ_OP_WRITE_SAME:
-		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
-		break;
	default:
		bio_for_each_segment(bv, bio_src, iter)
			bio->bi_io_vec[bio->bi_vcnt++] = bv;
include/linux/bio.h
@@ -65,7 +65,6 @@ static inline bool bio_no_advance_iter(const struct bio *bio)
 {
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
-	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
 }

@@ -186,8 +185,6 @@ static inline unsigned bio_segments(struct bio *bio)
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
-	case REQ_OP_WRITE_SAME:
-		return 1;
	default:
		break;
	}
include/linux/blk_types.h
@@ -354,8 +354,6 @@ enum req_opf {
	REQ_OP_DISCARD		= 3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
-	/* write the same sector many times */
-	REQ_OP_WRITE_SAME	= 7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,
	/* Open a zone */
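Note: slot 7 is left as a gap rather than renumbering the other ops. In this encoding the low bit of the op number gives the data direction, which is why the ops that modify the device (WRITE = 1, DISCARD = 3, SECURE_ERASE = 5, WRITE_ZEROES = 9) are all odd, and why the direction test in the same header is a single bit test:

/* From include/linux/blk_types.h: bit 0 of the op encodes the direction. */
#define op_is_write(op)	(!!((op) & 1))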
include/linux/blkdev.h
@@ -97,7 +97,6 @@ struct queue_limits {
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
-	unsigned int		max_write_same_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		max_zone_append_sectors;
	unsigned int		discard_granularity;
@@ -651,9 +650,6 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

-	if (unlikely(op == REQ_OP_WRITE_SAME))
-		return q->limits.max_write_same_sectors;
-
	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

@@ -696,8 +692,6 @@ extern void blk_queue_max_discard_segments(struct request_queue *,
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
-extern void blk_queue_max_write_same_sectors(struct request_queue *q,
-		unsigned int max_write_same_sectors);
 extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
@@ -842,9 +836,6 @@ static inline long nr_blockdev_pages(void)

 extern void blk_io_schedule(void);

-extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
-
 #define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */

 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
@@ -1071,16 +1062,6 @@ static inline int bdev_discard_alignment(struct block_device *bdev)
	return q->limits.discard_alignment;
 }

-static inline unsigned int bdev_write_same(struct block_device *bdev)
-{
-	struct request_queue *q = bdev_get_queue(bdev);
-
-	if (q)
-		return q->limits.max_write_same_sectors;
-
-	return 0;
-}
-
 static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
 {
	struct request_queue *q = bdev_get_queue(bdev);
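Note: code that used bdev_write_same() to probe for hardware support can use the surviving bdev_write_zeroes_sectors() helper, visible just below it in this hunk, in the same non-zero-means-supported style. A hedged sketch (the probe function is hypothetical):

#include <linux/blkdev.h>

/* Hypothetical probe: can the device offload zeroing? */
static bool example_can_offload_zeroing(struct block_device *bdev)
{
	/* A non-zero limit means REQ_OP_WRITE_ZEROES is supported. */
	return bdev_write_zeroes_sectors(bdev) != 0;
}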
kernel/trace/blktrace.c
@@ -1892,7 +1892,6 @@ void blk_fill_rwbs(char *rwbs, unsigned int op)

	switch (op & REQ_OP_MASK) {
	case REQ_OP_WRITE:
-	case REQ_OP_WRITE_SAME:
		rwbs[i++] = 'W';
		break;
	case REQ_OP_DISCARD:
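Note: after this hunk only REQ_OP_WRITE maps to 'W', since a WRITE SAME request can no longer reach blk_fill_rwbs(). For reference, a sketch of how the helper is used when formatting a trace record (the buffer size follows the RWBS_LEN convention; 8 is an assumption here):

#include <linux/blktrace_api.h>

static void example_trace_fmt(void)
{
	char rwbs[8];	/* RWBS_LEN-sized buffer; size assumed */

	blk_fill_rwbs(rwbs, REQ_OP_WRITE | REQ_SYNC);
	/* rwbs now reads "WS": 'W' for the write op, 'S' for REQ_SYNC. */
}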