block: Remove REQ_OP_ZONE_RESET_ALL emulation
Now that device mapper can handle resetting all zones of a mapped zoned device
using REQ_OP_ZONE_RESET_ALL, all zoned block device drivers support this
operation. With this, the request queue feature BLK_FEAT_ZONE_RESETALL is no
longer necessary and the emulation code in blk-zoned.c can be removed.

Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20240704052816.623865-5-dlemoal@kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 81e7706345
commit f2a7bea237
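For orientation, here is a caller-side sketch of the behavior the commit message describes: after this change, a reset that spans the whole device and is issued through blkdev_zone_mgmt() (whose signature appears in the block/blk-zoned.c hunk below) is always serviced by a single REQ_OP_ZONE_RESET_ALL, with no per-zone emulation fallback. The helper name reset_all_zones_example() is hypothetical and not part of this commit.

/* Illustrative only; not part of this commit. */
#include <linux/blkdev.h>

/* Hypothetical helper: reset every zone of a zoned block device. */
static int reset_all_zones_example(struct block_device *bdev)
{
	if (!bdev_is_zoned(bdev))
		return -EOPNOTSUPP;

	/*
	 * sector == 0 and nr_sectors == device capacity make
	 * blkdev_zone_mgmt() take the REQ_OP_ZONE_RESET_ALL path.
	 */
	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, 0,
				bdev_nr_sectors(bdev));
}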
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -830,11 +830,8 @@ void submit_bio_noacct(struct bio *bio)
 	case REQ_OP_ZONE_OPEN:
 	case REQ_OP_ZONE_CLOSE:
 	case REQ_OP_ZONE_FINISH:
-		if (!bdev_is_zoned(bio->bi_bdev))
-			goto not_supported;
-		break;
 	case REQ_OP_ZONE_RESET_ALL:
-		if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
+		if (!bdev_is_zoned(bio->bi_bdev))
 			goto not_supported;
 		break;
 	case REQ_OP_DRV_IN:
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -157,70 +157,6 @@ static inline unsigned long *blk_alloc_zone_bitmap(int node,
 			    GFP_NOIO, node);
 }
 
-static int blk_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
-				  void *data)
-{
-	/*
-	 * For an all-zones reset, ignore conventional, empty, read-only
-	 * and offline zones.
-	 */
-	switch (zone->cond) {
-	case BLK_ZONE_COND_NOT_WP:
-	case BLK_ZONE_COND_EMPTY:
-	case BLK_ZONE_COND_READONLY:
-	case BLK_ZONE_COND_OFFLINE:
-		return 0;
-	default:
-		set_bit(idx, (unsigned long *)data);
-		return 0;
-	}
-}
-
-static int blkdev_zone_reset_all_emulated(struct block_device *bdev)
-{
-	struct gendisk *disk = bdev->bd_disk;
-	sector_t capacity = bdev_nr_sectors(bdev);
-	sector_t zone_sectors = bdev_zone_sectors(bdev);
-	unsigned long *need_reset;
-	struct bio *bio = NULL;
-	sector_t sector = 0;
-	int ret;
-
-	need_reset = blk_alloc_zone_bitmap(disk->queue->node, disk->nr_zones);
-	if (!need_reset)
-		return -ENOMEM;
-
-	ret = disk->fops->report_zones(disk, 0, disk->nr_zones,
-				       blk_zone_need_reset_cb, need_reset);
-	if (ret < 0)
-		goto out_free_need_reset;
-
-	ret = 0;
-	while (sector < capacity) {
-		if (!test_bit(disk_zone_no(disk, sector), need_reset)) {
-			sector += zone_sectors;
-			continue;
-		}
-
-		bio = blk_next_bio(bio, bdev, 0, REQ_OP_ZONE_RESET | REQ_SYNC,
-				   GFP_KERNEL);
-		bio->bi_iter.bi_sector = sector;
-		sector += zone_sectors;
-
-		/* This may take a while, so be nice to others */
-		cond_resched();
-	}
-
-	if (bio) {
-		ret = submit_bio_wait(bio);
-		bio_put(bio);
-	}
-
-out_free_need_reset:
-	kfree(need_reset);
-	return ret;
-}
-
 static int blkdev_zone_reset_all(struct block_device *bdev)
 {
 	struct bio bio;
@@ -247,7 +183,6 @@ static int blkdev_zone_reset_all(struct block_device *bdev)
 int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
 		     sector_t sector, sector_t nr_sectors)
 {
-	struct request_queue *q = bdev_get_queue(bdev);
 	sector_t zone_sectors = bdev_zone_sectors(bdev);
 	sector_t capacity = bdev_nr_sectors(bdev);
 	sector_t end_sector = sector + nr_sectors;
@@ -275,16 +210,11 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
 		return -EINVAL;
 
 	/*
-	 * In the case of a zone reset operation over all zones,
-	 * REQ_OP_ZONE_RESET_ALL can be used with devices supporting this
-	 * command. For other devices, we emulate this command behavior by
-	 * identifying the zones needing a reset.
+	 * In the case of a zone reset operation over all zones, use
+	 * REQ_OP_ZONE_RESET_ALL.
 	 */
-	if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) {
-		if (!blk_queue_zone_resetall(q))
-			return blkdev_zone_reset_all_emulated(bdev);
+	if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity)
 		return blkdev_zone_reset_all(bdev);
-	}
 
 	while (sector < end_sector) {
 		bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, GFP_KERNEL);
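With the emulation gone, the whole-device reset case in blkdev_zone_mgmt() above always funnels into blkdev_zone_reset_all(). As a rough sketch of that retained path (assumed from the on-stack struct bio visible in the context lines, not quoted from this commit), it amounts to submitting one synchronous REQ_OP_ZONE_RESET_ALL bio:

/* Assumed shape of the retained helper; illustrative, not quoted from this commit. */
static int zone_reset_all_sketch(struct block_device *bdev)
{
	struct bio bio;

	/* One bio covers the whole device; no zone bitmap or per-zone loop. */
	bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC);
	return submit_bio_wait(&bio);
}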
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -164,7 +164,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
 		sector += dev->zone_size_sects;
 	}
 
-	lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
+	lim->features |= BLK_FEAT_ZONED;
 	lim->chunk_sectors = dev->zone_size_sects;
 	lim->max_zone_append_sectors = dev->zone_append_max_sectors;
 	lim->max_open_zones = dev->zone_max_open;
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -2194,7 +2194,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
 		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
 			return -EOPNOTSUPP;
 
-		lim.features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
+		lim.features |= BLK_FEAT_ZONED;
 		lim.max_active_zones = p->max_active_zones;
 		lim.max_open_zones = p->max_open_zones;
 		lim.max_zone_append_sectors = p->max_zone_append_sectors;
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -728,7 +728,7 @@ static int virtblk_read_zoned_limits(struct virtio_blk *vblk,
 
 	dev_dbg(&vdev->dev, "probing host-managed zoned device\n");
 
-	lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
+	lim->features |= BLK_FEAT_ZONED;
 
 	virtio_cread(vdev, struct virtio_blk_config,
 		     zoned.max_open_zones, &v);
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -108,7 +108,7 @@ free_data:
 void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
 			   struct nvme_zone_info *zi)
 {
-	lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
+	lim->features |= BLK_FEAT_ZONED;
 	lim->max_open_zones = zi->max_open_zones;
 	lim->max_active_zones = zi->max_active_zones;
 	lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -599,7 +599,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim,
 	if (sdkp->device->type != TYPE_ZBC)
 		return 0;
 
-	lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
+	lim->features |= BLK_FEAT_ZONED;
 
 	/*
 	 * Per ZBC and ZAC specifications, writes in sequential write required
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -318,9 +318,6 @@ typedef unsigned int __bitwise blk_features_t;
 /* is a zoned device */
 #define BLK_FEAT_ZONED			((__force blk_features_t)(1u << 10))
 
-/* supports Zone Reset All */
-#define BLK_FEAT_ZONE_RESETALL		((__force blk_features_t)(1u << 11))
-
 /* supports PCI(e) p2p requests */
 #define BLK_FEAT_PCI_P2PDMA		((__force blk_features_t)(1u << 12))
 
@@ -618,8 +615,6 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)	(!((q)->limits.features & BLK_FEAT_ROTATIONAL))
 #define blk_queue_io_stat(q)	((q)->limits.features & BLK_FEAT_IO_STAT)
-#define blk_queue_zone_resetall(q)	\
-	((q)->limits.features & BLK_FEAT_ZONE_RESETALL)
 #define blk_queue_dax(q)	((q)->limits.features & BLK_FEAT_DAX)
 #define blk_queue_pci_p2pdma(q)	((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
 #ifdef CONFIG_BLK_RQ_ALLOC_TIME