dm: make dm_set_zones_restrictions work on the queue limits

Instead of stuffing the values directly into the queue without any
synchronization, let dm_set_zones_restrictions work on the limits
structure and have the caller apply the queue limits afterwards.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20240527123634.1116952-4-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit c8c1f7012b (parent 5e7a4bbcc3)
Author: Christoph Hellwig <hch@lst.de>
Date: 2024-05-27 14:36:20 +02:00
Committer: Jens Axboe <axboe@kernel.dk>

3 files changed, 14 insertions(+), 12 deletions(-)

--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c

@@ -1981,10 +1981,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	if (!dm_table_supports_secure_erase(t))
 		limits->max_secure_erase_sectors = 0;
 
-	r = queue_limits_set(q, limits);
-	if (r)
-		return r;
-
 	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
 		wc = true;
 		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
@@ -2036,12 +2032,16 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 * For a zoned target, setup the zones related queue attributes
 	 * and resources necessary for zone append emulation if necessary.
 	 */
-	if (blk_queue_is_zoned(q)) {
-		r = dm_set_zones_restrictions(t, q);
+	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && limits->zoned) {
+		r = dm_set_zones_restrictions(t, q, limits);
 		if (r)
 			return r;
 	}
 
+	r = queue_limits_set(q, limits);
+	if (r)
+		return r;
+
 	dm_update_crypto_profile(q, t);
 
 	/*

--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c

@@ -220,7 +220,8 @@ static bool dm_table_supports_zone_append(struct dm_table *t)
 	return true;
 }
 
-int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
+int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
+		struct queue_limits *lim)
 {
 	struct mapped_device *md = t->md;
 	struct gendisk *disk = md->disk;
@@ -236,7 +237,7 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
 		clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
 	} else {
 		set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
-		blk_queue_max_zone_append_sectors(q, 0);
+		lim->max_zone_append_sectors = 0;
 	}
 
 	if (!get_capacity(md->disk))
@@ -260,9 +261,9 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
 	 * a regular device.
 	 */
 	if (nr_conv_zones >= ret) {
-		disk->queue->limits.max_open_zones = 0;
-		disk->queue->limits.max_active_zones = 0;
-		disk->queue->limits.zoned = false;
+		lim->max_open_zones = 0;
+		lim->max_active_zones = 0;
+		lim->zoned = false;
 		clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
 		disk->nr_zones = 0;
 		return 0;

--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h

@@ -101,7 +101,8 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
 /*
  * Zoned targets related functions.
  */
-int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q);
+int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
+		struct queue_limits *lim);
 void dm_zone_endio(struct dm_io *io, struct bio *clone);
 #ifdef CONFIG_BLK_DEV_ZONED
 int dm_blk_report_zones(struct gendisk *disk, sector_t sector,