btrfs: zoned: cache if block group is on a sequential zone
On a zoned filesystem, cache whether a block group is placed on a sequential write only zone. On sequential write only zones we can use REQ_OP_ZONE_APPEND for writing data, so provide btrfs_use_zone_append() to figure out whether an IO targets a sequential write only zone and can therefore be submitted as REQ_OP_ZONE_APPEND.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 138082f366
commit 08f455593f
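For context, the checks performed by btrfs_use_zone_append() in the patch below boil down to: the filesystem must be zoned, the device must report a non-zero max zone append size, the inode must be a data inode, and the block group backing the extent must have its cached seq_zone flag set. The userspace sketch that follows only models that decision flow; the names (fs_ctx, block_group, pick_write_op) and their fields are illustrative stand-ins, not btrfs structures or APIs.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; these are not real btrfs types. */
struct block_group {
        bool seq_zone;                  /* cached: backed by a sequential write only zone */
};

struct fs_ctx {
        bool zoned;                     /* filesystem sits on zoned devices */
        unsigned int max_zone_append;   /* 0 if zone append is not supported */
};

enum write_op { OP_WRITE, OP_ZONE_APPEND };

/*
 * Models the decision flow of btrfs_use_zone_append(): every condition
 * must hold, otherwise fall back to a regular write.
 */
static enum write_op pick_write_op(const struct fs_ctx *fs, bool is_data,
                                   const struct block_group *bg)
{
        if (!fs->zoned || !fs->max_zone_append || !is_data || !bg)
                return OP_WRITE;
        return bg->seq_zone ? OP_ZONE_APPEND : OP_WRITE;
}

int main(void)
{
        struct fs_ctx fs = { .zoned = true, .max_zone_append = 64 * 1024 };
        struct block_group seq = { .seq_zone = true };
        struct block_group conv = { .seq_zone = false };

        printf("data write, sequential zone:   %s\n",
               pick_write_op(&fs, true, &seq) == OP_ZONE_APPEND ?
                        "REQ_OP_ZONE_APPEND" : "REQ_OP_WRITE");
        printf("data write, conventional zone: %s\n",
               pick_write_op(&fs, true, &conv) == OP_ZONE_APPEND ?
                        "REQ_OP_ZONE_APPEND" : "REQ_OP_WRITE");
        return 0;
}

In the kernel, a true result from the real helper is what allows a data write to be issued as REQ_OP_ZONE_APPEND instead of a regular write.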
@@ -181,6 +181,9 @@ struct btrfs_block_group {
         */
        int needs_free_space;
 
+       /* Flag indicating this block group is placed on a sequential zone */
+       bool seq_zone;
+
        /* Record locked full stripes for RAID5/6 block group */
        struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
 
@@ -1101,6 +1101,9 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
                }
        }
 
+       if (num_sequential > 0)
+               cache->seq_zone = true;
+
        if (num_conventional > 0) {
                /*
                 * Avoid calling calculate_alloc_pointer() for new BG. It
@@ -1218,3 +1221,29 @@ void btrfs_free_redirty_list(struct btrfs_transaction *trans)
        }
        spin_unlock(&trans->releasing_ebs_lock);
 }
+
+bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em)
+{
+       struct btrfs_fs_info *fs_info = inode->root->fs_info;
+       struct btrfs_block_group *cache;
+       bool ret = false;
+
+       if (!btrfs_is_zoned(fs_info))
+               return false;
+
+       if (!fs_info->max_zone_append_size)
+               return false;
+
+       if (!is_data_inode(&inode->vfs_inode))
+               return false;
+
+       cache = btrfs_lookup_block_group(fs_info, em->block_start);
+       ASSERT(cache);
+       if (!cache)
+               return false;
+
+       ret = cache->seq_zone;
+       btrfs_put_block_group(cache);
+
+       return ret;
+}
@@ -46,6 +46,7 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache);
 void btrfs_redirty_list_add(struct btrfs_transaction *trans,
                            struct extent_buffer *eb);
 void btrfs_free_redirty_list(struct btrfs_transaction *trans);
+bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em);
 #else /* CONFIG_BLK_DEV_ZONED */
 static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
                                      struct blk_zone *zone)
@@ -133,6 +134,11 @@ static inline void btrfs_redirty_list_add(struct btrfs_transaction *trans,
                                           struct extent_buffer *eb) { }
 static inline void btrfs_free_redirty_list(struct btrfs_transaction *trans) { }
 
+static inline bool btrfs_use_zone_append(struct btrfs_inode *inode,
+                                         struct extent_map *em)
+{
+       return false;
+}
 #endif
 
 static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)