for-6.8-rc6-tag

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEE8rQSAMVO+zA4DBdWxWXV+ddtWDsFAmXcsfAACgkQxWXV+ddt
 WDt3XA/6AkPT8QNT+mOyp4NjPzquR4UMIPVGGvjWTeKNtjNnco9gPkOBWsHeeDQe
 aiihh3X2NpNtsduEmqaz717EJW4za9lplGiyPR51H/pTfGfOthWL6Nj+auTPva3t
 GnlYh+GUQ+44JJ5+biOK5HUpEEeUR87EN2z5lTWsHAxg7PolBiKYKvV4Wp33xJqR
 ILGlYw04reOAljTn0Zf738IL5WpY9etj1GnNxQeEKFRrdF1GH1i6r/JRONU1hGHu
 EiZT6XwoN07V+JURB+fPqtY1IXODDC8904OwLg5fKhBggWvR2IaiW1XH+ToFXQgU
 idae1+Dy85Hi4s40SL5GcSO8mVHPEGEspwM/5G87YqIu3uH4L9+Wd4zTwVYLcwNm
 SSUCDGj2d+/JIug5dPBV8GL7jrhPNnPOu8HR+bIxY9XUhyf+IZVlUNYlorup3lbm
 rAouZiCevRhQRBAx33Id5ZOMhlIpPONKObcCEKmdm6WLlnkkqgKQbnapd/I/1mfT
 nP5N7oWUtfXO4oq4k5XpJBcTVhXU+DzpQ7EMDGv3mSmIem0wsDmXPbF2MfoSIim8
 UuToZ1YF5MuxNLGwYnpkUaxWhKKOFWMvAe65eXP+ureIjOJwQ4f85Nkro0JvKbr8
 nVdzl3rDy49tnqW7Qu3vaNPOQneuWaOqCoQcYDcVAiqk11UhH9E=
 =mBP6
 -----END PGP SIGNATURE-----

Merge tag 'for-6.8-rc6-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:
 "A  more fixes for recently reported or discovered problems:

   - fix corner case of send that would generate potentially large
     stream of zeros if there's a hole at the end of the file

   - fix chunk validation in zoned mode on conventional zones, it was
     possible to create chunks that would not be allowed on sequential
     zones

   - fix validation of dev-replace ioctl filenames

   - fix KCSAN warnings about access to block reserve struct members"

* tag 'for-6.8-rc6-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: fix data race at btrfs_use_block_rsv() when accessing block reserve
  btrfs: fix data races when accessing the reserved amount of block reserves
  btrfs: send: don't issue unnecessary zero writes for trailing hole
  btrfs: dev-replace: properly validate device names
  btrfs: zoned: don't skip block group profile checks on conventional zones
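
The two KCSAN fixes listed above (the "fix data race"/"fix data races" commits) follow one
pattern: instead of reading a block reserve's counters with plain loads, take the reserve's
spinlock and copy the value out, accepting that the snapshot may be stale. Below is a minimal
userspace sketch of that locked-read pattern, not btrfs code: the names (space_rsv,
rsv_reserved_locked, rsv_add) are made up for illustration and a pthread mutex stands in for
the kernel spinlock.

/*
 * Minimal userspace analogue of the locked-read helpers added in block-rsv.h
 * below.  Readers snapshot a shared 64-bit counter under the same lock the
 * writers hold, so the value may be stale but is never torn and is never an
 * unannotated racy access.
 */
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct space_rsv {			/* illustrative, not a btrfs struct */
	pthread_mutex_t lock;		/* protects reserved and size */
	uint64_t reserved;
	uint64_t size;
};

/* Snapshot ->reserved under the lock; stale is fine, torn is not. */
static uint64_t rsv_reserved_locked(struct space_rsv *rsv)
{
	uint64_t ret;

	pthread_mutex_lock(&rsv->lock);
	ret = rsv->reserved;
	pthread_mutex_unlock(&rsv->lock);
	return ret;
}

/* Writer side: the counters are only ever modified with the lock held. */
static void rsv_add(struct space_rsv *rsv, uint64_t bytes)
{
	pthread_mutex_lock(&rsv->lock);
	rsv->reserved += bytes;
	rsv->size += bytes;
	pthread_mutex_unlock(&rsv->lock);
}

int main(void)
{
	struct space_rsv rsv = { .reserved = 0, .size = 0 };

	pthread_mutex_init(&rsv.lock, NULL);
	rsv_add(&rsv, 4096);
	printf("reserved=%" PRIu64 "\n", rsv_reserved_locked(&rsv));
	return 0;
}

The design choice mirrors the helpers in the block-rsv.h hunk: callers such as the preemptive
flushing heuristics tolerate slightly stale numbers, so the lock is held only long enough to
make the read well defined rather than to make the decision atomic.
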
Linus Torvalds 2024-02-26 11:00:54 -08:00
commit b6c1f1ecb3
6 changed files with 88 additions and 22 deletions

fs/btrfs/block-rsv.c

@@ -494,7 +494,7 @@ struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
 	block_rsv = get_block_rsv(trans, root);
 
-	if (unlikely(block_rsv->size == 0))
+	if (unlikely(btrfs_block_rsv_size(block_rsv) == 0))
 		goto try_reserve;
 again:
 	ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize);

fs/btrfs/block-rsv.h

@@ -101,4 +101,36 @@ static inline bool btrfs_block_rsv_full(const struct btrfs_block_rsv *rsv)
 	return data_race(rsv->full);
 }
 
+/*
+ * Get the reserved amount of a block reserve in a context where getting a
+ * stale value is acceptable, instead of accessing it directly and triggering a
+ * data race warning from KCSAN.
+ */
+static inline u64 btrfs_block_rsv_reserved(struct btrfs_block_rsv *rsv)
+{
+	u64 ret;
+
+	spin_lock(&rsv->lock);
+	ret = rsv->reserved;
+	spin_unlock(&rsv->lock);
+
+	return ret;
+}
+
+/*
+ * Get the size of a block reserve in a context where getting a stale value is
+ * acceptable, instead of accessing it directly and triggering a data race
+ * warning from KCSAN.
+ */
+static inline u64 btrfs_block_rsv_size(struct btrfs_block_rsv *rsv)
+{
+	u64 ret;
+
+	spin_lock(&rsv->lock);
+	ret = rsv->size;
+	spin_unlock(&rsv->lock);
+
+	return ret;
+}
+
 #endif /* BTRFS_BLOCK_RSV_H */

fs/btrfs/dev-replace.c

@@ -725,6 +725,23 @@ leave:
 	return ret;
 }
 
+static int btrfs_check_replace_dev_names(struct btrfs_ioctl_dev_replace_args *args)
+{
+	if (args->start.srcdevid == 0) {
+		if (memchr(args->start.srcdev_name, 0,
+			   sizeof(args->start.srcdev_name)) == NULL)
+			return -ENAMETOOLONG;
+	} else {
+		args->start.srcdev_name[0] = 0;
+	}
+
+	if (memchr(args->start.tgtdev_name, 0,
+		   sizeof(args->start.tgtdev_name)) == NULL)
+		return -ENAMETOOLONG;
+
+	return 0;
+}
+
 int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
 			       struct btrfs_ioctl_dev_replace_args *args)
 {
@@ -737,10 +754,9 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
 	default:
 		return -EINVAL;
 	}
 
-	if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') ||
-	    args->start.tgtdev_name[0] == '\0')
-		return -EINVAL;
+	ret = btrfs_check_replace_dev_names(args);
+	if (ret < 0)
+		return ret;
 
 	ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
 				      args->start.srcdevid,
fs/btrfs/send.c

@@ -6705,11 +6705,20 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
 			if (ret)
 				goto out;
 		}
-		if (sctx->cur_inode_last_extent <
-		    sctx->cur_inode_size) {
-			ret = send_hole(sctx, sctx->cur_inode_size);
-			if (ret)
+		if (sctx->cur_inode_last_extent < sctx->cur_inode_size) {
+			ret = range_is_hole_in_parent(sctx,
+						      sctx->cur_inode_last_extent,
+						      sctx->cur_inode_size);
+			if (ret < 0) {
 				goto out;
+			} else if (ret == 0) {
+				ret = send_hole(sctx, sctx->cur_inode_size);
+				if (ret < 0)
+					goto out;
+			} else {
+				/* Range is already a hole, skip. */
+				ret = 0;
+			}
 		}
 	}
 	if (need_truncate) {

fs/btrfs/space-info.c

@@ -856,7 +856,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
 static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
 				    struct btrfs_space_info *space_info)
 {
-	u64 global_rsv_size = fs_info->global_block_rsv.reserved;
+	const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
 	u64 ordered, delalloc;
 	u64 thresh;
 	u64 used;
@@ -956,8 +956,8 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
 	ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
 	delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
 	if (ordered >= delalloc)
-		used += fs_info->delayed_refs_rsv.reserved +
-			fs_info->delayed_block_rsv.reserved;
+		used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) +
+			btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv);
 	else
 		used += space_info->bytes_may_use - global_rsv_size;
 
@@ -1173,7 +1173,7 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
 		enum btrfs_flush_state flush;
 		u64 delalloc_size = 0;
 		u64 to_reclaim, block_rsv_size;
-		u64 global_rsv_size = global_rsv->reserved;
+		const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv);
 
 		loops++;
 
@@ -1185,9 +1185,9 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
 		 * assume it's tied up in delalloc reservations.
 		 */
 		block_rsv_size = global_rsv_size +
-				 delayed_block_rsv->reserved +
-				 delayed_refs_rsv->reserved +
-				 trans_rsv->reserved;
+				 btrfs_block_rsv_reserved(delayed_block_rsv) +
+				 btrfs_block_rsv_reserved(delayed_refs_rsv) +
+				 btrfs_block_rsv_reserved(trans_rsv);
 		if (block_rsv_size < space_info->bytes_may_use)
 			delalloc_size = space_info->bytes_may_use - block_rsv_size;
 
@@ -1207,16 +1207,16 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
 			to_reclaim = delalloc_size;
 			flush = FLUSH_DELALLOC;
 		} else if (space_info->bytes_pinned >
-			   (delayed_block_rsv->reserved +
-			    delayed_refs_rsv->reserved)) {
+			   (btrfs_block_rsv_reserved(delayed_block_rsv) +
+			    btrfs_block_rsv_reserved(delayed_refs_rsv))) {
 			to_reclaim = space_info->bytes_pinned;
 			flush = COMMIT_TRANS;
-		} else if (delayed_block_rsv->reserved >
-			   delayed_refs_rsv->reserved) {
-			to_reclaim = delayed_block_rsv->reserved;
+		} else if (btrfs_block_rsv_reserved(delayed_block_rsv) >
+			   btrfs_block_rsv_reserved(delayed_refs_rsv)) {
+			to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv);
 			flush = FLUSH_DELAYED_ITEMS_NR;
 		} else {
-			to_reclaim = delayed_refs_rsv->reserved;
+			to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv);
 			flush = FLUSH_DELAYED_REFS_NR;
 		}

fs/btrfs/zoned.c

@@ -1639,6 +1639,15 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 	}
 
 out:
+	/* Reject non-SINGLE data profiles without RST */
+	if ((map->type & BTRFS_BLOCK_GROUP_DATA) &&
+	    (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
+	    !fs_info->stripe_root) {
+		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+			  btrfs_bg_type_to_raid_name(map->type));
+		return -EINVAL;
+	}
+
 	if (cache->alloc_offset > cache->zone_capacity) {
 		btrfs_err(fs_info,
 			  "zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",