Mirror of https://github.com/torvalds/linux.git
for-5.17-rc5-tag

Merge tag 'for-5.17-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:

  "This is hopefully the last batch of fixes for defrag that got broken
   in 5.16, all stable material. The remaining reported problem is
   excessive IO with autodefrag, due to various conditions in the defrag
   code not being met or missing."

* tag 'for-5.17-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: reduce extent threshold for autodefrag
  btrfs: autodefrag: only scan one inode once
  btrfs: defrag: don't use merged extent map for their generation check
  btrfs: defrag: bring back the old file extent search behavior
  btrfs: defrag: remove an ambiguous condition for rejection
  btrfs: defrag: don't defrag extents which are already at max capacity
  btrfs: defrag: don't try to merge regular extents with preallocated extents
  btrfs: defrag: allow defrag_one_cluster() to skip large extent which is not a target
  btrfs: prevent copying too big compressed lzo segment
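For orientation before the diff: the central autodefrag change in this pull is that the write path now passes an extent size threshold down to the defrag queue, so only extents smaller than the triggering small write are considered (see the btrfs_add_inode_defrag() and inode_should_defrag() hunks below, and the min()-merge of entry->extent_thresh when an inode is queued twice). The following is a minimal, standalone C sketch of that decision, not kernel code; the helper name defrag_candidate() is made up for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Standalone sketch (not the kernel implementation): an extent is an
 * autodefrag candidate only when it is smaller than the threshold the
 * write path passed down -- the "small write" size in inode_should_defrag().
 */
static bool defrag_candidate(uint64_t extent_len, uint32_t extent_thresh)
{
        /* Extents at or above the threshold are left alone. */
        return extent_len < extent_thresh;
}

int main(void)
{
        const uint32_t thresh = 64 * 1024;      /* e.g. a 64K small-write threshold */
        const uint64_t lens[] = { 16 * 1024, 64 * 1024, 256 * 1024 };

        for (size_t i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
                printf("extent of %llu bytes -> %s\n",
                       (unsigned long long)lens[i],
                       defrag_candidate(lens[i], thresh) ? "defrag" : "skip");
        return 0;
}

With a 64K threshold only the 16K extent would be queued; the larger extents are skipped, which is exactly the "reduce extent threshold for autodefrag" behaviour in the diff.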
This commit is contained in:
commit c0419188b5
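A second structural change visible below: autodefrag no longer requeues an inode through last_offset/cycled state; __btrfs_run_defrag_inode() now walks the file forward once with a local cursor that advances by at least one sector per batch. A rough standalone sketch of that loop shape, with hypothetical helpers standing in for btrfs_defrag_file() and i_size_read():

#include <stdint.h>
#include <stdio.h>

#define SECTORSIZE 4096ULL

/* Hypothetical stand-ins: a ten-sector file, and each "defrag batch"
 * makes three sectors of progress. */
static const uint64_t file_size = 10 * SECTORSIZE;

static int defrag_batch(uint64_t start, uint64_t *progressed_to)
{
        *progressed_to = start + 3 * SECTORSIZE;
        return 0;
}

int main(void)
{
        uint64_t cur = 0;

        /*
         * Forward-only scan, no requeue and no wrap-around: mirrors the
         * shape of the new loop, where the cursor becomes
         * max(cur + sectorsize, range.start) after each batch.
         */
        while (cur < file_size) {
                uint64_t next;

                if (defrag_batch(cur, &next) < 0)
                        break;
                cur = next > cur + SECTORSIZE ? next : cur + SECTORSIZE;
                printf("scanned up to %llu\n", (unsigned long long)cur);
        }
        return 0;
}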
@@ -3291,7 +3291,7 @@ void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
int __init btrfs_auto_defrag_init(void);
void __cold btrfs_auto_defrag_exit(void);
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode);
struct btrfs_inode *inode, u32 extent_thresh);
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);

@@ -261,6 +261,7 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
em->mod_start = merge->mod_start;
em->generation = max(em->generation, merge->generation);
set_bit(EXTENT_FLAG_MERGED, &em->flags);

rb_erase_cached(&merge->rb_node, &tree->map);
RB_CLEAR_NODE(&merge->rb_node);

@@ -278,6 +279,7 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
RB_CLEAR_NODE(&merge->rb_node);
em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
em->generation = max(em->generation, merge->generation);
set_bit(EXTENT_FLAG_MERGED, &em->flags);
free_extent_map(merge);
}
}

@@ -25,6 +25,8 @@ enum {
EXTENT_FLAG_FILLING,
/* filesystem extent mapping type */
EXTENT_FLAG_FS_MAPPING,
/* This em is merged from two or more physically adjacent ems */
EXTENT_FLAG_MERGED,
};

struct extent_map {

@@ -40,6 +42,12 @@ struct extent_map {
u64 ram_bytes;
u64 block_start;
u64 block_len;

/*
* Generation of the extent map, for merged em it's the highest
* generation of all merged ems.
* For non-merged extents, it's from btrfs_file_extent_item::generation.
*/
u64 generation;
unsigned long flags;
/* Used for chunk mappings, flag EXTENT_FLAG_FS_MAPPING must be set */

@@ -50,11 +50,14 @@ struct inode_defrag {
/* root objectid */
u64 root;

/* last offset we were able to defrag */
u64 last_offset;

/* if we've wrapped around back to zero once already */
int cycled;
/*
* The extent size threshold for autodefrag.
*
* This value is different for compressed/non-compressed extents,
* thus needs to be passed from higher layer.
* (aka, inode_should_defrag())
*/
u32 extent_thresh;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,

@@ -107,8 +110,8 @@ static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
*/
if (defrag->transid < entry->transid)
entry->transid = defrag->transid;
if (defrag->last_offset > entry->last_offset)
entry->last_offset = defrag->last_offset;
entry->extent_thresh = min(defrag->extent_thresh,
entry->extent_thresh);
return -EEXIST;
}
}

@@ -134,7 +137,7 @@ static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
* enabled
*/
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode)
struct btrfs_inode *inode, u32 extent_thresh)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;

@@ -160,6 +163,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
defrag->ino = btrfs_ino(inode);
defrag->transid = transid;
defrag->root = root->root_key.objectid;
defrag->extent_thresh = extent_thresh;

spin_lock(&fs_info->defrag_inodes_lock);
if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {

@@ -178,34 +182,6 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
return 0;
}

/*
* Requeue the defrag object. If there is a defrag object that points to
* the same inode in the tree, we will merge them together (by
* __btrfs_add_inode_defrag()) and free the one that we want to requeue.
*/
static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
struct inode_defrag *defrag)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
int ret;

if (!__need_auto_defrag(fs_info))
goto out;

/*
* Here we don't check the IN_DEFRAG flag, because we need merge
* them together.
*/
spin_lock(&fs_info->defrag_inodes_lock);
ret = __btrfs_add_inode_defrag(inode, defrag);
spin_unlock(&fs_info->defrag_inodes_lock);
if (ret)
goto out;
return;
out:
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}

/*
* pick the defragable inode that we want, if it doesn't exist, we will get
* the next one.

@@ -278,8 +254,14 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
struct btrfs_root *inode_root;
struct inode *inode;
struct btrfs_ioctl_defrag_range_args range;
int num_defrag;
int ret;
int ret = 0;
u64 cur = 0;

again:
if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
goto cleanup;
if (!__need_auto_defrag(fs_info))
goto cleanup;

/* get the inode */
inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);

@@ -295,39 +277,30 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
goto cleanup;
}

if (cur >= i_size_read(inode)) {
iput(inode);
goto cleanup;
}

/* do a chunk of defrag */
clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
memset(&range, 0, sizeof(range));
range.len = (u64)-1;
range.start = defrag->last_offset;
range.start = cur;
range.extent_thresh = defrag->extent_thresh;

sb_start_write(fs_info->sb);
num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
BTRFS_DEFRAG_BATCH);
sb_end_write(fs_info->sb);
/*
* if we filled the whole defrag batch, there
* must be more work to do. Queue this defrag
* again
*/
if (num_defrag == BTRFS_DEFRAG_BATCH) {
defrag->last_offset = range.start;
btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
} else if (defrag->last_offset && !defrag->cycled) {
/*
* we didn't fill our defrag batch, but
* we didn't start at zero. Make sure we loop
* around to the start of the file.
*/
defrag->last_offset = 0;
defrag->cycled = 1;
btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
} else {
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}

iput(inode);
return 0;

if (ret < 0)
goto cleanup;

cur = max(cur + fs_info->sectorsize, range.start);
goto again;

cleanup:
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
return ret;

@@ -560,12 +560,12 @@ static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
}

static inline void inode_should_defrag(struct btrfs_inode *inode,
u64 start, u64 end, u64 num_bytes, u64 small_write)
u64 start, u64 end, u64 num_bytes, u32 small_write)
{
/* If this is a small write inside eof, kick off a defrag */
if (num_bytes < small_write &&
(start > 0 || end + 1 < inode->disk_i_size))
btrfs_add_inode_defrag(NULL, inode);
btrfs_add_inode_defrag(NULL, inode, small_write);
}

/*

fs/btrfs/ioctl.c (256 changed lines)

@@ -1012,8 +1012,155 @@ out:
return ret;
}

/*
* Defrag specific helper to get an extent map.
*
* Differences between this and btrfs_get_extent() are:
*
* - No extent_map will be added to inode->extent_tree
*   To reduce memory usage in the long run.
*
* - Extra optimization to skip file extents older than @newer_than
*   By using btrfs_search_forward() we can skip entire file ranges that
*   have extents created in past transactions, because btrfs_search_forward()
*   will not visit leaves and nodes with a generation smaller than given
*   minimal generation threshold (@newer_than).
*
* Return valid em if we find a file extent matching the requirement.
* Return NULL if we can not find a file extent matching the requirement.
*
* Return ERR_PTR() for error.
*/
static struct extent_map *defrag_get_extent(struct btrfs_inode *inode,
u64 start, u64 newer_than)
{
struct btrfs_root *root = inode->root;
struct btrfs_file_extent_item *fi;
struct btrfs_path path = { 0 };
struct extent_map *em;
struct btrfs_key key;
u64 ino = btrfs_ino(inode);
int ret;

em = alloc_extent_map();
if (!em) {
ret = -ENOMEM;
goto err;
}

key.objectid = ino;
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = start;

if (newer_than) {
ret = btrfs_search_forward(root, &key, &path, newer_than);
if (ret < 0)
goto err;
/* Can't find anything newer */
if (ret > 0)
goto not_found;
} else {
ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
if (ret < 0)
goto err;
}
if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
/*
* If btrfs_search_slot() makes path to point beyond nritems,
* we should not have an empty leaf, as this inode must at
* least have its INODE_ITEM.
*/
ASSERT(btrfs_header_nritems(path.nodes[0]));
path.slots[0] = btrfs_header_nritems(path.nodes[0]) - 1;
}
btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
/* Perfect match, no need to go one slot back */
if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY &&
key.offset == start)
goto iterate;

/* We didn't find a perfect match, needs to go one slot back */
if (path.slots[0] > 0) {
btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
path.slots[0]--;
}

iterate:
/* Iterate through the path to find a file extent covering @start */
while (true) {
u64 extent_end;

if (path.slots[0] >= btrfs_header_nritems(path.nodes[0]))
goto next;

btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);

/*
* We may go one slot back to INODE_REF/XATTR item, then
* need to go forward until we reach an EXTENT_DATA.
* But we should still has the correct ino as key.objectid.
*/
if (WARN_ON(key.objectid < ino) || key.type < BTRFS_EXTENT_DATA_KEY)
goto next;

/* It's beyond our target range, definitely not extent found */
if (key.objectid > ino || key.type > BTRFS_EXTENT_DATA_KEY)
goto not_found;

/*
* |   |<- File extent ->|
*     \- start
*
* This means there is a hole between start and key.offset.
*/
if (key.offset > start) {
em->start = start;
em->orig_start = start;
em->block_start = EXTENT_MAP_HOLE;
em->len = key.offset - start;
break;
}

fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
struct btrfs_file_extent_item);
extent_end = btrfs_file_extent_end(&path);

/*
* |<- file extent ->|   |
*                       \- start
*
* We haven't reached start, search next slot.
*/
if (extent_end <= start)
goto next;

/* Now this extent covers @start, convert it to em */
btrfs_extent_item_to_extent_map(inode, &path, fi, false, em);
break;
next:
ret = btrfs_next_item(root, &path);
if (ret < 0)
goto err;
if (ret > 0)
goto not_found;
}
btrfs_release_path(&path);
return em;

not_found:
btrfs_release_path(&path);
free_extent_map(em);
return NULL;

err:
btrfs_release_path(&path);
free_extent_map(em);
return ERR_PTR(ret);
}

static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
bool locked)
u64 newer_than, bool locked)
{
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;

@@ -1028,6 +1175,20 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
em = lookup_extent_mapping(em_tree, start, sectorsize);
read_unlock(&em_tree->lock);

/*
* We can get a merged extent, in that case, we need to re-search
* tree to get the original em for defrag.
*
* If @newer_than is 0 or em::generation < newer_than, we can trust
* this em, as either we don't care about the generation, or the
* merged extent map will be rejected anyway.
*/
if (em && test_bit(EXTENT_FLAG_MERGED, &em->flags) &&
newer_than && em->generation >= newer_than) {
free_extent_map(em);
em = NULL;
}

if (!em) {
struct extent_state *cached = NULL;
u64 end = start + sectorsize - 1;

@@ -1035,7 +1196,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
/* get the big lock and read metadata off disk */
if (!locked)
lock_extent_bits(io_tree, start, end, &cached);
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, sectorsize);
em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
if (!locked)
unlock_extent_cached(io_tree, start, end, &cached);

@@ -1046,23 +1207,42 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
return em;
}

static u32 get_extent_max_capacity(const struct extent_map *em)
{
if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
return BTRFS_MAX_COMPRESSED;
return BTRFS_MAX_EXTENT_SIZE;
}

static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
bool locked)
{
struct extent_map *next;
bool ret = true;
bool ret = false;

/* this is the last extent */
if (em->start + em->len >= i_size_read(inode))
return false;

next = defrag_lookup_extent(inode, em->start + em->len, locked);
/*
* We want to check if the next extent can be merged with the current
* one, which can be an extent created in a past generation, so we pass
* a minimum generation of 0 to defrag_lookup_extent().
*/
next = defrag_lookup_extent(inode, em->start + em->len, 0, locked);
/* No more em or hole */
if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
ret = false;
else if ((em->block_start + em->block_len == next->block_start) &&
(em->block_len > SZ_128K && next->block_len > SZ_128K))
ret = false;

goto out;
if (test_bit(EXTENT_FLAG_PREALLOC, &next->flags))
goto out;
/*
* If the next extent is at its max capacity, defragging current extent
* makes no sense, as the total number of extents won't change.
*/
if (next->len >= get_extent_max_capacity(em))
goto out;
ret = true;
out:
free_extent_map(next);
return ret;
}

@@ -1186,8 +1366,10 @@ struct defrag_target_range {
static int defrag_collect_targets(struct btrfs_inode *inode,
u64 start, u64 len, u32 extent_thresh,
u64 newer_than, bool do_compress,
bool locked, struct list_head *target_list)
bool locked, struct list_head *target_list,
u64 *last_scanned_ret)
{
bool last_is_target = false;
u64 cur = start;
int ret = 0;

@@ -1197,7 +1379,9 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
bool next_mergeable = true;
u64 range_len;

em = defrag_lookup_extent(&inode->vfs_inode, cur, locked);
last_is_target = false;
em = defrag_lookup_extent(&inode->vfs_inode, cur,
newer_than, locked);
if (!em)
break;

@@ -1254,6 +1438,13 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
if (range_len >= extent_thresh)
goto next;

/*
* Skip extents already at its max capacity, this is mostly for
* compressed extents, which max cap is only 128K.
*/
if (em->len >= get_extent_max_capacity(em))
goto next;

next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
locked);
if (!next_mergeable) {

@@ -1272,6 +1463,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
}

add:
last_is_target = true;
range_len = min(extent_map_end(em), start + len) - cur;
/*
* This one is a good target, check if it can be merged into

@@ -1315,6 +1507,17 @@ next:
kfree(entry);
}
}
if (!ret && last_scanned_ret) {
/*
* If the last extent is not a target, the caller can skip to
* the end of that extent.
* Otherwise, we can only go the end of the specified range.
*/
if (!last_is_target)
*last_scanned_ret = max(cur, *last_scanned_ret);
else
*last_scanned_ret = max(start + len, *last_scanned_ret);
}
return ret;
}

@@ -1373,7 +1576,8 @@ static int defrag_one_locked_target(struct btrfs_inode *inode,
}

static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
u32 extent_thresh, u64 newer_than, bool do_compress)
u32 extent_thresh, u64 newer_than, bool do_compress,
u64 *last_scanned_ret)
{
struct extent_state *cached_state = NULL;
struct defrag_target_range *entry;

@@ -1419,7 +1623,7 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
*/
ret = defrag_collect_targets(inode, start, len, extent_thresh,
newer_than, do_compress, true,
&target_list);
&target_list, last_scanned_ret);
if (ret < 0)
goto unlock_extent;

@@ -1454,7 +1658,8 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
u64 start, u32 len, u32 extent_thresh,
u64 newer_than, bool do_compress,
unsigned long *sectors_defragged,
unsigned long max_sectors)
unsigned long max_sectors,
u64 *last_scanned_ret)
{
const u32 sectorsize = inode->root->fs_info->sectorsize;
struct defrag_target_range *entry;

@@ -1465,7 +1670,7 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
ret = defrag_collect_targets(inode, start, len, extent_thresh,
newer_than, do_compress, false,
&target_list);
&target_list, NULL);
if (ret < 0)
goto out;

@@ -1482,6 +1687,15 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
range_len = min_t(u32, range_len,
(max_sectors - *sectors_defragged) * sectorsize);

/*
* If defrag_one_range() has updated last_scanned_ret,
* our range may already be invalid (e.g. hole punched).
* Skip if our range is before last_scanned_ret, as there is
* no need to defrag the range anymore.
*/
if (entry->start + range_len <= *last_scanned_ret)
continue;

if (ra)
page_cache_sync_readahead(inode->vfs_inode.i_mapping,
ra, NULL, entry->start >> PAGE_SHIFT,

@@ -1494,7 +1708,8 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
* accounting.
*/
ret = defrag_one_range(inode, entry->start, range_len,
extent_thresh, newer_than, do_compress);
extent_thresh, newer_than, do_compress,
last_scanned_ret);
if (ret < 0)
break;
*sectors_defragged += range_len >>

@@ -1505,6 +1720,8 @@ out:
list_del_init(&entry->list);
kfree(entry);
}
if (ret >= 0)
*last_scanned_ret = max(*last_scanned_ret, start + len);
return ret;
}

@@ -1590,6 +1807,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,

while (cur < last_byte) {
const unsigned long prev_sectors_defragged = sectors_defragged;
u64 last_scanned = cur;
u64 cluster_end;

/* The cluster size 256K should always be page aligned */

@@ -1619,8 +1837,8 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
BTRFS_I(inode)->defrag_compress = compress_type;
ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
cluster_end + 1 - cur, extent_thresh,
newer_than, do_compress,
&sectors_defragged, max_to_defrag);
newer_than, do_compress, &sectors_defragged,
max_to_defrag, &last_scanned);

if (sectors_defragged > prev_sectors_defragged)
balance_dirty_pages_ratelimited(inode->i_mapping);

@@ -1628,7 +1846,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
btrfs_inode_unlock(inode, 0);
if (ret < 0)
break;
cur = cluster_end + 1;
cur = max(cluster_end + 1, last_scanned);
if (ret > 0) {
ret = 0;
break;

@@ -380,6 +380,17 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
kunmap(cur_page);
cur_in += LZO_LEN;

if (seg_len > lzo1x_worst_compress(PAGE_SIZE)) {
/*
* seg_len shouldn't be larger than we have allocated
* for workspace->cbuf
*/
btrfs_err(fs_info, "unexpectedly large lzo segment len %u",
seg_len);
ret = -EIO;
goto out;
}

/* Copy the compressed segment payload into workspace */
copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);