btrfs: clean snapshots one by one

Each time, pick one dead root from the list and let the caller know
whether it needs to continue. This should improve responsiveness during
umount and balance, both of which at some point wait for all currently
queued dead roots to be cleaned.
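
For illustration, a minimal sketch of the new contract (a hypothetical
caller loop; the real one is in cleaner_kthread below). The return
convention comes from this patch: 1 means more dead roots are queued,
0 means none were left at the time of the call:

	int again;
	do {
		/* clean at most one dead root per call, then report back */
		again = btrfs_clean_one_deleted_snapshot(root);
	} while (again == 1 && !kthread_should_stop());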

A new dead root is added to the end of the list, so the snapshots
disappear in the order of deletion.
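
A minimal sketch of why the order holds, pairing the enqueue in
btrfs_add_dead_root() with the dequeue the cleaner now does (both lines
taken from this patch):

	/* producer: a newly deleted snapshot is queued at the tail */
	list_add_tail(&root->root_list, &fs_info->dead_roots);

	/* consumer: the cleaner always picks the oldest entry at the head */
	root = list_first_entry(&fs_info->dead_roots,
			struct btrfs_root, root_list);
	list_del(&root->root_list);

With list_add() instead of list_add_tail(), the head entry would be the
most recently deleted snapshot and the cleaning order would reverse.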

The snapshot cleaning work is now done only by the cleaner thread;
other threads wake it when needed.
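
A sketch of the waking side, as btrfs_commit_super() does below. The
down_write()/up_write() pair on cleanup_work_sem acts as a barrier for
cleanup already in flight (the up_write() is in the existing code just
past the hunk shown here):

	wake_up_process(root->fs_info->cleaner_kthread);
	/* wait until ongoing cleanup work done */
	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);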

Signed-off-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
commit 9d1a2a3ad5 (parent 6841ebee6b)
Author: David Sterba, 2013-03-12 15:13:28 +00:00; committed by Josef Bacik
5 changed files with 57 additions and 29 deletions

fs/btrfs/disk-io.c

@@ -1658,15 +1658,20 @@ static int cleaner_kthread(void *arg)
 	struct btrfs_root *root = arg;
 
 	do {
+		int again = 0;
+
 		if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
-		    mutex_trylock(&root->fs_info->cleaner_mutex)) {
-			btrfs_run_delayed_iputs(root);
-			btrfs_clean_old_snapshots(root);
-			mutex_unlock(&root->fs_info->cleaner_mutex);
+		    down_read_trylock(&root->fs_info->sb->s_umount)) {
+			if (mutex_trylock(&root->fs_info->cleaner_mutex)) {
+				btrfs_run_delayed_iputs(root);
+				again = btrfs_clean_one_deleted_snapshot(root);
+				mutex_unlock(&root->fs_info->cleaner_mutex);
+			}
 			btrfs_run_defrag_inodes(root->fs_info);
+			up_read(&root->fs_info->sb->s_umount);
 		}
 
-		if (!try_to_freeze()) {
+		if (!try_to_freeze() && !again) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop())
 				schedule();
@@ -3358,8 +3363,8 @@ int btrfs_commit_super(struct btrfs_root *root)
 
 	mutex_lock(&root->fs_info->cleaner_mutex);
 	btrfs_run_delayed_iputs(root);
-	btrfs_clean_old_snapshots(root);
 	mutex_unlock(&root->fs_info->cleaner_mutex);
+	wake_up_process(root->fs_info->cleaner_kthread);
 
 	/* wait until ongoing cleanup work done */
 	down_write(&root->fs_info->cleanup_work_sem);

fs/btrfs/extent-tree.c

@@ -7286,6 +7286,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
  * reference count by one. if update_ref is true, this function
  * also make sure backrefs for the shared block and all lower level
  * blocks are properly updated.
+ *
+ * If called with for_reloc == 0, may exit early with -EAGAIN
  */
 int btrfs_drop_snapshot(struct btrfs_root *root,
 			 struct btrfs_block_rsv *block_rsv, int update_ref,
@@ -7386,6 +7388,12 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
 
 	while (1) {
+		if (!for_reloc && btrfs_fs_closing(root->fs_info)) {
+			pr_debug("btrfs: drop snapshot early exit\n");
+			err = -EAGAIN;
+			goto out_end_trans;
+		}
+
 		ret = walk_down_tree(trans, root, path, wc);
 		if (ret < 0) {
 			err = ret;

fs/btrfs/relocation.c

@@ -4148,10 +4148,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
 
 	while (1) {
 		mutex_lock(&fs_info->cleaner_mutex);
-
-		btrfs_clean_old_snapshots(fs_info->tree_root);
 		ret = relocate_block_group(rc);
-
 		mutex_unlock(&fs_info->cleaner_mutex);
 		if (ret < 0) {
 			err = ret;

fs/btrfs/transaction.c

@@ -948,7 +948,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
 int btrfs_add_dead_root(struct btrfs_root *root)
 {
 	spin_lock(&root->fs_info->trans_lock);
-	list_add(&root->root_list, &root->fs_info->dead_roots);
+	list_add_tail(&root->root_list, &root->fs_info->dead_roots);
 	spin_unlock(&root->fs_info->trans_lock);
 	return 0;
 }
@@ -1873,31 +1873,49 @@ cleanup_transaction:
 }
 
 /*
- * interface function to delete all the snapshots we have scheduled for deletion
+ * return < 0 if error
+ * 0 if there are no more dead_roots at the time of call
+ * 1 there are more to be processed, call me again
+ *
+ * The return value indicates there are certainly more snapshots to delete, but
+ * if there comes a new one during processing, it may return 0. We don't mind,
+ * because btrfs_commit_super will poke cleaner thread and it will process it a
+ * few seconds later.
  */
-int btrfs_clean_old_snapshots(struct btrfs_root *root)
+int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
 {
-	LIST_HEAD(list);
+	int ret;
 	struct btrfs_fs_info *fs_info = root->fs_info;
 
+	if (fs_info->sb->s_flags & MS_RDONLY) {
+		pr_debug("btrfs: cleaner called for RO fs!\n");
+		return 0;
+	}
+
 	spin_lock(&fs_info->trans_lock);
-	list_splice_init(&fs_info->dead_roots, &list);
+	if (list_empty(&fs_info->dead_roots)) {
+		spin_unlock(&fs_info->trans_lock);
+		return 0;
+	}
+	root = list_first_entry(&fs_info->dead_roots,
+			struct btrfs_root, root_list);
+	list_del(&root->root_list);
 	spin_unlock(&fs_info->trans_lock);
 
-	while (!list_empty(&list)) {
-		int ret;
-
-		root = list_entry(list.next, struct btrfs_root, root_list);
-		list_del(&root->root_list);
-
-		btrfs_kill_all_delayed_nodes(root);
+	pr_debug("btrfs: cleaner removing %llu\n",
+			(unsigned long long)root->objectid);
 
-		if (btrfs_header_backref_rev(root->node) <
-		    BTRFS_MIXED_BACKREF_REV)
-			ret = btrfs_drop_snapshot(root, NULL, 0, 0);
-		else
-			ret =btrfs_drop_snapshot(root, NULL, 1, 0);
-		BUG_ON(ret < 0);
-	}
-	return 0;
+	btrfs_kill_all_delayed_nodes(root);
+
+	if (btrfs_header_backref_rev(root->node) <
+	    BTRFS_MIXED_BACKREF_REV)
+		ret = btrfs_drop_snapshot(root, NULL, 0, 0);
+	else
+		ret = btrfs_drop_snapshot(root, NULL, 1, 0);
+	/*
+	 * If we encounter a transaction abort during snapshot cleaning, we
+	 * don't want to crash here
+	 */
+	BUG_ON(ret < 0 && ret != -EAGAIN && ret != -EROFS);
+	return 1;
 }

fs/btrfs/transaction.h

@@ -123,7 +123,7 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
 int btrfs_add_dead_root(struct btrfs_root *root);
 int btrfs_defrag_root(struct btrfs_root *root);
-int btrfs_clean_old_snapshots(struct btrfs_root *root);
+int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root);
 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,