Btrfs: cleanup destroy_marked_extents
We can just look up the extent_buffers for the range and free stuff that way. This makes the cleanup a bit cleaner and we can make sure to evict the extent_buffers pretty quickly by marking them as stale. Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
commit fd8b2b6115
parent abefa55ac1
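For reference, here is the per-block loop as it reads after the patch, assembled from the added and context lines of the first two hunks below. This is only a readability sketch, not a separate change; the surrounding declarations and the enclosing while (1) loop are omitted, and the helper names (btrfs_find_tree_block, free_extent_buffer_stale) are the ones the patch itself uses.

        /*
         * Readability sketch of the new per-block loop in
         * btrfs_destroy_marked_extents(), assembled from the hunks below.
         */
        while (start <= end) {
                /* Look up the extent_buffer for this tree block directly
                 * instead of walking the btree inode's pages. */
                eb = btrfs_find_tree_block(root, start, root->leafsize);
                start += eb->len;       /* as committed: read before the NULL check */
                if (!eb)
                        continue;
                wait_on_extent_buffer_writeback(eb);

                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
                        clear_extent_buffer_dirty(eb);
                /* Mark stale so the buffer is evicted from the cache quickly. */
                free_extent_buffer_stale(eb);
        }

Making wait_on_extent_buffer_writeback() non-static (second and third hunks) is what allows this caller to wait on writeback directly rather than going through the page flags.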
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3752,13 +3752,9 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                                         int mark)
 {
         int ret;
-        struct page *page;
-        struct inode *btree_inode = root->fs_info->btree_inode;
         struct extent_buffer *eb;
         u64 start = 0;
         u64 end;
-        u64 offset;
-        unsigned long index;
 
         while (1) {
                 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
@@ -3768,35 +3764,17 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
 
                 clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
                 while (start <= end) {
-                        index = start >> PAGE_CACHE_SHIFT;
-                        start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
-                        page = find_get_page(btree_inode->i_mapping, index);
-                        if (!page)
+                        eb = btrfs_find_tree_block(root, start,
+                                                   root->leafsize);
+                        start += eb->len;
+                        if (!eb)
                                 continue;
-                        offset = page_offset(page);
+                        wait_on_extent_buffer_writeback(eb);
 
-                        spin_lock(&dirty_pages->buffer_lock);
-                        eb = radix_tree_lookup(
-                             &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
-                                               offset >> PAGE_CACHE_SHIFT);
-                        spin_unlock(&dirty_pages->buffer_lock);
-                        if (eb)
-                                ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
-                                                         &eb->bflags);
-                        lock_page(page);
-
-                        wait_on_page_writeback(page);
-                        if (PageDirty(page)) {
-                                clear_page_dirty_for_io(page);
-                                spin_lock_irq(&page->mapping->tree_lock);
-                                radix_tree_tag_clear(&page->mapping->page_tree,
-                                                        page_index(page),
-                                                        PAGECACHE_TAG_DIRTY);
-                                spin_unlock_irq(&page->mapping->tree_lock);
-                        }
-
-                        unlock_page(page);
-                        page_cache_release(page);
+                        if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
+                                               &eb->bflags))
+                                clear_extent_buffer_dirty(eb);
+                        free_extent_buffer_stale(eb);
                 }
         }
 
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3162,7 +3162,7 @@ static int eb_wait(void *word)
         return 0;
 }
 
-static void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
+void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
 {
         wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
                     TASK_UNINTERRUPTIBLE);
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -282,6 +282,7 @@ void free_extent_buffer_stale(struct extent_buffer *eb);
 int read_extent_buffer_pages(struct extent_io_tree *tree,
                              struct extent_buffer *eb, u64 start, int wait,
                              get_extent_t *get_extent, int mirror_num);
+void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
 
 static inline unsigned long num_extent_pages(u64 start, u64 len)
 {