Btrfs: don't lock bits in the extent tree during writepage

At writepage time, we have the page locked and we have the
extent_map entry for this extent pinned in the extent_map tree.
So, the page can't go away and its mapping can't change.

There is no need for the extra extent_state lock bits during writepage.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
commit d5550c6315
parent 2c64c53d8d
Author: Chris Mason <chris.mason@oracle.com>
Date:   2009-09-02 15:11:07 -04:00

fs/btrfs/extent_io.c

@@ -2219,16 +2219,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                         goto done_unlocked;
                 }
         }
-        lock_extent_bits(tree, start, page_end, 0, &cached_state, GFP_NOFS);
-        unlock_start = start;
-
         if (tree->ops && tree->ops->writepage_start_hook) {
                 ret = tree->ops->writepage_start_hook(page, start,
                                                       page_end);
                 if (ret == -EAGAIN) {
-                        unlock_extent_cached(tree, start, page_end,
-                                             &cached_state, GFP_NOFS);
                         redirty_page_for_writepage(wbc, page);
                         update_nr_written(page, wbc, nr_written);
                         unlock_page(page);
@@ -2244,13 +2238,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
         update_nr_written(page, wbc, nr_written + 1);
 
         end = page_end;
-        if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0))
-                printk(KERN_ERR "btrfs delalloc bits after lock_extent\n");
-
         if (last_byte <= start) {
-                clear_extent_bit(tree, start, page_end,
-                                 EXTENT_LOCKED | EXTENT_DIRTY,
-                                 1, 0, NULL, GFP_NOFS);
                 if (tree->ops && tree->ops->writepage_end_io_hook)
                         tree->ops->writepage_end_io_hook(page, start,
                                                          page_end, NULL, 1);
@@ -2262,8 +2250,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 
         while (cur <= end) {
                 if (cur >= last_byte) {
-                        unlock_extent_cached(tree, unlock_start, page_end,
-                                             &cached_state, GFP_NOFS);
                         if (tree->ops && tree->ops->writepage_end_io_hook)
                                 tree->ops->writepage_end_io_hook(page, cur,
                                                          page_end, NULL, 1);
@@ -2295,10 +2281,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                  */
                 if (compressed || block_start == EXTENT_MAP_HOLE ||
                     block_start == EXTENT_MAP_INLINE) {
-                        unlock_extent_cached(tree, unlock_start,
-                                             cur + iosize - 1, &cached_state,
-                                             GFP_NOFS);
-
                         /*
                          * end_io notification does not happen here for
                          * compressed extents
@@ -2366,9 +2348,6 @@ done:
                 set_page_writeback(page);
                 end_page_writeback(page);
         }
-        if (unlock_start <= page_end)
-                unlock_extent_cached(tree, unlock_start, page_end,
-                                     &cached_state, GFP_NOFS);
         unlock_page(page);
 
 done_unlocked:
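
For readers who want the shape of the change without the kernel context, below is a deliberately simplified, hypothetical user-space sketch of the pattern the commit relies on: once the page is locked and its mapping entry is pinned, writeback needs no additional range lock, so the lock/unlock calls that used to bracket the write hooks simply disappear. Every identifier in the sketch (sketch_page, sketch_map, sketch_writepage, start_hook, end_io_hook) is an illustrative stand-in, not a Btrfs or kernel API.

/*
 * Simplified stand-in for the writepage path after this commit: the
 * "page lock" and the pinned mapping are the only protection taken.
 */
#include <pthread.h>
#include <stdio.h>

struct sketch_map {
        int pinned;                     /* stand-in for a pinned extent_map entry */
};

struct sketch_page {
        pthread_mutex_t lock;           /* stand-in for the page lock */
        struct sketch_map *map;
};

/* stand-ins for the writepage start / end_io hooks */
static int start_hook(struct sketch_page *p)   { (void)p; return 0; }
static void end_io_hook(struct sketch_page *p) { (void)p; }

static int sketch_writepage(struct sketch_page *page)
{
        int ret;

        pthread_mutex_lock(&page->lock);        /* page locked for writeback */
        page->map->pinned++;                    /* mapping can no longer change */

        /*
         * No extra range lock here: the page lock plus the pinned mapping
         * keep this range stable for the duration of writeback, which is
         * the observation the commit message makes about the real code.
         */
        ret = start_hook(page);
        if (ret == 0)
                end_io_hook(page);

        page->map->pinned--;
        pthread_mutex_unlock(&page->lock);
        return ret;
}

int main(void)
{
        struct sketch_map map = { 0 };
        struct sketch_page page = { PTHREAD_MUTEX_INITIALIZER, &map };

        printf("sketch_writepage returned %d\n", sketch_writepage(&page));
        return 0;
}

Built as ordinary C (for example, cc sketch.c -pthread), the sketch only demonstrates control flow: nothing between the lock and unlock takes a second, range-granular lock, which mirrors the lines this commit removes.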