Btrfs: Use a mutex in the extent buffer for tree block locking
This replaces the use of the page cache lock bit for locking, which wasn't suitable for block size < page size and couldn't be used recursively. The mutexes alone don't fix either problem, but they are the first step.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
commit a61e6f29dc
parent 6af118ce51
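A note on the first hunk below: TestSetPageLocked() reports the previous state of the page lock bit, so a return of 0 means the lock was just acquired, while mutex_trylock() returns nonzero on success. Because btrfs_try_tree_lock() now forwards the mutex_trylock() result, the test in pin_down_bytes() drops its negation. A minimal sketch of the two polarities follows; it is an illustration only, not part of the commit, and the helper names old_try_lock/new_try_lock are made up for comparison:

    /* illustration only: success polarity of the two "try lock" primitives */
    static int old_try_lock(struct extent_buffer *eb)
    {
        /* returns the previous bit: 0 means the bit was clear and we now hold the lock */
        return TestSetPageLocked(eb->first_page);
    }

    static int new_try_lock(struct extent_buffer *eb)
    {
        /* returns nonzero when the mutex was acquired */
        return mutex_trylock(&eb->mutex);
    }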
@@ -1451,7 +1451,7 @@ static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
         struct extent_buffer *buf;
         buf = btrfs_find_tree_block(root, bytenr, num_bytes);
         if (buf) {
-            if (!btrfs_try_tree_lock(buf) &&
+            if (btrfs_try_tree_lock(buf) &&
                 btrfs_buffer_uptodate(buf, 0)) {
                 u64 transid =
                     root->fs_info->running_transaction->transid;
@@ -3345,11 +3345,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
         set_state_private(block_group_cache, found_key.objectid,
                           (unsigned long)cache);
 
-        /* hack for now */
-        if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
-            cache_block_group(root->fs_info->extent_root,
-                              cache);
-        }
         if (key.objectid >=
             btrfs_super_total_bytes(&info->super_copy))
             break;
@@ -2690,6 +2690,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
     eb = kmem_cache_zalloc(extent_buffer_cache, mask);
     eb->start = start;
     eb->len = len;
+    mutex_init(&eb->mutex);
     spin_lock_irqsave(&leak_lock, flags);
     list_add(&eb->leak_list, &buffers);
     spin_unlock_irqrestore(&leak_lock, flags);
@@ -2837,6 +2838,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 
     for (i = 0; i < num_pages; i++) {
         page = extent_buffer_page(eb, i);
+        lock_page(page);
         if (i == 0)
             set_page_extent_head(page, eb->len);
         else
@@ -2854,6 +2856,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
             end = start + PAGE_CACHE_SIZE - 1;
             if (test_range_bit(tree, start, end,
                                EXTENT_DIRTY, 0)) {
+                unlock_page(page);
                 continue;
             }
         }
@@ -2865,6 +2868,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
                                  PAGECACHE_TAG_DIRTY);
         }
         read_unlock_irq(&page->mapping->tree_lock);
+        unlock_page(page);
     }
     return 0;
 }
@@ -2893,12 +2897,17 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
          * on us if the page isn't already dirty.
          */
         if (i == 0) {
+            lock_page(page);
             set_page_extent_head(page, eb->len);
         } else if (PagePrivate(page) &&
                    page->private != EXTENT_PAGE_PRIVATE) {
+            lock_page(page);
             set_page_extent_mapped(page);
+            unlock_page(page);
         }
         __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
+        if (i == 0)
+            unlock_page(page);
     }
     return set_extent_dirty(tree, eb->start,
             eb->start + eb->len - 1, GFP_NOFS);
@@ -90,6 +90,7 @@ struct extent_buffer {
     int flags;
     struct list_head leak_list;
     struct rb_node rb_node;
+    struct mutex mutex;
 };
 
 struct extent_map_tree;
@@ -29,32 +29,31 @@ int btrfs_tree_lock(struct extent_buffer *eb)
 {
     int i;
 
-    if (!TestSetPageLocked(eb->first_page))
+    if (mutex_trylock(&eb->mutex))
         return 0;
     for (i = 0; i < 512; i++) {
         cpu_relax();
-        if (!TestSetPageLocked(eb->first_page))
+        if (mutex_trylock(&eb->mutex))
             return 0;
     }
     cpu_relax();
-    lock_page(eb->first_page);
+    mutex_lock(&eb->mutex);
     return 0;
 }
 
 int btrfs_try_tree_lock(struct extent_buffer *eb)
 {
-    return TestSetPageLocked(eb->first_page);
+    return mutex_trylock(&eb->mutex);
 }
 
 int btrfs_tree_unlock(struct extent_buffer *eb)
 {
-    WARN_ON(!PageLocked(eb->first_page));
-    unlock_page(eb->first_page);
+    mutex_unlock(&eb->mutex);
     return 0;
 }
 
 int btrfs_tree_locked(struct extent_buffer *eb)
 {
-    return PageLocked(eb->first_page);
+    return mutex_is_locked(&eb->mutex);
 }
 
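For reference, a caller-side sketch of the lock API after this change; this is hypothetical usage, not taken from the commit, and error handling is omitted:

    /* hypothetical caller: the call sites stay the same, only the backing primitive changes */
    struct extent_buffer *eb;

    eb = btrfs_find_tree_block(root, bytenr, num_bytes);
    if (eb) {
        btrfs_tree_lock(eb);    /* spins briefly, then sleeps on eb->mutex */
        /* ... read or modify the tree block ... */
        btrfs_tree_unlock(eb);  /* mutex_unlock(&eb->mutex) */
    }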