commit bb9d687618
Merge branch 'dev/simplify-set-bit' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux into for-linus-4.5

Signed-off-by: Chris Mason <clm@fb.com>
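Summary (inferred from the hunks below): this merge drops the `bits` argument from lock_extent_bits() — every caller passed 0 — and moves the one-line set/clear extent-bit wrappers (set_extent_dirty, clear_extent_bits, unlock_extent and friends) out of extent_io.c into extent_io.h as static inline functions. No behaviour change is intended; only the call signatures and the location of the trivial wrappers move. A minimal caller-side sketch, with placeholder names, assuming the signatures shown in the extent_io.h hunks below:

/*
 * Sketch only, not part of the patch: how a typical caller updates.
 * 'tree' stands for an inode's io_tree; start/end bound the byte range.
 */
static void example_lock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	struct extent_state *cached = NULL;

	/* old call: lock_extent_bits(tree, start, end, 0, &cached); */
	lock_extent_bits(tree, start, end, &cached);

	/* ... operate on the locked range ... */

	unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);
}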
@@ -362,7 +362,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 	}

 	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
-			 0, &cached_state);
+			 &cached_state);
 	if (extent_buffer_uptodate(eb) &&
 	    btrfs_header_generation(eb) == parent_transid) {
 		ret = 0;
@@ -1285,20 +1285,6 @@ search_again:
 }

 /* wrappers around set/clear extent bit */
-int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
-		     gfp_t mask)
-{
-	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
-			      NULL, mask);
-}
-
-int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		    unsigned bits, gfp_t mask)
-{
-	return set_extent_bit(tree, start, end, bits, NULL,
-			      NULL, mask);
-}
-
 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			    unsigned bits, gfp_t mask,
			    struct extent_changeset *changeset)
@@ -1323,17 +1309,6 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
				  cached, mask, NULL);
 }

-int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		      unsigned bits, gfp_t mask)
-{
-	int wake = 0;
-
-	if (bits & EXTENT_LOCKED)
-		wake = 1;
-
-	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL, mask);
-}
-
 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     unsigned bits, gfp_t mask,
			     struct extent_changeset *changeset)
@@ -1348,63 +1323,18 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
				changeset);
 }

-int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
-			struct extent_state **cached_state, gfp_t mask)
-{
-	return set_extent_bit(tree, start, end,
-			      EXTENT_DELALLOC | EXTENT_UPTODATE,
-			      NULL, cached_state, mask);
-}
-
-int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
-		      struct extent_state **cached_state, gfp_t mask)
-{
-	return set_extent_bit(tree, start, end,
-			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
-			      NULL, cached_state, mask);
-}
-
-int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
-		       gfp_t mask)
-{
-	return clear_extent_bit(tree, start, end,
-				EXTENT_DIRTY | EXTENT_DELALLOC |
-				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
-}
-
-int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
-		   gfp_t mask)
-{
-	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
-			      NULL, mask);
-}
-
-int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-			struct extent_state **cached_state, gfp_t mask)
-{
-	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
-			      cached_state, mask);
-}
-
-int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-			  struct extent_state **cached_state, gfp_t mask)
-{
-	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
-				cached_state, mask);
-}
-
 /*
  * either insert or lock state struct between start and end use mask to tell
  * us if waiting is desired.
  */
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		     unsigned bits, struct extent_state **cached_state)
+		     struct extent_state **cached_state)
 {
 	int err;
 	u64 failed_start;

 	while (1) {
-		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
+		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
				       EXTENT_LOCKED, &failed_start,
				       cached_state, GFP_NOFS, NULL);
 		if (err == -EEXIST) {
@@ -1417,11 +1347,6 @@ int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 	return err;
 }

-int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	return lock_extent_bits(tree, start, end, 0, NULL);
-}
-
 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 {
 	int err;
@@ -1438,19 +1363,6 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 	return 1;
 }

-int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
-			 struct extent_state **cached, gfp_t mask)
-{
-	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
-				mask);
-}
-
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
-				GFP_NOFS);
-}
-
 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
 {
 	unsigned long index = start >> PAGE_CACHE_SHIFT;
@@ -1797,7 +1709,7 @@ again:
 	BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */

 	/* step three, lock the state bits for the whole range */
-	lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
+	lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);

 	/* then test to make sure it is all still delalloc */
 	ret = test_range_bit(tree, delalloc_start, delalloc_end,
@@ -4319,7 +4231,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 	if (start > end)
 		return 0;

-	lock_extent_bits(tree, start, end, 0, &cached_state);
+	lock_extent_bits(tree, start, end, &cached_state);
 	wait_on_page_writeback(page);
 	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
@@ -4529,7 +4441,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		last_for_get_extent = isize;
 	}

-	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
+	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1,
			 &cached_state);

 	em = get_extent_skip_holes(inode, start, last_for_get_extent,
@@ -199,12 +199,14 @@ int try_release_extent_mapping(struct extent_map_tree *map,
			       struct extent_io_tree *tree, struct page *page,
			       gfp_t mask);
 int try_release_extent_buffer(struct page *page);
-int lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		     unsigned bits, struct extent_state **cached);
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end);
-int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
-			 struct extent_state **cached, gfp_t mask);
+		     struct extent_state **cached);
+
+static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
+{
+	return lock_extent_bits(tree, start, end, NULL);
+}
+
 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent, int mirror_num);
@@ -221,39 +223,105 @@ void free_extent_state(struct extent_state *state);
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
-int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		      unsigned bits, gfp_t mask);
 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     unsigned bits, gfp_t mask,
			     struct extent_changeset *changeset);
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached, gfp_t mask);
-int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		    unsigned bits, gfp_t mask);
+
+static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
+{
+	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
+				GFP_NOFS);
+}
+
+static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
+		u64 end, struct extent_state **cached, gfp_t mask)
+{
+	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
+				mask);
+}
+
+static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
+		u64 end, unsigned bits, gfp_t mask)
+{
+	int wake = 0;
+
+	if (bits & EXTENT_LOCKED)
+		wake = 1;
+
+	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL, mask);
+}
+
 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, gfp_t mask,
			   struct extent_changeset *changeset);
 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);
-int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-			struct extent_state **cached_state, gfp_t mask);
-int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-			  struct extent_state **cached_state, gfp_t mask);
-int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
-		   gfp_t mask);
-int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
-		     gfp_t mask);
-int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
-		       gfp_t mask);
+
+static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
+		u64 end, unsigned bits, gfp_t mask)
+{
+	return set_extent_bit(tree, start, end, bits, NULL, NULL, mask);
+}
+
+static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
+		u64 end, struct extent_state **cached_state, gfp_t mask)
+{
+	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
+				cached_state, mask);
+}
+
+static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
+		u64 end, gfp_t mask)
+{
+	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
+			      NULL, mask);
+}
+
+static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
+		u64 end, gfp_t mask)
+{
+	return clear_extent_bit(tree, start, end,
+				EXTENT_DIRTY | EXTENT_DELALLOC |
+				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
+}
+
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state, gfp_t mask);
-int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
-			struct extent_state **cached_state, gfp_t mask);
-int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
-		      struct extent_state **cached_state, gfp_t mask);
+
+static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
+		u64 end, struct extent_state **cached_state, gfp_t mask)
+{
+	return set_extent_bit(tree, start, end,
+			      EXTENT_DELALLOC | EXTENT_UPTODATE,
+			      NULL, cached_state, mask);
+}
+
+static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
+		u64 end, struct extent_state **cached_state, gfp_t mask)
+{
+	return set_extent_bit(tree, start, end,
+			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
+			      NULL, cached_state, mask);
+}
+
+static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
+		u64 end, gfp_t mask)
+{
+	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL, mask);
+}
+
+static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
+		u64 end, struct extent_state **cached_state, gfp_t mask)
+{
+	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
+			      cached_state, mask);
+}
+
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
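Note on the header hunk above: the wrappers removed from extent_io.c earlier in this diff reappear here as static inline functions, so each call now expands directly to set_extent_bit()/clear_extent_bit() in the caller, removing one function-call hop without changing behaviour. For example, under the definitions above, a call with placeholder arguments like this:

	/* Sketch only: clear_extent_bits() is now a header inline ... */
	clear_extent_bits(tree, start, end, EXTENT_DIRTY, GFP_NOFS);
	/*
	 * ... is equivalent to
	 * clear_extent_bit(tree, start, end, EXTENT_DIRTY, 0, 0, NULL, GFP_NOFS),
	 * since EXTENT_DIRTY does not include EXTENT_LOCKED and wake stays 0.
	 */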
@@ -1394,7 +1394,7 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
 	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
-				 start_pos, last_pos, 0, cached_state);
+				 start_pos, last_pos, cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start_pos,
						     last_pos - start_pos + 1);
		if (ordered &&
@@ -2398,7 +2398,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
		truncate_pagecache_range(inode, lockstart, lockend);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-				 0, &cached_state);
+				 &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);

		/*
@@ -2705,7 +2705,7 @@ static long btrfs_fallocate(struct file *file, int mode,
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
-				 locked_end, 0, &cached_state);
+				 locked_end, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
@@ -2852,7 +2852,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
		lockend--;
 	len = lockend - lockstart + 1;

-	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
+	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			 &cached_state);

 	while (start < inode->i_size) {
@@ -1261,7 +1261,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
		goto out;

 	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
-			 0, &cached_state);
+			 &cached_state);

 	io_ctl_set_generation(io_ctl, trans->transid);

@@ -1989,7 +1989,7 @@ again:
 	page_start = page_offset(page);
 	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

-	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
+	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
			 &cached_state);

 	/* already ordered? We're done */
@@ -2482,7 +2482,7 @@ static noinline int relink_extent_backref(struct btrfs_path *path,
 	lock_start = backref->file_pos;
 	lock_end = backref->file_pos + backref->num_bytes - 1;
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
-			 0, &cached);
+			 &cached);

 	ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
 	if (ordered) {
@@ -2874,7 +2874,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)

 	lock_extent_bits(io_tree, ordered_extent->file_offset,
			 ordered_extent->file_offset + ordered_extent->len - 1,
-			 0, &cached_state);
+			 &cached_state);

 	ret = test_range_bit(io_tree, ordered_extent->file_offset,
			     ordered_extent->file_offset + ordered_extent->len - 1,
@@ -4668,7 +4668,7 @@ again:
 	}
 	wait_on_page_writeback(page);

-	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
+	lock_extent_bits(io_tree, page_start, page_end, &cached_state);
 	set_page_extent_mapped(page);

 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
@@ -4799,7 +4799,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 	while (1) {
		struct btrfs_ordered_extent *ordered;

-		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
+		lock_extent_bits(io_tree, hole_start, block_end - 1,
				 &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, hole_start,
						     block_end - hole_start);
@@ -5111,7 +5111,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
		end = state->end;
		spin_unlock(&io_tree->lock);

-		lock_extent_bits(io_tree, start, end, 0, &cached_state);
+		lock_extent_bits(io_tree, start, end, &cached_state);

		/*
		 * If still has DELALLOC flag, the extent didn't reach disk,
@@ -7380,7 +7380,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,

 	while (1) {
		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-				 0, cached_state);
+				 cached_state);
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure theres no ordered
@@ -8614,7 +8614,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
 	}

 	if (!inode_evicting)
-		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
+		lock_extent_bits(tree, page_start, page_end, &cached_state);
 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
 	if (ordered) {
		/*
@@ -8652,7 +8652,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
		btrfs_put_ordered_extent(ordered);
		if (!inode_evicting) {
			cached_state = NULL;
-			lock_extent_bits(tree, page_start, page_end, 0,
+			lock_extent_bits(tree, page_start, page_end,
					 &cached_state);
		}
 	}
@@ -8750,7 +8750,7 @@ again:
 	}
 	wait_on_page_writeback(page);

-	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
+	lock_extent_bits(io_tree, page_start, page_end, &cached_state);
 	set_page_extent_mapped(page);

 	/*
@@ -992,7 +992,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
		u64 end = start + len - 1;

		/* get the big lock and read metadata off disk */
-		lock_extent_bits(io_tree, start, end, 0, &cached);
+		lock_extent_bits(io_tree, start, end, &cached);
		em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
		unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS);

@@ -1140,7 +1140,7 @@ again:
		page_end = page_start + PAGE_CACHE_SIZE - 1;
		while (1) {
			lock_extent_bits(tree, page_start, page_end,
-					 0, &cached_state);
+					 &cached_state);
			ordered = btrfs_lookup_ordered_extent(inode,
							      page_start);
			unlock_extent_cached(tree, page_start, page_end,
@@ -1200,7 +1200,7 @@ again:
		page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;

		lock_extent_bits(&BTRFS_I(inode)->io_tree,
-				 page_start, page_end - 1, 0, &cached_state);
+				 page_start, page_end - 1, &cached_state);
		clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
				 page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
@@ -4211,7 +4211,7 @@ static int check_extent_to_block(struct inode *inode, u64 start, u64 len,

 	io_tree = &BTRFS_I(inode)->io_tree;

-	lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
+	lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
 	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
 	if (ordered) {
		btrfs_put_ordered_extent(ordered);