f2fs: call f2fs_balance_fs only when node was changed

If the user tries to update or read data, we don't need to call f2fs_balance_fs,
which triggers f2fs_gc and can introduce unnecessarily long latency.

Reviewed-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
This commit is contained in:
Jaegeuk Kim 2015-12-22 13:23:35 -08:00
parent 3104af35eb
commit 2a34076070
3 changed files with 35 additions and 21 deletions

View File

@@ -509,7 +509,6 @@ static void __allocate_data_blocks(struct inode *inode, loff_t offset,
u64 end_offset;
while (len) {
f2fs_balance_fs(sbi);
f2fs_lock_op(sbi);
/* When reading holes, we need its node page */
@@ -542,6 +541,9 @@ static void __allocate_data_blocks(struct inode *inode, loff_t offset,
f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi);
if (dn.node_changed)
f2fs_balance_fs(sbi);
}
return;
@@ -551,6 +553,8 @@ sync_out:
f2fs_put_dnode(&dn);
out:
f2fs_unlock_op(sbi);
if (dn.node_changed)
f2fs_balance_fs(sbi);
return;
}
@@ -649,6 +653,8 @@ get_next:
if (create) {
f2fs_unlock_op(sbi);
if (dn.node_changed)
f2fs_balance_fs(sbi);
f2fs_lock_op(sbi);
}
@@ -706,8 +712,11 @@ sync_out:
put_out:
f2fs_put_dnode(&dn);
unlock_out:
if (create)
if (create) {
f2fs_unlock_op(sbi);
if (dn.node_changed)
f2fs_balance_fs(sbi);
}
out:
trace_f2fs_map_blocks(inode, map, err);
return err;
@@ -1415,8 +1424,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
trace_f2fs_write_begin(inode, pos, len, flags);
f2fs_balance_fs(sbi);
/*
* We should check this at this moment to avoid deadlock on inode page
* and #0 page. The locking rule for inline_data conversion should be:
@@ -1466,6 +1473,17 @@ put_next:
f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi);
if (dn.node_changed && has_not_enough_free_secs(sbi, 0)) {
unlock_page(page);
f2fs_balance_fs(sbi);
lock_page(page);
if (page->mapping != mapping) {
/* The page got truncated from under us */
f2fs_put_page(page, 1);
goto repeat;
}
}
f2fs_wait_on_page_writeback(page, DATA);
/* wait for GCed encrypted page writeback */

View File

@@ -40,8 +40,6 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
struct dnode_of_data dn;
int err;
f2fs_balance_fs(sbi);
sb_start_pagefault(inode->i_sb);
f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
@@ -57,6 +55,9 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi);
if (dn.node_changed)
f2fs_balance_fs(sbi);
file_update_time(vma->vm_file);
lock_page(page);
if (unlikely(page->mapping != inode->i_mapping ||
@@ -233,9 +234,6 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
goto out;
}
go_write:
/* guarantee free sections for fsync */
f2fs_balance_fs(sbi);
/*
* Both of fdatasync() and fsync() are able to be recovered from
* sudden-power-off.
@@ -267,6 +265,8 @@ sync_nodes:
if (need_inode_block_update(sbi, ino)) {
mark_inode_dirty_sync(inode);
f2fs_write_inode(inode, NULL);
f2fs_balance_fs(sbi);
goto sync_nodes;
}
@@ -945,8 +945,6 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
return -EINVAL;
f2fs_balance_fs(F2FS_I_SB(inode));
ret = f2fs_convert_inline_inode(inode);
if (ret)
return ret;
@@ -993,8 +991,6 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
if (ret)
return ret;
f2fs_balance_fs(sbi);
ret = f2fs_convert_inline_inode(inode);
if (ret)
return ret;
@@ -1104,12 +1100,12 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
return -EINVAL;
f2fs_balance_fs(sbi);
ret = f2fs_convert_inline_inode(inode);
if (ret)
return ret;
f2fs_balance_fs(sbi);
ret = truncate_blocks(inode, i_size_read(inode), true);
if (ret)
return ret;
@@ -1152,8 +1148,6 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
loff_t off_start, off_end;
int ret = 0;
f2fs_balance_fs(sbi);
ret = inode_newsize_ok(inode, (len + offset));
if (ret)
return ret;
@@ -1162,6 +1156,8 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
if (ret)
return ret;
f2fs_balance_fs(sbi);
pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
@@ -1349,8 +1345,6 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
if (!inode_owner_or_capable(inode))
return -EACCES;
f2fs_balance_fs(F2FS_I_SB(inode));
if (f2fs_is_atomic_file(inode))
return 0;
@@ -1437,8 +1431,6 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
if (ret)
return ret;
f2fs_balance_fs(F2FS_I_SB(inode));
clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
commit_inmem_pages(inode, true);

View File

@@ -202,6 +202,10 @@ out:
f2fs_unlock_op(sbi);
f2fs_put_page(page, 1);
if (dn.node_changed)
f2fs_balance_fs(sbi);
return err;
}