writeback: dirty inodes against their matching cgroup bdi_writeback's
__mark_inode_dirty() always dirtied the inode against the root wb (bdi_writeback). The previous patches added all the infrastructure necessary to attribute an inode against the wb of the dirtying cgroup. This patch updates __mark_inode_dirty() so that it uses the wb associated with the inode instead of unconditionally using the root one.

Currently, none of the filesystems has FS_CGROUP_WRITEBACK and all pages will keep being dirtied against the root wb.

v2: Updated for per-inode wb association.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@fb.com>
This commit is contained in:
parent db12536040
commit 0747259d13
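For context on where the new wb comes from (a sketch only, not the verbatim backing-dev.h code): with the per-inode association added earlier in the series, inode_to_wb() is expected to hand back the inode's attached bdi_writeback when CONFIG_CGROUP_WRITEBACK is enabled, and otherwise fall back to the bdi's embedded root wb, which is what the diff below switches __mark_inode_dirty() to:

/*
 * Sketch only, not the verbatim header code: the shape of the helper
 * the diff below switches to.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
#ifdef CONFIG_CGROUP_WRITEBACK
        /* per-inode wb association set up by the earlier patches */
        return inode->i_wb;
#else
        /* without cgroup writeback, every inode maps to the bdi's root wb */
        return &inode_to_bdi(inode)->wb;
#endif
}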
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1504,7 +1504,6 @@ static noinline void block_dump___mark_inode_dirty(struct inode *inode)
 void __mark_inode_dirty(struct inode *inode, int flags)
 {
         struct super_block *sb = inode->i_sb;
-        struct backing_dev_info *bdi = NULL;
         int dirtytime;
 
         trace_writeback_mark_inode_dirty(inode, flags);
@@ -1574,30 +1573,30 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                  * reposition it (that would break b_dirty time-ordering).
                  */
                 if (!was_dirty) {
+                        struct bdi_writeback *wb = inode_to_wb(inode);
                         struct list_head *dirty_list;
                         bool wakeup_bdi = false;
-                        bdi = inode_to_bdi(inode);
 
                         spin_unlock(&inode->i_lock);
-                        spin_lock(&bdi->wb.list_lock);
+                        spin_lock(&wb->list_lock);
 
-                        WARN(bdi_cap_writeback_dirty(bdi) &&
-                             !test_bit(WB_registered, &bdi->wb.state),
-                             "bdi-%s not registered\n", bdi->name);
+                        WARN(bdi_cap_writeback_dirty(wb->bdi) &&
+                             !test_bit(WB_registered, &wb->state),
+                             "bdi-%s not registered\n", wb->bdi->name);
 
                         inode->dirtied_when = jiffies;
                         if (dirtytime)
                                 inode->dirtied_time_when = jiffies;
 
                         if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES))
-                                dirty_list = &bdi->wb.b_dirty;
+                                dirty_list = &wb->b_dirty;
                         else
-                                dirty_list = &bdi->wb.b_dirty_time;
+                                dirty_list = &wb->b_dirty_time;
 
-                        wakeup_bdi = inode_wb_list_move_locked(inode, &bdi->wb,
+                        wakeup_bdi = inode_wb_list_move_locked(inode, wb,
                                                                dirty_list);
 
-                        spin_unlock(&bdi->wb.list_lock);
+                        spin_unlock(&wb->list_lock);
                         trace_writeback_dirty_inode_enqueue(inode);
 
                         /*
@@ -1606,8 +1605,8 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                          * to make sure background write-back happens
                          * later.
                          */
-                        if (bdi_cap_writeback_dirty(bdi) && wakeup_bdi)
-                                wb_wakeup_delayed(&bdi->wb);
+                        if (bdi_cap_writeback_dirty(wb->bdi) && wakeup_bdi)
+                                wb_wakeup_delayed(wb);
                         return;
                 }
         }
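As the log notes, no filesystem sets FS_CGROUP_WRITEBACK yet, so dirtying still lands on the root wb in practice. Roughly, a filesystem would opt in by setting that flag in its file_system_type; a hedged sketch with hypothetical names (example_fs_type, examplefs, example_mount):

/*
 * Sketch only: how a filesystem would opt in to cgroup writeback at this
 * point in the series, by setting FS_CGROUP_WRITEBACK in ->fs_flags.
 * example_fs_type, "examplefs" and example_mount() are hypothetical names.
 */
static struct file_system_type example_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "examplefs",
        .mount          = example_mount,        /* hypothetical mount helper */
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV | FS_CGROUP_WRITEBACK,
};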