Mirror of https://github.com/torvalds/linux.git, synced 2024-12-30 14:52:05 +00:00
write_one_page series
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQQqUNBr3gm4hGXdBJlZ7Krx/gZQ6wUCZEYC0gAKCRBZ7Krx/gZQ
6+N5AQCERtd5I2RLm7URX0kurwOthe7o+DX4Lj7y/mcjZV2N4gEAu9VrgHBIuLev
+KuGGY0VnKwCtcgGcGGNBrfrRjyKugs=
=gDk0
-----END PGP SIGNATURE-----

Merge tag 'pull-write-one-page' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vfs write_one_page removal from Al Viro:
 "write_one_page series"

* tag 'pull-write-one-page' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  mm,jfs: move write_one_page/folio_write_one to jfs
  ocfs2: don't use write_one_page in ocfs2_duplicate_clusters_by_page
  ufs: don't flush page immediately for DIRSYNC directories
commit 0e497ad525
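
Together, the three patches below remove the last users of write_one_page()/folio_write_one() and then the helper itself. For reference, the calling convention being retired (documented in the folio_write_one() kernel-doc removed from mm/page-writeback.c further down) was: the caller hands in a locked, dirty page, the helper writes it out with WB_SYNC_ALL, waits for the I/O, and unlocks the page. A minimal caller sketch of that old contract, not taken verbatim from any one filesystem:

/*
 * Hypothetical caller sketch of the removed API (assumed names; any page
 * cache page whose mapping implements ->writepage would do).
 */
static int sync_one_dirty_page(struct page *page)
{
	lock_page(page);
	set_page_dirty(page);
	/* Writes with WB_SYNC_ALL, waits for the I/O, unlocks the page. */
	return write_one_page(page);
}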
fs/jfs/jfs_metapage.c

@@ -691,6 +691,35 @@ void grab_metapage(struct metapage * mp)
 	unlock_page(mp->page);
 }
 
+static int metapage_write_one(struct page *page)
+{
+	struct folio *folio = page_folio(page);
+	struct address_space *mapping = folio->mapping;
+	struct writeback_control wbc = {
+		.sync_mode = WB_SYNC_ALL,
+		.nr_to_write = folio_nr_pages(folio),
+	};
+	int ret = 0;
+
+	BUG_ON(!folio_test_locked(folio));
+
+	folio_wait_writeback(folio);
+
+	if (folio_clear_dirty_for_io(folio)) {
+		folio_get(folio);
+		ret = metapage_writepage(page, &wbc);
+		if (ret == 0)
+			folio_wait_writeback(folio);
+		folio_put(folio);
+	} else {
+		folio_unlock(folio);
+	}
+
+	if (!ret)
+		ret = filemap_check_errors(mapping);
+	return ret;
+}
+
 void force_metapage(struct metapage *mp)
 {
 	struct page *page = mp->page;
@@ -700,8 +729,8 @@ void force_metapage(struct metapage *mp)
 	get_page(page);
 	lock_page(page);
 	set_page_dirty(page);
-	if (write_one_page(page))
-		jfs_error(mp->sb, "write_one_page() failed\n");
+	if (metapage_write_one(page))
+		jfs_error(mp->sb, "metapage_write_one() failed\n");
 	clear_bit(META_forcewrite, &mp->flag);
 	put_page(page);
 }
@@ -746,9 +775,9 @@ void release_metapage(struct metapage * mp)
 		set_page_dirty(page);
 		if (test_bit(META_sync, &mp->flag)) {
 			clear_bit(META_sync, &mp->flag);
-			if (write_one_page(page))
-				jfs_error(mp->sb, "write_one_page() failed\n");
-			lock_page(page); /* write_one_page unlocks the page */
+			if (metapage_write_one(page))
+				jfs_error(mp->sb, "metapage_write_one() failed\n");
+			lock_page(page);
 		}
 	} else if (mp->lsn) /* discard_metapage doesn't remove it */
 		remove_from_logsync(mp);
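
The new metapage_write_one() keeps the caller-side contract of the old helper: the metapage's page is locked and dirty on entry, written out synchronously through metapage_writepage(), and unlocked on return, with any pending writeback error on the mapping folded into the return value. A hypothetical caller sketch, modelled on force_metapage() above rather than taken from the patch:

/* Hypothetical sketch: synchronously flush one JFS metapage. */
static void example_flush_metapage(struct metapage *mp)
{
	struct page *page = mp->page;

	get_page(page);
	lock_page(page);
	set_page_dirty(page);
	if (metapage_write_one(page))		/* unlocks the page */
		jfs_error(mp->sb, "metapage_write_one() failed\n");
	put_page(page);
}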
fs/ocfs2/refcounttree.c

@@ -2952,10 +2952,11 @@ retry:
 		 */
 		if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
 			if (PageDirty(page)) {
-				/*
-				 * write_on_page will unlock the page on return
-				 */
-				ret = write_one_page(page);
+				unlock_page(page);
+				put_page(page);
+
+				ret = filemap_write_and_wait_range(mapping,
+						offset, map_end - 1);
 				goto retry;
 			}
 		}
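
Rather than writing the one locked page, ocfs2 now releases the page lock and reference first and flushes the affected byte range with filemap_write_and_wait_range(); dropping the lock first matters because writeback locks each dirty page itself. A minimal sketch of that pattern (hypothetical helper name, not from the patch):

/* Hypothetical sketch: flush [offset, map_end) after releasing the page. */
static int example_flush_range(struct address_space *mapping,
			       struct page *page,
			       loff_t offset, loff_t map_end)
{
	unlock_page(page);
	put_page(page);
	return filemap_write_and_wait_range(mapping, offset, map_end - 1);
}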
fs/ufs/dir.c

@@ -42,11 +42,10 @@ static inline int ufs_match(struct super_block *sb, int len,
 	return !memcmp(name, de->d_name, len);
 }
 
-static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
+static void ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
 {
 	struct address_space *mapping = page->mapping;
 	struct inode *dir = mapping->host;
-	int err = 0;
 
 	inode_inc_iversion(dir);
 	block_write_end(NULL, mapping, pos, len, len, page, NULL);
@@ -54,10 +53,16 @@ static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
 		i_size_write(dir, pos+len);
 		mark_inode_dirty(dir);
 	}
-	if (IS_DIRSYNC(dir))
-		err = write_one_page(page);
-	else
-		unlock_page(page);
-	return err;
+	unlock_page(page);
+}
+
+static int ufs_handle_dirsync(struct inode *dir)
+{
+	int err;
+
+	err = filemap_write_and_wait(dir->i_mapping);
+	if (!err)
+		err = sync_inode_metadata(dir, 1);
+	return err;
 }
 
@@ -99,11 +104,12 @@ void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
 	de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
 	ufs_set_de_type(dir->i_sb, de, inode->i_mode);
 
-	err = ufs_commit_chunk(page, pos, len);
+	ufs_commit_chunk(page, pos, len);
 	ufs_put_page(page);
 	if (update_times)
 		dir->i_mtime = dir->i_ctime = current_time(dir);
 	mark_inode_dirty(dir);
+	ufs_handle_dirsync(dir);
 }
 
 
@@ -390,10 +396,11 @@ got_it:
 	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
 	ufs_set_de_type(sb, de, inode->i_mode);
 
-	err = ufs_commit_chunk(page, pos, rec_len);
+	ufs_commit_chunk(page, pos, rec_len);
 	dir->i_mtime = dir->i_ctime = current_time(dir);
 
 	mark_inode_dirty(dir);
+	err = ufs_handle_dirsync(dir);
 	/* OFFSET_CACHE */
 out_put:
 	ufs_put_page(page);
@@ -531,9 +538,10 @@ int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
 	if (pde)
 		pde->d_reclen = cpu_to_fs16(sb, to - from);
 	dir->d_ino = 0;
-	err = ufs_commit_chunk(page, pos, to - from);
+	ufs_commit_chunk(page, pos, to - from);
 	inode->i_ctime = inode->i_mtime = current_time(inode);
 	mark_inode_dirty(inode);
+	err = ufs_handle_dirsync(inode);
 out:
 	ufs_put_page(page);
 	UFSD("EXIT\n");
@@ -579,7 +587,8 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
 	strcpy (de->d_name, "..");
 	kunmap(page);
 
-	err = ufs_commit_chunk(page, 0, chunk_size);
+	ufs_commit_chunk(page, 0, chunk_size);
+	err = ufs_handle_dirsync(inode);
 fail:
 	put_page(page);
 	return err;
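
ufs_commit_chunk() now only dirties and unlocks the directory page; the flush that used to happen immediately for DIRSYNC directories is deferred to a single ufs_handle_dirsync() call after the update, which writes back the directory's page cache and then syncs its inode metadata. A hypothetical caller sketch mirroring the pattern the patch applies in ufs_set_link() and ufs_add_link():

/* Hypothetical sketch of the new directory-update pattern. */
static int example_update_dir(struct inode *dir, struct page *page,
			      loff_t pos, unsigned len)
{
	ufs_commit_chunk(page, pos, len);	/* dirties and unlocks the page */
	dir->i_mtime = dir->i_ctime = current_time(dir);
	mark_inode_dirty(dir);
	return ufs_handle_dirsync(dir);		/* data first, then metadata */
}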
include/linux/pagemap.h

@@ -1066,12 +1066,6 @@ static inline void folio_cancel_dirty(struct folio *folio)
 bool folio_clear_dirty_for_io(struct folio *folio);
 bool clear_page_dirty_for_io(struct page *page);
 void folio_invalidate(struct folio *folio, size_t offset, size_t length);
-int __must_check folio_write_one(struct folio *folio);
-static inline int __must_check write_one_page(struct page *page)
-{
-	return folio_write_one(page_folio(page));
-}
-
 int __set_page_dirty_nobuffers(struct page *page);
 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
 
mm/page-writeback.c

@@ -2583,46 +2583,6 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
 	return ret;
 }
 
-/**
- * folio_write_one - write out a single folio and wait on I/O.
- * @folio: The folio to write.
- *
- * The folio must be locked by the caller and will be unlocked upon return.
- *
- * Note that the mapping's AS_EIO/AS_ENOSPC flags will be cleared when this
- * function returns.
- *
- * Return: %0 on success, negative error code otherwise
- */
-int folio_write_one(struct folio *folio)
-{
-	struct address_space *mapping = folio->mapping;
-	int ret = 0;
-	struct writeback_control wbc = {
-		.sync_mode = WB_SYNC_ALL,
-		.nr_to_write = folio_nr_pages(folio),
-	};
-
-	BUG_ON(!folio_test_locked(folio));
-
-	folio_wait_writeback(folio);
-
-	if (folio_clear_dirty_for_io(folio)) {
-		folio_get(folio);
-		ret = mapping->a_ops->writepage(&folio->page, &wbc);
-		if (ret == 0)
-			folio_wait_writeback(folio);
-		folio_put(folio);
-	} else {
-		folio_unlock(folio);
-	}
-
-	if (!ret)
-		ret = filemap_check_errors(mapping);
-	return ret;
-}
-EXPORT_SYMBOL(folio_write_one);
-
 /*
  * For address_spaces which do not use buffers nor write back.
  */
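
With jfs carrying its own metapage_write_one() and ocfs2/ufs converted to the filemap helpers, folio_write_one() has no callers left, so its implementation and export go away here. Code that still wants "write this one locked, dirty folio and wait" semantics can approximate it by unlocking the folio and flushing its byte range, along the lines of the ocfs2 change (hypothetical helper, assuming the folio belongs to a mapping):

/* Hypothetical replacement sketch, not part of this series. */
static int example_write_folio_range(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;
	loff_t pos = folio_pos(folio);
	loff_t end = pos + folio_size(folio) - 1;

	folio_unlock(folio);	/* writeback takes the folio lock itself */
	return filemap_write_and_wait_range(mapping, pos, end);
}

Unlike the removed helper, this writes back every dirty folio that falls in the range, not just the one passed in.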