fs/buffer: Convert __block_write_begin_int() to take a folio
There are no plans to convert buffer_head infrastructure to use large folios, but __block_write_begin_int() is called from iomap, and it's more convenient and less error-prone if we pass in a folio from iomap. It also has a nice saving of almost 200 bytes of code from removing repeated calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
commit d1bd0b4ebf
parent 640d1930be
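For readers following the diff below, here is a minimal sketch of the conversion pattern, not part of the patch (the function name example_ready_for_write() is invented for illustration): a caller that only has a struct page resolves the head page once with page_folio(), after which the folio flag testers used in the patch, folio_test_locked() and folio_test_uptodate(), act on that head directly. Each legacy PageLocked()/PageUptodate() call on a possibly-tail page repeats a compound_head() lookup internally, which is where the nearly 200 bytes of saved code come from.

#include <linux/pagemap.h>

/*
 * Illustrative only -- not from the patch.  Resolve the head page once,
 * then use the folio flag testers; the repeated compound_head() lookups
 * hidden inside PageLocked()/PageUptodate() go away.
 */
static bool example_ready_for_write(struct page *page)
{
        struct folio *folio = page_folio(page);

        return folio_test_locked(folio) && folio_test_uptodate(folio);
}

The patch's own __block_write_begin() wrapper, further down in fs/buffer.c, performs exactly this page_folio() bridging so that existing page-based callers keep working unchanged.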
fs/buffer.c

@@ -1969,34 +1969,34 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
 	}
 }
 
-int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
 		get_block_t *get_block, const struct iomap *iomap)
 {
 	unsigned from = pos & (PAGE_SIZE - 1);
 	unsigned to = from + len;
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	unsigned block_start, block_end;
 	sector_t block;
 	int err = 0;
 	unsigned blocksize, bbits;
 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
 
-	BUG_ON(!PageLocked(page));
+	BUG_ON(!folio_test_locked(folio));
 	BUG_ON(from > PAGE_SIZE);
 	BUG_ON(to > PAGE_SIZE);
 	BUG_ON(from > to);
 
-	head = create_page_buffers(page, inode, 0);
+	head = create_page_buffers(&folio->page, inode, 0);
 	blocksize = head->b_size;
 	bbits = block_size_bits(blocksize);
 
-	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
+	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
 
 	for(bh = head, block_start = 0; bh != head || !block_start;
 	    block++, block_start=block_end, bh = bh->b_this_page) {
 		block_end = block_start + blocksize;
 		if (block_end <= from || block_start >= to) {
-			if (PageUptodate(page)) {
+			if (folio_test_uptodate(folio)) {
 				if (!buffer_uptodate(bh))
 					set_buffer_uptodate(bh);
 			}
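One thing to note in the hunk above: the byte offsets are still computed with pos & (PAGE_SIZE - 1) and bounded by PAGE_SIZE, so the function continues to assume a single-page folio, consistent with the commit message's point that buffer_heads are not gaining large-folio support. As a hedged sketch of what a large-folio-aware variant would do differently (example_offset() is an invented name; it assumes the generic offset_in_folio() and folio_test_large() helpers, which this patch does not use):

#include <linux/mm.h>

static size_t example_offset(struct folio *folio, loff_t pos)
{
        /* What the patched function keeps doing: single-page folios only. */
        size_t from = pos & (PAGE_SIZE - 1);

        /* What a large-folio-aware variant would use instead (hypothetical). */
        if (folio_test_large(folio))
                from = offset_in_folio(folio, pos);

        return from;
}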
@@ -2016,20 +2016,20 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
 
 			if (buffer_new(bh)) {
 				clean_bdev_bh_alias(bh);
-				if (PageUptodate(page)) {
+				if (folio_test_uptodate(folio)) {
 					clear_buffer_new(bh);
 					set_buffer_uptodate(bh);
 					mark_buffer_dirty(bh);
 					continue;
 				}
 				if (block_end > to || block_start < from)
-					zero_user_segments(page,
+					folio_zero_segments(folio,
 						to, block_end,
 						block_start, from);
 				continue;
 			}
 		}
-		if (PageUptodate(page)) {
+		if (folio_test_uptodate(folio)) {
 			if (!buffer_uptodate(bh))
 				set_buffer_uptodate(bh);
 			continue;
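In the hunk above, zero_user_segments() on the page becomes folio_zero_segments() on the folio. Both take two byte ranges; here they are the parts of the freshly allocated block that fall outside the write, i.e. [to, block_end) and [block_start, from), which must be zeroed because the caller will only copy data into [from, to). A rough sketch of the helper's semantics (example_zero_two_ranges() is an invented name; it assumes folio_zero_range() as the single-range zeroing primitive):

#include <linux/highmem.h>

/* Roughly what folio_zero_segments(folio, s1, e1, s2, e2) amounts to. */
static void example_zero_two_ranges(struct folio *folio,
                size_t s1, size_t e1, size_t s2, size_t e2)
{
        if (e1 > s1)
                folio_zero_range(folio, s1, e1 - s1);
        if (e2 > s2)
                folio_zero_range(folio, s2, e2 - s2);
}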
@@ -2050,14 +2050,15 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
 			err = -EIO;
 	}
 	if (unlikely(err))
-		page_zero_new_buffers(page, from, to);
+		page_zero_new_buffers(&folio->page, from, to);
 	return err;
 }
 
 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
 		get_block_t *get_block)
 {
-	return __block_write_begin_int(page, pos, len, get_block, NULL);
+	return __block_write_begin_int(page_folio(page), pos, len, get_block,
+			NULL);
 }
 EXPORT_SYMBOL(__block_write_begin);
 
fs/internal.h

@@ -37,7 +37,7 @@ static inline int emergency_thaw_bdev(struct super_block *sb)
 /*
  * buffer.c
  */
-int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
 		get_block_t *get_block, const struct iomap *iomap);
 
 /*
fs/iomap/buffered-io.c

@@ -603,6 +603,7 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
 	struct page *page;
+	struct folio *folio;
 	int status = 0;
 
 	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
@@ -624,11 +625,12 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 		status = -ENOMEM;
 		goto out_no_page;
 	}
+	folio = page_folio(page);
 
 	if (srcmap->type == IOMAP_INLINE)
 		status = iomap_write_begin_inline(iter, page);
 	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
-		status = __block_write_begin_int(page, pos, len, NULL, srcmap);
+		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
 	else
 		status = __iomap_write_begin(iter, pos, len, page);
 
@@ -960,11 +962,12 @@ EXPORT_SYMBOL_GPL(iomap_truncate_page);
 static loff_t iomap_page_mkwrite_iter(struct iomap_iter *iter,
 		struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	loff_t length = iomap_length(iter);
 	int ret;
 
 	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
-		ret = __block_write_begin_int(page, iter->pos, length, NULL,
+		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
 					      &iter->iomap);
 		if (ret)
 			return ret;