fs: Convert is_partially_uptodate to folios
Since the uptodate property is maintained on a per-folio basis, the
is_partially_uptodate method should also take a folio. Fix the types at the
same time so it's clear that it returns true/false and takes the count in
bytes, not blocks.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
parent cd1067beee
commit 2e7e80f7e7
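To make the converted method shape concrete before the diff, here is a minimal hypothetical sketch (not part of this commit; the "myfs" name is made up): the hook now receives the folio directly, from and count are byte offsets within that folio, and the result is a bool.

/*
 * Illustrative only, not from this commit: a made-up filesystem ("myfs")
 * implementing the converted hook. With no per-block bookkeeping of its own,
 * only a fully uptodate folio can satisfy any byte range.
 */
static bool myfs_is_partially_uptodate(struct folio *folio, size_t from,
				       size_t count)
{
	return folio_test_uptodate(folio);
}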
@@ -258,7 +258,7 @@ prototypes::
 	int (*migratepage)(struct address_space *, struct page *, struct page *);
 	void (*putback_page) (struct page *);
 	int (*launder_page)(struct page *);
-	int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
+	bool (*is_partially_uptodate)(struct folio *, size_t from, size_t count);
 	int (*error_remove_page)(struct address_space *, struct page *);
 	int (*swap_activate)(struct file *);
 	int (*swap_deactivate)(struct file *);
@@ -747,8 +747,8 @@ cache in your filesystem. The following members are defined:
 	void (*putback_page) (struct page *);
 	int (*launder_page) (struct page *);

-	int (*is_partially_uptodate) (struct page *, unsigned long,
-					unsigned long);
+	bool (*is_partially_uptodate) (struct folio *, size_t from,
+					size_t count);
 	void (*is_dirty_writeback) (struct page *, bool *, bool *);
 	int (*error_remove_page) (struct mapping *mapping, struct page *page);
 	int (*swap_activate)(struct file *);
@@ -937,9 +937,9 @@ cache in your filesystem. The following members are defined:

 ``is_partially_uptodate``
 	Called by the VM when reading a file through the pagecache when
-	the underlying blocksize != pagesize. If the required block is
-	up to date then the read can complete without needing the IO to
-	bring the whole page up to date.
+	the underlying blocksize is smaller than the size of the folio.
+	If the required block is up to date then the read can complete
+	without needing I/O to bring the whole page up to date.

 ``is_dirty_writeback``
 	Called by the VM when attempting to reclaim a page. The VM uses
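As an illustrative wiring (not taken from this commit; the aops name is hypothetical), filesystems that use buffer heads usually do not open-code this hook at all but point it at the generic helper converted in fs/buffer.c below, which after this change matches the new folio-based type.

/* Hypothetical example address_space_operations for a buffer_head-based fs. */
static const struct address_space_operations examplefs_aops = {
	/* ... readpage, writepage, and friends ... */
	.is_partially_uptodate	= block_is_partially_uptodate,
};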
fs/buffer.c
@@ -2206,29 +2206,27 @@ int generic_write_end(struct file *file, struct address_space *mapping,
 EXPORT_SYMBOL(generic_write_end);

 /*
- * block_is_partially_uptodate checks whether buffers within a page are
+ * block_is_partially_uptodate checks whether buffers within a folio are
  * uptodate or not.
  *
- * Returns true if all buffers which correspond to a file portion
- * we want to read are uptodate.
+ * Returns true if all buffers which correspond to the specified part
+ * of the folio are uptodate.
  */
-int block_is_partially_uptodate(struct page *page, unsigned long from,
-					unsigned long count)
+bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
 {
 	unsigned block_start, block_end, blocksize;
 	unsigned to;
 	struct buffer_head *bh, *head;
-	int ret = 1;
+	bool ret = true;

-	if (!page_has_buffers(page))
-		return 0;
-
-	head = page_buffers(page);
+	head = folio_buffers(folio);
+	if (!head)
+		return false;
 	blocksize = head->b_size;
-	to = min_t(unsigned, PAGE_SIZE - from, count);
+	to = min_t(unsigned, folio_size(folio) - from, count);
 	to = from + to;
-	if (from < blocksize && to > PAGE_SIZE - blocksize)
-		return 0;
+	if (from < blocksize && to > folio_size(folio) - blocksize)
+		return false;

 	bh = head;
 	block_start = 0;
@@ -2236,7 +2234,7 @@ int block_is_partially_uptodate(struct page *page, unsigned long from,
 		block_end = block_start + blocksize;
 		if (block_end > from && block_start < to) {
 			if (!buffer_uptodate(bh)) {
-				ret = 0;
+				ret = false;
 				break;
 			}
 			if (block_end >= to)
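A note on the calling convention (illustrative sketch; the function name is made up): from and count are byte offsets relative to the start of the folio, not file positions, so a caller rebases with folio_pos() first, exactly as the mm/filemap.c hunk at the end of this commit does.

static bool example_range_uptodate(struct folio *folio, loff_t pos,
				   size_t count)
{
	/* Rebase the file position to a byte offset inside this folio. */
	size_t from = pos - folio_pos(folio);

	return block_is_partially_uptodate(folio, from, count);
}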
@@ -424,37 +424,33 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
 EXPORT_SYMBOL_GPL(iomap_readahead);

 /*
- * iomap_is_partially_uptodate checks whether blocks within a page are
+ * iomap_is_partially_uptodate checks whether blocks within a folio are
  * uptodate or not.
  *
- * Returns true if all blocks which correspond to a file portion
- * we want to read within the page are uptodate.
+ * Returns true if all blocks which correspond to the specified part
+ * of the folio are uptodate.
  */
-int
-iomap_is_partially_uptodate(struct page *page, unsigned long from,
-		unsigned long count)
+bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
 {
-	struct folio *folio = page_folio(page);
 	struct iomap_page *iop = to_iomap_page(folio);
-	struct inode *inode = page->mapping->host;
-	unsigned len, first, last;
-	unsigned i;
+	struct inode *inode = folio->mapping->host;
+	size_t len;
+	unsigned first, last, i;

-	/* Limit range to one page */
-	len = min_t(unsigned, PAGE_SIZE - from, count);
+	if (!iop)
+		return false;
+
+	/* Limit range to this folio */
+	len = min(folio_size(folio) - from, count);

 	/* First and last blocks in range within page */
 	first = from >> inode->i_blkbits;
 	last = (from + len - 1) >> inode->i_blkbits;

-	if (iop) {
-		for (i = first; i <= last; i++)
-			if (!test_bit(i, iop->uptodate))
-				return 0;
-		return 1;
-	}
-
-	return 0;
+	for (i = first; i <= last; i++)
+		if (!test_bit(i, iop->uptodate))
+			return false;
+	return true;
 }
 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

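To see how the byte-based arguments map onto iomap's per-block uptodate bits, a worked example with made-up numbers (4 KiB blocks, i_blkbits == 12, inside a 16 KiB folio); none of these values appear in the commit itself.

size_t from = 5000, count = 3000;
size_t len = min(16384 - from, count);	/* 3000: the range stays inside the folio */
unsigned first = from >> 12;		/* block 1 (bytes 4096..8191) */
unsigned last = (from + len - 1) >> 12;	/* block 1: only uptodate bit 1 is tested */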
@@ -225,8 +225,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 			get_block_t *get_block, struct writeback_control *wbc,
 			bh_end_io_t *handler);
 int block_read_full_page(struct page*, get_block_t*);
-int block_is_partially_uptodate(struct page *page, unsigned long from,
-		unsigned long count);
+bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
 		unsigned flags, struct page **pagep, get_block_t *get_block);
 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
@@ -400,8 +400,8 @@ struct address_space_operations {
 	bool (*isolate_page)(struct page *, isolate_mode_t);
 	void (*putback_page)(struct page *);
 	int (*launder_page) (struct page *);
-	int (*is_partially_uptodate) (struct page *, unsigned long,
-					unsigned long);
+	bool (*is_partially_uptodate) (struct folio *, size_t from,
+					size_t count);
 	void (*is_dirty_writeback) (struct page *, bool *, bool *);
 	int (*error_remove_page)(struct address_space *, struct page *);

@@ -227,8 +227,7 @@ ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
 		const struct iomap_ops *ops);
 int iomap_readpage(struct page *page, const struct iomap_ops *ops);
 void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
-int iomap_is_partially_uptodate(struct page *page, unsigned long from,
-		unsigned long count);
+bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
 int iomap_releasepage(struct page *page, gfp_t gfp_mask);
 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
 void iomap_invalidatepage(struct page *page, unsigned int offset,
@@ -2452,7 +2452,7 @@ static bool filemap_range_uptodate(struct address_space *mapping,
 		pos -= folio_pos(folio);
 	}

-	return mapping->a_ops->is_partially_uptodate(&folio->page, pos, count);
+	return mapping->a_ops->is_partially_uptodate(folio, pos, count);
 }

 static int filemap_update_page(struct kiocb *iocb,
@@ -2844,7 +2844,7 @@ static inline loff_t folio_seek_hole_data(struct xa_state *xas,
 		offset = offset_in_folio(folio, start) & ~(bsz - 1);

 	do {
-		if (ops->is_partially_uptodate(&folio->page, offset, bsz) ==
+		if (ops->is_partially_uptodate(folio, offset, bsz) ==
 				seek_data)
 			break;
 		start = (start + bsz) & ~(bsz - 1);