ntfs: use zero_user_page
Use zero_user_page() instead of open-coding it.

[akpm@linux-foundation.org: kmap-type fixes]
Signed-off-by: Nate Diller <nate.diller@gmail.com>
Acked-by: Anton Altaparmakov <aia21@cantab.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit e3bf460f3e (parent 6d690dcac9)
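For context, zero_user_page() is a small helper (include/linux/highmem.h in kernels of this period) that bundles the map/zero/flush/unmap sequence each removed hunk open-coded. A minimal sketch, assuming the four-argument signature used here (page, offset, length, kmap slot); the name zero_user_page_sketch and the exact statement order are illustrative, not copied from the kernel source:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Illustrative sketch only -- mirrors the open-coded pattern removed below. */
static inline void zero_user_page_sketch(struct page *page, unsigned int ofs,
		unsigned int size, int kmap_type)
{
	void *kaddr = kmap_atomic(page, kmap_type);	/* map the page */

	memset((char *)kaddr + ofs, 0, size);		/* zero the byte range */
	kunmap_atomic(kaddr, kmap_type);		/* drop the mapping */
	flush_dcache_page(page);			/* keep caches coherent */
}

With the helper in place, each call site in the diff collapses to a single call such as zero_user_page(page, bh_offset(bh), blocksize, KM_USER0), and the per-site u8 *kaddr locals disappear.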
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -86,19 +86,15 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		}
 		/* Check for the current buffer head overflowing. */
 		if (unlikely(file_ofs + bh->b_size > init_size)) {
-			u8 *kaddr;
 			int ofs;
 
 			ofs = 0;
 			if (file_ofs < init_size)
 				ofs = init_size - file_ofs;
 			local_irq_save(flags);
-			kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
-			memset(kaddr + bh_offset(bh) + ofs, 0,
-					bh->b_size - ofs);
-			kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
+			zero_user_page(page, bh_offset(bh) + ofs,
+					bh->b_size - ofs, KM_BIO_SRC_IRQ);
 			local_irq_restore(flags);
-			flush_dcache_page(page);
 		}
 	} else {
 		clear_buffer_uptodate(bh);
@@ -245,8 +241,7 @@ static int ntfs_read_block(struct page *page)
 	rl = NULL;
 	nr = i = 0;
 	do {
-		u8 *kaddr;
-		int err;
+		int err = 0;
 
 		if (unlikely(buffer_uptodate(bh)))
 			continue;
@@ -254,7 +249,6 @@ static int ntfs_read_block(struct page *page)
 			arr[nr++] = bh;
 			continue;
 		}
-		err = 0;
 		bh->b_bdev = vol->sb->s_bdev;
 		/* Is the block within the allowed limits? */
 		if (iblock < lblock) {
@@ -340,10 +334,7 @@ handle_hole:
 		bh->b_blocknr = -1UL;
 		clear_buffer_mapped(bh);
 handle_zblock:
-		kaddr = kmap_atomic(page, KM_USER0);
-		memset(kaddr + i * blocksize, 0, blocksize);
-		kunmap_atomic(kaddr, KM_USER0);
-		flush_dcache_page(page);
+		zero_user_page(page, i * blocksize, blocksize, KM_USER0);
 		if (likely(!err))
 			set_buffer_uptodate(bh);
 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
@@ -460,10 +451,7 @@ retry_readpage:
 	 * ok to ignore the compressed flag here.
 	 */
 	if (unlikely(page->index > 0)) {
-		kaddr = kmap_atomic(page, KM_USER0);
-		memset(kaddr, 0, PAGE_CACHE_SIZE);
-		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
 		goto done;
 	}
 	if (!NInoAttr(ni))
@@ -790,14 +778,10 @@ lock_retry_remap:
 			 * uptodate so it can get discarded by the VM.
 			 */
 			if (err == -ENOENT || lcn == LCN_ENOENT) {
-				u8 *kaddr;
-
 				bh->b_blocknr = -1;
 				clear_buffer_dirty(bh);
-				kaddr = kmap_atomic(page, KM_USER0);
-				memset(kaddr + bh_offset(bh), 0, blocksize);
-				kunmap_atomic(kaddr, KM_USER0);
-				flush_dcache_page(page);
+				zero_user_page(page, bh_offset(bh), blocksize,
+						KM_USER0);
 				set_buffer_uptodate(bh);
 				err = 0;
 				continue;
@@ -1422,10 +1406,8 @@ retry_writepage:
 	if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
 		/* The page straddles i_size. */
 		unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
-		kaddr = kmap_atomic(page, KM_USER0);
-		memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs);
-		kunmap_atomic(kaddr, KM_USER0);
-		flush_dcache_page(page);
+		zero_user_page(page, ofs, PAGE_CACHE_SIZE - ofs,
+				KM_USER0);
 	}
 	/* Handle mst protected attributes. */
 	if (NInoMstProtected(ni))
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -606,11 +606,8 @@ do_next_page:
 				ntfs_submit_bh_for_read(bh);
 				*wait_bh++ = bh;
 			} else {
-				u8 *kaddr = kmap_atomic(page, KM_USER0);
-				memset(kaddr + bh_offset(bh), 0,
-						blocksize);
-				kunmap_atomic(kaddr, KM_USER0);
-				flush_dcache_page(page);
+				zero_user_page(page, bh_offset(bh),
+						blocksize, KM_USER0);
 				set_buffer_uptodate(bh);
 			}
 		}
@@ -685,12 +682,9 @@ map_buffer_cached:
 					ntfs_submit_bh_for_read(bh);
 					*wait_bh++ = bh;
 				} else {
-					u8 *kaddr = kmap_atomic(page,
-							KM_USER0);
-					memset(kaddr + bh_offset(bh),
-							0, blocksize);
-					kunmap_atomic(kaddr, KM_USER0);
-					flush_dcache_page(page);
+					zero_user_page(page,
+							bh_offset(bh),
+							blocksize, KM_USER0);
 					set_buffer_uptodate(bh);
 				}
 			}
@@ -708,11 +702,8 @@ map_buffer_cached:
 			 */
 			if (bh_end <= pos || bh_pos >= end) {
 				if (!buffer_uptodate(bh)) {
-					u8 *kaddr = kmap_atomic(page, KM_USER0);
-					memset(kaddr + bh_offset(bh), 0,
-							blocksize);
-					kunmap_atomic(kaddr, KM_USER0);
-					flush_dcache_page(page);
+					zero_user_page(page, bh_offset(bh),
+							blocksize, KM_USER0);
 					set_buffer_uptodate(bh);
 				}
 				mark_buffer_dirty(bh);
@@ -751,10 +742,8 @@ map_buffer_cached:
 				if (!buffer_uptodate(bh))
 					set_buffer_uptodate(bh);
 			} else if (!buffer_uptodate(bh)) {
-				u8 *kaddr = kmap_atomic(page, KM_USER0);
-				memset(kaddr + bh_offset(bh), 0, blocksize);
-				kunmap_atomic(kaddr, KM_USER0);
-				flush_dcache_page(page);
+				zero_user_page(page, bh_offset(bh), blocksize,
+						KM_USER0);
 				set_buffer_uptodate(bh);
 			}
 			continue;
@@ -878,11 +867,8 @@ rl_not_mapped_enoent:
 				if (!buffer_uptodate(bh))
 					set_buffer_uptodate(bh);
 			} else if (!buffer_uptodate(bh)) {
-				u8 *kaddr = kmap_atomic(page, KM_USER0);
-				memset(kaddr + bh_offset(bh), 0,
-						blocksize);
-				kunmap_atomic(kaddr, KM_USER0);
-				flush_dcache_page(page);
+				zero_user_page(page, bh_offset(bh),
+						blocksize, KM_USER0);
 				set_buffer_uptodate(bh);
 			}
 			continue;
@@ -1137,16 +1123,12 @@ rl_not_mapped_enoent:
 			 * to zero the overflowing region.
 			 */
 			if (unlikely(bh_pos + blocksize > initialized_size)) {
-				u8 *kaddr;
 				int ofs = 0;
 
 				if (likely(bh_pos < initialized_size))
 					ofs = initialized_size - bh_pos;
-				kaddr = kmap_atomic(page, KM_USER0);
-				memset(kaddr + bh_offset(bh) + ofs, 0,
-						blocksize - ofs);
-				kunmap_atomic(kaddr, KM_USER0);
-				flush_dcache_page(page);
+				zero_user_page(page, bh_offset(bh) + ofs,
+						blocksize - ofs, KM_USER0);
 			}
 		} else /* if (unlikely(!buffer_uptodate(bh))) */
 			err = -EIO;
@@ -1286,11 +1268,8 @@ rl_not_mapped_enoent:
 			if (PageUptodate(page))
 				set_buffer_uptodate(bh);
 			else {
-				u8 *kaddr = kmap_atomic(page, KM_USER0);
-				memset(kaddr + bh_offset(bh), 0,
-						blocksize);
-				kunmap_atomic(kaddr, KM_USER0);
-				flush_dcache_page(page);
+				zero_user_page(page, bh_offset(bh),
+						blocksize, KM_USER0);
 				set_buffer_uptodate(bh);
 			}
 		}
@@ -1350,9 +1329,7 @@ err_out:
 			len = PAGE_CACHE_SIZE;
 			if (len > bytes)
 				len = bytes;
-			kaddr = kmap_atomic(*pages, KM_USER0);
-			memset(kaddr, 0, len);
-			kunmap_atomic(kaddr, KM_USER0);
+			zero_user_page(*pages, 0, len, KM_USER0);
 		}
 		goto out;
 	}
@@ -1473,9 +1450,7 @@ err_out:
 			len = PAGE_CACHE_SIZE;
 			if (len > bytes)
 				len = bytes;
-			kaddr = kmap_atomic(*pages, KM_USER0);
-			memset(kaddr, 0, len);
-			kunmap_atomic(kaddr, KM_USER0);
+			zero_user_page(*pages, 0, len, KM_USER0);
 		}
 		goto out;
 	}