btrfs: fix an out-of-bounds access in copy_compressed_data_to_page()
[BUG]
The following script can cause btrfs to crash:

  $ mount -o compress-force=lzo $DEV /mnt
  $ dd if=/dev/urandom of=/mnt/foo bs=4k count=1
  $ sync

The call trace looks like this:

  general protection fault, probably for non-canonical address 0xe04b37fccce3b000: 0000 [#1] PREEMPT SMP NOPTI
  CPU: 5 PID: 164 Comm: kworker/u20:3 Not tainted 5.15.0-rc7-custom+ #4
  Workqueue: btrfs-delalloc btrfs_work_helper [btrfs]
  RIP: 0010:__memcpy+0x12/0x20
  Call Trace:
   lzo_compress_pages+0x236/0x540 [btrfs]
   btrfs_compress_pages+0xaa/0xf0 [btrfs]
   compress_file_range+0x431/0x8e0 [btrfs]
   async_cow_start+0x12/0x30 [btrfs]
   btrfs_work_helper+0xf6/0x3e0 [btrfs]
   process_one_work+0x294/0x5d0
   worker_thread+0x55/0x3c0
   kthread+0x140/0x170
   ret_from_fork+0x22/0x30
  ---[ end trace 63c3c0f131e61982 ]---

[CAUSE]
In lzo_compress_pages(), the parameter @out_pages is not only an output
parameter (for the number of compressed pages) but also an input
parameter, giving the upper limit of compressed pages we may utilize.

In commit d4088803f5 ("btrfs: subpage: make lzo_compress_pages()
compatible"), the refactoring no longer takes @out_pages as an input,
thus completely ignoring the limit.

In the compress-force case we can hit incompressible data whose
compressed size goes beyond the page limit, causing the above crash.

[FIX]
Save @out_pages as @max_nr_page, pass it down to
copy_compressed_data_to_page(), and check whether we are beyond the
limit before accessing the pages.

Note: this also fixes a crash on 32-bit architectures that was suspected
to be caused by the merge of btrfs patches into 5.16-rc1. Reported in
https://lore.kernel.org/all/20211104115001.GU20319@twin.jikos.cz/ .

Reported-by: Omar Sandoval <osandov@fb.com>
Fixes: d4088803f5 ("btrfs: subpage: make lzo_compress_pages() compatible")
Reviewed-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
[ add note ]
Signed-off-by: David Sterba <dsterba@suse.com>
parent d1ed82f355
commit 6f019c0e01
@@ -125,6 +125,7 @@ static inline size_t read_compress_length(const char *buf)
 static int copy_compressed_data_to_page(char *compressed_data,
 					size_t compressed_size,
 					struct page **out_pages,
+					unsigned long max_nr_page,
 					u32 *cur_out,
 					const u32 sectorsize)
 {
@@ -132,6 +133,9 @@ static int copy_compressed_data_to_page(char *compressed_data,
 	u32 orig_out;
 	struct page *cur_page;
 
+	if ((*cur_out / PAGE_SIZE) >= max_nr_page)
+		return -E2BIG;
+
 	/*
 	 * We never allow a segment header crossing sector boundary, previous
 	 * run should ensure we have enough space left inside the sector.
@@ -158,6 +162,9 @@ static int copy_compressed_data_to_page(char *compressed_data,
 		u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
 				     orig_out + compressed_size - *cur_out);
 
+		if ((*cur_out / PAGE_SIZE) >= max_nr_page)
+			return -E2BIG;
+
 		cur_page = out_pages[*cur_out / PAGE_SIZE];
 		/* Allocate a new page */
 		if (!cur_page) {
@@ -195,6 +202,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
 	struct page *page_in = NULL;
+	const unsigned long max_nr_page = *out_pages;
 	int ret = 0;
 	/* Points to the file offset of input data */
 	u64 cur_in = start;
@@ -202,6 +210,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 	u32 cur_out = 0;
 	u32 len = *total_out;
 
+	ASSERT(max_nr_page > 0);
 	*out_pages = 0;
 	*total_out = 0;
 	*total_in = 0;
@@ -237,7 +246,8 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 	}
 
 	ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
-					   pages, &cur_out, sectorsize);
+					   pages, max_nr_page,
+					   &cur_out, sectorsize);
 	if (ret < 0)
 		goto out;
 
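As a back-of-the-envelope check on why compress-force can exceed the page
limit at all, the sketch below applies the generic LZO1X worst-case bound;
the two 4-byte headers are an assumption inferred from the segment-header
comment in the diff, not figures taken from btrfs.

#include <stdio.h>

/* Generic LZO1X worst-case size for incompressible input of x bytes. */
static unsigned long lzo1x_worst_compress(unsigned long x)
{
	return x + x / 16 + 64 + 3;
}

int main(void)
{
	unsigned long in = 4096;  /* the 4K urandom write from the reproducer */
	/* assumed framing: 4-byte stream header + 4-byte segment header */
	unsigned long out = 4 + 4 + lzo1x_worst_compress(in);

	printf("%lu incompressible bytes can need %lu output bytes (%lu pages)\n",
	       in, out, (out + 4095) / 4096);
	return 0;
}

One 4 KiB incompressible input can thus legitimately need two output
pages, and with the limit ignored the copy loop indexed a page slot the
caller never provided.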