mirror of https://github.com/torvalds/linux.git
bc4ef7592f
The value of ctx->pos in the last readdir call is supposed to be set to INT_MAX due to 32bit compatibility, unless 'pos' is intentionally set to a larger value, then it's LLONG_MAX.

There's a report from the PaX SIZE_OVERFLOW plugin that "ctx->pos++" overflows (https://forums.grsecurity.net/viewtopic.php?f=1&t=4284), on a 64bit arch, where the value is 0x7fffffffffffffff, i.e. LLONG_MAX, before the increment.

We can get to that situation like this:

* emit all regular readdir entries
* still in the same call to readdir, bump the last pos to INT_MAX
* the next call to readdir will not emit any entries, but will reach the bump code again, find pos to be INT_MAX and set it to LLONG_MAX

Normally this is not a problem, but if we call readdir again, we'll find 'pos' set to LLONG_MAX and the unconditional increment will overflow.

The report from Victor at http://thread.gmane.org/gmane.comp.file-systems.btrfs/49500 with a debugging print shows that pattern:

  Overflow: e
  Overflow: 7fffffff
  Overflow: 7fffffffffffffff
  PAX: size overflow detected in function btrfs_real_readdir fs/btrfs/inode.c:5760 cicus.935_282 max, count: 9, decl: pos; num: 0; context: dir_context;
  CPU: 0 PID: 2630 Comm: polkitd Not tainted 4.2.3-grsec #1
  Hardware name: Gigabyte Technology Co., Ltd. H81ND2H/H81ND2H, BIOS F3 08/11/2015
   ffffffff81901608 0000000000000000 ffffffff819015e6 ffffc90004973d48
   ffffffff81742f0f 0000000000000007 ffffffff81901608 ffffc90004973d78
   ffffffff811cb706 0000000000000000 ffff8800d47359e0 ffffc90004973ed8
  Call Trace:
   [<ffffffff81742f0f>] dump_stack+0x4c/0x7f
   [<ffffffff811cb706>] report_size_overflow+0x36/0x40
   [<ffffffff812ef0bc>] btrfs_real_readdir+0x69c/0x6d0
   [<ffffffff811dafc8>] iterate_dir+0xa8/0x150
   [<ffffffff811e6d8d>] ? __fget_light+0x2d/0x70
   [<ffffffff811dba3a>] SyS_getdents+0xba/0x1c0
  Overflow: 1a
   [<ffffffff811db070>] ? iterate_dir+0x150/0x150
   [<ffffffff81749b69>] entry_SYSCALL_64_fastpath+0x12/0x83

The jump from 7fffffff to 7fffffffffffffff happens when new dir entries are not yet synced and are processed from the delayed list. Then the code could go to the bump section again even though it might not emit any new dir entries from the delayed list.

The fix avoids entering the "bump" section again once we've finished emitting the entries, both for synced and delayed entries.

References: https://forums.grsecurity.net/viewtopic.php?f=1&t=4284
Reported-by: Victor <services@swwu.com>
CC: stable@vger.kernel.org
Signed-off-by: David Sterba <dsterba@suse.com>
Tested-by: Holger Hoffstätte <holger.hoffstaette@googlemail.com>
Signed-off-by: Chris Mason <clm@fb.com>
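The termination logic the commit describes is easier to see in miniature. Below is a small userspace sketch, not the actual fs/btrfs/inode.c code: struct fake_ctx and fake_readdir() are invented names, and the per-entry pos handling is simplified. It only models the guard the fix adds, i.e. the "bump" section runs solely in a call that actually emitted entries, so a later empty call can never walk pos from INT_MAX to LLONG_MAX and overflow it on the next increment.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct dir_context; 'pos' plays the role of ctx->pos (loff_t). */
struct fake_ctx {
	long long pos;
};

/*
 * One simulated readdir pass; 'nr_entries' is how many directory entries this
 * call emits.  Without the 'emitted' guard, a call that emits nothing would
 * still reach the bump code, see INT_MAX, and push pos to LLONG_MAX, so the
 * next call's unconditional pos++ would overflow.
 */
static void fake_readdir(struct fake_ctx *ctx, int nr_entries)
{
	bool emitted = false;
	int i;

	for (i = 0; i < nr_entries; i++) {
		ctx->pos++;		/* advance past the emitted entry */
		emitted = true;
	}

	/* The fix: skip the bump section unless this call emitted something. */
	if (!emitted)
		return;

	if (ctx->pos >= INT_MAX)
		ctx->pos = LLONG_MAX;	/* pos was intentionally set past INT_MAX */
	else
		ctx->pos = INT_MAX;	/* normal 32bit-compatible terminator */
}

int main(void)
{
	struct fake_ctx ctx = { .pos = 0 };

	fake_readdir(&ctx, 14);	/* emits entries, pos ends at INT_MAX */
	fake_readdir(&ctx, 0);	/* emits nothing: pos stays INT_MAX, no jump to LLONG_MAX */
	fake_readdir(&ctx, 0);	/* still INT_MAX, so no overflow on a later increment */
	printf("final pos: %lld\n", ctx.pos);
	return 0;
}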
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#ifndef __DELAYED_TREE_OPERATION_H
#define __DELAYED_TREE_OPERATION_H

#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#include "ctree.h"

/* types of the delayed item */
#define BTRFS_DELAYED_INSERTION_ITEM 1
#define BTRFS_DELAYED_DELETION_ITEM 2

struct btrfs_delayed_root {
	spinlock_t lock;
	struct list_head node_list;
	/*
	 * Used for delayed nodes which is waiting to be dealt with by the
	 * worker. If the delayed node is inserted into the work queue, we
	 * drop it from this list.
	 */
	struct list_head prepare_list;
	atomic_t items;		/* for delayed items */
	atomic_t items_seq;	/* for delayed items */
	int nodes;		/* for delayed nodes */
	wait_queue_head_t wait;
};

#define BTRFS_DELAYED_NODE_IN_LIST	0
#define BTRFS_DELAYED_NODE_INODE_DIRTY	1
#define BTRFS_DELAYED_NODE_DEL_IREF	2

struct btrfs_delayed_node {
	u64 inode_id;
	u64 bytes_reserved;
	struct btrfs_root *root;
	/* Used to add the node into the delayed root's node list. */
	struct list_head n_list;
	/*
	 * Used to add the node into the prepare list, the nodes in this list
	 * is waiting to be dealt with by the async worker.
	 */
	struct list_head p_list;
	struct rb_root ins_root;
	struct rb_root del_root;
	struct mutex mutex;
	struct btrfs_inode_item inode_item;
	atomic_t refs;
	u64 index_cnt;
	unsigned long flags;
	int count;
};

struct btrfs_delayed_item {
	struct rb_node rb_node;
	struct btrfs_key key;
	struct list_head tree_list;	/* used for batch insert/delete items */
	struct list_head readdir_list;	/* used for readdir items */
	u64 bytes_reserved;
	struct btrfs_delayed_node *delayed_node;
	atomic_t refs;
	int ins_or_del;
	u32 data_len;
	char data[0];
};

static inline void btrfs_init_delayed_root(
				struct btrfs_delayed_root *delayed_root)
{
	atomic_set(&delayed_root->items, 0);
	atomic_set(&delayed_root->items_seq, 0);
	delayed_root->nodes = 0;
	spin_lock_init(&delayed_root->lock);
	init_waitqueue_head(&delayed_root->wait);
	INIT_LIST_HEAD(&delayed_root->node_list);
	INIT_LIST_HEAD(&delayed_root->prepare_list);
}

int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index);

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index);

int btrfs_inode_delayed_dir_index_count(struct inode *inode);

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);
int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, int nr);

void btrfs_balance_delayed_items(struct btrfs_root *root);

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode);
/* Used for evicting the inode. */
void btrfs_remove_delayed_node(struct inode *inode);
void btrfs_kill_delayed_inode_items(struct inode *inode);
int btrfs_commit_inode_delayed_inode(struct inode *inode);


int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode);
int btrfs_fill_inode(struct inode *inode, u32 *rdev);
int btrfs_delayed_delete_inode_ref(struct inode *inode);

/* Used for drop dead root */
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);

/* Used for clean the transaction */
void btrfs_destroy_delayed_inodes(struct btrfs_root *root);

/* Used for readdir() */
void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
			     struct list_head *del_list);
void btrfs_put_delayed_items(struct list_head *ins_list,
			     struct list_head *del_list);
int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index);
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list, bool *emitted);

/* for init */
int __init btrfs_delayed_inode_init(void);
void btrfs_delayed_inode_exit(void);

/* for debugging */
void btrfs_assert_delayed_root_empty(struct btrfs_root *root);

#endif
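For context, the readdir helpers declared above (btrfs_get_delayed_items, btrfs_should_delete_dir_index, btrfs_readdir_delayed_dir_index, btrfs_put_delayed_items) are consumed by the directory iterator in fs/btrfs/inode.c, and the bool *emitted parameter is how the delayed-list pass reports whether it produced entries, per the commit message above. The following is a rough, hedged sketch of that calling pattern, not the actual kernel code: example_readdir() and emit_on_disk_entries() are invented names, and error handling is reduced to the essentials.

/*
 * Illustrative caller of the delayed-item readdir API declared in this
 * header; loosely modeled on how a btrfs directory iterator is expected to
 * use it, with placeholder helpers where noted.
 */
static int example_readdir(struct inode *inode, struct dir_context *ctx)
{
	struct list_head ins_list;	/* delayed insertions not yet on disk */
	struct list_head del_list;	/* delayed deletions not yet on disk */
	bool emitted = false;
	int ret;

	INIT_LIST_HEAD(&ins_list);
	INIT_LIST_HEAD(&del_list);
	/* Snapshot the inode's pending delayed dir-index items. */
	btrfs_get_delayed_items(inode, &ins_list, &del_list);

	/*
	 * Placeholder for the on-disk dir-index walk; it would call
	 * btrfs_should_delete_dir_index(&del_list, index) for each item and
	 * skip entries with a pending delayed deletion, setting 'emitted'
	 * whenever it hands an entry to dir_emit().
	 */
	ret = emit_on_disk_entries(inode, ctx, &del_list, &emitted);
	if (ret)
		goto out;

	/* Emit the delayed insertions that are not yet visible on disk. */
	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
	if (ret)
		goto out;

	/* Per the fix above: only terminate ctx->pos if something was emitted. */
	if (emitted) {
		if (ctx->pos >= INT_MAX)
			ctx->pos = LLONG_MAX;
		else
			ctx->pos = INT_MAX;
	}
out:
	/* Drop the references taken by btrfs_get_delayed_items(). */
	btrfs_put_delayed_items(&ins_list, &del_list);
	return ret;
}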