commit 2fe2edf85f

Merge tag 'trace-fixes-v4.6-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing ring-buffer fixes from Steven Rostedt:
 "Hao Qin reported an integer overflow possibility with signed and
  unsigned numbers in the ring-buffer code:

    https://bugzilla.kernel.org/show_bug.cgi?id=118001

  At first I did not think this was much of an issue, because the
  overflow would be caught later when either too much data was
  allocated or the RB_WARN_ON() that shuts down the ring buffer would
  trigger.  But looking closer into it, I found that the right settings
  could bypass the checks and crash the kernel.  Luckily, this is only
  accessible by root.

  The first fix is to convert all the variables to long, so that we do
  not run into issues with 32-bit variables being assigned 64-bit
  values.  This fixes the RB_WARN_ON() triggering.

  The next fix is to get rid of a duplicate DIV_ROUND_UP() that, when
  called twice with the right value, can cause a kernel crash.  The
  first DIV_ROUND_UP() normalizes the input, which is then checked
  against the minimum allowable value.  But DIV_ROUND_UP() is called
  again, which can overflow due to its (a + b - 1)/b logic: the first
  call rounded the value up, and the second can overflow on the "+ b"
  part.  The second DIV_ROUND_UP() call came in via a separate change a
  while ago, and the code is cleaned up to remove it"

* tag 'trace-fixes-v4.6-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  ring-buffer: Prevent overflow of size in ring_buffer_resize()
  ring-buffer: Use long for nr_pages to avoid overflow failures
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -437,7 +437,7 @@ struct ring_buffer_per_cpu {
 	raw_spinlock_t			reader_lock;	/* serialize readers */
 	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
-	unsigned int			nr_pages;
+	unsigned long			nr_pages;
 	unsigned int			current_context;
 	struct list_head		*pages;
 	struct buffer_page		*head_page;	/* read from head */
@@ -458,7 +458,7 @@ struct ring_buffer_per_cpu {
 	u64				write_stamp;
 	u64				read_stamp;
 	/* ring buffer pages to update, > 0 to add, < 0 to remove */
-	int				nr_pages_to_update;
+	long				nr_pages_to_update;
 	struct list_head		new_pages; /* new pages to add */
 	struct work_struct		update_pages_work;
 	struct completion		update_done;
@@ -1128,10 +1128,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 	return 0;
 }
 
-static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
+static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
 {
-	int i;
 	struct buffer_page *bpage, *tmp;
+	long i;
 
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page;
@@ -1168,7 +1168,7 @@ free_pages:
 }
 
 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
-			     unsigned nr_pages)
+			     unsigned long nr_pages)
 {
 	LIST_HEAD(pages);
 
@@ -1193,7 +1193,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 }
 
 static struct ring_buffer_per_cpu *
-rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
+rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_page *bpage;
@@ -1293,8 +1293,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 			struct lock_class_key *key)
 {
 	struct ring_buffer *buffer;
+	long nr_pages;
 	int bsize;
-	int cpu, nr_pages;
+	int cpu;
 
 	/* keep it in its own cache line */
 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
@@ -1420,12 +1421,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
 }
 
 static int
-rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
+rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
 {
 	struct list_head *tail_page, *to_remove, *next_page;
 	struct buffer_page *to_remove_page, *tmp_iter_page;
 	struct buffer_page *last_page, *first_page;
-	unsigned int nr_removed;
+	unsigned long nr_removed;
 	unsigned long head_bit;
 	int page_entries;
 
@@ -1642,7 +1643,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 			int cpu_id)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
-	unsigned nr_pages;
+	unsigned long nr_pages;
 	int cpu, err = 0;
 
 	/*
@@ -1656,14 +1657,13 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
 		return size;
 
-	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
-	size *= BUF_PAGE_SIZE;
+	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 
 	/* we need a minimum of two pages */
-	if (size < BUF_PAGE_SIZE * 2)
-		size = BUF_PAGE_SIZE * 2;
+	if (nr_pages < 2)
+		nr_pages = 2;
 
-	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+	size = nr_pages * BUF_PAGE_SIZE;
 
 	/*
 	 * Don't succeed if resizing is disabled, as a reader might be
@@ -4640,8 +4640,9 @@ static int rb_cpu_notify(struct notifier_block *self,
 	struct ring_buffer *buffer =
 		container_of(self, struct ring_buffer, cpu_notify);
 	long cpu = (long)hcpu;
-	int cpu_i, nr_pages_same;
-	unsigned int nr_pages;
+	long nr_pages_same;
+	int cpu_i;
+	unsigned long nr_pages;
 
 	switch (action) {
 	case CPU_UP_PREPARE:
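For contrast, here is the fixed path from the @@ -1656 hunk above as a
userspace sketch under the same assumptions (64-bit unsigned long, an
illustrative BUF_PAGE_SIZE of 4080).  The only remaining DIV_ROUND_UP() now
runs before the minimum check, so a wrapped result is clamped to two pages
instead of slipping through.

#include <stdio.h>

#define BUF_PAGE_SIZE 4080UL                    /* assumed value, as before */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* A size in the last BUF_PAGE_SIZE - 1 bytes below 2^64:
         * the one case where the single DIV_ROUND_UP() still wraps. */
        unsigned long size = ~0UL;              /* ULONG_MAX */
        unsigned long nr_pages;

        /* size + 4079 wraps past 2^64, so nr_pages computes to 0 ... */
        nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

        /* ... but the minimum check now runs on nr_pages afterward,
         * so the wrapped value is clamped rather than bypassed. */
        if (nr_pages < 2)
                nr_pages = 2;

        size = nr_pages * BUF_PAGE_SIZE;

        printf("nr_pages = %lu, size = %lu\n", nr_pages, size);  /* 2, 8160 */
        return 0;
}

Merely huge, non-wrapping sizes now yield a correspondingly huge nr_pages,
which should simply fail in the page-allocation path rather than corrupting
the buffer.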