devmem: check vmalloc address on kmem read/write

Otherwise vmalloc_to_page() will BUG().

This also makes the kmem read/write implementation aligned with mem(4):
"References to nonexistent locations cause errors to be returned." Here we
return -ENXIO (inspired by Hugh) if no bytes have been transferred to/from
user space, otherwise return partial read/write results.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Greg Kroah-Hartman <gregkh@suse.de>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
KAMEZAWA Hiroyuki 2010-02-02 13:44:05 -08:00 committed by Linus Torvalds
parent 931e80e4b3
commit 325fda71d0

View File

@@ -395,6 +395,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
unsigned long p = *ppos; unsigned long p = *ppos;
ssize_t low_count, read, sz; ssize_t low_count, read, sz;
char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
int err = 0;
read = 0; read = 0;
if (p < (unsigned long) high_memory) { if (p < (unsigned long) high_memory) {
@@ -441,12 +442,16 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
return -ENOMEM; return -ENOMEM;
while (count > 0) { while (count > 0) {
sz = size_inside_page(p, count); sz = size_inside_page(p, count);
if (!is_vmalloc_or_module_addr((void *)p)) {
err = -ENXIO;
break;
}
sz = vread(kbuf, (char *)p, sz); sz = vread(kbuf, (char *)p, sz);
if (!sz) if (!sz)
break; break;
if (copy_to_user(buf, kbuf, sz)) { if (copy_to_user(buf, kbuf, sz)) {
free_page((unsigned long)kbuf); err = -EFAULT;
return -EFAULT; break;
} }
count -= sz; count -= sz;
buf += sz; buf += sz;
@@ -455,8 +460,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
} }
free_page((unsigned long)kbuf); free_page((unsigned long)kbuf);
} }
*ppos = p; *ppos = p;
return read; return read ? read : err;
} }
@@ -520,6 +525,7 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
ssize_t wrote = 0; ssize_t wrote = 0;
ssize_t virtr = 0; ssize_t virtr = 0;
char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
int err = 0;
if (p < (unsigned long) high_memory) { if (p < (unsigned long) high_memory) {
unsigned long to_write = min_t(unsigned long, count, unsigned long to_write = min_t(unsigned long, count,
@@ -540,12 +546,14 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
unsigned long sz = size_inside_page(p, count); unsigned long sz = size_inside_page(p, count);
unsigned long n; unsigned long n;
if (!is_vmalloc_or_module_addr((void *)p)) {
err = -ENXIO;
break;
}
n = copy_from_user(kbuf, buf, sz); n = copy_from_user(kbuf, buf, sz);
if (n) { if (n) {
if (wrote + virtr) err = -EFAULT;
break; break;
free_page((unsigned long)kbuf);
return -EFAULT;
} }
sz = vwrite(kbuf, (char *)p, sz); sz = vwrite(kbuf, (char *)p, sz);
count -= sz; count -= sz;
@@ -556,8 +564,8 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
free_page((unsigned long)kbuf); free_page((unsigned long)kbuf);
} }
*ppos = p; *ppos = p;
return virtr + wrote; return virtr + wrote ? : err;
} }
#endif #endif