2011-06-07 09:49:55 +00:00
|
|
|
/*
|
|
|
|
* User address space access functions.
|
|
|
|
*
|
|
|
|
 * For licensing details see kernel-base/COPYING
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/highmem.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
|
2012-04-06 21:32:32 +00:00
|
|
|
#include <asm/word-at-a-time.h>
|
2012-04-20 22:41:36 +00:00
|
|
|
#include <linux/sched.h>
|
2012-04-06 21:32:32 +00:00
|
|
|
|
2011-06-07 09:49:55 +00:00
|
|
|
/*
|
2013-10-24 10:52:06 +00:00
|
|
|
* We rely on the nested NMI work to allow atomic faults from the NMI path; the
|
|
|
|
* nested NMI paths are careful to preserve CR2.
|
2011-06-07 09:49:55 +00:00
|
|
|
*/
|
|
|
|
unsigned long
|
|
|
|
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
|
|
|
|
{
|
2013-10-24 10:52:06 +00:00
|
|
|
unsigned long ret;
|
2011-06-07 09:49:55 +00:00
|
|
|
|
2012-06-11 13:44:26 +00:00
|
|
|
if (__range_not_ok(from, n, TASK_SIZE))
|
2013-10-24 10:52:06 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Even though this function is typically called from NMI/IRQ context
|
|
|
|
* disable pagefaults so that its behaviour is consistent even when
|
|
|
|
* called form other contexts.
|
|
|
|
*/
|
|
|
|
pagefault_disable();
|
|
|
|
ret = __copy_from_user_inatomic(to, from, n);
|
|
|
|
pagefault_enable();
|
|
|
|
|
2013-10-30 20:16:22 +00:00
|
|
|
return ret;
|
2011-06-07 09:49:55 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
|