xtensa: fix NOMMU build with lock_mm_and_find_vma() conversion
It turns out that xtensa has a really odd configuration situation: you
can do a no-MMU config, but still have the page fault code enabled.
Which doesn't sound all that sensible, but it turns out that xtensa can
have protection faults even without the MMU, and we have this:
  config PFAULT
        bool "Handle protection faults" if EXPERT && !MMU
        default y
        help
          Handle protection faults. MMU configurations must enable it.
          noMMU configurations may disable it if used memory map never
          generates protection faults or faults are always fatal.

          If unsure, say Y.
which completely violated my expectations of the page fault handling.
End result: Guenter reports that the xtensa no-MMU builds all fail with
arch/xtensa/mm/fault.c: In function ‘do_page_fault’:
arch/xtensa/mm/fault.c:133:8: error: implicit declaration of function ‘lock_mm_and_find_vma’
because I never exposed the new lock_mm_and_find_vma() function for the
no-MMU case.
Doing so is simple enough, and fixes the problem.
Reported-and-tested-by: Guenter Roeck <linux@roeck-us.net>
Fixes: a050ba1e74 ("mm/fault: convert remaining simple cases to lock_mm_and_find_vma()")
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit d85a143b69
parent b25f62ccb4
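For context, the lock_mm_and_find_vma() conversion makes each architecture's fault handler ask one helper to take the mmap lock and look up the faulting VMA. A minimal sketch of that call pattern, assuming the usual do_page_fault() structure (the flags variable and the bad_area_nosemaphore label are stand-ins for the normal handler boilerplate, not the literal xtensa code):

	/* Sketch only: the shape of a fault handler after the conversion. */
	struct vm_area_struct *vma;
	vm_fault_t fault;

	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		goto bad_area_nosemaphore;	/* no VMA covers the faulting address */

	/* ... check vma->vm_flags against the kind of access ... */

	fault = handle_mm_fault(vma, address, flags, regs);
	/* ... retry and error handling elided ... */
	mmap_read_unlock(mm);

Without the prototype visible in !MMU builds, this call is exactly what produces the implicit-declaration error above.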
include/linux/mm.h:

@@ -2323,6 +2323,9 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
 int generic_error_remove_page(struct address_space *mapping, struct page *page);
 
+struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
+		unsigned long address, struct pt_regs *regs);
+
 #ifdef CONFIG_MMU
 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
 			unsigned long address, unsigned int flags,
@@ -2334,8 +2337,6 @@ void unmap_mapping_pages(struct address_space *mapping,
 		pgoff_t start, pgoff_t nr, bool even_cows);
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
-struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
-		unsigned long address, struct pt_regs *regs);
 #else
 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
 			unsigned long address, unsigned int flags,
mm/nommu.c (11 lines changed):
@@ -630,6 +630,17 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 }
 EXPORT_SYMBOL(find_vma);
 
+/*
+ * At least xtensa ends up having protection faults even with no
+ * MMU.. No stack expansion, at least.
+ */
+struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
+			unsigned long addr, struct pt_regs *regs)
+{
+	mmap_read_lock(mm);
+	return vma_lookup(mm, addr);
+}
+
 /*
  * expand a stack to a given address
  * - not supported under NOMMU conditions
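The new no-MMU helper relies on vma_lookup(), which returns a VMA only if it actually contains the address, so there is no stack expansion and no "next VMA above the address" behaviour. As a rough illustration of what it amounts to (a sketch of the classic vma_lookup() semantics, not the kernel's actual definition):

	/* Illustration only: what lock_mm_and_find_vma() boils down to on no-MMU. */
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);		/* first VMA with vm_end > addr, if any */
	if (vma && addr < vma->vm_start)	/* addr falls in a hole: not mapped */
		vma = NULL;
	return vma;

That matches the comment in the hunk: protection faults can still happen without an MMU, but stack expansion never needs to.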