Mirror of https://github.com/torvalds/linux.git
Merge commit 'kumar/kumar-mmu'
commit c9b59da130
@@ -84,7 +84,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
 #endif
-	set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
+	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
 	flush_tlb_page(NULL, vaddr);

 	return (void*) vaddr;
@@ -711,7 +711,7 @@ static inline void * phys_to_virt(unsigned long address)
 /*
  * Change "struct page" to physical address.
  */
-#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
+#define page_to_phys(page)	((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)

 /* We do NOT want virtual merging, it would put too much pressure on
  * our iommu allocator. Instead, we want drivers to be smart enough
@@ -13,10 +13,16 @@
 #define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
 #endif

+#ifdef CONFIG_PTE_64BIT
+#define PTE_FLAGS_OFFSET	4	/* offset of PTE flags, in bytes */
+#else
+#define PTE_FLAGS_OFFSET	0
+#endif
+
 #ifndef __ASSEMBLY__
 /*
  * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
- * physical addressing. For now this just the IBM PPC440.
+ * physical addressing.
  */
 #ifdef CONFIG_PTE_64BIT
 typedef unsigned long long pte_basic_t;
@@ -261,6 +261,7 @@ extern int icache_44x_need_flush;
 #define _PAGE_HWEXEC	0x00000004	/* H: Execute permission */
 #define _PAGE_ACCESSED	0x00000008	/* S: Page referenced */
 #define _PAGE_DIRTY	0x00000010	/* S: Page dirty */
+#define _PAGE_SPECIAL	0x00000020	/* S: Special page */
 #define _PAGE_USER	0x00000040	/* S: User page */
 #define _PAGE_ENDIAN	0x00000080	/* H: E bit */
 #define _PAGE_GUARDED	0x00000100	/* H: G bit */
@@ -276,6 +277,7 @@ extern int icache_44x_need_flush;
 /* ERPN in a PTE never gets cleared, ignore it */
 #define _PTE_NONE_MASK	0xffffffff00000000ULL

+#define __HAVE_ARCH_PTE_SPECIAL

 #elif defined(CONFIG_FSL_BOOKE)
 /*
@@ -305,6 +307,7 @@ extern int icache_44x_need_flush;
 #define _PAGE_COHERENT	0x00100	/* H: M bit */
 #define _PAGE_NO_CACHE	0x00200	/* H: I bit */
 #define _PAGE_WRITETHRU	0x00400	/* H: W bit */
+#define _PAGE_SPECIAL	0x00800	/* S: Special page */

 #ifdef CONFIG_PTE_64BIT
 /* ERPN in a PTE never gets cleared, ignore it */
@@ -315,6 +318,8 @@ extern int icache_44x_need_flush;
 #define _PMD_PRESENT_MASK (PAGE_MASK)
 #define _PMD_BAD	(~PAGE_MASK)

+#define __HAVE_ARCH_PTE_SPECIAL
+
 #elif defined(CONFIG_8xx)
 /* Definitions for 8xx embedded chips. */
 #define _PAGE_PRESENT	0x0001	/* Page is valid */
@@ -362,8 +367,14 @@ extern int icache_44x_need_flush;
 #define _PAGE_ACCESSED	0x100	/* R: page referenced */
 #define _PAGE_EXEC	0x200	/* software: i-cache coherency required */
 #define _PAGE_RW	0x400	/* software: user write access allowed */
+#define _PAGE_SPECIAL	0x800	/* software: Special page */

+#ifdef CONFIG_PTE_64BIT
+/* We never clear the high word of the pte */
+#define _PTE_NONE_MASK	(0xffffffff00000000ULL | _PAGE_HASHPTE)
+#else
 #define _PTE_NONE_MASK	_PAGE_HASHPTE
+#endif

 #define _PMD_PRESENT	0
 #define _PMD_PRESENT_MASK (PAGE_MASK)
@@ -372,6 +383,8 @@ extern int icache_44x_need_flush;
 /* Hash table based platforms need atomic updates of the linux PTE */
 #define PTE_ATOMIC_UPDATES	1

+#define __HAVE_ARCH_PTE_SPECIAL
+
 #endif

 /*
@@ -404,6 +417,9 @@ extern int icache_44x_need_flush;
 #ifndef _PAGE_WRITETHRU
 #define _PAGE_WRITETHRU	0
 #endif
+#ifndef _PAGE_SPECIAL
+#define _PAGE_SPECIAL	0
+#endif
 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK _PMD_PRESENT
 #endif
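The `#ifndef`/`#define ... 0` fallbacks above are what let generic code use optional PTE flag bits unconditionally: on a platform without the bit, the macro is the constant 0 and the compiler folds the whole test away. A minimal standalone sketch of the pattern (illustrative only, not kernel code):

#include <stdio.h>

#ifndef _PAGE_SPECIAL
#define _PAGE_SPECIAL 0	/* platform has no "special" bit */
#endif

/* Generic code can test the bit without any #ifdef; when the
 * platform lacks it, this function folds to "return 0". */
static int pte_special(unsigned long pte_val)
{
	return (pte_val & _PAGE_SPECIAL) != 0;
}

int main(void)
{
	printf("special: %d\n", pte_special(0x800));
	return 0;
}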
@@ -517,7 +533,8 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);

 #define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
 #define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
-#define pte_clear(mm,addr,ptep)	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)
+#define pte_clear(mm, addr, ptep) \
+	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
@@ -533,7 +550,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
 static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_special(pte_t pte) { return 0; }
+static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }

 static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
 static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -552,7 +569,7 @@ static inline pte_t pte_mkdirty(pte_t pte) {
 static inline pte_t pte_mkyoung(pte_t pte) {
 	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) {
-	return pte; }
+	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
 static inline unsigned long pte_pgprot(pte_t pte)
 {
 	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
@@ -575,6 +592,10 @@ extern int flush_hash_pages(unsigned context, unsigned long va,
 extern void add_hash_page(unsigned context, unsigned long va,
			   unsigned long pmdval);

+/* Flush an entry from the TLB/hash table */
+extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
+			     unsigned long address);
+
 /*
  * Atomic PTE updates.
  *
@@ -612,9 +633,6 @@ static inline unsigned long pte_update(pte_t *p,
 	return old;
 }
 #else /* CONFIG_PTE_64BIT */
-/* TODO: Change that to only modify the low word and move set_pte_at()
- * out of line
- */
 static inline unsigned long long pte_update(pte_t *p,
					     unsigned long clr,
					     unsigned long set)
@@ -652,14 +670,35 @@ static inline unsigned long long pte_update(pte_t *p,
  * On machines which use an MMU hash table we avoid changing the
  * _PAGE_HASHPTE bit.
  */

+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, pte_t pte)
+{
+#if (_PAGE_HASHPTE != 0) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+	pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
+#elif defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+#if _PAGE_HASHPTE != 0
+	if (pte_val(*ptep) & _PAGE_HASHPTE)
+		flush_hash_entry(mm, ptep, addr);
+#endif
+	__asm__ __volatile__("\
+		stw%U0%X0 %2,%0\n\
+		eieio\n\
+		stw%U0%X0 %L2,%1"
+	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+	: "r" (pte) : "memory");
+#else
+	*ptep = (*ptep & _PAGE_HASHPTE) | (pte & ~_PAGE_HASHPTE);
+#endif
+}
+
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			       pte_t *ptep, pte_t pte)
 {
-#if _PAGE_HASHPTE != 0
-	pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
-#else
-	*ptep = pte;
-#endif
+#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+	WARN_ON(pte_present(*ptep));
+#endif
+	__set_pte_at(mm, addr, ptep, pte);
 }

 /*
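For CONFIG_PTE_64BIT on SMP, the __set_pte_at() added above stores a 64-bit PTE with two 32-bit stw instructions separated by eieio, so another processor's TLB-miss handling can never observe the flags word (which carries the valid bit and sits at PTE_FLAGS_OFFSET 4) paired with a stale upper word. A rough C rendering of that ordering contract, as a sketch assuming a big-endian 32-bit part; the kernel itself must use the inline asm above, not this:

#include <stdint.h>

struct pte64 {
	uint32_t hi;	/* upper physical-address bits, offset 0 */
	uint32_t flags;	/* valid/protection bits, offset 4 (PTE_FLAGS_OFFSET) */
};

/* eieio orders the two stores as seen by other processors */
static inline void store_barrier(void)
{
	__asm__ __volatile__("eieio" : : : "memory");
}

static void set_pte_ordered(volatile struct pte64 *ptep, struct pte64 pte)
{
	ptep->hi = pte.hi;		/* address half first... */
	store_barrier();
	ptep->flags = pte.flags;	/* ...flags half last: entry only now valid */
}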
@@ -109,6 +109,7 @@
 #define SPRN_EVPR	0x3D6	/* Exception Vector Prefix Register */
 #define SPRN_L1CSR0	0x3F2	/* L1 Cache Control and Status Register 0 */
 #define SPRN_L1CSR1	0x3F3	/* L1 Cache Control and Status Register 1 */
+#define SPRN_MMUCSR0	0x3F4	/* MMU Control and Status Register 0 */
 #define SPRN_PIT	0x3DB	/* Programmable Interval Timer */
 #define SPRN_BUCSR	0x3F5	/* Branch Unit Control and Status */
 #define SPRN_L2CSR0	0x3F9	/* L2 Data Cache Control and Status Register 0 */
@@ -410,6 +411,12 @@
 #define L2CSR0_L2LOA	0x00000080	/* L2 Cache Lock Overflow Allocate */
 #define L2CSR0_L2LO	0x00000020	/* L2 Cache Lock Overflow */

+/* Bit definitions for MMUCSR0 */
+#define MMUCSR0_TLB1FI	0x00000002	/* TLB1 Flash invalidate */
+#define MMUCSR0_TLB0FI	0x00000004	/* TLB0 Flash invalidate */
+#define MMUCSR0_TLB2FI	0x00000040	/* TLB2 Flash invalidate */
+#define MMUCSR0_TLB3FI	0x00000020	/* TLB3 Flash invalidate */
+
 /* Bit definitions for SGR. */
 #define SGR_NORMAL	0	/* Speculative fetching allowed. */
 #define SGR_GUARDED	1	/* Speculative fetching disallowed. */
@@ -29,6 +29,9 @@
 #include <linux/mm.h>

 extern void _tlbie(unsigned long address, unsigned int pid);
+extern void _tlbil_all(void);
+extern void _tlbil_pid(unsigned int pid);
+extern void _tlbil_va(unsigned long address, unsigned int pid);

 #if defined(CONFIG_40x) || defined(CONFIG_8xx)
 #define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
@@ -38,31 +41,31 @@ extern void _tlbia(void);

 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-	_tlbia();
+	_tlbil_pid(mm->context.id);
 }

 static inline void flush_tlb_page(struct vm_area_struct *vma,
				   unsigned long vmaddr)
 {
-	_tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
+	_tlbil_va(vmaddr, vma ? vma->vm_mm->context.id : 0);
 }

 static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					  unsigned long vmaddr)
 {
-	_tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
+	flush_tlb_page(vma, vmaddr);
 }

 static inline void flush_tlb_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end)
 {
-	_tlbia();
+	_tlbil_pid(vma->vm_mm->context.id);
 }

 static inline void flush_tlb_kernel_range(unsigned long start,
					   unsigned long end)
 {
-	_tlbia();
+	_tlbil_pid(0);
 }

 #elif defined(CONFIG_PPC32)
@@ -352,6 +352,7 @@ int main(void)
 #endif

 	DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
+	DEFINE(PTE_SIZE, sizeof(pte_t));

 #ifdef CONFIG_KVM
 	DEFINE(TLBE_BYTES, sizeof(struct tlbe));
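PTE_SIZE is exported here so that the assembly in hash_low_32.S (further down) can step through PTE arrays by sizeof(pte_t), which is 8 with CONFIG_PTE_64BIT and 4 otherwise, instead of a hard-coded 4. The asm-offsets mechanism works roughly like this sketch (the DEFINE macro matches the kernel's kbuild helper; the pte_t typedef here is only a stand-in for illustration):

/* Compile this file to assembly; the Kbuild script scoops out the
 * "->" marker lines and emits "#define PTE_SIZE 8" into a generated
 * header that .S files can include. */
typedef unsigned long long pte_t;	/* stand-in for the real type */

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(PTE_SIZE, sizeof(pte_t));
	return 0;
}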
@@ -369,13 +369,13 @@ i##n: \
 DataAccess:
 	EXCEPTION_PROLOG
 	mfspr	r10,SPRN_DSISR
+	stw	r10,_DSISR(r11)
 	andis.	r0,r10,0xa470		/* weird error? */
 	bne	1f			/* if not, try to put a PTE */
 	mfspr	r4,SPRN_DAR		/* into the hash table */
 	rlwinm	r3,r10,32-15,21,21	/* DSISR_STORE -> _PAGE_RW */
 	bl	hash_page
-1:	stw	r10,_DSISR(r11)
-	mr	r5,r10
+1:	lwz	r5,_DSISR(r11)		/* get DSISR value */
 	mfspr	r4,SPRN_DAR
 	EXC_XFER_EE_LITE(0x300, handle_page_fault)

@@ -422,7 +422,6 @@ skpinv:	addi	r6,r6,1	/* Increment */
  * r12 is pointer to the pte
  */
 #ifdef CONFIG_PTE_64BIT
-#define PTE_FLAGS_OFFSET	4
 #define FIND_PTE	\
 	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
 	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
@@ -431,7 +430,6 @@ skpinv:	addi	r6,r6,1	/* Increment */
 	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
 	lwz	r11, 4(r12);		/* Get pte entry */
 #else
-#define PTE_FLAGS_OFFSET	0
 #define FIND_PTE	\
 	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */	\
 	lwz	r11, 0(r11);		/* Get L1 entry */	\
@@ -274,6 +274,10 @@ _GLOBAL(real_writeb)
 /*
  * Flush MMU TLB
  */
+#ifndef CONFIG_FSL_BOOKE
+_GLOBAL(_tlbil_all)
+_GLOBAL(_tlbil_pid)
+#endif
 _GLOBAL(_tlbia)
 #if defined(CONFIG_40x)
 	sync			/* Flush to memory before changing mapping */
@@ -344,6 +348,9 @@ _GLOBAL(_tlbia)
 /*
  * Flush MMU TLB for a particular address
  */
+#ifndef CONFIG_FSL_BOOKE
+_GLOBAL(_tlbil_va)
+#endif
 _GLOBAL(_tlbie)
 #if defined(CONFIG_40x)
 	/* We run the search with interrupts disabled because we have to change
@@ -436,6 +443,53 @@ _GLOBAL(_tlbie)
 #endif /* ! CONFIG_40x */
 	blr

+#if defined(CONFIG_FSL_BOOKE)
+/*
+ * Flush MMU TLB, but only on the local processor (no broadcast)
+ */
+_GLOBAL(_tlbil_all)
+#define MMUCSR0_TLBFI	(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
+			 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
+	li	r3,(MMUCSR0_TLBFI)@l
+	mtspr	SPRN_MMUCSR0, r3
+1:
+	mfspr	r3,SPRN_MMUCSR0
+	andi.	r3,r3,MMUCSR0_TLBFI@l
+	bne	1b
+	blr
+
+/*
+ * Flush MMU TLB for a particular process id, but only on the local processor
+ * (no broadcast)
+ */
+_GLOBAL(_tlbil_pid)
+/* we currently do an invalidate all since we don't have per pid invalidate */
+	li	r3,(MMUCSR0_TLBFI)@l
+	mtspr	SPRN_MMUCSR0, r3
+1:
+	mfspr	r3,SPRN_MMUCSR0
+	andi.	r3,r3,MMUCSR0_TLBFI@l
+	bne	1b
+	blr
+
+/*
+ * Flush MMU TLB for a particular address, but only on the local processor
+ * (no broadcast)
+ */
+_GLOBAL(_tlbil_va)
+	slwi	r4,r4,16
+	mtspr	SPRN_MAS6,r4		/* assume AS=0 for now */
+	tlbsx	0,r3
+	mfspr	r4,SPRN_MAS1		/* check valid */
+	andis.	r3,r4,MAS1_VALID@h
+	beqlr
+	rlwinm	r4,r4,0,1,31
+	mtspr	SPRN_MAS1,r4
+	tlbwe
+	blr
+#endif /* CONFIG_FSL_BOOKE */
+
+
 /*
  * Flush instruction cache.
  * This is a no-op on the 601.
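_tlbil_all and _tlbil_pid above are write-then-poll loops on MMUCSR0: software sets the per-TLB-array flash-invalidate bits and the hardware clears them when the invalidation completes. In C the same sequence would look roughly like this (a sketch assuming the kernel's mtspr()/mfspr() accessor macros and the MMUCSR0_* bits defined in reg_booke.h earlier in this diff):

#define MMUCSR0_TLBFI	(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
			 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)

static void tlbil_all_sketch(void)
{
	mtspr(SPRN_MMUCSR0, MMUCSR0_TLBFI);	/* request flash invalidate */
	while (mfspr(SPRN_MMUCSR0) & MMUCSR0_TLBFI)
		;	/* hardware clears the bits when the flush is done */
}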
@@ -119,6 +119,9 @@ EXPORT_SYMBOL(flush_instruction_cache);
 EXPORT_SYMBOL(flush_tlb_kernel_range);
 EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(_tlbie);
+#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
+EXPORT_SYMBOL(_tlbil_va);
+#endif
 #endif
 EXPORT_SYMBOL(__flush_icache_range);
 EXPORT_SYMBOL(flush_dcache_range);
@@ -75,7 +75,7 @@ _GLOBAL(hash_page_sync)
  * Returns to the caller if the access is illegal or there is no
  * mapping for the address. Otherwise it places an appropriate PTE
  * in the hash table and returns from the exception.
- * Uses r0, r3 - r8, ctr, lr.
+ * Uses r0, r3 - r8, r10, ctr, lr.
  */
 	.text
 _GLOBAL(hash_page)
@@ -106,9 +106,15 @@ _GLOBAL(hash_page)
 	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
 	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
 112:	add	r5,r5,r7		/* convert to phys addr */
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
 	lwz	r8,0(r5)		/* get pmd entry */
 	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
+#else
+	rlwinm	r8,r4,13,19,29		/* Compute pgdir/pmd offset */
+	lwzx	r8,r8,r5		/* Get L1 entry */
+	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
+#endif
 #ifdef CONFIG_SMP
 	beq-	hash_page_out		/* return if no mapping */
 #else
@@ -118,7 +124,11 @@ _GLOBAL(hash_page)
 	   to the address following the rfi. */
 	beqlr-
 #endif
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
+#else
+	rlwimi	r8,r4,23,20,28		/* compute pte address */
+#endif
 	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
 	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

@@ -127,9 +137,15 @@ _GLOBAL(hash_page)
  * because almost always, there won't be a permission violation
  * and there won't already be an HPTE, and thus we will have
  * to update the PTE to set _PAGE_HASHPTE. -- paulus.
+ *
+ * If PTE_64BIT is set, the low word is the flags word; use that
+ * word for locking since it contains all the interesting bits.
  */
+#if (PTE_FLAGS_OFFSET != 0)
+	addi	r8,r8,PTE_FLAGS_OFFSET
+#endif
 retry:
-	lwarx	r6,0,r8			/* get linux-style pte */
+	lwarx	r6,0,r8			/* get linux-style pte, flag word */
 	andc.	r5,r3,r6		/* check access & ~permission */
 #ifdef CONFIG_SMP
 	bne-	hash_page_out		/* return if access not permitted */
@@ -137,6 +153,15 @@ retry:
 	bnelr-
 #endif
 	or	r5,r0,r6		/* set accessed/dirty bits */
+#ifdef CONFIG_PTE_64BIT
+#ifdef CONFIG_SMP
+	subf	r10,r6,r8		/* create false data dependency */
+	subi	r10,r10,PTE_FLAGS_OFFSET
+	lwzx	r10,r6,r10		/* Get upper PTE word */
+#else
+	lwz	r10,-PTE_FLAGS_OFFSET(r8)
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_PTE_64BIT */
 	stwcx.	r5,0,r8			/* attempt to update PTE */
 	bne-	retry			/* retry if someone got there first */

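The subf/subi/lwzx sequence above is the "false data dependency" trick: on SMP the upper PTE word must not be read before the lwarx that fetched the flags word, and rather than pay for a barrier, the code folds the lwarx result into the address of the second load so the CPU cannot issue that load early. In pseudo-C (illustrative only; the guarantee holds for the hand-written assembly, not for C, where a compiler would simply cancel the terms):

#include <stdint.h>

#define PTE_FLAGS_OFFSET 4	/* as with CONFIG_PTE_64BIT */

/* r8 = address of the flags word, r6 = value lwarx returned */
static uint32_t load_upper_word(uint32_t *r8, uint32_t r6)
{
	uint32_t r10;

	r10 = (uint32_t)(uintptr_t)r8 - r6;	/* subf: fold loaded value in   */
	r10 -= PTE_FLAGS_OFFSET;		/* subi: step back to upper word */
	/* lwzx r10,r6,r10: address (r6 + r10) == r8 - PTE_FLAGS_OFFSET,
	 * but it now formally depends on r6, ordering it after the lwarx */
	return *(uint32_t *)(uintptr_t)(r6 + r10);
}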
@@ -203,9 +228,9 @@ _GLOBAL(add_hash_page)
  * we can't take a hash table miss (assuming the code is
  * covered by a BAT). -- paulus
  */
-	mfmsr	r10
+	mfmsr	r9
 	SYNC
-	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
+	rlwinm	r0,r9,0,17,15	/* clear bit 16 (MSR_EE) */
 	rlwinm	r0,r0,0,28,26	/* clear MSR_DR */
 	mtmsr	r0
 	SYNC_601
@@ -214,14 +239,14 @@ _GLOBAL(add_hash_page)
 	tophys(r7,0)

 #ifdef CONFIG_SMP
-	addis	r9,r7,mmu_hash_lock@ha
-	addi	r9,r9,mmu_hash_lock@l
-10:	lwarx	r0,0,r9			/* take the mmu_hash_lock */
+	addis	r6,r7,mmu_hash_lock@ha
+	addi	r6,r6,mmu_hash_lock@l
+10:	lwarx	r0,0,r6			/* take the mmu_hash_lock */
 	cmpi	0,r0,0
 	bne-	11f
-	stwcx.	r8,0,r9
+	stwcx.	r8,0,r6
 	beq+	12f
-11:	lwz	r0,0(r9)
+11:	lwz	r0,0(r6)
 	cmpi	0,r0,0
 	beq	10b
 	b	11b
@@ -234,10 +259,24 @@ _GLOBAL(add_hash_page)
  * HPTE, so we just unlock and return.
  */
 	mr	r8,r5
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r8,r4,22,20,29
+#else
+	rlwimi	r8,r4,23,20,28
+	addi	r8,r8,PTE_FLAGS_OFFSET
+#endif
 1:	lwarx	r6,0,r8
 	andi.	r0,r6,_PAGE_HASHPTE
 	bne	9f			/* if HASHPTE already set, done */
+#ifdef CONFIG_PTE_64BIT
+#ifdef CONFIG_SMP
+	subf	r10,r6,r8		/* create false data dependency */
+	subi	r10,r10,PTE_FLAGS_OFFSET
+	lwzx	r10,r6,r10		/* Get upper PTE word */
+#else
+	lwz	r10,-PTE_FLAGS_OFFSET(r8)
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_PTE_64BIT */
 	ori	r5,r6,_PAGE_HASHPTE
 	stwcx.	r5,0,r8
 	bne-	1b
@@ -246,13 +285,15 @@ _GLOBAL(add_hash_page)

 9:
 #ifdef CONFIG_SMP
+	addis	r6,r7,mmu_hash_lock@ha
+	addi	r6,r6,mmu_hash_lock@l
 	eieio
 	li	r0,0
-	stw	r0,0(r9)		/* clear mmu_hash_lock */
+	stw	r0,0(r6)		/* clear mmu_hash_lock */
 #endif

 	/* reenable interrupts and DR */
-	mtmsr	r10
+	mtmsr	r9
 	SYNC_601
 	isync

@@ -267,7 +308,8 @@ _GLOBAL(add_hash_page)
  * r5 contains the linux PTE, r6 contains the old value of the
  * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
  * offset to be added to addresses (0 if the MMU is on,
- * -KERNELBASE if it is off).
+ * -KERNELBASE if it is off). r10 contains the upper half of
+ * the PTE if CONFIG_PTE_64BIT.
  * On SMP, the caller should have the mmu_hash_lock held.
  * We assume that the caller has (or will) set the _PAGE_HASHPTE
  * bit in the linux PTE in memory. The value passed in r6 should
@@ -313,6 +355,11 @@ _GLOBAL(create_hpte)
 BEGIN_FTR_SECTION
 	ori	r8,r8,_PAGE_COHERENT	/* set M (coherence required) */
 END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
+#ifdef CONFIG_PTE_64BIT
+	/* Put the XPN bits into the PTE */
+	rlwimi	r8,r10,8,20,22
+	rlwimi	r8,r10,2,29,29
+#endif

 	/* Construct the high word of the PPC-style PTE (r5) */
 	rlwinm	r5,r3,7,1,24	/* put VSID in 0x7fffff80 bits */
@@ -499,14 +546,18 @@ _GLOBAL(flush_hash_pages)
 	isync

 	/* First find a PTE in the range that has _PAGE_HASHPTE set */
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r5,r4,22,20,29
-1:	lwz	r0,0(r5)
+#else
+	rlwimi	r5,r4,23,20,28
+#endif
+1:	lwz	r0,PTE_FLAGS_OFFSET(r5)
 	cmpwi	cr1,r6,1
 	andi.	r0,r0,_PAGE_HASHPTE
 	bne	2f
 	ble	cr1,19f
 	addi	r4,r4,0x1000
-	addi	r5,r5,4
+	addi	r5,r5,PTE_SIZE
 	addi	r6,r6,-1
 	b	1b

@@ -545,7 +596,10 @@ _GLOBAL(flush_hash_pages)
 	 * already clear, we're done (for this pte). If not,
 	 * clear it (atomically) and proceed. -- paulus.
 	 */
-33:	lwarx	r8,0,r5			/* fetch the pte */
+#if (PTE_FLAGS_OFFSET != 0)
+	addi	r5,r5,PTE_FLAGS_OFFSET
+#endif
+33:	lwarx	r8,0,r5			/* fetch the pte flags word */
 	andi.	r0,r8,_PAGE_HASHPTE
 	beq	8f		/* done if HASHPTE is already clear */
 	rlwinm	r8,r8,0,31,29	/* clear HASHPTE bit */
@@ -590,7 +644,7 @@ _GLOBAL(flush_hash_patch_B)

 8:	ble	cr1,9f			/* if all ptes checked */
 81:	addi	r6,r6,-1
-	addi	r5,r5,4			/* advance to next pte */
+	addi	r5,r5,PTE_SIZE
 	addi	r4,r4,0x1000
 	lwz	r0,0(r5)		/* check next pte */
 	cmpwi	cr1,r6,1
@@ -73,7 +73,7 @@ extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
 #endif /* HAVE_TLBCAM */

 #ifdef CONFIG_PTE_64BIT
-/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
+/* Some processors use an 8kB pgdir because they have 8-byte Linux PTEs. */
 #define PGDIR_ORDER	1
 #else
 #define PGDIR_ORDER	0
@@ -288,7 +288,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 }

 /*
- * Map in all of physical memory starting at KERNELBASE.
+ * Map in a big chunk of physical memory starting at KERNELBASE.
  */
 void __init mapin_ram(void)
 {
@@ -45,6 +45,7 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
 		flush_hash_pages(mm->context.id, addr, ptephys, 1);
 	}
 }
+EXPORT_SYMBOL(flush_hash_entry);

 /*
  * Called by ptep_set_access_flags, must flush on CPUs for which the
@@ -50,6 +50,7 @@ config 44x
 	select PPC_UDBG_16550
 	select 4xx_SOC
 	select PPC_PCI_CHOICE
+	select PHYS_64BIT

 config E200
 	bool "Freescale e200"
@@ -128,18 +129,20 @@ config FSL_EMB_PERFMON

 config PTE_64BIT
 	bool
-	depends on 44x || E500
-	default y if 44x
-	default y if E500 && PHYS_64BIT
+	depends on 44x || E500 || PPC_86xx
+	default y if PHYS_64BIT

 config PHYS_64BIT
-	bool 'Large physical address support' if E500
-	depends on 44x || E500
+	bool 'Large physical address support' if E500 || PPC_86xx
+	depends on (44x || E500 || PPC_86xx) && !PPC_83xx && !PPC_82xx
 	select RESOURCES_64BIT
 	default y if 44x
 	---help---
 	  This option enables kernel support for larger than 32-bit physical
-	  addresses. This features is not be available on all e500 cores.
+	  addresses. This feature may not be available on all cores.

+	  If you have more than 3.5GB of RAM or so, you also need to enable
+	  SWIOTLB under Kernel Options for this to work. The actual number
+	  is platform-dependent.

 	  If in doubt, say N here.