Mirror of https://github.com/torvalds/linux.git (synced 2024-11-10 22:21:40 +00:00).

commit 56f3c1413f (parent aa91796ec4)

powerpc/mm: properly set PAGE_KERNEL flags in ioremap()

Set PAGE_KERNEL directly in the caller and do not rely on a hack adding
PAGE_KERNEL flags when _PAGE_PRESENT is not set. As already done for PPC64,
use pgprot_cache() helpers instead of _PAGE_XXX flags in PPC32 ioremap()
derived functions.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
@@ -197,6 +197,8 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addre
+#if _PAGE_WRITETHRU != 0
 #define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
 				            _PAGE_COHERENT | _PAGE_WRITETHRU))
+#else
+#define pgprot_cached_wthru(prot) pgprot_noncached(prot)
+#endif
 
 #define pgprot_cached_noncoherent(prot) \
|
@@ -110,14 +110,14 @@ static void pci_process_ISA_OF_ranges(struct device_node *isa_node,
 		size = 0x10000;
 
 	__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
-		     size, pgprot_val(pgprot_noncached(__pgprot(0))));
+		     size, pgprot_val(pgprot_noncached(PAGE_KERNEL)));
 	return;
 
 inval_range:
 	printk(KERN_ERR "no ISA IO ranges or unexpected isa range, "
 	       "mapping 64k\n");
 	__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
-		     0x10000, pgprot_val(pgprot_noncached(__pgprot(0))));
+		     0x10000, pgprot_val(pgprot_noncached(PAGE_KERNEL)));
 }
@@ -253,7 +253,7 @@ void __init isa_bridge_init_non_pci(struct device_node *np)
 	 */
 	isa_io_base = ISA_IO_BASE;
 	__ioremap_at(pbase, (void *)ISA_IO_BASE,
-		     size, pgprot_val(pgprot_noncached(__pgprot(0))));
+		     size, pgprot_val(pgprot_noncached(PAGE_KERNEL)));
 
 	pr_debug("ISA: Non-PCI bridge is %pOF\n", np);
 }
@@ -159,7 +159,7 @@ static int pcibios_map_phb_io_space(struct pci_controller *hose)
 
 	/* Establish the mapping */
 	if (__ioremap_at(phys_page, area->addr, size_page,
-			 pgprot_val(pgprot_noncached(__pgprot(0)))) == NULL)
+			 pgprot_val(pgprot_noncached(PAGE_KERNEL))) == NULL)
 		return -ENOMEM;
 
 	/* Fixup hose IO resource */
@@ -76,32 +76,36 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 void __iomem *
 ioremap(phys_addr_t addr, unsigned long size)
 {
-	return __ioremap_caller(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED,
-				__builtin_return_address(0));
+	unsigned long flags = pgprot_val(pgprot_noncached(PAGE_KERNEL));
+
+	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap);
 
 void __iomem *
 ioremap_wc(phys_addr_t addr, unsigned long size)
 {
-	return __ioremap_caller(addr, size, _PAGE_NO_CACHE,
-				__builtin_return_address(0));
+	unsigned long flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL));
+
+	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_wc);
 
 void __iomem *
 ioremap_wt(phys_addr_t addr, unsigned long size)
 {
-	return __ioremap_caller(addr, size, _PAGE_WRITETHRU,
-				__builtin_return_address(0));
+	unsigned long flags = pgprot_val(pgprot_cached_wthru(PAGE_KERNEL));
+
+	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_wt);
 
 void __iomem *
 ioremap_coherent(phys_addr_t addr, unsigned long size)
 {
-	return __ioremap_caller(addr, size, _PAGE_COHERENT,
-				__builtin_return_address(0));
+	unsigned long flags = pgprot_val(pgprot_cached(PAGE_KERNEL));
+
+	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_coherent);
@@ -134,14 +138,6 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 	phys_addr_t p;
 	int err;
 
-	/* Make sure we have the base flags */
-	if ((flags & _PAGE_PRESENT) == 0)
-		flags |= pgprot_val(PAGE_KERNEL);
-
-	/* Non-cacheable page cannot be coherent */
-	if (flags & _PAGE_NO_CACHE)
-		flags &= ~_PAGE_COHERENT;
-
 	/*
 	 * Choose an address to map it to.
 	 * Once the vmalloc system is running, we use it.
@@ -118,10 +118,6 @@ void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
 {
 	unsigned long i;
 
-	/* Make sure we have the base flags */
-	if ((flags & _PAGE_PRESENT) == 0)
-		flags |= pgprot_val(PAGE_KERNEL);
-
 	/* We don't support the 4K PFN hack with ioremap */
 	if (flags & H_PAGE_4K_PFN)
 		return NULL;
@@ -204,7 +200,7 @@ void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
 
 void __iomem * ioremap(phys_addr_t addr, unsigned long size)
 {
-	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
+	unsigned long flags = pgprot_val(pgprot_noncached(PAGE_KERNEL));
 	void *caller = __builtin_return_address(0);
 
 	if (ppc_md.ioremap)
@@ -214,7 +210,7 @@ void __iomem * ioremap(phys_addr_t addr, unsigned long size)
 
 void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
 {
-	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
+	unsigned long flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL));
 	void *caller = __builtin_return_address(0);
 
 	if (ppc_md.ioremap)
@@ -224,7 +220,7 @@ void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
 
 void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
 {
-	unsigned long flags = pgprot_val(pgprot_cached(__pgprot(0)));
+	unsigned long flags = pgprot_val(pgprot_cached(PAGE_KERNEL));
 	void *caller = __builtin_return_address(0);
 
 	if (ppc_md.ioremap)
@@ -113,7 +113,6 @@ static void __init ocm_init_node(int count, struct device_node *node)
 	int len;
 
 	struct resource rsrc;
-	int ioflags;
 
 	ocm = ocm_get_node(count);
 
@@ -179,9 +178,8 @@ static void __init ocm_init_node(int count, struct device_node *node)
 
 	/* ioremap the non-cached region */
 	if (ocm->nc.memtotal) {
-		ioflags = _PAGE_NO_CACHE | _PAGE_GUARDED | _PAGE_EXEC;
 		ocm->nc.virt = __ioremap(ocm->nc.phys, ocm->nc.memtotal,
-					 ioflags);
+					 _PAGE_EXEC | PAGE_KERNEL_NCG);
 
 		if (!ocm->nc.virt) {
 			printk(KERN_ERR
@@ -195,9 +193,8 @@ static void __init ocm_init_node(int count, struct device_node *node)
 
 	/* ioremap the cached region */
 	if (ocm->c.memtotal) {
-		ioflags = _PAGE_EXEC;
 		ocm->c.virt = __ioremap(ocm->c.phys, ocm->c.memtotal,
-					ioflags);
+					_PAGE_EXEC | PAGE_KERNEL);
 
 		if (!ocm->c.virt) {
 			printk(KERN_ERR
@@ -230,7 +230,7 @@ static int electra_cf_probe(struct platform_device *ofdev)
 
 	if (!cf->mem_base || !cf->io_virt || !cf->gpio_base ||
 	    (__ioremap_at(io.start, cf->io_virt, cf->io_size,
-			  pgprot_val(pgprot_noncached(__pgprot(0)))) == NULL)) {
+			  pgprot_val(pgprot_noncached(PAGE_KERNEL))) == NULL)) {
 		dev_err(device, "can't ioremap ranges\n");
 		status = -ENOMEM;
 		goto fail1;
Loading…
Reference in New Issue
Block a user