mirror of https://github.com/torvalds/linux.git
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Ingo Molnar:
 "A handful of changes:

   - two memory encryption related fixes

   - don't display the kernel's virtual memory layout plaintext on
     32-bit kernels either

   - two simplifications"

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Remove the now redundant N_MEMORY check
  dma-mapping: Fix dma_pgprot() for unencrypted coherent pages
  x86: Don't let pgprot_modify() change the page encryption bit
  x86/mm/kmmio: Use this_cpu_ptr() instead get_cpu_var() for kmmio_ctx
  x86/mm/init/32: Stop printing the virtual memory layout
commit d9d7677892
@@ -621,12 +621,15 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
         return __pmd(val);
 }
 
-/* mprotect needs to preserve PAT bits when updating vm_page_prot */
+/*
+ * mprotect needs to preserve PAT and encryption bits when updating
+ * vm_page_prot
+ */
 #define pgprot_modify pgprot_modify
 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 {
         pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
-        pgprotval_t addbits = pgprot_val(newprot);
+        pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
         return __pgprot(preservebits | addbits);
 }
 
@@ -118,7 +118,7 @@
  */
 #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
                         _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
-                        _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+                        _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC)
 #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
 
 /*
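Taken together, the two hunks above mean an mprotect() update can no longer flip the page encryption bit: _PAGE_ENC is now part of _PAGE_CHG_MASK, so pgprot_modify() copies it from oldprot and strips it from newprot. The following is a minimal user-space sketch of that preserve/add masking; the FAKE_* values are made up for illustration and are not the real page-table flags.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real _PAGE_* flags live in
 * arch/x86/include/asm/pgtable_types.h and differ from these. */
#define FAKE_PFN_MASK  0xfffff000u
#define FAKE_PAGE_RW   0x00000002u
#define FAKE_PAGE_ENC  0x00000800u                      /* pretend encryption bit   */
#define FAKE_CHG_MASK  (FAKE_PFN_MASK | FAKE_PAGE_ENC)  /* bits pgprot_modify keeps */

/* Same shape as the fixed pgprot_modify(): preserve the CHG bits from the
 * old protection, take everything else from the new one. */
static uint32_t fake_pgprot_modify(uint32_t oldprot, uint32_t newprot)
{
        uint32_t preservebits = oldprot & FAKE_CHG_MASK;
        uint32_t addbits = newprot & ~FAKE_CHG_MASK;

        return preservebits | addbits;
}

int main(void)
{
        uint32_t oldprot = FAKE_PAGE_ENC | FAKE_PAGE_RW; /* encrypted + writable      */
        uint32_t newprot = FAKE_PAGE_RW;                 /* caller never mentions ENC */
        uint32_t result = fake_pgprot_modify(oldprot, newprot);

        /* ENC survives: it is preserved from oldprot and masked out of newprot. */
        printf("result=%#x enc=%s\n", result,
               (result & FAKE_PAGE_ENC) ? "set" : "clear");
        return 0;
}

Before the fix, _PAGE_ENC was not in _PAGE_CHG_MASK and addbits was taken from newprot unmasked, so a vm_page_prot recomputed through pgprot_modify() could silently lose the encryption bit.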
@@ -788,44 +788,6 @@ void __init mem_init(void)
         x86_init.hyper.init_after_bootmem();
 
         mem_init_print_info(NULL);
-        printk(KERN_INFO "virtual kernel memory layout:\n"
-                " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
-                " cpu_entry : 0x%08lx - 0x%08lx (%4ld kB)\n"
-#ifdef CONFIG_HIGHMEM
-                " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
-#endif
-                " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
-                " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
-                " .init : 0x%08lx - 0x%08lx (%4ld kB)\n"
-                " .data : 0x%08lx - 0x%08lx (%4ld kB)\n"
-                " .text : 0x%08lx - 0x%08lx (%4ld kB)\n",
-                FIXADDR_START, FIXADDR_TOP,
-                (FIXADDR_TOP - FIXADDR_START) >> 10,
-
-                CPU_ENTRY_AREA_BASE,
-                CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE,
-                CPU_ENTRY_AREA_MAP_SIZE >> 10,
-
-#ifdef CONFIG_HIGHMEM
-                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
-                (LAST_PKMAP*PAGE_SIZE) >> 10,
-#endif
-
-                VMALLOC_START, VMALLOC_END,
-                (VMALLOC_END - VMALLOC_START) >> 20,
-
-                (unsigned long)__va(0), (unsigned long)high_memory,
-                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
-
-                (unsigned long)&__init_begin, (unsigned long)&__init_end,
-                ((unsigned long)&__init_end -
-                 (unsigned long)&__init_begin) >> 10,
-
-                (unsigned long)&_etext, (unsigned long)&_edata,
-                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
-
-                (unsigned long)&_text, (unsigned long)&_etext,
-                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
 
         /*
          * Check boundaries twice: Some fundamental inconsistencies can
@@ -818,8 +818,7 @@ void __init paging_init(void)
          * will not set it back.
          */
         node_clear_state(0, N_MEMORY);
-        if (N_MEMORY != N_NORMAL_MEMORY)
-                node_clear_state(0, N_NORMAL_MEMORY);
+        node_clear_state(0, N_NORMAL_MEMORY);
 
         zone_sizes_init();
 }
@@ -260,7 +260,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
                 goto no_kmmio;
         }
 
-        ctx = &get_cpu_var(kmmio_ctx);
+        ctx = this_cpu_ptr(&kmmio_ctx);
         if (ctx->active) {
                 if (page_base == ctx->addr) {
                         /*
@@ -285,7 +285,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
                         pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
                         disarm_kmmio_fault_page(faultpage);
                 }
-                goto no_kmmio_ctx;
+                goto no_kmmio;
         }
         ctx->active++;
 
@@ -314,11 +314,8 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
          * the user should drop to single cpu before tracing.
          */
 
-        put_cpu_var(kmmio_ctx);
         return 1; /* fault handled */
 
-no_kmmio_ctx:
-        put_cpu_var(kmmio_ctx);
 no_kmmio:
         rcu_read_unlock();
         preempt_enable_no_resched();
@@ -333,7 +330,7 @@ no_kmmio:
 static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 {
         int ret = 0;
-        struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
+        struct kmmio_context *ctx = this_cpu_ptr(&kmmio_ctx);
 
         if (!ctx->active) {
                 /*
@@ -371,7 +368,6 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
         if (!(regs->flags & X86_EFLAGS_TF))
                 ret = 1;
 out:
-        put_cpu_var(kmmio_ctx);
         return ret;
 }
 
@@ -154,6 +154,8 @@ EXPORT_SYMBOL(dma_get_sgtable_attrs);
  */
 pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
 {
+        if (force_dma_unencrypted(dev))
+                prot = pgprot_decrypted(prot);
         if (dev_is_dma_coherent(dev) ||
             (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
              (attrs & DMA_ATTR_NON_CONSISTENT)))
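The rest of dma_pgprot() is cut off above, so the following is only a sketch of why the two added lines belong at the top: if the function returns early for coherent devices, the decryption of the protection has to happen before that early return, otherwise unencrypted coherent pages would be mapped with the encryption bit still set. The demo_/FAKE_ names below are stand-ins, not the real dma-mapping helpers.

#include <stdbool.h>
#include <stdio.h>

#define FAKE_PROT_ENC 0x1ul             /* pretend encryption bit in the pgprot */

static bool demo_force_unencrypted(void) { return true; }  /* force_dma_unencrypted() stand-in */
static bool demo_dev_is_coherent(void)   { return true; }  /* dev_is_dma_coherent() stand-in   */

/* Fixed ordering: strip the encryption bit first, so any early return for
 * coherent devices already hands back the decrypted protection. */
static unsigned long demo_dma_pgprot(unsigned long prot)
{
        if (demo_force_unencrypted())
                prot &= ~FAKE_PROT_ENC;         /* pgprot_decrypted() stand-in */
        if (demo_dev_is_coherent())
                return prot;                    /* early return now sees the fixed prot */
        /* ... non-coherent cases would adjust prot further here ... */
        return prot;
}

int main(void)
{
        printf("prot=%#lx\n", demo_dma_pgprot(FAKE_PROT_ENC));  /* prints prot=0 */
        return 0;
}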