x86/asm: Get rid of __read_cr4_safe()
We use __read_cr4() vs __read_cr4_safe() inconsistently.  On
CR4-less CPUs, all CR4 bits are effectively clear, so we can make
the code simpler and more robust by making __read_cr4() always fix
up faults on 32-bit kernels.

This may fix some bugs on old 486-like CPUs, but I don't have any
easy way to test that.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: david@saggiorato.net
Link: http://lkml.kernel.org/r/ea647033d357d9ce2ad2bbde5a631045f5052fb6.1475178370.git.luto@kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 1ef55be16e
parent d7e25c66c9
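The net effect on the accessor API, summarized here as a comment block (the authoritative changes are the diff hunks that follow):

/* Before this commit, x86 had two CR4 read accessors and each caller
 * had to pick one:
 *
 *   unsigned long __read_cr4(void);       // could fault on CR4-less CPUs
 *   unsigned long __read_cr4_safe(void);  // returned 0 instead of faulting
 *
 * Afterwards there is a single accessor, with the safe behavior folded
 * into the 32-bit implementation:
 *
 *   unsigned long __read_cr4(void);       // reads as 0 if CR4 doesn't exist
 */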
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -80,10 +80,6 @@ static inline unsigned long __read_cr4(void)
 {
 	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
 }
-static inline unsigned long __read_cr4_safe(void)
-{
-	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
-}
 
 static inline void __write_cr4(unsigned long x)
 {
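PVOP_CALL0() routes the read through the pv_cpu_ops function-pointer table so that paravirtualized guests such as Xen can substitute their own implementation, which is why the read_cr4_safe member and its initializers also disappear in the hunks below. As a rough mental model only (the real macro emits patchable call sites rather than a plain indirect call, and none of the names here are kernel symbols), the dispatch behaves like this standalone sketch:

#include <stdio.h>

/* Illustrative stand-in for the kernel's pv_cpu_ops table. */
struct cpu_ops_model {
	unsigned long (*read_cr4)(void);
};

static unsigned long model_native_read_cr4(void)
{
	return 0x000006f0;	/* pretend CR4 has some bits set */
}

static struct cpu_ops_model ops_model = {
	.read_cr4 = model_native_read_cr4,	/* a guest could override this */
};

/* Analogue of __read_cr4(): dispatch through the ops table. */
static unsigned long model_read_cr4(void)
{
	return ops_model.read_cr4();
}

int main(void)
{
	printf("cr4 (model) = %#lx\n", model_read_cr4());
	return 0;
}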
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -108,7 +108,6 @@ struct pv_cpu_ops {
 	unsigned long (*read_cr0)(void);
 	void (*write_cr0)(unsigned long);
 
-	unsigned long (*read_cr4_safe)(void);
 	unsigned long (*read_cr4)(void);
 	void (*write_cr4)(unsigned long);
 
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -59,22 +59,19 @@ static inline void native_write_cr3(unsigned long val)
 static inline unsigned long native_read_cr4(void)
 {
 	unsigned long val;
-	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
-	return val;
-}
-
-static inline unsigned long native_read_cr4_safe(void)
-{
-	unsigned long val;
-	/* This could fault if %cr4 does not exist. In x86_64, a cr4 always
-	 * exists, so it will never fail. */
 #ifdef CONFIG_X86_32
+	/*
+	 * This could fault if CR4 does not exist.  Non-existent CR4
+	 * is functionally equivalent to CR4 == 0.  Keep it simple and pretend
+	 * that CR4 == 0 on CPUs that don't have CR4.
+	 */
 	asm volatile("1: mov %%cr4, %0\n"
 		     "2:\n"
 		     _ASM_EXTABLE(1b, 2b)
 		     : "=r" (val), "=m" (__force_order) : "0" (0));
 #else
-	val = native_read_cr4();
+	/* CR4 always exists on x86_64. */
+	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
 #endif
 	return val;
 }
@@ -182,11 +179,6 @@ static inline unsigned long __read_cr4(void)
 	return native_read_cr4();
 }
 
-static inline unsigned long __read_cr4_safe(void)
-{
-	return native_read_cr4_safe();
-}
-
 static inline void __write_cr4(unsigned long x)
 {
 	native_write_cr4(x);
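Two details in the 32-bit native_read_cr4() above make a missing CR4 read back as zero: the _ASM_EXTABLE(1b, 2b) entry tells the kernel's fault handler to resume at label 2: when the mov at label 1: faults, and the "0" (0) input operand pre-loads the output register with zero, so a skipped mov leaves val == 0. The sketch below is a loose user-space analogue of that probe-and-recover idea, illustrative only (the kernel uses exception tables, not signals); it probes CPUID, which is likewise absent on the same 486-era parts that lack CR4:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf probe_env;

/* The "fixup": resume execution past the faulting instruction. */
static void on_sigill(int sig)
{
	(void)sig;
	siglongjmp(probe_env, 1);
}

static int cpuid_present(void)
{
	struct sigaction sa = { 0 };
	unsigned int eax = 0;

	sa.sa_handler = on_sigill;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGILL, &sa, NULL);

	if (sigsetjmp(probe_env, 1))
		return 0;	/* CPUID faulted: treat it as absent */

	asm volatile("cpuid"
		     : "=a" (eax)
		     : "a" (0)
		     : "ebx", "ecx", "edx");
	return 1;
}

int main(void)
{
	printf("cpuid %s\n", cpuid_present() ? "present" : "absent");
	return 0;
}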
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -81,7 +81,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {
-	this_cpu_write(cpu_tlbstate.cr4, __read_cr4_safe());
+	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
 }
 
 /* Set in this cpu's CR4. */
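cr4_init_shadow() seeds a per-CPU software copy of CR4 so later bit manipulations need not read the hardware register again; with __read_cr4() now fault-tolerant on 32-bit, the shadow simply starts at 0 on CR4-less CPUs. The helpers that follow in tlbflush.h keep the shadow and the register in sync, roughly as in this simplified paraphrase of the surrounding file (not verbatim kernel code):

/* Update the per-CPU shadow first, and only perform the expensive
 * hardware write when the requested bits actually change.
 */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4 = this_cpu_read(cpu_tlbstate.cr4);

	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}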
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -332,7 +332,6 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
 	.read_cr0 = native_read_cr0,
 	.write_cr0 = native_write_cr0,
 	.read_cr4 = native_read_cr4,
-	.read_cr4_safe = native_read_cr4_safe,
 	.write_cr4 = native_write_cr4,
 #ifdef CONFIG_X86_64
 	.read_cr8 = native_read_cr8,
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -90,7 +90,7 @@ void __show_regs(struct pt_regs *regs, int all)
 	cr0 = read_cr0();
 	cr2 = read_cr2();
 	cr3 = read_cr3();
-	cr4 = __read_cr4_safe();
+	cr4 = __read_cr4();
 	printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
 		cr0, cr2, cr3, cr4);
 
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1137,7 +1137,7 @@ void __init setup_arch(char **cmdline_p)
 	 * auditing all the early-boot CR4 manipulation would be needed to
 	 * rule it out.
 	 */
-	mmu_cr4_features = __read_cr4_safe();
+	mmu_cr4_features = __read_cr4();
 
 	memblock_set_current_limit(get_max_mapped());
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -130,7 +130,7 @@ static void __save_processor_state(struct saved_context *ctxt)
 	ctxt->cr0 = read_cr0();
 	ctxt->cr2 = read_cr2();
 	ctxt->cr3 = read_cr3();
-	ctxt->cr4 = __read_cr4_safe();
+	ctxt->cr4 = __read_cr4();
 #ifdef CONFIG_X86_64
 	ctxt->cr8 = read_cr8();
 #endif
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1237,7 +1237,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 	.write_cr0 = xen_write_cr0,
 
 	.read_cr4 = native_read_cr4,
-	.read_cr4_safe = native_read_cr4_safe,
 	.write_cr4 = xen_write_cr4,
 
 #ifdef CONFIG_X86_64