[PATCH] x86: GDT alignment fix
Make GDT page aligned and page padded to support running inside of a
hypervisor. This prevents false sharing of the GDT page with other hot
data, which is not allowed in Xen, and causes performance problems in
VMware.

Rather than go back to the old method of statically allocating the GDT
(which wastes unneeded space for non-present CPUs), the GDT for APs is
allocated dynamically.

Signed-off-by: Zachary Amsden <zach@vmware.com>
Cc: "Seth, Rohit" <rohit.seth@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 7c4cb60e5b
parent 599a6e8ca4
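For orientation before the hunks: the boot CPU keeps the statically defined GDT from the ENTRY(cpu_gdt_table) hunk, now aligned and padded out to a full page, every other CPU gets a freshly zeroed page at bring-up in the do_boot_cpu() hunk, and get_cpu_gdt_table() resolves a CPU's GDT through cpu_gdt_descr[cpu].address. The sketch below only illustrates that allocation pattern; the types are simplified stand-ins, calloc() stands in for get_zeroed_page(), and the helpers register_boot_gdt() and alloc_ap_gdt() are invented for illustration, not kernel code.

/*
 * Illustrative sketch of the patch's GDT allocation scheme.  Simplified
 * userspace code; not the kernel's implementation.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define GDT_ENTRIES 32
#define PAGE_SIZE   4096
#define NR_CPUS     8

struct desc_struct { uint32_t a, b; };             /* one 8-byte descriptor */
struct Xgt_desc_struct { unsigned long address; }; /* simplified: address only */

/*
 * Boot CPU GDT: page aligned and padded out to a full page, so no other
 * hot data can ever share the page that holds the descriptors.
 */
static union {
	struct desc_struct entries[GDT_ENTRIES];
	char pad[PAGE_SIZE];
} boot_gdt __attribute__((aligned(PAGE_SIZE)));

static struct Xgt_desc_struct cpu_gdt_descr[NR_CPUS];

/* Boot CPU: its GDT is the static, page-sized object above. */
static void register_boot_gdt(void)
{
	cpu_gdt_descr[0].address = (unsigned long)boot_gdt.entries;
}

/* Mirrors the new get_cpu_gdt_table(): NULL until a CPU's GDT page exists. */
static struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
{
	return (struct desc_struct *)cpu_gdt_descr[cpu].address;
}

/*
 * Mirrors the do_boot_cpu() hunk: on first use, give an AP a zeroed block
 * for its GDT and seed it from the boot CPU's entries.  calloc() is only a
 * stand-in here; unlike the kernel's get_zeroed_page(), it does not
 * guarantee page alignment.
 */
static int alloc_ap_gdt(unsigned int cpu)
{
	if (!cpu_gdt_descr[cpu].address) {
		void *page = calloc(1, PAGE_SIZE);
		if (!page)
			return 1;	/* same failure convention as the patch */
		memcpy(page, boot_gdt.entries, sizeof(boot_gdt.entries));
		cpu_gdt_descr[cpu].address = (unsigned long)page;
	}
	return 0;
}

int main(void)
{
	register_boot_gdt();
	if (alloc_ap_gdt(1))
		return 1;
	/* CPU 1 now has a zeroed GDT of its own, found via cpu_gdt_descr. */
	return get_cpu_gdt_table(1) == NULL;
}

The union above plays the role of the .fill padding in the head.S-style hunk: the descriptors own a whole page, which is what Xen requires and what avoids the false sharing the changelog describes.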
@@ -2317,6 +2317,8 @@ static int __init apm_init(void)
 	for (i = 0; i < NR_CPUS; i++) {
 		struct desc_struct *gdt = get_cpu_gdt_table(i);
+		if (!gdt)
+			continue;
 		set_base(gdt[APM_CS >> 3],
			 __va((unsigned long)apm_info.bios.cseg << 4));
 		set_base(gdt[APM_CS_16 >> 3],
			 __va((unsigned long)apm_info.bios.cseg_16 << 4));
@@ -18,9 +18,6 @@
 #include "cpu.h"
 
-DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
-EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);
-
 DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
 EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
@@ -525,3 +525,5 @@ ENTRY(cpu_gdt_table)
 	.quad 0x0000000000000000	/* 0xf0 - unused */
 	.quad 0x0000000000000000	/* 0xf8 - GDT entry 31: double-fault TSS */
+
+	/* Be sure this is zeroed to avoid false validations in Xen */
+	.fill PAGE_SIZE_asm / 8 - GDT_ENTRIES,8,0
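Assuming the usual 4 KB i386 page (PAGE_SIZE_asm = 4096) and the 32-entry GDT implied by the 0xf8/entry-31 line above, that .fill emits 4096/8 - 32 = 480 zeroed 8-byte descriptors, so cpu_gdt_table occupies exactly one page of its own rather than sharing it with whatever the linker would otherwise place next.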
@@ -3,8 +3,7 @@
 #include <asm/checksum.h>
 #include <asm/desc.h>
 
-/* This is definitely a GPL-only symbol */
-EXPORT_SYMBOL_GPL(cpu_gdt_table);
+EXPORT_SYMBOL_GPL(cpu_gdt_descr);
 
 EXPORT_SYMBOL(__down_failed);
 EXPORT_SYMBOL(__down_failed_interruptible);
@@ -903,6 +903,12 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
 	unsigned long start_eip;
 	unsigned short nmi_high = 0, nmi_low = 0;
 
+	if (!cpu_gdt_descr[cpu].address &&
+	    !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
+		printk("Failed to allocate GDT for CPU %d\n", cpu);
+		return 1;
+	}
+
 	++cpucount;
 
 	/*
@@ -69,14 +69,16 @@ __asm__(
 
 #define Q_SET_SEL(cpu, selname, address, size) \
 do { \
-set_base(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], __va((u32)(address))); \
-set_limit(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], size); \
+struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \
+set_base(gdt[(selname) >> 3], __va((u32)(address))); \
+set_limit(gdt[(selname) >> 3], size); \
 } while(0)
 
 #define Q2_SET_SEL(cpu, selname, address, size) \
 do { \
-set_base(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], (u32)(address)); \
-set_limit(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], size); \
+struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \
+set_base(gdt[(selname) >> 3], (u32)(address)); \
+set_limit(gdt[(selname) >> 3], size); \
 } while(0)
 
 static struct desc_struct bad_bios_desc = { 0, 0x00409200 };
@@ -115,8 +117,8 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
 		return PNP_FUNCTION_NOT_SUPPORTED;
 
 	cpu = get_cpu();
-	save_desc_40 = per_cpu(cpu_gdt_table,cpu)[0x40 / 8];
-	per_cpu(cpu_gdt_table,cpu)[0x40 / 8] = bad_bios_desc;
+	save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
+	get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
 
 	/* On some boxes IRQ's during PnP BIOS calls are deadly. */
 	spin_lock_irqsave(&pnp_bios_lock, flags);
@@ -158,7 +160,7 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
 	);
 	spin_unlock_irqrestore(&pnp_bios_lock, flags);
 
-	per_cpu(cpu_gdt_table,cpu)[0x40 / 8] = save_desc_40;
+	get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
 	put_cpu();
 
 	/* If we get here and this is set then the PnP BIOS faulted on us. */
@@ -535,8 +537,10 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
 	set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
 	_set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
-	for(i=0; i < NR_CPUS; i++)
-	{
+	for (i = 0; i < NR_CPUS; i++) {
+		struct desc_struct *gdt = get_cpu_gdt_table(i);
+		if (!gdt)
+			continue;
 		Q2_SET_SEL(i, PNP_CS32, &pnp_bios_callfunc, 64 * 1024);
 		Q_SET_SEL(i, PNP_CS16, header->fields.pm16cseg, 64 * 1024);
 		Q_SET_SEL(i, PNP_DS, header->fields.pm16dseg, 64 * 1024);
@@ -15,9 +15,6 @@
 #include <asm/mmu.h>
 
 extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
-DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
-
-#define get_cpu_gdt_table(_cpu) (per_cpu(cpu_gdt_table,_cpu))
 
 DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
@@ -29,6 +26,11 @@ struct Xgt_desc_struct {
 
 extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
 
+static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+{
+	return ((struct desc_struct *)cpu_gdt_descr[cpu].address);
+}
+
 #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
 #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))