Merge tag 'x86_cleanups_for_v5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull misc x86 cleanups from Borislav Petkov:
 "Trivial cleanups and fixes all over the place"

* tag 'x86_cleanups_for_v5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  MAINTAINERS: Remove me from IDE/ATAPI section
  x86/pat: Do not compile stubbed functions when X86_PAT is off
  x86/asm: Ensure asm/proto.h can be included stand-alone
  x86/platform/intel/quark: Fix incorrect kernel-doc comment syntax in files
  x86/msr: Make locally used functions static
  x86/cacheinfo: Remove unneeded dead-store initialization
  x86/process/64: Move cpu_current_top_of_stack out of TSS
  tools/turbostat: Unmark non-kernel-doc comment
  x86/syscalls: Fix -Wmissing-prototypes warnings from COND_SYSCALL()
  x86/fpu/math-emu: Fix function cast warning
  x86/msr: Fix wr/rdmsr_safe_regs_on_cpu() prototypes
  x86: Fix various typos in comments, take #2
  x86: Remove unusual Unicode characters from comments
  x86/kaslr: Return boolean values from a function returning bool
  x86: Fix various typos in comments
  x86/setup: Remove unused RESERVE_BRK_ARRAY()
  stacktrace: Move documentation for arch_stack_walk_reliable() to header
  x86: Remove duplicate TSC DEADLINE MSR definitions
@@ -8612,9 +8612,8 @@ F: drivers/ide/
 F: include/linux/ide.h
 
 IDE/ATAPI DRIVERS
-M: Borislav Petkov <bp@alien8.de>
 L: linux-ide@vger.kernel.org
-S: Maintained
+S: Orphan
 F: Documentation/cdrom/ide-cd.rst
 F: drivers/ide/ide-cd*
 
@@ -24,12 +24,6 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
 }
 }
 
-/*
-* This function returns an error if it detects any unreliable features of the
-* stack. Otherwise it guarantees that the stack trace is reliable.
-*
-* If the task is not 'current', the caller *must* ensure the task is inactive.
-*/
 int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
 void *cookie, struct task_struct *task)
 {
@@ -5,7 +5,7 @@
 * Early support for invoking 32-bit EFI services from a 64-bit kernel.
 *
 * Because this thunking occurs before ExitBootServices() we have to
-* restore the firmware's 32-bit GDT before we make EFI serivce calls,
+* restore the firmware's 32-bit GDT before we make EFI service calls,
 * since the firmware's 32-bit IDT is still currently installed and it
 * needs to be able to service interrupts.
 *
@@ -252,7 +252,7 @@ SYM_FUNC_START(startup_32)
 /*
 * Setup for the jump to 64bit mode
 *
-* When the jump is performend we will be in long mode but
+* When the jump is performed we will be in long mode but
 * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
 * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
@@ -639,9 +639,9 @@ static bool process_mem_region(struct mem_vector *region,
 
 if (slot_area_index == MAX_SLOT_AREA) {
 debug_putstr("Aborted e820/efi memmap scan (slot_areas full)!\n");
-return 1;
+return true;
 }
-return 0;
+return false;
 }
 
 #if defined(CONFIG_MEMORY_HOTREMOVE) && defined(CONFIG_ACPI)
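The hunk above is the whole of the "x86/kaslr: Return boolean values from a function returning bool" cleanup: process_mem_region() is declared bool, so its returns switch from 1/0 to true/false with no change in behaviour. A minimal sketch of the same pattern, using a made-up helper rather than the kernel function:

#include <stdbool.h>

/* A function declared bool should return true/false so the body matches
 * its prototype; the generated code is the same either way. */
static bool slots_exhausted(unsigned int used, unsigned int max)
{
	if (used == max)
		return true;	/* previously spelled "return 1" */
	return false;		/* previously spelled "return 0" */
}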
@@ -24,7 +24,7 @@
 /*
 * Copyright 2012 Xyratex Technology Limited
 *
-* Wrappers for kernel crypto shash api to pclmulqdq crc32 imlementation.
+* Wrappers for kernel crypto shash api to pclmulqdq crc32 implementation.
 */
 #include <linux/init.h>
 #include <linux/module.h>
@@ -114,11 +114,11 @@ static inline void fadd(u64 *out, const u64 *f1, const u64 *f2)
 );
 }
 
-/* Computes the field substraction of two field elements */
+/* Computes the field subtraction of two field elements */
 static inline void fsub(u64 *out, const u64 *f1, const u64 *f2)
 {
 asm volatile(
-/* Compute the raw substraction of f1-f2 */
+/* Compute the raw subtraction of f1-f2 */
 " movq 0(%1), %%r8;"
 " subq 0(%2), %%r8;"
 " movq 8(%1), %%r9;"
@@ -135,7 +135,7 @@ static inline void fsub(u64 *out, const u64 *f1, const u64 *f2)
 " mov $38, %%rcx;"
 " cmovc %%rcx, %%rax;"
 
-/* Step 2: Substract carry*38 from the original difference */
+/* Step 2: Subtract carry*38 from the original difference */
 " sub %%rax, %%r8;"
 " sbb $0, %%r9;"
 " sbb $0, %%r10;"
@@ -88,7 +88,7 @@
 
 /*
 * Combined G1 & G2 function. Reordered with help of rotates to have moves
-* at begining.
+* at beginning.
 */
 #define g1g2_3(ab, cd, Tx0, Tx1, Tx2, Tx3, Ty0, Ty1, Ty2, Ty3, x, y) \
 /* G1,1 && G2,1 */ \
@@ -117,7 +117,7 @@ static bool is_blacklisted_cpu(void)
 * storing blocks in 64bit registers to allow three blocks to
 * be processed parallel. Parallel operation then allows gaining
 * more performance than was trade off, on out-of-order CPUs.
-* However Atom does not benefit from this parallellism and
+* However Atom does not benefit from this parallelism and
 * should be blacklisted.
 */
 return true;
@@ -209,7 +209,7 @@
 *
 * Lets build a 5 entry IRET frame after that, such that struct pt_regs
 * is complete and in particular regs->sp is correct. This gives us
-* the original 6 enties as gap:
+* the original 6 entries as gap:
 *
 * 14*4(%esp) - <previous context>
 * 13*4(%esp) - gap / flags
@@ -511,7 +511,7 @@ SYM_CODE_START(\asmsym)
 /*
 * No need to switch back to the IST stack. The current stack is either
 * identical to the stack in the IRET frame or the VC fall-back stack,
-* so it is definitly mapped even with PTI enabled.
+* so it is definitely mapped even with PTI enabled.
 */
 jmp paranoid_exit
 
@@ -218,7 +218,7 @@ int main(int argc, char **argv)
 
 /*
 * Figure out the struct name. If we're writing to a .so file,
-* generate raw output insted.
+* generate raw output instead.
 */
 name = strdup(argv[3]);
 namelen = strlen(name);
@@ -29,7 +29,7 @@ __kernel_vsyscall:
 * anyone with an AMD CPU, for example). Nonetheless, we try to keep
 * it working approximately as well as it ever worked.
 *
-* This link may eludicate some of the history:
+* This link may elucidate some of the history:
 * https://android-review.googlesource.com/#/q/Iac3295376d61ef83e713ac9b528f3b50aa780cd7
 * personally, I find it hard to understand what's going on there.
 *
@@ -358,7 +358,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 mmap_write_lock(mm);
 /*
 * Check if we have already mapped vdso blob - fail to prevent
-* abusing from userspace install_speciall_mapping, which may
+* abusing from userspace install_special_mapping, which may
 * not do accounting and rlimit right.
 * We could search vma near context.vdso, but it's a slowpath,
 * so let's explicitly check all VMAs to be completely sure.
@@ -137,7 +137,7 @@ SYM_FUNC_START(__vdso_sgx_enter_enclave)
 
 /*
 * If the return from callback is zero or negative, return immediately,
-* else re-execute ENCLU with the postive return value interpreted as
+* else re-execute ENCLU with the positive return value interpreted as
 * the requested ENCLU function.
 */
 cmp $0, %eax
@@ -623,7 +623,7 @@ static void amd_pmu_disable_all(void)
 /*
 * Check each counter for overflow and wait for it to be reset by the
 * NMI if it has overflowed. This relies on the fact that all active
-* counters are always enabled when this function is caled and
+* counters are always enabled when this function is called and
 * ARCH_PERFMON_EVENTSEL_INT is always set.
 */
 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -17,7 +17,7 @@
 #define IOMMU_PC_DEVID_MATCH_REG 0x20
 #define IOMMU_PC_COUNTER_REPORT_REG 0x28
 
-/* maximun specified bank/counters */
+/* maximum specified bank/counters */
 #define PC_MAX_SPEC_BNKS 64
 #define PC_MAX_SPEC_CNTRS 16
 
@@ -765,7 +765,7 @@ struct perf_sched {
 };
 
 /*
-* Initialize interator that runs through all events and counters.
+* Initialize iterator that runs through all events and counters.
 */
 static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
 int num, int wmin, int wmax, int gpmax)
@@ -594,7 +594,7 @@ static __init int bts_init(void)
 * we cannot use the user mapping since it will not be available
 * if we're not running the owning process.
 *
-* With PTI we can't use the kernal map either, because its not
+* With PTI we can't use the kernel map either, because its not
 * there when we run userspace.
 *
 * For now, disable this driver when using PTI.
@@ -137,7 +137,7 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
-INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMTPY */
+INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
@@ -2186,7 +2186,7 @@ static void intel_pmu_enable_all(int added)
 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 * in sequence on the same PMC or on different PMCs.
 *
-* In practise it appears some of these events do in fact count, and
+* In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
 static void intel_pmu_nhm_workaround(void)
@@ -2435,7 +2435,7 @@ static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
 
 /*
 * The metric is reported as an 8bit integer fraction
-* suming up to 0xff.
+* summing up to 0xff.
 * slots-in-metric = (Metric / 0xff) * slots
 */
 val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
@@ -2776,7 +2776,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 * processing loop coming after that the function, otherwise
 * phony regular samples may be generated in the sampling buffer
 * not marked with the EXACT tag. Another possibility is to have
-* one PEBS event and at least one non-PEBS event whic hoverflows
+* one PEBS event and at least one non-PEBS event which overflows
 * while PEBS has armed. In this case, bit 62 of GLOBAL_STATUS will
 * not be set, yet the overflow status bit for the PEBS counter will
 * be on Skylake.
@@ -2824,7 +2824,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 }
 
 /*
-* Intel Perf mertrics
+* Intel Perf metrics
 */
 if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
 handled++;
@@ -4594,7 +4594,7 @@ static bool check_msr(unsigned long msr, u64 mask)
 
 /*
 * Disable the check for real HW, so we don't
-* mess with potentionaly enabled registers:
+* mess with potentially enabled registers:
 */
 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
 return true;
@@ -4659,7 +4659,7 @@ static __init void intel_arch_events_quirk(void)
 {
 int bit;
 
-/* disable event that reported as not presend by cpuid */
+/* disable event that reported as not present by cpuid */
 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
 pr_warn("CPUID marked event: \'%s\' unavailable\n",
@@ -1805,7 +1805,7 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
 *
 * [-period, 0]
 *
-* the difference between two consequtive reads is:
+* the difference between two consecutive reads is:
 *
 * A) value2 - value1;
 * when no overflows have happened in between,
@@ -1198,7 +1198,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
 /*
 * The LBR logs any address in the IP, even if the IP just
 * faulted. This means userspace can control the from address.
-* Ensure we don't blindy read any address by validating it is
+* Ensure we don't blindly read any address by validating it is
 * a known text address.
 */
 if (kernel_text_address(from)) {
@@ -24,7 +24,7 @@ struct p4_event_bind {
 unsigned int escr_msr[2]; /* ESCR MSR for this event */
 unsigned int escr_emask; /* valid ESCR EventMask bits */
 unsigned int shared; /* event is shared across threads */
-char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on abscence */
+char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on absence */
 };
 
 struct p4_pebs_bind {
@@ -45,7 +45,7 @@ struct p4_pebs_bind {
 * it's needed for mapping P4_PEBS_CONFIG_METRIC_MASK bits of
 * event configuration to find out which values are to be
 * written into MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT
-* resgisters
+* registers
 */
 static struct p4_pebs_bind p4_pebs_bind_map[] = {
 P4_GEN_PEBS_BIND(1stl_cache_load_miss_retired, 0x0000001, 0x0000001),
@@ -1313,7 +1313,7 @@ static __initconst const struct x86_pmu p4_pmu = {
 .get_event_constraints = x86_get_event_constraints,
 /*
 * IF HT disabled we may need to use all
-* ARCH_P4_MAX_CCCR counters simulaneously
+* ARCH_P4_MAX_CCCR counters simultaneously
 * though leave it restricted at moment assuming
 * HT is on
 */
@@ -362,7 +362,7 @@ static bool pt_event_valid(struct perf_event *event)
 
 /*
 * Setting bit 0 (TraceEn in RTIT_CTL MSR) in the attr.config
-* clears the assomption that BranchEn must always be enabled,
+* clears the assumption that BranchEn must always be enabled,
 * as was the case with the first implementation of PT.
 * If this bit is not set, the legacy behavior is preserved
 * for compatibility with the older userspace.
@@ -280,17 +280,17 @@
 * | [63] | 00h | VALID - When set, indicates the CPU bus
 * numbers have been initialized. (RO)
 * |[62:48]| --- | Reserved
-* |[47:40]| 00h | BUS_NUM_5 — Return the bus number BIOS assigned
+* |[47:40]| 00h | BUS_NUM_5 - Return the bus number BIOS assigned
 * CPUBUSNO(5). (RO)
-* |[39:32]| 00h | BUS_NUM_4 — Return the bus number BIOS assigned
+* |[39:32]| 00h | BUS_NUM_4 - Return the bus number BIOS assigned
 * CPUBUSNO(4). (RO)
-* |[31:24]| 00h | BUS_NUM_3 — Return the bus number BIOS assigned
+* |[31:24]| 00h | BUS_NUM_3 - Return the bus number BIOS assigned
 * CPUBUSNO(3). (RO)
-* |[23:16]| 00h | BUS_NUM_2 — Return the bus number BIOS assigned
+* |[23:16]| 00h | BUS_NUM_2 - Return the bus number BIOS assigned
 * CPUBUSNO(2). (RO)
-* |[15:8] | 00h | BUS_NUM_1 — Return the bus number BIOS assigned
+* |[15:8] | 00h | BUS_NUM_1 - Return the bus number BIOS assigned
 * CPUBUSNO(1). (RO)
-* | [7:0] | 00h | BUS_NUM_0 — Return the bus number BIOS assigned
+* | [7:0] | 00h | BUS_NUM_0 - Return the bus number BIOS assigned
 * CPUBUSNO(0). (RO)
 */
 #define SKX_MSR_CPU_BUS_NUMBER 0x300
@@ -494,7 +494,7 @@ static __init void zhaoxin_arch_events_quirk(void)
 {
 int bit;
 
-/* disable event that reported as not presend by cpuid */
+/* disable event that reported as not present by cpuid */
 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(zx_arch_events_map)) {
 zx_pmon_event_map[zx_arch_events_map[bit].id] = 0;
 pr_warn("CPUID marked event: \'%s\' unavailable\n",
@@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation);
 static inline bool hv_reenlightenment_available(void)
 {
 /*
-* Check for required features and priviliges to make TSC frequency
+* Check for required features and privileges to make TSC frequency
 * change notifications work.
 */
 return ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
@@ -292,7 +292,7 @@ static int hv_suspend(void)
 
 /*
 * Reset the hypercall page as it is going to be invalidated
-* accross hibernation. Setting hv_hypercall_pg to NULL ensures
+* across hibernation. Setting hv_hypercall_pg to NULL ensures
 * that any subsequent hypercall operation fails safely instead of
 * crashing due to an access of an invalid page. The hypercall page
 * pointer is restored on resume.
@@ -9,7 +9,7 @@
 * Functions to keep the agpgart mappings coherent with the MMU. The
 * GART gives the CPU a physical alias of pages in memory. The alias
 * region is mapped uncacheable. Make sure there are no conflicting
-* mappings with different cachability attributes for the same
+* mappings with different cacheability attributes for the same
 * page. This avoids data corruption on some CPUs.
 */
 
@@ -22,7 +22,7 @@ extern void __add_wrong_size(void)
 /*
 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
 * -1 because sizeof will never return -1, thereby making those switch
-* case statements guaranteeed dead code which the compiler will
+* case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
@@ -283,12 +283,12 @@ extern u32 elf_hwcap2;
 *
 * The decision process for determining the results are:
 *
 * CPU: | lacks NX* | has NX, ia32 | has NX, x86_64 |
 * ELF: | | | |
 * ---------------------|------------|------------------|----------------|
 * missing PT_GNU_STACK | exec-all | exec-all | exec-none |
 * PT_GNU_STACK == RWX | exec-stack | exec-stack | exec-stack |
 * PT_GNU_STACK == RW | exec-none | exec-none | exec-none |
 *
 * exec-all : all PROT_READ user mappings are executable, except when
 * backed by files on a noexec-filesystem.
@@ -547,7 +547,7 @@ SYM_CODE_END(spurious_entries_start)
 /*
 * Dummy trap number so the low level ASM macro vector number checks do not
 * match which results in emitting plain IDTENTRY stubs without bells and
-* whistels.
+* whistles.
 */
 #define X86_TRAP_OTHER 0xFFFF
 
@@ -38,7 +38,7 @@ enum pconfig_leaf {
 #define MKTME_INVALID_ENC_ALG 4
 #define MKTME_DEVICE_BUSY 5
 
-/* Hardware requires the structure to be 256 byte alinged. Otherwise #GP(0). */
+/* Hardware requires the structure to be 256 byte aligned. Otherwise #GP(0). */
 struct mktme_key_program {
 u16 keyid;
 u32 keyid_ctrl;
@@ -3,7 +3,7 @@
 #define _ASM_X86_INTEL_PT_H
 
 #define PT_CPUID_LEAVES 2
-#define PT_CPUID_REGS_NUM 4 /* number of regsters (eax, ebx, ecx, edx) */
+#define PT_CPUID_REGS_NUM 4 /* number of registers (eax, ebx, ecx, edx) */
 
 enum pt_capabilities {
 PT_CAP_max_subleaf = 0,
@@ -159,7 +159,7 @@ static inline void *phys_to_virt(phys_addr_t address)
 /*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 * However, we truncate the address to unsigned int to avoid undesirable
-* promitions in legacy drivers.
+* promotions in legacy drivers.
 */
 static inline unsigned int isa_virt_to_bus(volatile void *address)
 {
@@ -190,7 +190,7 @@
 
 /*
 * Macro to invoke __do_softirq on the irq stack. This is only called from
-* task context when bottom halfs are about to be reenabled and soft
+* task context when bottom halves are about to be reenabled and soft
 * interrupts are pending to be processed. The interrupt stack cannot be in
 * use here.
 */
@@ -1488,7 +1488,7 @@ extern u64 kvm_mce_cap_supported;
 /*
 * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
 * userspace I/O) to indicate that the emulation context
-* should be resued as is, i.e. skip initialization of
+* should be reused as is, i.e. skip initialization of
 * emulation context, instruction fetch and decode.
 *
 * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
@@ -1513,7 +1513,7 @@ extern u64 kvm_mce_cap_supported;
 *
 * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
 * backdoor emulation, which is opt in via module param.
-* VMware backoor emulation handles select instructions
+* VMware backdoor emulation handles select instructions
 * and reinjects the #GP for all other cases.
 *
 * EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which
@@ -628,8 +628,6 @@
 #define MSR_IA32_APICBASE_ENABLE (1<<11)
 #define MSR_IA32_APICBASE_BASE (0xfffff<<12)
 
-#define MSR_IA32_TSCDEADLINE 0x000006e0
-
 #define MSR_IA32_UCODE_WRITE 0x00000079
 #define MSR_IA32_UCODE_REV 0x0000008b
 
@@ -32,7 +32,7 @@
 
 /*
 * Google experimented with loop-unrolling and this turned out to be
-* the optimal version — two calls, each with their own speculation
+* the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
 #define __FILL_RETURN_BUFFER(reg, nr, sp) \
@@ -339,7 +339,7 @@ int paravirt_disable_iospace(void);
 * on the stack. All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
-* conventions, returning at %rax, with parameteres going on %rdi, %rsi,
+* conventions, returning at %rax, with parameters going on %rdi, %rsi,
 * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also have to clobber all caller saved registers, which
@@ -1244,7 +1244,7 @@ static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
 /*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
-* dst - pointer to pgd range anwhere on a pgd page
+* dst - pointer to pgd range anywhere on a pgd page
 * src - ""
 * count - the number of pgds to copy.
 *
@@ -314,11 +314,6 @@ struct x86_hw_tss {
 struct x86_hw_tss {
 u32 reserved1;
 u64 sp0;
-
-/*
-* We store cpu_current_top_of_stack in sp1 so it's always accessible.
-* Linux does not use ring 1, so sp1 is not otherwise needed.
-*/
 u64 sp1;
 
 /*
@@ -426,12 +421,7 @@ struct irq_stack {
 char stack[IRQ_STACK_SIZE];
 } __aligned(IRQ_STACK_SIZE);
 
-#ifdef CONFIG_X86_32
 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
-#else
-/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
-#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
-#endif
 
 #ifdef CONFIG_X86_64
 struct fixed_percpu_data {
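The two processor.h hunks above belong to the "move cpu_current_top_of_stack out of TSS" change: the top-of-stack pointer no longer piggybacks on the TSS sp1 slot, and the per-CPU declaration stops being 32-bit only. A rough sketch of the per-CPU declare/define/read pattern this relies on (the accessor name here is illustrative; the real definition and its initializer live elsewhere in the kernel):

DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);	/* in a header */
DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack);	/* in exactly one .c file */

static __always_inline unsigned long example_top_of_stack(void)
{
	/* this_cpu_read() fetches the current CPU's copy of the variable. */
	return this_cpu_read(cpu_current_top_of_stack);
}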
@@ -527,7 +517,7 @@ struct thread_struct {
 struct io_bitmap *io_bitmap;
 
 /*
-* IOPL. Priviledge level dependent I/O permission which is
+* IOPL. Privilege level dependent I/O permission which is
 * emulated via the I/O bitmap to prevent user space from disabling
 * interrupts.
 */
@@ -4,6 +4,8 @@
 
 #include <asm/ldt.h>
 
+struct task_struct;
+
 /* misc architecture specific prototypes */
 
 void syscall_init(void);
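That single added declaration is the "x86/asm: Ensure asm/proto.h can be included stand-alone" fix: the header mentions struct task_struct in prototypes, so it forward-declares the type instead of relying on whatever its includer happened to pull in first. The general pattern, sketched with a hypothetical header and function name:

/* example_proto.h - self-contained: declare the incomplete types its prototypes need. */
#ifndef EXAMPLE_PROTO_H
#define EXAMPLE_PROTO_H

struct task_struct;	/* an incomplete type is enough for pointer parameters */

long example_arch_prctl(struct task_struct *task, int option, unsigned long arg2);

#endif /* EXAMPLE_PROTO_H */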
@@ -8,8 +8,8 @@
 /*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
-* Cachability : UnCached, WriteCombining, WriteThrough, WriteBack
-* Executability : eXeutable, NoteXecutable
+* Cacheability : UnCached, WriteCombining, WriteThrough, WriteBack
+* Executability : eXecutable, NoteXecutable
 * Read/Write : ReadOnly, ReadWrite
 * Presence : NotPresent
 * Encryption : Encrypted, Decrypted
@@ -130,11 +130,6 @@ void *extend_brk(size_t size, size_t align);
 : : "i" (sz)); \
 }
 
-/* Helper for reserving space for arrays of things */
-#define RESERVE_BRK_ARRAY(type, name, entries) \
-type *name; \
-RESERVE_BRK(name, sizeof(type) * entries)
-
 extern void probe_roms(void);
 #ifdef __i386__
 
@@ -296,7 +296,7 @@ struct sgx_pcmd {
 * @header1: constant byte string
 * @vendor: must be either 0x0000 or 0x8086
 * @date: YYYYMMDD in BCD
-* @header2: costant byte string
+* @header2: constant byte string
 * @swdefined: software defined value
 */
 struct sgx_sigstruct_header {
@@ -71,12 +71,7 @@ static inline void update_task_stack(struct task_struct *task)
 else
 this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
 #else
-/*
-* x86-64 updates x86_tss.sp1 via cpu_current_top_of_stack. That
-* doesn't work on x86-32 because sp1 and
-* cpu_current_top_of_stack have different values (because of
-* the non-zero stack-padding on 32bit).
-*/
+/* Xen PV enters the kernel on the thread stack. */
 if (static_cpu_has(X86_FEATURE_XENPV))
 load_sp0(task_top_of_stack(task));
 #endif
@@ -80,6 +80,7 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
 }
 
 #define __COND_SYSCALL(abi, name) \
+__weak long __##abi##_##name(const struct pt_regs *__unused); \
 __weak long __##abi##_##name(const struct pt_regs *__unused) \
 { \
 return sys_ni_syscall(); \
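The added line gives each weak __COND_SYSCALL() stub an explicit prototype immediately before its definition, which is what silences -Wmissing-prototypes for these externally visible symbols. Expanded by hand for a made-up syscall name, the generated fallback looks roughly like this:

/* Declaration first, so the weak definition below has a previous prototype. */
__weak long __x64_sys_frobnicate(const struct pt_regs *__unused);
__weak long __x64_sys_frobnicate(const struct pt_regs *__unused)
{
	return sys_ni_syscall();	/* the "not implemented" fallback */
}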
@@ -197,13 +197,7 @@ static inline int arch_within_stack_frames(const void * const stack,
 #endif
 }
 
-#else /* !__ASSEMBLY__ */
-
-#ifdef CONFIG_X86_64
-# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1)
-#endif
-
-#endif
+#endif /* !__ASSEMBLY__ */
 
 /*
 * Thread-synchronous status.
@@ -10,7 +10,7 @@
 #ifndef _ASM_UV_GEO_H
 #define _ASM_UV_GEO_H
 
-/* Type declaractions */
+/* Type declarations */
 
 /* Size of a geoid_s structure (must be before decl. of geoid_u) */
 #define GEOID_SIZE 8
@@ -353,7 +353,7 @@ union uvh_apicid {
 *
 * Note there are NO leds on a UV system. This register is only
 * used by the system controller to monitor system-wide operation.
-* There are 64 regs per node. With Nahelem cpus (2 cores per node,
+* There are 64 regs per node. With Nehalem cpus (2 cores per node,
 * 8 cpus per core, 2 threads per cpu) there are 32 cpu threads on
 * a node.
 *
@@ -234,7 +234,7 @@ struct boot_params {
 * handling of page tables.
 *
 * These enums should only ever be used by x86 code, and the code that uses
-* it should be well contained and compartamentalized.
+* it should be well contained and compartmentalized.
 *
 * KVM and Xen HVM do not have a subarch as these are expected to follow
 * standard x86 boot entries. If there is a genuine need for "hypervisor" type
@@ -252,7 +252,7 @@ struct boot_params {
 * @X86_SUBARCH_XEN: Used for Xen guest types which follow the PV boot path,
 * which start at asm startup_xen() entry point and later jump to the C
 * xen_start_kernel() entry point. Both domU and dom0 type of guests are
-* currently supportd through this PV boot path.
+* currently supported through this PV boot path.
 * @X86_SUBARCH_INTEL_MID: Used for Intel MID (Mobile Internet Device) platform
 * systems which do not have the PCI legacy interfaces.
 * @X86_SUBARCH_CE4100: Used for Intel CE media processor (CE4100) SoC
@@ -12,7 +12,7 @@
 * The msqid64_ds structure for x86 architecture with x32 ABI.
 *
 * On x86-32 and x86-64 we can just use the generic definition, but
-* x32 uses the same binary layout as x86_64, which is differnet
+* x32 uses the same binary layout as x86_64, which is different
 * from other 32-bit architectures.
 */
 
@@ -152,7 +152,7 @@ struct sgx_enclave_run {
 * Most exceptions reported on ENCLU, including those that occur within the
 * enclave, are fixed up and reported synchronously instead of being delivered
 * via a standard signal. Debug Exceptions (#DB) and Breakpoints (#BP) are
-* never fixed up and are always delivered via standard signals. On synchrously
+* never fixed up and are always delivered via standard signals. On synchronously
 * reported exceptions, -EFAULT is returned and details about the exception are
 * recorded in @run.exception, the optional sgx_enclave_exception struct.
 *
@@ -9,7 +9,7 @@
 * The shmid64_ds structure for x86 architecture with x32 ABI.
 *
 * On x86-32 and x86-64 we can just use the generic definition, but
-* x32 uses the same binary layout as x86_64, which is differnet
+* x32 uses the same binary layout as x86_64, which is different
 * from other 32-bit architectures.
 */
 
@@ -139,7 +139,7 @@ struct _fpstate_32 {
 * The 64-bit FPU frame. (FXSAVE format and later)
 *
 * Note1: If sw_reserved.magic1 == FP_XSTATE_MAGIC1 then the structure is
-* larger: 'struct _xstate'. Note that 'struct _xstate' embedds
+* larger: 'struct _xstate'. Note that 'struct _xstate' embeds
 * 'struct _fpstate' so that you can always assume the _fpstate portion
 * exists so that you can check the magic value.
 *
@@ -830,7 +830,7 @@ int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
 EXPORT_SYMBOL(acpi_unregister_ioapic);
 
 /**
-* acpi_ioapic_registered - Check whether IOAPIC assoicatied with @gsi_base
+* acpi_ioapic_registered - Check whether IOAPIC associated with @gsi_base
 * has been registered
 * @handle: ACPI handle of the IOAPIC device
 * @gsi_base: GSI base associated with the IOAPIC
@@ -1656,7 +1656,7 @@ static int __init parse_acpi(char *arg)
 else if (strcmp(arg, "noirq") == 0) {
 acpi_noirq_set();
 }
-/* "acpi=copy_dsdt" copys DSDT */
+/* "acpi=copy_dsdt" copies DSDT */
 else if (strcmp(arg, "copy_dsdt") == 0) {
 acpi_gbl_copy_dsdt_locally = 1;
 }
@@ -41,7 +41,7 @@ unsigned long acpi_get_wakeup_address(void)
 * x86_acpi_enter_sleep_state - enter sleep state
 * @state: Sleep state to enter.
 *
-* Wrapper around acpi_enter_sleep_state() to be called by assmebly.
+* Wrapper around acpi_enter_sleep_state() to be called by assembly.
 */
 asmlinkage acpi_status __visible x86_acpi_enter_sleep_state(u8 state)
 {
@@ -1,6 +1,6 @@
|
|||||||
// SPDX-License-Identifier: GPL-2.0-only
|
// SPDX-License-Identifier: GPL-2.0-only
|
||||||
/*
|
/*
|
||||||
* Shared support code for AMD K8 northbridges and derivates.
|
* Shared support code for AMD K8 northbridges and derivatives.
|
||||||
* Copyright 2006 Andi Kleen, SUSE Labs.
|
* Copyright 2006 Andi Kleen, SUSE Labs.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
|||||||
@@ -619,7 +619,7 @@ static void setup_APIC_timer(void)
|
|||||||
|
|
||||||
if (this_cpu_has(X86_FEATURE_ARAT)) {
|
if (this_cpu_has(X86_FEATURE_ARAT)) {
|
||||||
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
|
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
|
||||||
/* Make LAPIC timer preferrable over percpu HPET */
|
/* Make LAPIC timer preferable over percpu HPET */
|
||||||
lapic_clockevent.rating = 150;
|
lapic_clockevent.rating = 150;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -666,7 +666,7 @@ void lapic_update_tsc_freq(void)
|
|||||||
* In this functions we calibrate APIC bus clocks to the external timer.
|
* In this functions we calibrate APIC bus clocks to the external timer.
|
||||||
*
|
*
|
||||||
* We want to do the calibration only once since we want to have local timer
|
* We want to do the calibration only once since we want to have local timer
|
||||||
* irqs syncron. CPUs connected by the same APIC bus have the very same bus
|
* irqs synchronous. CPUs connected by the same APIC bus have the very same bus
|
||||||
* frequency.
|
* frequency.
|
||||||
*
|
*
|
||||||
* This was previously done by reading the PIT/HPET and waiting for a wrap
|
* This was previously done by reading the PIT/HPET and waiting for a wrap
|
||||||
@@ -1532,7 +1532,7 @@ static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr)
|
|||||||
* Most probably by now the CPU has serviced that pending interrupt and it
|
* Most probably by now the CPU has serviced that pending interrupt and it
|
||||||
* might not have done the ack_APIC_irq() because it thought, interrupt
|
* might not have done the ack_APIC_irq() because it thought, interrupt
|
||||||
* came from i8259 as ExtInt. LAPIC did not get EOI so it does not clear
|
* came from i8259 as ExtInt. LAPIC did not get EOI so it does not clear
|
||||||
* the ISR bit and cpu thinks it has already serivced the interrupt. Hence
|
* the ISR bit and cpu thinks it has already serviced the interrupt. Hence
|
||||||
* a vector might get locked. It was noticed for timer irq (vector
|
* a vector might get locked. It was noticed for timer irq (vector
|
||||||
* 0x31). Issue an extra EOI to clear ISR.
|
* 0x31). Issue an extra EOI to clear ISR.
|
||||||
*
|
*
|
||||||
@@ -1657,7 +1657,7 @@ static void setup_local_APIC(void)
|
|||||||
*/
|
*/
|
||||||
/*
|
/*
|
||||||
* Actually disabling the focus CPU check just makes the hang less
|
* Actually disabling the focus CPU check just makes the hang less
|
||||||
* frequent as it makes the interrupt distributon model be more
|
* frequent as it makes the interrupt distribution model be more
|
||||||
* like LRU than MRU (the short-term load is more even across CPUs).
|
* like LRU than MRU (the short-term load is more even across CPUs).
|
||||||
*/
|
*/
|
||||||
|
|
||||||
@@ -1875,7 +1875,7 @@ static __init void try_to_enable_x2apic(int remap_mode)
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
* Without IR, all CPUs can be addressed by IOAPIC/MSI only
|
* Without IR, all CPUs can be addressed by IOAPIC/MSI only
|
||||||
* in physical mode, and CPUs with an APIC ID that cannnot
|
* in physical mode, and CPUs with an APIC ID that cannot
|
||||||
* be addressed must not be brought online.
|
* be addressed must not be brought online.
|
||||||
*/
|
*/
|
||||||
x2apic_set_max_apicid(apic_limit);
|
x2apic_set_max_apicid(apic_limit);
|
||||||
|
|||||||
@@ -928,7 +928,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
* setup_IO_APIC_irqs() programs all legacy IRQs with default trigger
|
* setup_IO_APIC_irqs() programs all legacy IRQs with default trigger
|
||||||
* and polarity attirbutes. So allow the first user to reprogram the
|
* and polarity attributes. So allow the first user to reprogram the
|
||||||
* pin with real trigger and polarity attributes.
|
* pin with real trigger and polarity attributes.
|
||||||
*/
|
*/
|
||||||
if (irq < nr_legacy_irqs() && data->count == 1) {
|
if (irq < nr_legacy_irqs() && data->count == 1) {
|
||||||
@@ -994,7 +994,7 @@ static int alloc_isa_irq_from_domain(struct irq_domain *domain,
  /*
  * Legacy ISA IRQ has already been allocated, just add pin to
- * the pin list assoicated with this IRQ and program the IOAPIC
+ * the pin list associated with this IRQ and program the IOAPIC
  * entry. The IOAPIC entry
  */
  if (irq_data && irq_data->parent_data) {
@@ -1752,7 +1752,7 @@ static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
  * with masking the ioapic entry and then polling until
  * Remote IRR was clear before reprogramming the
  * ioapic I don't trust the Remote IRR bit to be
- * completey accurate.
+ * completely accurate.
  *
  * However there appears to be no other way to plug
  * this race, so if the Remote IRR bit is not
@@ -1830,7 +1830,7 @@ static void ioapic_ack_level(struct irq_data *irq_data)
  /*
  * Tail end of clearing remote IRR bit (either by delivering the EOI
  * message via io-apic EOI register write or simulating it using
- * mask+edge followed by unnask+level logic) manually when the
+ * mask+edge followed by unmask+level logic) manually when the
  * level triggered interrupt is seen as the edge triggered interrupt
  * at the cpu.
  */
@@ -1045,7 +1045,7 @@ void irq_force_complete_move(struct irq_desc *desc)
  *
  * But in case of cpu hotplug this should be a non issue
  * because if the affinity update happens right before all
- * cpus rendevouz in stop machine, there is no way that the
+ * cpus rendezvous in stop machine, there is no way that the
  * interrupt can be blocked on the target cpu because all cpus
  * loops first with interrupts enabled in stop machine, so the
  * old vector is not yet cleaned up when the interrupt fires.
@@ -1054,7 +1054,7 @@ void irq_force_complete_move(struct irq_desc *desc)
  * of the interrupt on the apic/system bus would be delayed
  * beyond the point where the target cpu disables interrupts
  * in stop machine. I doubt that it can happen, but at least
- * there is a theroretical chance. Virtualization might be
+ * there is a theoretical chance. Virtualization might be
  * able to expose this, but AFAICT the IOAPIC emulation is not
  * as stupid as the real hardware.
  *
@@ -94,7 +94,7 @@
  * Remove APM dependencies in arch/i386/kernel/process.c
  * Remove APM dependencies in drivers/char/sysrq.c
  * Reset time across standby.
- * Allow more inititialisation on SMP.
+ * Allow more initialisation on SMP.
  * Remove CONFIG_APM_POWER_OFF and make it boot time
  * configurable (default on).
  * Make debug only a boot time parameter (remove APM_DEBUG).
@@ -766,7 +766,7 @@ static int apm_driver_version(u_short *val)
  * not cleared until it is acknowledged.
  *
  * Additional information is returned in the info pointer, providing
- * that APM 1.2 is in use. If no messges are pending the value 0x80
+ * that APM 1.2 is in use. If no messages are pending the value 0x80
  * is returned (No power management events pending).
  */
  static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info)
@@ -1025,7 +1025,7 @@ static int apm_enable_power_management(int enable)
  * status which gives the rough battery status, and current power
  * source. The bat value returned give an estimate as a percentage
  * of life and a status value for the battery. The estimated life
- * if reported is a lifetime in secodnds/minutes at current powwer
+ * if reported is a lifetime in seconds/minutes at current power
  * consumption.
  */

@@ -877,7 +877,7 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
  static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
  struct _cpuid4_info_regs *base)
  {
- struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cpu_cacheinfo *this_cpu_ci;
  struct cacheinfo *this_leaf;
  int i, sibling;
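The last hunk above drops a dead-store initialization: the pointer was assigned at its declaration, presumably because the assigned value is never read before the variable is reassigned later in the function. A minimal stand-alone illustration of the same cleanup, using hypothetical names rather than the kernel's cacheinfo code:

#include <stdio.h>

/* Hypothetical stand-in for a per-CPU lookup; not the kernel's get_cpu_cacheinfo(). */
struct cache_info { int level; };

static struct cache_info *lookup_info(int cpu)
{
	static struct cache_info info = { .level = 3 };
	(void)cpu;
	return &info;
}

static void setup(int cpu)
{
	struct cache_info *ci;	/* no initializer: the old one was a dead store */
	int sibling;

	for (sibling = 0; sibling < 2; sibling++) {
		ci = lookup_info(sibling);	/* first real use assigns it anyway */
		printf("cpu %d sibling %d -> level %d\n", cpu, sibling, ci->level);
	}
}

int main(void)
{
	setup(0);
	return 0;
}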
@@ -482,7 +482,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
  if (pk)
  pk->pkru = init_pkru_value;
  /*
- * Seting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
+ * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
  * cpuid bit to be set. We need to ensure that we
  * update that bit in this CPU's "cpu_info".
  */
@@ -1404,7 +1404,7 @@ static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
  * where GS is unused by the prev and next threads.
  *
  * Since neither vendor documents this anywhere that I can see,
- * detect it directly instead of hardcoding the choice by
+ * detect it directly instead of hard-coding the choice by
  * vendor.
  *
  * I've designated AMD's behavior as the "bug" because it's
@@ -1748,6 +1748,8 @@ DEFINE_PER_CPU(bool, hardirq_stack_inuse);
  DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
  EXPORT_PER_CPU_SYMBOL(__preempt_count);

+ DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_STACK;
+
  /* May not be marked __init: used by software suspend */
  void syscall_init(void)
  {
@@ -291,7 +291,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
  mark_tsc_unstable("cyrix 5510/5520 detected");
  }
  #endif
- c->x86_cache_size = 16; /* Yep 16K integrated cache thats it */
+ c->x86_cache_size = 16; /* Yep 16K integrated cache that's it */

  /* GXm supports extended cpuid levels 'ala' AMD */
  if (c->cpuid_level == 2) {
@@ -301,7 +301,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
  * The operating system must reload CR3 to cause the TLB to be flushed"
  *
  * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
- * should be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE
+ * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
  * to be modified.
  */
  if (c->x86 == 5 && c->x86_model == 9) {
@@ -529,7 +529,7 @@ static void mce_irq_work_cb(struct irq_work *entry)
  * Check if the address reported by the CPU is in a format we can parse.
  * It would be possible to add code for most other cases, but all would
  * be somewhat complicated (e.g. segment offset would require an instruction
- * parser). So only support physical addresses up to page granuality for now.
+ * parser). So only support physical addresses up to page granularity for now.
  */
  int mce_usable_address(struct mce *m)
  {
@@ -142,7 +142,7 @@ static struct severity {
  MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR)
  ),
  MCESEV(
- KEEP, "Non signalled machine check",
+ KEEP, "Non signaled machine check",
  SER, BITCLR(MCI_STATUS_S)
  ),

@@ -197,7 +197,7 @@ static unsigned char hv_get_nmi_reason(void)
  #ifdef CONFIG_X86_LOCAL_APIC
  /*
  * Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes
- * it dificult to process CHANNELMSG_UNLOAD in case of crash. Handle
+ * it difficult to process CHANNELMSG_UNLOAD in case of crash. Handle
  * unknown NMI on the first CPU which gets it.
  */
  static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
@@ -428,7 +428,7 @@ static void __init ms_hyperv_init_platform(void)

  /*
  * Hyper-V doesn't provide irq remapping for IO-APIC. To enable x2apic,
- * set x2apic destination mode to physcial mode when x2apic is available
+ * set x2apic destination mode to physical mode when x2apic is available
  * and Hyper-V IOMMU driver makes sure cpus assigned with IO-APIC irqs
  * have 8-bit APIC id.
  */
@@ -434,7 +434,7 @@ set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
  state->range_sizek = sizek - second_sizek;
  }

- /* Mininum size of mtrr block that can take hole: */
+ /* Minimum size of mtrr block that can take hole: */
  static u64 mtrr_chunk_size __initdata = (256ULL<<20);

  static int __init parse_mtrr_chunk_size_opt(char *p)
@@ -799,7 +799,7 @@ void mtrr_ap_init(void)
  *
  * This routine is called in two cases:
  *
- * 1. very earily time of software resume, when there absolutely
+ * 1. very early time of software resume, when there absolutely
  * isn't mtrr entry changes;
  *
  * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug
@@ -192,7 +192,7 @@ static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
  * Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
  * Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
  *
- * Probe by trying to write the first of the L3 cach mask registers
+ * Probe by trying to write the first of the L3 cache mask registers
  * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
  * is always 20 on hsw server parts. The minimum cache bitmask length
  * allowed for HSW server is always 2 bits. Hardcode all of them.
@@ -387,7 +387,7 @@ void mon_event_count(void *info)
  * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
  * that:
  *
- * current bandwdith(cur_bw) < user specified bandwidth(user_bw)
+ * current bandwidth(cur_bw) < user specified bandwidth(user_bw)
  *
  * This uses the MBM counters to measure the bandwidth and MBA throttle
  * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
@@ -397,7 +397,7 @@ void mon_event_count(void *info)
  * timer. Having 1s interval makes the calculation of bandwidth simpler.
  *
  * Although MBA's goal is to restrict the bandwidth to a maximum, there may
- * be a need to increase the bandwidth to avoid uncecessarily restricting
+ * be a need to increase the bandwidth to avoid unnecessarily restricting
  * the L2 <-> L3 traffic.
  *
  * Since MBA controls the L2 external bandwidth where as MBM measures the
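The comment corrected in the two hunks above describes a feedback loop: the MBM counters measure the actual bandwidth and the MBA throttle MSRs are adjusted until the measured value stays below the user-specified cap. A stand-alone sketch of one step of such a controller, with hypothetical names, granularity and limits (not the kernel's implementation):

#include <stdio.h>

/*
 * One hypothetical control step: nudge the throttle percentage so the
 * measured bandwidth (cur_bw) converges below the user cap (user_bw).
 */
static unsigned int mba_feedback_step(unsigned int cur_bw, unsigned int user_bw,
				      unsigned int throttle_pct)
{
	const unsigned int gran = 10, min_pct = 10, max_pct = 100;

	if (cur_bw > user_bw && throttle_pct > min_pct)
		return throttle_pct - gran;	/* over the cap: throttle harder */
	if (cur_bw < user_bw && throttle_pct < max_pct)
		return throttle_pct + gran;	/* headroom left: relax the throttle */
	return throttle_pct;
}

int main(void)
{
	printf("%u\n", mba_feedback_step(1200, 1000, 60));	/* prints 50 */
	printf("%u\n", mba_feedback_step(800, 1000, 50));	/* prints 60 */
	return 0;
}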
@@ -480,7 +480,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)

  /*
  * Delta values are updated dynamically package wise for each
- * rdtgrp everytime the throttle MSR changes value.
+ * rdtgrp every time the throttle MSR changes value.
  *
  * This is because (1)the increase in bandwidth is not perfectly
  * linear and only "approximately" linear even when the hardware
@@ -1307,7 +1307,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
  * If the thread does not get on the CPU for whatever
  * reason and the process which sets up the region is
  * interrupted then this will leave the thread in runnable
- * state and once it gets on the CPU it will derefence
+ * state and once it gets on the CPU it will dereference
  * the cleared, but not freed, plr struct resulting in an
  * empty pseudo-locking loop.
  */
@@ -1391,7 +1391,7 @@ out:
  * group is removed from user space via a "rmdir" from userspace or the
  * unmount of the resctrl filesystem. On removal the resource group does
  * not go back to pseudo-locksetup mode before it is removed, instead it is
- * removed directly. There is thus assymmetry with the creation where the
+ * removed directly. There is thus asymmetry with the creation where the
  * &struct pseudo_lock_region is removed here while it was not created in
  * rdtgroup_pseudo_lock_create().
  *
@@ -1,6 +1,6 @@
  // SPDX-License-Identifier: GPL-2.0-only
  /*
- * User interface for Resource Alloction in Resource Director Technology(RDT)
+ * User interface for Resource Allocation in Resource Director Technology(RDT)
  *
  * Copyright (C) 2016 Intel Corporation
  *
@@ -294,7 +294,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
  /*
  * This is safe against resctrl_sched_in() called from __switch_to()
  * because __switch_to() is executed with interrupts disabled. A local call
- * from update_closid_rmid() is proteced against __switch_to() because
+ * from update_closid_rmid() is protected against __switch_to() because
  * preemption is disabled.
  */
  static void update_cpu_closid_rmid(void *info)
@@ -2555,7 +2555,7 @@ static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
  /*
  * This creates a directory mon_data which contains the monitored data.
  *
- * mon_data has one directory for each domain whic are named
+ * mon_data has one directory for each domain which are named
  * in the format mon_<domain_name>_<domain_id>. For ex: A mon_data
  * with L3 domain looks as below:
  * ./mon_data:
@@ -214,10 +214,10 @@ static const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl)

  /*
  * Swap page to the regular memory transformed to the blocked state by using
- * EBLOCK, which means that it can no loger be referenced (no new TLB entries).
+ * EBLOCK, which means that it can no longer be referenced (no new TLB entries).
  *
  * The first trial just tries to write the page assuming that some other thread
- * has reset the count for threads inside the enlave by using ETRACK, and
+ * has reset the count for threads inside the enclave by using ETRACK, and
  * previous thread count has been zeroed out. The second trial calls ETRACK
  * before EWB. If that fails we kick all the HW threads out, and then do EWB,
  * which should be guaranteed the succeed.
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(__max_die_per_package);

  #ifdef CONFIG_SMP
  /*
- * Check if given CPUID extended toplogy "leaf" is implemented
+ * Check if given CPUID extended topology "leaf" is implemented
  */
  static int check_extended_topology_leaf(int leaf)
  {
@@ -44,7 +44,7 @@ static int check_extended_topology_leaf(int leaf)
  return 0;
  }
  /*
- * Return best CPUID Extended Toplogy Leaf supported
+ * Return best CPUID Extended Topology Leaf supported
  */
  static int detect_extended_topology_leaf(struct cpuinfo_x86 *c)
  {
@@ -793,7 +793,7 @@ core_initcall(e820__register_nvs_regions);
  #endif

  /*
- * Allocate the requested number of bytes with the requsted alignment
+ * Allocate the requested number of bytes with the requested alignment
  * and return (the physical address) to the caller. Also register this
  * range in the 'kexec' E820 table as a reserved range.
  *
@@ -253,7 +253,7 @@ static bool xfeature_enabled(enum xfeature xfeature)
  static void __init setup_xstate_features(void)
  {
  u32 eax, ebx, ecx, edx, i;
- /* start at the beginnning of the "extended state" */
+ /* start at the beginning of the "extended state" */
  unsigned int last_good_offset = offsetof(struct xregs_state,
  extended_state_area);
  /*
@@ -104,7 +104,7 @@ static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr)
  static bool __head check_la57_support(unsigned long physaddr)
  {
  /*
- * 5-level paging is detected and enabled at kernel decomression
+ * 5-level paging is detected and enabled at kernel decompression
  * stage. Only check if it has been enabled there.
  */
  if (!(native_read_cr4() & X86_CR4_LA57))
@@ -245,7 +245,7 @@ static const __initconst struct idt_data ist_idts[] = {
  * after that.
  *
  * Note, that X86_64 cannot install the real #PF handler in
- * idt_setup_early_traps() because the memory intialization needs the #PF
+ * idt_setup_early_traps() because the memory initialization needs the #PF
  * handler from the early_idt_handler_array to initialize the early page
  * tables.
  */
@@ -338,7 +338,7 @@ void fixup_irqs(void)
  irq_migrate_all_off_this_cpu();

  /*
- * We can remove mdelay() and then send spuriuous interrupts to
+ * We can remove mdelay() and then send spurious interrupts to
  * new cpu targets for all the irqs that were handled previously by
  * this cpu. While it works, I have seen spurious interrupt messages
  * (nothing wrong but still...).
@@ -17,7 +17,7 @@
  * Updated by: Tom Rini <trini@kernel.crashing.org>
  * Updated by: Jason Wessel <jason.wessel@windriver.com>
  * Modified for 386 by Jim Kingdon, Cygnus Support.
- * Origianl kgdb, compatibility with 2.1.xx kernel by
+ * Original kgdb, compatibility with 2.1.xx kernel by
  * David Grothe <dave@gcom.com>
  * Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com>
  * X86_64 changes from Andi Kleen's patch merged by Jim Houston
@@ -642,7 +642,7 @@ void kgdb_arch_late(void)
  struct perf_event **pevent;

  /*
- * Pre-allocate the hw breakpoint structions in the non-atomic
+ * Pre-allocate the hw breakpoint instructions in the non-atomic
  * portion of kgdb because this operation requires mutexs to
  * complete.
  */
@@ -12,7 +12,7 @@

  #include "common.h"

- /* Ftrace callback handler for kprobes -- called under preepmt disabled */
+ /* Ftrace callback handler for kprobes -- called under preempt disabled */
  void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
  struct ftrace_ops *ops, struct ftrace_regs *fregs)
  {
@@ -260,7 +260,7 @@ static void set_idt(void *newidt, u16 limit)
  {
  struct desc_ptr curidt;

- /* x86-64 supports unaliged loads & stores */
+ /* x86-64 supports unaligned loads & stores */
  curidt.size = limit;
  curidt.address = (unsigned long)newidt;
@@ -63,14 +63,9 @@ __visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
  */
  .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,

- /*
- * .sp1 is cpu_current_top_of_stack. The init task never
- * runs user code, but cpu_current_top_of_stack should still
- * be well defined before the first context switch.
- */
+ #ifdef CONFIG_X86_32
  .sp1 = TOP_OF_INIT_STACK,

- #ifdef CONFIG_X86_32
  .ss0 = __KERNEL_DS,
  .ss1 = __KERNEL_CS,
  #endif
@@ -451,7 +446,7 @@ void speculative_store_bypass_ht_init(void)
  * First HT sibling to come up on the core. Link shared state of
  * the first HT sibling to itself. The siblings on the same core
  * which come up later will see the shared state pointer and link
- * themself to the state of this CPU.
+ * themselves to the state of this CPU.
  */
  st->shared_state = st;
  }
@@ -89,7 +89,7 @@ u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
  /*
  * Assumption here is that last_value, a global accumulator, always goes
  * forward. If we are less than that, we should not be much smaller.
- * We assume there is an error marging we're inside, and then the correction
+ * We assume there is an error margin we're inside, and then the correction
  * does not sacrifice accuracy.
  *
  * For reads: global may have changed between test and return,
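The comment above rests on the assumption that last_value, a global accumulator, only ever moves forward. A stand-alone sketch of the usual way such a monotonic clamp is implemented with a compare-and-exchange loop (types and names are illustrative, not the pvclock code itself):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long long last_value;

/* Return max(sample, last_value) and advance last_value if sample is newer. */
static unsigned long long clamp_monotonic(unsigned long long sample)
{
	unsigned long long last = atomic_load(&last_value);

	do {
		if (sample <= last)
			return last;	/* another reader already saw a later value */
	} while (!atomic_compare_exchange_weak(&last_value, &last, sample));

	return sample;
}

int main(void)
{
	printf("%llu\n", clamp_monotonic(100));	/* 100 */
	printf("%llu\n", clamp_monotonic(90));	/* still 100: never goes backwards */
	return 0;
}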
@@ -107,7 +107,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
  * - Write protect disabled
  * - No task switch
  * - Don't do FP software emulation.
- * - Proctected mode enabled
+ * - Protected mode enabled
  */
  movl %cr0, %eax
  andl $~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %eax
@@ -121,7 +121,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
  * - Write protect disabled
  * - No task switch
  * - Don't do FP software emulation.
- * - Proctected mode enabled
+ * - Protected mode enabled
  */
  movq %cr0, %rax
  andq $~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax
@@ -65,7 +65,7 @@ RESERVE_BRK(dmi_alloc, 65536);

  /*
  * Range of the BSS area. The size of the BSS area is determined
- * at link time, with RESERVE_BRK*() facility reserving additional
+ * at link time, with RESERVE_BRK() facility reserving additional
  * chunks.
  */
  unsigned long _brk_start = (unsigned long)__brk_base;
@@ -1043,8 +1043,8 @@ void __init setup_arch(char **cmdline_p)

  /*
  * Need to conclude brk, before e820__memblock_setup()
  * it could use memblock_find_in_range, could overlap with
  * brk area.
  */
  reserve_brk();
@@ -492,7 +492,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
  * SS descriptor, but we do need SS to be valid. It's possible
  * that the old SS is entirely bogus -- this can happen if the
  * signal we're trying to deliver is #GP or #SS caused by a bad
- * SS value. We also have a compatbility issue here: DOSEMU
+ * SS value. We also have a compatibility issue here: DOSEMU
  * relies on the contents of the SS register indicating the
  * SS value at the time of the signal, even though that code in
  * DOSEMU predates sigreturn's ability to restore SS. (DOSEMU
@@ -67,7 +67,7 @@
  * 5AP. symmetric IO mode (normal Linux operation) not affected.
  * 'noapic' mode has vector 0xf filled out properly.
  * 6AP. 'noapic' mode might be affected - fixed in later steppings
- * 7AP. We do not assume writes to the LVT deassering IRQs
+ * 7AP. We do not assume writes to the LVT deasserting IRQs
  * 8AP. We do not enable low power mode (deep sleep) during MP bootup
  * 9AP. We do not use mixed mode
  *
@@ -204,7 +204,7 @@ static void native_stop_other_cpus(int wait)
  }
  /*
  * Don't wait longer than 10 ms if the caller didn't
- * reqeust it. If wait is true, the machine hangs here if
+ * request it. If wait is true, the machine hangs here if
  * one or more CPUs do not reach shutdown state.
  */
  timeout = USEC_PER_MSEC * 10;
@@ -1407,7 +1407,7 @@ void __init calculate_max_logical_packages(void)
  int ncpus;

  /*
- * Today neither Intel nor AMD support heterogenous systems so
+ * Today neither Intel nor AMD support heterogeneous systems so
  * extrapolate the boot cpu's data to all packages.
  */
  ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
@@ -29,12 +29,6 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
  }
  }

- /*
- * This function returns an error if it detects any unreliable features of the
- * stack. Otherwise it guarantees that the stack trace is reliable.
- *
- * If the task is not 'current', the caller *must* ensure the task is inactive.
- */
  int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
  void *cookie, struct task_struct *task)
  {
@@ -10,7 +10,7 @@
  * EFI Quirks
  * Several EFI systems do not correctly advertise their boot framebuffers.
  * Hence, we use this static table of known broken machines and fix up the
- * information so framebuffer drivers can load corectly.
+ * information so framebuffer drivers can load correctly.
  */

  #include <linux/dmi.h>
@@ -113,7 +113,7 @@ int arch_register_cpu(int num)
  * Two known BSP/CPU0 dependencies: Resume from suspend/hibernate
  * depends on BSP. PIC interrupts depend on BSP.
  *
- * If the BSP depencies are under control, one can tell kernel to
+ * If the BSP dependencies are under control, one can tell kernel to
  * enable BSP hotplug. This basically adds a control file and
  * one can attempt to offline BSP.
  */
@@ -395,7 +395,7 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
  /*
  * Adjust our frame so that we return straight to the #GP
  * vector with the expected RSP value. This is safe because
- * we won't enable interupts or schedule before we invoke
+ * we won't enable interrupts or schedule before we invoke
  * general_protection, so nothing will clobber the stack
  * frame we just set up.
  *
@@ -740,7 +740,7 @@ static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
  * 2) Reference counter. If available we use the HPET or the
  * PMTIMER as a reference to check the sanity of that value.
  * We use separate TSC readouts and check inside of the
- * reference read for any possible disturbance. We dicard
+ * reference read for any possible disturbance. We discard
  * disturbed values here as well. We do that around the PIT
  * calibration delay loop as we have to wait for a certain
  * amount of time anyway.
@@ -1080,7 +1080,7 @@ static void tsc_resume(struct clocksource *cs)
  * very small window right after one CPU updated cycle_last under
  * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
  * is smaller than the cycle_last reference value due to a TSC which
- * is slighty behind. This delta is nowhere else observable, but in
+ * is slightly behind. This delta is nowhere else observable, but in
  * that case it results in a forward time jump in the range of hours
  * due to the unsigned delta calculation of the time keeping core
  * code, which is necessary to support wrapping clocksources like pm
@@ -1265,7 +1265,7 @@ EXPORT_SYMBOL(convert_art_to_tsc);
  * corresponding clocksource
  * @cycles: System counter value
  * @cs: Clocksource corresponding to system counter value. Used
- * by timekeeping code to verify comparibility of two cycle
+ * by timekeeping code to verify comparability of two cycle
  * values.
  */

@@ -472,7 +472,7 @@ retry:
  /*
  * Add the result to the previous adjustment value.
  *
- * The adjustement value is slightly off by the overhead of the
+ * The adjustment value is slightly off by the overhead of the
  * sync mechanism (observed values are ~200 TSC cycles), but this
  * really depends on CPU, node distance and frequency. So
  * compensating for this is hard to get right. Experiments show
@@ -272,7 +272,7 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst,
  * by whether the operand is a register or a memory location.
  * If operand is a register, return as many bytes as the operand
  * size. If operand is memory, return only the two least
- * siginificant bytes.
+ * significant bytes.
  */
  if (X86_MODRM_MOD(insn->modrm.value) == 3)
  *data_size = insn->opnd_bytes;
Some files were not shown because too many files have changed in this diff.