Merge branch 'x86/asm' into x86/mm, to resolve conflicts

Conflicts:
	tools/testing/selftests/x86/Makefile

Signed-off-by: Ingo Molnar <mingo@kernel.org>

commit 38452af242
@@ -45,7 +45,7 @@ is how we expect the compiler, application and kernel to work together.
   MPX-instrumented.
3) The kernel detects that the CPU has MPX, allows the new prctl() to
   succeed, and notes the location of the bounds directory. Userspace is
-  expected to keep the bounds directory at that locationWe note it
+  expected to keep the bounds directory at that location. We note it
   instead of reading it each time because the 'xsave' operation needed
   to access the bounds directory register is an expensive operation.
4) If the application needs to spill bounds out of the 4 registers, it
@@ -167,7 +167,7 @@ If a #BR is generated due to a bounds violation caused by MPX.
We need to decode MPX instructions to get violation address and
set this address into extended struct siginfo.

-The _sigfault feild of struct siginfo is extended as follow:
+The _sigfault field of struct siginfo is extended as follow:

87		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
88		struct {
@@ -240,5 +240,5 @@ them at the same bounds table.
This is allowed architecturally. See more information "Intel(R) Architecture
Instruction Set Extensions Programming Reference" (9.3.4).

-However, if users did this, the kernel might be fooled in to unmaping an
+However, if users did this, the kernel might be fooled in to unmapping an
in-use bounds table since it does not recognize sharing.
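For orientation, the extended _sigfault layout the document goes on to quote looks roughly like this (a sketch pieced together from the compat_siginfo hunk later in this merge; treat it as illustrative, not authoritative):

	/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
	struct {
		void __user *_addr;	/* faulting insn/memory ref. */
		short _addr_lsb;	/* valid LSB of the reported address */
		struct {		/* used when si_code == SEGV_BNDERR */
			void __user *_lower;
			void __user *_upper;
		} _addr_bnd;
	} _sigfault;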
@@ -5,7 +5,7 @@ memory, it has two choices:
   from areas other than the one we are trying to flush will be
   destroyed and must be refilled later, at some cost.
2. Use the invlpg instruction to invalidate a single page at a
-  time. This could potentialy cost many more instructions, but
+  time. This could potentially cost many more instructions, but
   it is a much more precise operation, causing no collateral
   damage to other TLB entries.
@@ -19,7 +19,7 @@ Which method to do depends on a few things:
   work.
3. The size of the TLB. The larger the TLB, the more collateral
   damage we do with a full flush. So, the larger the TLB, the
-  more attrative an individual flush looks. Data and
+  more attractive an individual flush looks. Data and
   instructions have separate TLBs, as do different page sizes.
4. The microarchitecture. The TLB has become a multi-level
   cache on modern CPUs, and the global flushes have become more
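In code, the choice the document describes boils down to a size check before flushing a range; a rough sketch (illustrative only; the ceiling tunable and helper names are assumptions based on the same document):

	/* Flush the TLB entries covering [start, end) for the current mm. */
	if ((end - start) >> PAGE_SHIFT > tlb_single_page_flush_ceiling) {
		local_flush_tlb();			/* 1. drop the whole TLB */
	} else {
		unsigned long addr;

		for (addr = start; addr < end; addr += PAGE_SIZE)
			__flush_tlb_single(addr);	/* 2. invlpg one page */
	}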
@@ -36,7 +36,7 @@ between all CPUs.

check_interval
	How often to poll for corrected machine check errors, in seconds
-	(Note output is hexademical). Default 5 minutes. When the poller
+	(Note output is hexadecimal). Default 5 minutes. When the poller
	finds MCEs it triggers an exponential speedup (poll more often) on
	the polling interval. When the poller stops finding MCEs, it
	triggers an exponential backoff (poll less often) on the polling
@@ -80,12 +80,14 @@
#define APM_CPU_PART_POTENZA		0x000

#define CAVIUM_CPU_PART_THUNDERX	0x0A1
#define CAVIUM_CPU_PART_THUNDERX_81XX	0x0A2

#define BRCM_CPU_PART_VULCAN		0x516

#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)

#ifndef __ASSEMBLY__
@@ -117,6 +117,8 @@ struct pt_regs {
	};
	u64 orig_x0;
	u64 syscallno;
	u64 orig_addr_limit;
	u64 unused;	// maintain 16 byte alignment
};

#define arch_has_single_step()	(1)
@@ -60,6 +60,7 @@ int main(void)
  DEFINE(S_PC,			offsetof(struct pt_regs, pc));
  DEFINE(S_ORIG_X0,		offsetof(struct pt_regs, orig_x0));
  DEFINE(S_SYSCALLNO,		offsetof(struct pt_regs, syscallno));
  DEFINE(S_ORIG_ADDR_LIMIT,	offsetof(struct pt_regs, orig_addr_limit));
  DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
  BLANK();
  DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id.counter));
@@ -98,6 +98,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
		MIDR_RANGE(MIDR_THUNDERX, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 1),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
	},
#endif
	{
	}
@ -28,6 +28,7 @@
|
||||
#include <asm/errno.h>
|
||||
#include <asm/esr.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/unistd.h>
|
||||
|
||||
@ -97,7 +98,14 @@
|
||||
mov x29, xzr // fp pointed to user-space
|
||||
.else
|
||||
add x21, sp, #S_FRAME_SIZE
|
||||
.endif
|
||||
get_thread_info tsk
|
||||
/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
|
||||
ldr x20, [tsk, #TI_ADDR_LIMIT]
|
||||
str x20, [sp, #S_ORIG_ADDR_LIMIT]
|
||||
mov x20, #TASK_SIZE_64
|
||||
str x20, [tsk, #TI_ADDR_LIMIT]
|
||||
ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
|
||||
.endif /* \el == 0 */
|
||||
mrs x22, elr_el1
|
||||
mrs x23, spsr_el1
|
||||
stp lr, x21, [sp, #S_LR]
|
||||
@ -128,6 +136,14 @@
|
||||
.endm
|
||||
|
||||
.macro kernel_exit, el
|
||||
.if \el != 0
|
||||
/* Restore the task's original addr_limit. */
|
||||
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
|
||||
str x20, [tsk, #TI_ADDR_LIMIT]
|
||||
|
||||
/* No need to restore UAO, it will be restored from SPSR_EL1 */
|
||||
.endif
|
||||
|
||||
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
|
||||
.if \el == 0
|
||||
ct_user_enter
|
||||
@ -406,7 +422,6 @@ el1_irq:
|
||||
bl trace_hardirqs_off
|
||||
#endif
|
||||
|
||||
get_thread_info tsk
|
||||
irq_handler
|
||||
|
||||
#ifdef CONFIG_PREEMPT
|
||||
|
@@ -280,7 +280,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
	}

	if (permission_fault(esr) && (addr < USER_DS)) {
-		if (get_fs() == KERNEL_DS)
+		/* regs->orig_addr_limit may be 0 if we entered from EL0 */
+		if (regs->orig_addr_limit == KERNEL_DS)
			die("Accessing user space memory with fs=KERNEL_DS", regs, esr);

		if (!search_exception_tables(regs->pc))
@@ -294,11 +294,6 @@ config X86_32_LAZY_GS
	def_bool y
	depends on X86_32 && !CC_STACKPROTECTOR

config ARCH_HWEIGHT_CFLAGS
	string
	default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
	default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64

config ARCH_SUPPORTS_UPROBES
	def_bool y
@@ -16,14 +16,16 @@
#define BOOT_BITOPS_H
#define _LINUX_BITOPS_H	/* Inhibit inclusion of <linux/bitops.h> */

-static inline int constant_test_bit(int nr, const void *addr)
+#include <linux/types.h>
+
+static inline bool constant_test_bit(int nr, const void *addr)
{
	const u32 *p = (const u32 *)addr;
	return ((1UL << (nr & 31)) & (p[nr >> 5])) != 0;
}
-static inline int variable_test_bit(int nr, const void *addr)
+static inline bool variable_test_bit(int nr, const void *addr)
{
-	u8 v;
+	bool v;
	const u32 *p = (const u32 *)addr;

	asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/edd.h>
#include <asm/setup.h>
+#include <asm/asm.h>
#include "bitops.h"
#include "ctype.h"
#include "cpuflags.h"
@@ -176,18 +177,18 @@ static inline void wrgs32(u32 v, addr_t addr)
}

/* Note: these only return true/false, not a signed return value! */
-static inline int memcmp_fs(const void *s1, addr_t s2, size_t len)
+static inline bool memcmp_fs(const void *s1, addr_t s2, size_t len)
{
-	u8 diff;
-	asm volatile("fs; repe; cmpsb; setnz %0"
-		     : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+	bool diff;
+	asm volatile("fs; repe; cmpsb" CC_SET(nz)
+		     : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
	return diff;
}
-static inline int memcmp_gs(const void *s1, addr_t s2, size_t len)
+static inline bool memcmp_gs(const void *s1, addr_t s2, size_t len)
{
-	u8 diff;
-	asm volatile("gs; repe; cmpsb; setnz %0"
-		     : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+	bool diff;
+	asm volatile("gs; repe; cmpsb" CC_SET(nz)
+		     : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
	return diff;
}
@@ -17,7 +17,7 @@

int memcmp(const void *s1, const void *s2, size_t len)
{
-	u8 diff;
+	bool diff;
	asm("repe; cmpsb; setnz %0"
	    : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
	return diff;
@@ -40,10 +40,10 @@ static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
-__visible void enter_from_user_mode(void)
+__visible inline void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
-	user_exit();
+	user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
@@ -274,7 +274,7 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
	ti->status &= ~TS_COMPAT;
#endif

-	user_enter();
+	user_enter_irqoff();
}

#define SYSCALL_EXIT_WORK_FLAGS	\
@ -33,7 +33,7 @@
|
||||
.endif
|
||||
|
||||
call \func
|
||||
jmp restore
|
||||
jmp .L_restore
|
||||
_ASM_NOKPROBE(\name)
|
||||
.endm
|
||||
|
||||
@ -54,7 +54,7 @@
|
||||
#if defined(CONFIG_TRACE_IRQFLAGS) \
|
||||
|| defined(CONFIG_DEBUG_LOCK_ALLOC) \
|
||||
|| defined(CONFIG_PREEMPT)
|
||||
restore:
|
||||
.L_restore:
|
||||
popq %r11
|
||||
popq %r10
|
||||
popq %r9
|
||||
@ -66,5 +66,5 @@ restore:
|
||||
popq %rdi
|
||||
popq %rbp
|
||||
ret
|
||||
_ASM_NOKPROBE(restore)
|
||||
_ASM_NOKPROBE(.L_restore)
|
||||
#endif
|
||||
|
@ -134,7 +134,7 @@ VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
|
||||
override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
|
||||
|
||||
targets += vdso32/vdso32.lds
|
||||
targets += vdso32/note.o vdso32/vclock_gettime.o vdso32/system_call.o
|
||||
targets += vdso32/note.o vdso32/system_call.o vdso32/sigreturn.o
|
||||
targets += vdso32/vclock_gettime.o
|
||||
|
||||
KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS)) -DBUILD_VDSO
|
||||
@ -156,7 +156,8 @@ $(obj)/vdso32.so.dbg: FORCE \
|
||||
$(obj)/vdso32/vdso32.lds \
|
||||
$(obj)/vdso32/vclock_gettime.o \
|
||||
$(obj)/vdso32/note.o \
|
||||
$(obj)/vdso32/system_call.o
|
||||
$(obj)/vdso32/system_call.o \
|
||||
$(obj)/vdso32/sigreturn.o
|
||||
$(call if_changed,vdso)
|
||||
|
||||
#
|
||||
|
@ -1,11 +1,3 @@
|
||||
/*
|
||||
* Common code for the sigreturn entry points in vDSO images.
|
||||
* So far this code is the same for both int80 and sysenter versions.
|
||||
* This file is #include'd by int80.S et al to define them first thing.
|
||||
* The kernel assumes that the addresses of these routines are constant
|
||||
* for all vDSO implementations.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/unistd_32.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
|
@ -2,16 +2,11 @@
|
||||
* AT_SYSINFO entry point
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/dwarf2.h>
|
||||
#include <asm/cpufeatures.h>
|
||||
#include <asm/alternative-asm.h>
|
||||
|
||||
/*
|
||||
* First get the common code for the sigreturn entry points.
|
||||
* This must come first.
|
||||
*/
|
||||
#include "sigreturn.S"
|
||||
|
||||
.text
|
||||
.globl __kernel_vsyscall
|
||||
.type __kernel_vsyscall,@function
|
||||
|
@ -2319,7 +2319,7 @@ void
|
||||
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
|
||||
{
|
||||
struct stack_frame frame;
|
||||
const void __user *fp;
|
||||
const unsigned long __user *fp;
|
||||
|
||||
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
|
||||
/* TODO: We don't support guest os callchain now */
|
||||
@ -2332,7 +2332,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
|
||||
if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
|
||||
return;
|
||||
|
||||
fp = (void __user *)regs->bp;
|
||||
fp = (unsigned long __user *)regs->bp;
|
||||
|
||||
perf_callchain_store(entry, regs->ip);
|
||||
|
||||
@ -2345,16 +2345,17 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
|
||||
pagefault_disable();
|
||||
while (entry->nr < entry->max_stack) {
|
||||
unsigned long bytes;
|
||||
|
||||
frame.next_frame = NULL;
|
||||
frame.return_address = 0;
|
||||
|
||||
if (!access_ok(VERIFY_READ, fp, 16))
|
||||
if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
|
||||
break;
|
||||
|
||||
bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
|
||||
bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
|
||||
if (bytes != 0)
|
||||
break;
|
||||
bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
|
||||
bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
|
||||
if (bytes != 0)
|
||||
break;
|
||||
|
||||
|
@ -1,8 +1,8 @@
|
||||
obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o cqm.o
|
||||
obj-$(CONFIG_CPU_SUP_INTEL) += ds.o knc.o
|
||||
obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o
|
||||
obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl.o
|
||||
intel-rapl-objs := rapl.o
|
||||
obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl-perf.o
|
||||
intel-rapl-perf-objs := rapl.o
|
||||
obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o
|
||||
intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o
|
||||
obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o
|
||||
|
@ -115,6 +115,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
|
||||
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
|
||||
INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
|
||||
|
||||
/*
|
||||
* When HT is off these events can only run on the bottom 4 counters
|
||||
* When HT is on, they are impacted by the HT bug and require EXCL access
|
||||
*/
|
||||
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
|
||||
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
|
||||
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
|
||||
@ -139,6 +143,10 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
|
||||
INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
|
||||
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
|
||||
|
||||
/*
|
||||
* When HT is off these events can only run on the bottom 4 counters
|
||||
* When HT is on, they are impacted by the HT bug and require EXCL access
|
||||
*/
|
||||
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
|
||||
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
|
||||
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
|
||||
@ -182,6 +190,16 @@ struct event_constraint intel_skl_event_constraints[] = {
|
||||
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
|
||||
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
|
||||
INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
|
||||
|
||||
/*
|
||||
* when HT is off, these can only run on the bottom 4 counters
|
||||
*/
|
||||
INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
|
||||
INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
|
||||
INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
|
||||
INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
|
||||
INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
|
||||
|
||||
EVENT_CONSTRAINT_END
|
||||
};
|
||||
|
||||
@ -250,6 +268,10 @@ static struct event_constraint intel_hsw_event_constraints[] = {
|
||||
/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
|
||||
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
|
||||
|
||||
/*
|
||||
* When HT is off these events can only run on the bottom 4 counters
|
||||
* When HT is on, they are impacted by the HT bug and require EXCL access
|
||||
*/
|
||||
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
|
||||
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
|
||||
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
|
||||
@ -264,6 +286,13 @@ struct event_constraint intel_bdw_event_constraints[] = {
|
||||
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
|
||||
INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
|
||||
INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
|
||||
/*
|
||||
* when HT is off, these can only run on the bottom 4 counters
|
||||
*/
|
||||
INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
|
||||
INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
|
||||
INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
|
||||
INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
|
||||
EVENT_CONSTRAINT_END
|
||||
};
|
||||
|
||||
|
@ -45,11 +45,11 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
|
||||
: "memory", "cc");
|
||||
}
|
||||
|
||||
static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
|
||||
u32 ecx_in, u32 *eax)
|
||||
static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
|
||||
u32 ecx_in, u32 *eax)
|
||||
{
|
||||
int cx, dx, si;
|
||||
u8 error;
|
||||
bool error;
|
||||
|
||||
/*
|
||||
* N.B. We do NOT need a cld after the BIOS call
|
||||
|
@ -4,8 +4,8 @@
|
||||
#include <asm/cpufeatures.h>
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
/* popcnt %edi, %eax -- redundant REX prefix for alignment */
|
||||
#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
|
||||
/* popcnt %edi, %eax */
|
||||
#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc7"
|
||||
/* popcnt %rdi, %rax */
|
||||
#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
|
||||
#define REG_IN "D"
|
||||
@ -17,19 +17,15 @@
|
||||
#define REG_OUT "a"
|
||||
#endif
|
||||
|
||||
/*
|
||||
* __sw_hweightXX are called from within the alternatives below
|
||||
* and callee-clobbered registers need to be taken care of. See
|
||||
* ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
|
||||
* compiler switches.
|
||||
*/
|
||||
#define __HAVE_ARCH_SW_HWEIGHT
|
||||
|
||||
static __always_inline unsigned int __arch_hweight32(unsigned int w)
|
||||
{
|
||||
unsigned int res = 0;
|
||||
unsigned int res;
|
||||
|
||||
asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT)
|
||||
: "="REG_OUT (res)
|
||||
: REG_IN (w));
|
||||
: "="REG_OUT (res)
|
||||
: REG_IN (w));
|
||||
|
||||
return res;
|
||||
}
|
||||
@ -53,11 +49,11 @@ static inline unsigned long __arch_hweight64(__u64 w)
|
||||
#else
|
||||
static __always_inline unsigned long __arch_hweight64(__u64 w)
|
||||
{
|
||||
unsigned long res = 0;
|
||||
unsigned long res;
|
||||
|
||||
asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
|
||||
: "="REG_OUT (res)
|
||||
: REG_IN (w));
|
||||
: "="REG_OUT (res)
|
||||
: REG_IN (w));
|
||||
|
||||
return res;
|
||||
}
|
||||
|
@ -25,8 +25,6 @@
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/nops.h>
|
||||
|
||||
#define RDRAND_RETRY_LOOPS 10
|
||||
|
||||
@ -40,97 +38,91 @@
|
||||
# define RDSEED_LONG RDSEED_INT
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ARCH_RANDOM
|
||||
/* Unconditional execution of RDRAND and RDSEED */
|
||||
|
||||
/* Instead of arch_get_random_long() when alternatives haven't run. */
|
||||
static inline int rdrand_long(unsigned long *v)
|
||||
static inline bool rdrand_long(unsigned long *v)
|
||||
{
|
||||
int ok;
|
||||
asm volatile("1: " RDRAND_LONG "\n\t"
|
||||
"jc 2f\n\t"
|
||||
"decl %0\n\t"
|
||||
"jnz 1b\n\t"
|
||||
"2:"
|
||||
: "=r" (ok), "=a" (*v)
|
||||
: "0" (RDRAND_RETRY_LOOPS));
|
||||
return ok;
|
||||
bool ok;
|
||||
unsigned int retry = RDRAND_RETRY_LOOPS;
|
||||
do {
|
||||
asm volatile(RDRAND_LONG "\n\t"
|
||||
CC_SET(c)
|
||||
: CC_OUT(c) (ok), "=a" (*v));
|
||||
if (ok)
|
||||
return true;
|
||||
} while (--retry);
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool rdrand_int(unsigned int *v)
|
||||
{
|
||||
bool ok;
|
||||
unsigned int retry = RDRAND_RETRY_LOOPS;
|
||||
do {
|
||||
asm volatile(RDRAND_INT "\n\t"
|
||||
CC_SET(c)
|
||||
: CC_OUT(c) (ok), "=a" (*v));
|
||||
if (ok)
|
||||
return true;
|
||||
} while (--retry);
|
||||
return false;
|
||||
}
|
||||
|
||||
/* A single attempt at RDSEED */
|
||||
static inline bool rdseed_long(unsigned long *v)
|
||||
{
|
||||
unsigned char ok;
|
||||
bool ok;
|
||||
asm volatile(RDSEED_LONG "\n\t"
|
||||
"setc %0"
|
||||
: "=qm" (ok), "=a" (*v));
|
||||
CC_SET(c)
|
||||
: CC_OUT(c) (ok), "=a" (*v));
|
||||
return ok;
|
||||
}
|
||||
|
||||
#define GET_RANDOM(name, type, rdrand, nop) \
|
||||
static inline int name(type *v) \
|
||||
{ \
|
||||
int ok; \
|
||||
alternative_io("movl $0, %0\n\t" \
|
||||
nop, \
|
||||
"\n1: " rdrand "\n\t" \
|
||||
"jc 2f\n\t" \
|
||||
"decl %0\n\t" \
|
||||
"jnz 1b\n\t" \
|
||||
"2:", \
|
||||
X86_FEATURE_RDRAND, \
|
||||
ASM_OUTPUT2("=r" (ok), "=a" (*v)), \
|
||||
"0" (RDRAND_RETRY_LOOPS)); \
|
||||
return ok; \
|
||||
static inline bool rdseed_int(unsigned int *v)
|
||||
{
|
||||
bool ok;
|
||||
asm volatile(RDSEED_INT "\n\t"
|
||||
CC_SET(c)
|
||||
: CC_OUT(c) (ok), "=a" (*v));
|
||||
return ok;
|
||||
}
|
||||
|
||||
#define GET_SEED(name, type, rdseed, nop) \
|
||||
static inline int name(type *v) \
|
||||
{ \
|
||||
unsigned char ok; \
|
||||
alternative_io("movb $0, %0\n\t" \
|
||||
nop, \
|
||||
rdseed "\n\t" \
|
||||
"setc %0", \
|
||||
X86_FEATURE_RDSEED, \
|
||||
ASM_OUTPUT2("=q" (ok), "=a" (*v))); \
|
||||
return ok; \
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
|
||||
GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP5);
|
||||
GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP4);
|
||||
|
||||
GET_SEED(arch_get_random_seed_long, unsigned long, RDSEED_LONG, ASM_NOP5);
|
||||
GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4);
|
||||
|
||||
#else
|
||||
|
||||
GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP3);
|
||||
GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3);
|
||||
|
||||
GET_SEED(arch_get_random_seed_long, unsigned long, RDSEED_LONG, ASM_NOP4);
|
||||
GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4);
|
||||
|
||||
#endif /* CONFIG_X86_64 */
|
||||
|
||||
/* Conditional execution based on CPU type */
|
||||
#define arch_has_random() static_cpu_has(X86_FEATURE_RDRAND)
|
||||
#define arch_has_random_seed() static_cpu_has(X86_FEATURE_RDSEED)
|
||||
|
||||
#else
|
||||
/*
|
||||
* These are the generic interfaces; they must not be declared if the
|
||||
* stubs in <linux/random.h> are to be invoked,
|
||||
* i.e. CONFIG_ARCH_RANDOM is not defined.
|
||||
*/
|
||||
#ifdef CONFIG_ARCH_RANDOM
|
||||
|
||||
static inline int rdrand_long(unsigned long *v)
|
||||
static inline bool arch_get_random_long(unsigned long *v)
|
||||
{
|
||||
return 0;
|
||||
return arch_has_random() ? rdrand_long(v) : false;
|
||||
}
|
||||
|
||||
static inline bool rdseed_long(unsigned long *v)
|
||||
static inline bool arch_get_random_int(unsigned int *v)
|
||||
{
|
||||
return 0;
|
||||
return arch_has_random() ? rdrand_int(v) : false;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_ARCH_RANDOM */
|
||||
static inline bool arch_get_random_seed_long(unsigned long *v)
|
||||
{
|
||||
return arch_has_random_seed() ? rdseed_long(v) : false;
|
||||
}
|
||||
|
||||
static inline bool arch_get_random_seed_int(unsigned int *v)
|
||||
{
|
||||
return arch_has_random_seed() ? rdseed_int(v) : false;
|
||||
}
|
||||
|
||||
extern void x86_init_rdrand(struct cpuinfo_x86 *c);
|
||||
|
||||
#else /* !CONFIG_ARCH_RANDOM */
|
||||
|
||||
static inline void x86_init_rdrand(struct cpuinfo_x86 *c) { }
|
||||
|
||||
#endif /* !CONFIG_ARCH_RANDOM */
|
||||
|
||||
#endif /* ASM_X86_ARCHRANDOM_H */
|
||||
|
@@ -42,6 +42,18 @@
#define _ASM_SI		__ASM_REG(si)
#define _ASM_DI		__ASM_REG(di)

/*
 * Macros to generate condition code outputs from inline assembly,
 * The output operand must be type "bool".
 */
#ifdef __GCC_ASM_FLAG_OUTPUTS__
# define CC_SET(c) "\n\t/* output condition code " #c "*/\n"
# define CC_OUT(c) "=@cc" #c
#else
# define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n"
# define CC_OUT(c) [_cc_ ## c] "=qm"
#endif

/* Exception table entry */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE_HANDLE(from, to, handler)	\
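For orientation, the new macros let a flag-output-capable compiler hand the condition code straight to C instead of going through a setcc instruction. A minimal usage sketch, modelled on the variable_test_bit() conversions elsewhere in this merge (illustrative only, not part of the patch):

	static inline bool bit_is_set(unsigned long word, int nr)
	{
		bool ret;

		/* CC_OUT(c) is "=@ccc" with flag outputs, a "=qm" + setc pair otherwise */
		asm volatile("bt %2,%1"
			     CC_SET(c)
			     : CC_OUT(c) (ret)
			     : "m" (word), "Ir" (nr));
		return ret;
	}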
@ -75,9 +75,9 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
|
||||
* true if the result is zero, or false for all
|
||||
* other cases.
|
||||
*/
|
||||
static __always_inline int atomic_sub_and_test(int i, atomic_t *v)
|
||||
static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
|
||||
{
|
||||
GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
|
||||
GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -112,9 +112,9 @@ static __always_inline void atomic_dec(atomic_t *v)
|
||||
* returns true if the result is 0, or false for all other
|
||||
* cases.
|
||||
*/
|
||||
static __always_inline int atomic_dec_and_test(atomic_t *v)
|
||||
static __always_inline bool atomic_dec_and_test(atomic_t *v)
|
||||
{
|
||||
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
|
||||
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -125,9 +125,9 @@ static __always_inline int atomic_dec_and_test(atomic_t *v)
|
||||
* and returns true if the result is zero, or false for all
|
||||
* other cases.
|
||||
*/
|
||||
static __always_inline int atomic_inc_and_test(atomic_t *v)
|
||||
static __always_inline bool atomic_inc_and_test(atomic_t *v)
|
||||
{
|
||||
GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
|
||||
GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -139,9 +139,9 @@ static __always_inline int atomic_inc_and_test(atomic_t *v)
|
||||
* if the result is negative, or false when
|
||||
* result is greater than or equal to zero.
|
||||
*/
|
||||
static __always_inline int atomic_add_negative(int i, atomic_t *v)
|
||||
static __always_inline bool atomic_add_negative(int i, atomic_t *v)
|
||||
{
|
||||
GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
|
||||
GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -70,9 +70,9 @@ static inline void atomic64_sub(long i, atomic64_t *v)
|
||||
* true if the result is zero, or false for all
|
||||
* other cases.
|
||||
*/
|
||||
static inline int atomic64_sub_and_test(long i, atomic64_t *v)
|
||||
static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
|
||||
{
|
||||
GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
|
||||
GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -109,9 +109,9 @@ static __always_inline void atomic64_dec(atomic64_t *v)
|
||||
* returns true if the result is 0, or false for all other
|
||||
* cases.
|
||||
*/
|
||||
static inline int atomic64_dec_and_test(atomic64_t *v)
|
||||
static inline bool atomic64_dec_and_test(atomic64_t *v)
|
||||
{
|
||||
GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
|
||||
GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -122,9 +122,9 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
|
||||
* and returns true if the result is zero, or false for all
|
||||
* other cases.
|
||||
*/
|
||||
static inline int atomic64_inc_and_test(atomic64_t *v)
|
||||
static inline bool atomic64_inc_and_test(atomic64_t *v)
|
||||
{
|
||||
GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
|
||||
GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -136,9 +136,9 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
|
||||
* if the result is negative, or false when
|
||||
* result is greater than or equal to zero.
|
||||
*/
|
||||
static inline int atomic64_add_negative(long i, atomic64_t *v)
|
||||
static inline bool atomic64_add_negative(long i, atomic64_t *v)
|
||||
{
|
||||
GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
|
||||
GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -180,7 +180,7 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
|
||||
* Atomically adds @a to @v, so long as it was not @u.
|
||||
* Returns the old value of @v.
|
||||
*/
|
||||
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
|
||||
static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
|
||||
{
|
||||
long c, old;
|
||||
c = atomic64_read(v);
|
||||
|
@ -201,9 +201,9 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
|
||||
* This operation is atomic and cannot be reordered.
|
||||
* It also implies a memory barrier.
|
||||
*/
|
||||
static __always_inline int test_and_set_bit(long nr, volatile unsigned long *addr)
|
||||
static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
|
||||
{
|
||||
GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
|
||||
GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -213,7 +213,7 @@ static __always_inline int test_and_set_bit(long nr, volatile unsigned long *add
|
||||
*
|
||||
* This is the same as test_and_set_bit on x86.
|
||||
*/
|
||||
static __always_inline int
|
||||
static __always_inline bool
|
||||
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
|
||||
{
|
||||
return test_and_set_bit(nr, addr);
|
||||
@ -228,13 +228,13 @@ test_and_set_bit_lock(long nr, volatile unsigned long *addr)
|
||||
* If two examples of this operation race, one can appear to succeed
|
||||
* but actually fail. You must protect multiple accesses with a lock.
|
||||
*/
|
||||
static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
|
||||
static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
|
||||
{
|
||||
int oldbit;
|
||||
bool oldbit;
|
||||
|
||||
asm("bts %2,%1\n\t"
|
||||
"sbb %0,%0"
|
||||
: "=r" (oldbit), ADDR
|
||||
CC_SET(c)
|
||||
: CC_OUT(c) (oldbit), ADDR
|
||||
: "Ir" (nr));
|
||||
return oldbit;
|
||||
}
|
||||
@ -247,9 +247,9 @@ static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *a
|
||||
* This operation is atomic and cannot be reordered.
|
||||
* It also implies a memory barrier.
|
||||
*/
|
||||
static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
|
||||
static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
|
||||
{
|
||||
GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
|
||||
GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -268,25 +268,25 @@ static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *a
|
||||
* accessed from a hypervisor on the same CPU if running in a VM: don't change
|
||||
* this without also updating arch/x86/kernel/kvm.c
|
||||
*/
|
||||
static __always_inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
|
||||
static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
|
||||
{
|
||||
int oldbit;
|
||||
bool oldbit;
|
||||
|
||||
asm volatile("btr %2,%1\n\t"
|
||||
"sbb %0,%0"
|
||||
: "=r" (oldbit), ADDR
|
||||
CC_SET(c)
|
||||
: CC_OUT(c) (oldbit), ADDR
|
||||
: "Ir" (nr));
|
||||
return oldbit;
|
||||
}
|
||||
|
||||
/* WARNING: non atomic and it can be reordered! */
|
||||
static __always_inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
|
||||
static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
|
||||
{
|
||||
int oldbit;
|
||||
bool oldbit;
|
||||
|
||||
asm volatile("btc %2,%1\n\t"
|
||||
"sbb %0,%0"
|
||||
: "=r" (oldbit), ADDR
|
||||
CC_SET(c)
|
||||
: CC_OUT(c) (oldbit), ADDR
|
||||
: "Ir" (nr) : "memory");
|
||||
|
||||
return oldbit;
|
||||
@ -300,24 +300,24 @@ static __always_inline int __test_and_change_bit(long nr, volatile unsigned long
|
||||
* This operation is atomic and cannot be reordered.
|
||||
* It also implies a memory barrier.
|
||||
*/
|
||||
static __always_inline int test_and_change_bit(long nr, volatile unsigned long *addr)
|
||||
static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
|
||||
{
|
||||
GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
|
||||
GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
|
||||
}
|
||||
|
||||
static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
|
||||
static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
return ((1UL << (nr & (BITS_PER_LONG-1))) &
|
||||
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
|
||||
}
|
||||
|
||||
static __always_inline int variable_test_bit(long nr, volatile const unsigned long *addr)
|
||||
static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
|
||||
{
|
||||
int oldbit;
|
||||
bool oldbit;
|
||||
|
||||
asm volatile("bt %2,%1\n\t"
|
||||
"sbb %0,%0"
|
||||
: "=r" (oldbit)
|
||||
CC_SET(c)
|
||||
: CC_OUT(c) (oldbit)
|
||||
: "m" (*(unsigned long *)addr), "Ir" (nr));
|
||||
|
||||
return oldbit;
|
||||
@ -329,7 +329,7 @@ static __always_inline int variable_test_bit(long nr, volatile const unsigned lo
|
||||
* @nr: bit number to test
|
||||
* @addr: Address to start counting from
|
||||
*/
|
||||
static int test_bit(int nr, const volatile unsigned long *addr);
|
||||
static bool test_bit(int nr, const volatile unsigned long *addr);
|
||||
#endif
|
||||
|
||||
#define test_bit(nr, addr) \
|
||||
|
@ -40,6 +40,7 @@ typedef s32 compat_long_t;
|
||||
typedef s64 __attribute__((aligned(4))) compat_s64;
|
||||
typedef u32 compat_uint_t;
|
||||
typedef u32 compat_ulong_t;
|
||||
typedef u32 compat_u32;
|
||||
typedef u64 __attribute__((aligned(4))) compat_u64;
|
||||
typedef u32 compat_uptr_t;
|
||||
|
||||
@ -181,6 +182,16 @@ typedef struct compat_siginfo {
|
||||
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
|
||||
struct {
|
||||
unsigned int _addr; /* faulting insn/memory ref. */
|
||||
short int _addr_lsb; /* Valid LSB of the reported address. */
|
||||
union {
|
||||
/* used when si_code=SEGV_BNDERR */
|
||||
struct {
|
||||
compat_uptr_t _lower;
|
||||
compat_uptr_t _upper;
|
||||
} _addr_bnd;
|
||||
/* used when si_code=SEGV_PKUERR */
|
||||
compat_u32 _pkey;
|
||||
};
|
||||
} _sigfault;
|
||||
|
||||
/* SIGPOLL */
|
||||
|
@ -50,9 +50,9 @@ static inline void local_sub(long i, local_t *l)
|
||||
* true if the result is zero, or false for all
|
||||
* other cases.
|
||||
*/
|
||||
static inline int local_sub_and_test(long i, local_t *l)
|
||||
static inline bool local_sub_and_test(long i, local_t *l)
|
||||
{
|
||||
GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
|
||||
GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", e);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -63,9 +63,9 @@ static inline int local_sub_and_test(long i, local_t *l)
|
||||
* returns true if the result is 0, or false for all other
|
||||
* cases.
|
||||
*/
|
||||
static inline int local_dec_and_test(local_t *l)
|
||||
static inline bool local_dec_and_test(local_t *l)
|
||||
{
|
||||
GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
|
||||
GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", e);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -76,9 +76,9 @@ static inline int local_dec_and_test(local_t *l)
|
||||
* and returns true if the result is zero, or false for all
|
||||
* other cases.
|
||||
*/
|
||||
static inline int local_inc_and_test(local_t *l)
|
||||
static inline bool local_inc_and_test(local_t *l)
|
||||
{
|
||||
GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
|
||||
GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", e);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -90,9 +90,9 @@ static inline int local_inc_and_test(local_t *l)
|
||||
* if the result is negative, or false when
|
||||
* result is greater than or equal to zero.
|
||||
*/
|
||||
static inline int local_add_negative(long i, local_t *l)
|
||||
static inline bool local_add_negative(long i, local_t *l)
|
||||
{
|
||||
GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
|
||||
GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", s);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -510,14 +510,15 @@ do { \
|
||||
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
|
||||
#define x86_test_and_clear_bit_percpu(bit, var) \
|
||||
({ \
|
||||
int old__; \
|
||||
asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
|
||||
: "=r" (old__), "+m" (var) \
|
||||
bool old__; \
|
||||
asm volatile("btr %2,"__percpu_arg(1)"\n\t" \
|
||||
CC_SET(c) \
|
||||
: CC_OUT(c) (old__), "+m" (var) \
|
||||
: "dIr" (bit)); \
|
||||
old__; \
|
||||
})
|
||||
|
||||
static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
|
||||
static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
|
||||
const unsigned long __percpu *addr)
|
||||
{
|
||||
unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;
|
||||
@ -529,14 +530,14 @@ static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline int x86_this_cpu_variable_test_bit(int nr,
|
||||
static inline bool x86_this_cpu_variable_test_bit(int nr,
|
||||
const unsigned long __percpu *addr)
|
||||
{
|
||||
int oldbit;
|
||||
bool oldbit;
|
||||
|
||||
asm volatile("bt "__percpu_arg(2)",%1\n\t"
|
||||
"sbb %0,%0"
|
||||
: "=r" (oldbit)
|
||||
CC_SET(c)
|
||||
: CC_OUT(c) (oldbit)
|
||||
: "m" (*(unsigned long *)addr), "Ir" (nr));
|
||||
|
||||
return oldbit;
|
||||
|
@@ -81,7 +81,7 @@ static __always_inline void __preempt_count_sub(int val)
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
-	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
+	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
}

/*
@@ -1,11 +1,13 @@
#ifndef _ASM_X86_RMWcc
#define _ASM_X86_RMWcc

-#ifdef CC_HAVE_ASM_GOTO
+#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO)

/* Use asm goto */

#define __GEN_RMWcc(fullop, var, cc, ...)				\
do {									\
-	asm_volatile_goto (fullop "; j" cc " %l[cc_label]"		\
+	asm_volatile_goto (fullop "; j" #cc " %l[cc_label]"		\
			: : "m" (var), ## __VA_ARGS__			\
			: "memory" : cc_label);				\
	return 0;							\
@@ -19,15 +21,17 @@ cc_label:						\
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)			\
	__GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))

-#else /* !CC_HAVE_ASM_GOTO */
+#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */

/* Use flags output or a set instruction */

#define __GEN_RMWcc(fullop, var, cc, ...)				\
do {									\
-	char c;								\
-	asm volatile (fullop "; set" cc " %1"				\
-			: "+m" (var), "=qm" (c)			\
+	bool c;								\
+	asm volatile (fullop ";" CC_SET(cc)				\
+			: "+m" (var), CC_OUT(cc) (c)			\
			: __VA_ARGS__ : "memory");			\
-	return c != 0;							\
+	return c;							\
} while (0)

#define GEN_UNARY_RMWcc(op, var, arg0, cc)				\
@@ -36,6 +40,6 @@ do {							\
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)			\
	__GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))

-#endif /* CC_HAVE_ASM_GOTO */
+#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */

#endif /* _ASM_X86_RMWcc */
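To see what the rewritten helper produces, here is a rough expansion sketch (not from this commit) for a caller such as atomic_dec_and_test() on a compiler that supports flag outputs, where the condition code is now passed as a bare token (e, c, s) instead of a quoted string:

	static __always_inline bool atomic_dec_and_test(atomic_t *v)
	{
		/* GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e)
		 * roughly expands to: */
		bool c;

		asm volatile (LOCK_PREFIX "decl %0"
			      : "+m" (v->counter), "=@cce" (c)
			      : : "memory");
		return c;
	}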
@ -77,7 +77,7 @@ static inline void __down_read(struct rw_semaphore *sem)
|
||||
/*
|
||||
* trylock for reading -- returns 1 if successful, 0 if contention
|
||||
*/
|
||||
static inline int __down_read_trylock(struct rw_semaphore *sem)
|
||||
static inline bool __down_read_trylock(struct rw_semaphore *sem)
|
||||
{
|
||||
long result, tmp;
|
||||
asm volatile("# beginning __down_read_trylock\n\t"
|
||||
@ -93,7 +93,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
|
||||
: "+m" (sem->count), "=&a" (result), "=&r" (tmp)
|
||||
: "i" (RWSEM_ACTIVE_READ_BIAS)
|
||||
: "memory", "cc");
|
||||
return result >= 0 ? 1 : 0;
|
||||
return result >= 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -134,9 +134,10 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
|
||||
/*
|
||||
* trylock for writing -- returns 1 if successful, 0 if contention
|
||||
*/
|
||||
static inline int __down_write_trylock(struct rw_semaphore *sem)
|
||||
static inline bool __down_write_trylock(struct rw_semaphore *sem)
|
||||
{
|
||||
long result, tmp;
|
||||
bool result;
|
||||
long tmp0, tmp1;
|
||||
asm volatile("# beginning __down_write_trylock\n\t"
|
||||
" mov %0,%1\n\t"
|
||||
"1:\n\t"
|
||||
@ -144,14 +145,14 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
|
||||
/* was the active mask 0 before? */
|
||||
" jnz 2f\n\t"
|
||||
" mov %1,%2\n\t"
|
||||
" add %3,%2\n\t"
|
||||
" add %4,%2\n\t"
|
||||
LOCK_PREFIX " cmpxchg %2,%0\n\t"
|
||||
" jnz 1b\n\t"
|
||||
"2:\n\t"
|
||||
" sete %b1\n\t"
|
||||
" movzbl %b1, %k1\n\t"
|
||||
CC_SET(e)
|
||||
"# ending __down_write_trylock\n\t"
|
||||
: "+m" (sem->count), "=&a" (result), "=&r" (tmp)
|
||||
: "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1),
|
||||
CC_OUT(e) (result)
|
||||
: "er" (RWSEM_ACTIVE_WRITE_BIAS)
|
||||
: "memory", "cc");
|
||||
return result;
|
||||
|
@ -81,9 +81,9 @@ static inline int __const_sigismember(sigset_t *set, int _sig)
|
||||
|
||||
static inline int __gen_sigismember(sigset_t *set, int _sig)
|
||||
{
|
||||
int ret;
|
||||
asm("btl %2,%1\n\tsbbl %0,%0"
|
||||
: "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
|
||||
unsigned char ret;
|
||||
asm("btl %2,%1\n\tsetc %0"
|
||||
: "=qm"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -79,10 +79,10 @@ static inline void sync_change_bit(long nr, volatile unsigned long *addr)
|
||||
*/
|
||||
static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
|
||||
{
|
||||
int oldbit;
|
||||
unsigned char oldbit;
|
||||
|
||||
asm volatile("lock; bts %2,%1\n\tsbbl %0,%0"
|
||||
: "=r" (oldbit), "+m" (ADDR)
|
||||
asm volatile("lock; bts %2,%1\n\tsetc %0"
|
||||
: "=qm" (oldbit), "+m" (ADDR)
|
||||
: "Ir" (nr) : "memory");
|
||||
return oldbit;
|
||||
}
|
||||
@ -97,10 +97,10 @@ static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
|
||||
*/
|
||||
static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
|
||||
{
|
||||
int oldbit;
|
||||
unsigned char oldbit;
|
||||
|
||||
asm volatile("lock; btr %2,%1\n\tsbbl %0,%0"
|
||||
: "=r" (oldbit), "+m" (ADDR)
|
||||
asm volatile("lock; btr %2,%1\n\tsetc %0"
|
||||
: "=qm" (oldbit), "+m" (ADDR)
|
||||
: "Ir" (nr) : "memory");
|
||||
return oldbit;
|
||||
}
|
||||
@ -115,10 +115,10 @@ static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
|
||||
*/
|
||||
static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr)
|
||||
{
|
||||
int oldbit;
|
||||
unsigned char oldbit;
|
||||
|
||||
asm volatile("lock; btc %2,%1\n\tsbbl %0,%0"
|
||||
: "=r" (oldbit), "+m" (ADDR)
|
||||
asm volatile("lock; btc %2,%1\n\tsetc %0"
|
||||
: "=qm" (oldbit), "+m" (ADDR)
|
||||
: "Ir" (nr) : "memory");
|
||||
return oldbit;
|
||||
}
|
||||
|
@@ -71,8 +71,8 @@ int amd_cache_northbridges(void)
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

-	if (i == 0)
-		return 0;
+	if (!i)
+		return -ENODEV;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
@ -39,9 +39,9 @@ __setup("nordrand", x86_rdrand_setup);
|
||||
*/
|
||||
#define SANITY_CHECK_LOOPS 8
|
||||
|
||||
#ifdef CONFIG_ARCH_RANDOM
|
||||
void x86_init_rdrand(struct cpuinfo_x86 *c)
|
||||
{
|
||||
#ifdef CONFIG_ARCH_RANDOM
|
||||
unsigned long tmp;
|
||||
int i;
|
||||
|
||||
@ -55,5 +55,5 @@ void x86_init_rdrand(struct cpuinfo_x86 *c)
|
||||
return;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
@ -42,3 +42,5 @@ EXPORT_SYMBOL(empty_zero_page);
|
||||
EXPORT_SYMBOL(___preempt_schedule);
|
||||
EXPORT_SYMBOL(___preempt_schedule_notrace);
|
||||
#endif
|
||||
|
||||
EXPORT_SYMBOL(__sw_hweight32);
|
||||
|
@ -1,11 +1,104 @@
|
||||
#include <linux/compat.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
/*
|
||||
* The compat_siginfo_t structure and handing code is very easy
|
||||
* to break in several ways. It must always be updated when new
|
||||
* updates are made to the main siginfo_t, and
|
||||
* copy_siginfo_to_user32() must be updated when the
|
||||
* (arch-independent) copy_siginfo_to_user() is updated.
|
||||
*
|
||||
* It is also easy to put a new member in the compat_siginfo_t
|
||||
* which has implicit alignment which can move internal structure
|
||||
* alignment around breaking the ABI. This can happen if you,
|
||||
* for instance, put a plain 64-bit value in there.
|
||||
*/
|
||||
static inline void signal_compat_build_tests(void)
|
||||
{
|
||||
int _sifields_offset = offsetof(compat_siginfo_t, _sifields);
|
||||
|
||||
/*
|
||||
* If adding a new si_code, there is probably new data in
|
||||
* the siginfo. Make sure folks bumping the si_code
|
||||
* limits also have to look at this code. Make sure any
|
||||
* new fields are handled in copy_siginfo_to_user32()!
|
||||
*/
|
||||
BUILD_BUG_ON(NSIGILL != 8);
|
||||
BUILD_BUG_ON(NSIGFPE != 8);
|
||||
BUILD_BUG_ON(NSIGSEGV != 4);
|
||||
BUILD_BUG_ON(NSIGBUS != 5);
|
||||
BUILD_BUG_ON(NSIGTRAP != 4);
|
||||
BUILD_BUG_ON(NSIGCHLD != 6);
|
||||
BUILD_BUG_ON(NSIGSYS != 1);
|
||||
|
||||
/* This is part of the ABI and can never change in size: */
|
||||
BUILD_BUG_ON(sizeof(compat_siginfo_t) != 128);
|
||||
/*
|
||||
* The offsets of all the (unioned) si_fields are fixed
|
||||
* in the ABI, of course. Make sure none of them ever
|
||||
* move and are always at the beginning:
|
||||
*/
|
||||
BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields) != 3 * sizeof(int));
|
||||
#define CHECK_CSI_OFFSET(name) BUILD_BUG_ON(_sifields_offset != offsetof(compat_siginfo_t, _sifields.name))
|
||||
|
||||
/*
|
||||
* Ensure that the size of each si_field never changes.
|
||||
* If it does, it is a sign that the
|
||||
* copy_siginfo_to_user32() code below needs to updated
|
||||
* along with the size in the CHECK_SI_SIZE().
|
||||
*
|
||||
* We repeat this check for both the generic and compat
|
||||
* siginfos.
|
||||
*
|
||||
* Note: it is OK for these to grow as long as the whole
|
||||
* structure stays within the padding size (checked
|
||||
* above).
|
||||
*/
|
||||
#define CHECK_CSI_SIZE(name, size) BUILD_BUG_ON(size != sizeof(((compat_siginfo_t *)0)->_sifields.name))
|
||||
#define CHECK_SI_SIZE(name, size) BUILD_BUG_ON(size != sizeof(((siginfo_t *)0)->_sifields.name))
|
||||
|
||||
CHECK_CSI_OFFSET(_kill);
|
||||
CHECK_CSI_SIZE (_kill, 2*sizeof(int));
|
||||
CHECK_SI_SIZE (_kill, 2*sizeof(int));
|
||||
|
||||
CHECK_CSI_OFFSET(_timer);
|
||||
CHECK_CSI_SIZE (_timer, 5*sizeof(int));
|
||||
CHECK_SI_SIZE (_timer, 6*sizeof(int));
|
||||
|
||||
CHECK_CSI_OFFSET(_rt);
|
||||
CHECK_CSI_SIZE (_rt, 3*sizeof(int));
|
||||
CHECK_SI_SIZE (_rt, 4*sizeof(int));
|
||||
|
||||
CHECK_CSI_OFFSET(_sigchld);
|
||||
CHECK_CSI_SIZE (_sigchld, 5*sizeof(int));
|
||||
CHECK_SI_SIZE (_sigchld, 8*sizeof(int));
|
||||
|
||||
CHECK_CSI_OFFSET(_sigchld_x32);
|
||||
CHECK_CSI_SIZE (_sigchld_x32, 7*sizeof(int));
|
||||
/* no _sigchld_x32 in the generic siginfo_t */
|
||||
|
||||
CHECK_CSI_OFFSET(_sigfault);
|
||||
CHECK_CSI_SIZE (_sigfault, 4*sizeof(int));
|
||||
CHECK_SI_SIZE (_sigfault, 8*sizeof(int));
|
||||
|
||||
CHECK_CSI_OFFSET(_sigpoll);
|
||||
CHECK_CSI_SIZE (_sigpoll, 2*sizeof(int));
|
||||
CHECK_SI_SIZE (_sigpoll, 4*sizeof(int));
|
||||
|
||||
CHECK_CSI_OFFSET(_sigsys);
|
||||
CHECK_CSI_SIZE (_sigsys, 3*sizeof(int));
|
||||
CHECK_SI_SIZE (_sigsys, 4*sizeof(int));
|
||||
|
||||
/* any new si_fields should be added here */
|
||||
}
|
||||
|
||||
int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
|
||||
{
|
||||
int err = 0;
|
||||
bool ia32 = test_thread_flag(TIF_IA32);
|
||||
|
||||
signal_compat_build_tests();
|
||||
|
||||
if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
|
||||
return -EFAULT;
|
||||
|
||||
@ -32,6 +125,21 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
|
||||
&to->_sifields._pad[0]);
|
||||
switch (from->si_code >> 16) {
|
||||
case __SI_FAULT >> 16:
|
||||
if (from->si_signo == SIGBUS &&
|
||||
(from->si_code == BUS_MCEERR_AR ||
|
||||
from->si_code == BUS_MCEERR_AO))
|
||||
put_user_ex(from->si_addr_lsb, &to->si_addr_lsb);
|
||||
|
||||
if (from->si_signo == SIGSEGV) {
|
||||
if (from->si_code == SEGV_BNDERR) {
|
||||
compat_uptr_t lower = (unsigned long)&to->si_lower;
|
||||
compat_uptr_t upper = (unsigned long)&to->si_upper;
|
||||
put_user_ex(lower, &to->si_lower);
|
||||
put_user_ex(upper, &to->si_upper);
|
||||
}
|
||||
if (from->si_code == SEGV_PKUERR)
|
||||
put_user_ex(from->si_pkey, &to->si_pkey);
|
||||
}
|
||||
break;
|
||||
case __SI_SYS >> 16:
|
||||
put_user_ex(from->si_syscall, &to->si_syscall);
|
||||
|
@@ -440,10 +440,7 @@ static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
-	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
-		:"=r" (nr)
-		:"m" (*bitmap), "r" (nr));
-	return nr;
+	return test_bit(nr, bitmap->__map);
}

#define val_byte(val, n) (((__u8 *)&val)[n])
@ -44,6 +44,9 @@ EXPORT_SYMBOL(clear_page);
|
||||
|
||||
EXPORT_SYMBOL(csum_partial);
|
||||
|
||||
EXPORT_SYMBOL(__sw_hweight32);
|
||||
EXPORT_SYMBOL(__sw_hweight64);
|
||||
|
||||
/*
|
||||
* Export string functions. We normally rely on gcc builtin for most of these,
|
||||
* but gcc sometimes decides not to inline them.
|
||||
|
@ -25,7 +25,7 @@ lib-y += memcpy_$(BITS).o
|
||||
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
|
||||
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
|
||||
|
||||
obj-y += msr.o msr-reg.o msr-reg-export.o
|
||||
obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
|
||||
|
||||
ifeq ($(CONFIG_X86_32),y)
|
||||
obj-y += atomic64_32.o
|
||||
|
arch/x86/lib/hweight.S (new file, 77 lines)
@@ -0,0 +1,77 @@
|
||||
#include <linux/linkage.h>
|
||||
|
||||
#include <asm/asm.h>
|
||||
|
||||
/*
|
||||
* unsigned int __sw_hweight32(unsigned int w)
|
||||
* %rdi: w
|
||||
*/
|
||||
ENTRY(__sw_hweight32)
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
movl %edi, %eax # w
|
||||
#endif
|
||||
__ASM_SIZE(push,) %__ASM_REG(dx)
|
||||
movl %eax, %edx # w -> t
|
||||
shrl %edx # t >>= 1
|
||||
andl $0x55555555, %edx # t &= 0x55555555
|
||||
subl %edx, %eax # w -= t
|
||||
|
||||
movl %eax, %edx # w -> t
|
||||
shrl $2, %eax # w_tmp >>= 2
|
||||
andl $0x33333333, %edx # t &= 0x33333333
|
||||
andl $0x33333333, %eax # w_tmp &= 0x33333333
|
||||
addl %edx, %eax # w = w_tmp + t
|
||||
|
||||
movl %eax, %edx # w -> t
|
||||
shrl $4, %edx # t >>= 4
|
||||
addl %edx, %eax # w_tmp += t
|
||||
andl $0x0f0f0f0f, %eax # w_tmp &= 0x0f0f0f0f
|
||||
imull $0x01010101, %eax, %eax # w_tmp *= 0x01010101
|
||||
shrl $24, %eax # w = w_tmp >> 24
|
||||
__ASM_SIZE(pop,) %__ASM_REG(dx)
|
||||
ret
|
||||
ENDPROC(__sw_hweight32)
|
||||
|
||||
ENTRY(__sw_hweight64)
|
||||
#ifdef CONFIG_X86_64
|
||||
pushq %rdx
|
||||
|
||||
movq %rdi, %rdx # w -> t
|
||||
movabsq $0x5555555555555555, %rax
|
||||
shrq %rdx # t >>= 1
|
||||
andq %rdx, %rax # t &= 0x5555555555555555
|
||||
movabsq $0x3333333333333333, %rdx
|
||||
subq %rax, %rdi # w -= t
|
||||
|
||||
movq %rdi, %rax # w -> t
|
||||
shrq $2, %rdi # w_tmp >>= 2
|
||||
andq %rdx, %rax # t &= 0x3333333333333333
|
||||
andq %rdi, %rdx # w_tmp &= 0x3333333333333333
|
||||
addq %rdx, %rax # w = w_tmp + t
|
||||
|
||||
movq %rax, %rdx # w -> t
|
||||
shrq $4, %rdx # t >>= 4
|
||||
addq %rdx, %rax # w_tmp += t
|
||||
movabsq $0x0f0f0f0f0f0f0f0f, %rdx
|
||||
andq %rdx, %rax # w_tmp &= 0x0f0f0f0f0f0f0f0f
|
||||
movabsq $0x0101010101010101, %rdx
|
||||
imulq %rdx, %rax # w_tmp *= 0x0101010101010101
|
||||
shrq $56, %rax # w = w_tmp >> 56
|
||||
|
||||
popq %rdx
|
||||
ret
|
||||
#else /* CONFIG_X86_32 */
|
||||
/* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */
|
||||
pushl %ecx
|
||||
|
||||
call __sw_hweight32
|
||||
movl %eax, %ecx # stash away result
|
||||
movl %edx, %eax # second part of input
|
||||
call __sw_hweight32
|
||||
addl %ecx, %eax # result
|
||||
|
||||
popl %ecx
|
||||
ret
|
||||
#endif
|
||||
ENDPROC(__sw_hweight64)
|
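For readers who prefer C, the new __sw_hweight32 routine is the standard parallel ("SWAR") population count; a rough C equivalent of the 32-bit path (illustrative, not part of the patch):

	unsigned int sw_hweight32(unsigned int w)
	{
		w -= (w >> 1) & 0x55555555;				/* count bit pairs */
		w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);	/* 4-bit groups */
		w  = (w + (w >> 4)) & 0x0f0f0f0f;			/* per-byte counts */
		return (w * 0x01010101) >> 24;				/* sum the bytes */
	}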
@ -396,6 +396,7 @@ int __init pci_acpi_init(void)
|
||||
return -ENODEV;
|
||||
|
||||
printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
|
||||
acpi_irq_penalty_init();
|
||||
pcibios_enable_irq = acpi_pci_irq_enable;
|
||||
pcibios_disable_irq = acpi_pci_irq_disable;
|
||||
x86_init.pci.init_irq = x86_init_noop;
|
||||
|
@ -19,6 +19,7 @@
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);
|
@ -28,6 +29,7 @@ extern asmlinkage __visible int restore_image(void);
* kernel's text (this value is passed in the image header).
*/
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;

/*
* Value of the cr3 register from before the hibernation (this value is passed
|
@ -37,7 +39,43 @@ unsigned long restore_cr3 __visible;

pgd_t *temp_level4_pgt __visible;

void *relocated_restore_code __visible;
unsigned long relocated_restore_code __visible;

static int set_up_temporary_text_mapping(void)
{
pmd_t *pmd;
pud_t *pud;

/*
* The new mapping only has to cover the page containing the image
* kernel's entry point (jump_address_phys), because the switch over to
* it is carried out by relocated code running from a page allocated
* specifically for this purpose and covered by the identity mapping, so
* the temporary kernel text mapping is only needed for the final jump.
* Moreover, in that mapping the virtual address of the image kernel's
* entry point must be the same as its virtual address in the image
* kernel (restore_jump_address), so the image kernel's
* restore_registers() code doesn't find itself in a different area of
* the virtual address space after switching over to the original page
* tables used by the image kernel.
*/
pud = (pud_t *)get_safe_page(GFP_ATOMIC);
if (!pud)
return -ENOMEM;

pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
if (!pmd)
return -ENOMEM;

set_pmd(pmd + pmd_index(restore_jump_address),
__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
set_pud(pud + pud_index(restore_jump_address),
__pud(__pa(pmd) | _KERNPG_TABLE));
set_pgd(temp_level4_pgt + pgd_index(restore_jump_address),
__pgd(__pa(pud) | _KERNPG_TABLE));

return 0;
}
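A note on the large-page math above (not from the patch): the set_pmd() call maps a single 2 MiB page, so jump_address_phys is aligned down with PMD_MASK and the entry point is then reached through its offset inside that 2 MiB window, which matches the offset in restore_jump_address as long as the kernel text is mapped with the usual 2 MiB granularity. A tiny worked example with a made-up physical address:

/* illustration only, hypothetical jump_address_phys value */
unsigned long phys = 0x1234567UL;
unsigned long base = phys & PMD_MASK;    /* 0x1200000: the 2 MiB frame that gets mapped */
unsigned long off  = phys & ~PMD_MASK;   /* 0x34567: offset carried by restore_jump_address */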
|
||||
|
||||
static void *alloc_pgt_page(void *context)
|
||||
{
|
||||
@ -59,9 +97,10 @@ static int set_up_temporary_mappings(void)
|
||||
if (!temp_level4_pgt)
|
||||
return -ENOMEM;
|
||||
|
||||
/* It is safe to reuse the original kernel mapping */
|
||||
set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
|
||||
init_level4_pgt[pgd_index(__START_KERNEL_map)]);
|
||||
/* Prepare a temporary mapping for the kernel text */
|
||||
result = set_up_temporary_text_mapping();
|
||||
if (result)
|
||||
return result;
|
||||
|
||||
/* Set up the direct mapping from scratch */
|
||||
for (i = 0; i < nr_pfn_mapped; i++) {
|
||||
@ -78,19 +117,50 @@ static int set_up_temporary_mappings(void)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int relocate_restore_code(void)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
|
||||
relocated_restore_code = get_safe_page(GFP_ATOMIC);
|
||||
if (!relocated_restore_code)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);
|
||||
|
||||
/* Make the page containing the relocated code executable */
|
||||
pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
|
||||
pud = pud_offset(pgd, relocated_restore_code);
|
||||
if (pud_large(*pud)) {
|
||||
set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
|
||||
} else {
|
||||
pmd_t *pmd = pmd_offset(pud, relocated_restore_code);
|
||||
|
||||
if (pmd_large(*pmd)) {
|
||||
set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
|
||||
} else {
|
||||
pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);
|
||||
|
||||
set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
|
||||
}
|
||||
}
|
||||
__flush_tlb_all();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int swsusp_arch_resume(void)
|
||||
{
|
||||
int error;
|
||||
|
||||
/* We have got enough memory and from now on we cannot recover */
|
||||
if ((error = set_up_temporary_mappings()))
|
||||
error = set_up_temporary_mappings();
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
|
||||
if (!relocated_restore_code)
|
||||
return -ENOMEM;
|
||||
memcpy(relocated_restore_code, &core_restore_code,
|
||||
&restore_registers - &core_restore_code);
|
||||
error = relocate_restore_code();
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
restore_image();
|
||||
return 0;
|
||||
@ -109,11 +179,12 @@ int pfn_is_nosave(unsigned long pfn)
|
||||
|
||||
struct restore_data_record {
|
||||
unsigned long jump_address;
|
||||
unsigned long jump_address_phys;
|
||||
unsigned long cr3;
|
||||
unsigned long magic;
|
||||
};
|
||||
|
||||
#define RESTORE_MAGIC 0x0123456789ABCDEFUL
|
||||
#define RESTORE_MAGIC 0x123456789ABCDEF0UL
|
||||
|
||||
/**
|
||||
* arch_hibernation_header_save - populate the architecture specific part
|
||||
@ -126,7 +197,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
|
||||
|
||||
if (max_size < sizeof(struct restore_data_record))
|
||||
return -EOVERFLOW;
|
||||
rdr->jump_address = restore_jump_address;
|
||||
rdr->jump_address = (unsigned long)&restore_registers;
|
||||
rdr->jump_address_phys = __pa_symbol(&restore_registers);
|
||||
rdr->cr3 = restore_cr3;
|
||||
rdr->magic = RESTORE_MAGIC;
|
||||
return 0;
|
||||
@ -142,6 +214,7 @@ int arch_hibernation_header_restore(void *addr)
|
||||
struct restore_data_record *rdr = addr;
|
||||
|
||||
restore_jump_address = rdr->jump_address;
|
||||
jump_address_phys = rdr->jump_address_phys;
|
||||
restore_cr3 = rdr->cr3;
|
||||
return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
|
||||
}
|
||||
|
@ -44,9 +44,6 @@ ENTRY(swsusp_arch_suspend)
|
||||
pushfq
|
||||
popq pt_regs_flags(%rax)
|
||||
|
||||
/* save the address of restore_registers */
|
||||
movq $restore_registers, %rax
|
||||
movq %rax, restore_jump_address(%rip)
|
||||
/* save cr3 */
|
||||
movq %cr3, %rax
|
||||
movq %rax, restore_cr3(%rip)
|
||||
@ -57,31 +54,34 @@ ENTRY(swsusp_arch_suspend)
|
||||
ENDPROC(swsusp_arch_suspend)
|
||||
|
||||
ENTRY(restore_image)
|
||||
/* switch to temporary page tables */
|
||||
movq $__PAGE_OFFSET, %rdx
|
||||
movq temp_level4_pgt(%rip), %rax
|
||||
subq %rdx, %rax
|
||||
movq %rax, %cr3
|
||||
/* Flush TLB */
|
||||
movq mmu_cr4_features(%rip), %rax
|
||||
movq %rax, %rdx
|
||||
andq $~(X86_CR4_PGE), %rdx
|
||||
movq %rdx, %cr4; # turn off PGE
|
||||
movq %cr3, %rcx; # flush TLB
|
||||
movq %rcx, %cr3;
|
||||
movq %rax, %cr4; # turn PGE back on
|
||||
|
||||
/* prepare to jump to the image kernel */
|
||||
movq restore_jump_address(%rip), %rax
|
||||
movq restore_cr3(%rip), %rbx
|
||||
movq restore_jump_address(%rip), %r8
|
||||
movq restore_cr3(%rip), %r9
|
||||
|
||||
/* prepare to switch to temporary page tables */
|
||||
movq temp_level4_pgt(%rip), %rax
|
||||
movq mmu_cr4_features(%rip), %rbx
|
||||
|
||||
/* prepare to copy image data to their original locations */
|
||||
movq restore_pblist(%rip), %rdx
|
||||
|
||||
/* jump to relocated restore code */
|
||||
movq relocated_restore_code(%rip), %rcx
|
||||
jmpq *%rcx
|
||||
|
||||
/* code below has been relocated to a safe page */
|
||||
ENTRY(core_restore_code)
|
||||
/* switch to temporary page tables */
|
||||
movq $__PAGE_OFFSET, %rcx
|
||||
subq %rcx, %rax
|
||||
movq %rax, %cr3
|
||||
/* flush TLB */
|
||||
movq %rbx, %rcx
|
||||
andq $~(X86_CR4_PGE), %rcx
|
||||
movq %rcx, %cr4; # turn off PGE
|
||||
movq %cr3, %rcx; # flush TLB
|
||||
movq %rcx, %cr3;
|
||||
movq %rbx, %cr4; # turn PGE back on
|
||||
.Lloop:
|
||||
testq %rdx, %rdx
|
||||
jz .Ldone
|
||||
@ -96,24 +96,17 @@ ENTRY(core_restore_code)
|
||||
/* progress to the next pbe */
|
||||
movq pbe_next(%rdx), %rdx
|
||||
jmp .Lloop
|
||||
|
||||
.Ldone:
|
||||
/* jump to the restore_registers address from the image header */
|
||||
jmpq *%rax
|
||||
/*
|
||||
* NOTE: This assumes that the boot kernel's text mapping covers the
|
||||
* image kernel's page containing restore_registers and the address of
|
||||
* this page is the same as in the image kernel's text mapping (it
|
||||
* should always be true, because the text mapping is linear, starting
|
||||
* from 0, and is supposed to cover the entire kernel text for every
|
||||
* kernel).
|
||||
*
|
||||
* code below belongs to the image kernel
|
||||
*/
|
||||
jmpq *%r8
|
||||
|
||||
/* code below belongs to the image kernel */
|
||||
.align PAGE_SIZE
|
||||
ENTRY(restore_registers)
|
||||
FRAME_BEGIN
|
||||
/* go back to the original page tables */
|
||||
movq %rbx, %cr3
|
||||
movq %r9, %cr3
|
||||
|
||||
/* Flush TLB, including "global" things (vmalloc) */
|
||||
movq mmu_cr4_features(%rip), %rax
|
||||
|
@ -521,9 +521,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
|
||||
|
||||
preempt_disable();
|
||||
|
||||
pagefault_disable(); /* Avoid warnings due to being atomic. */
|
||||
__get_user(dummy, (unsigned char __user __force *)v);
|
||||
pagefault_enable();
|
||||
probe_kernel_read(&dummy, v, 1);
|
||||
|
||||
if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
|
||||
BUG();
|
||||
|
@ -150,8 +150,10 @@ static int get_task_ioprio(struct task_struct *p)
|
||||
if (ret)
|
||||
goto out;
|
||||
ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
|
||||
task_lock(p);
|
||||
if (p->io_context)
|
||||
ret = p->io_context->ioprio;
|
||||
task_unlock(p);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
@ -602,7 +602,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
|
||||
crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
|
||||
ret = n;
|
||||
out:
|
||||
acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret);
|
||||
acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -672,7 +672,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
|
||||
crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
|
||||
ret = n;
|
||||
out:
|
||||
acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret);
|
||||
acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
|
||||
return n;
|
||||
}
|
||||
|
||||
|
@ -46,6 +46,7 @@
|
||||
#include "acnamesp.h"
|
||||
#include "acdispat.h"
|
||||
#include "actables.h"
|
||||
#include "acinterp.h"
|
||||
|
||||
#define _COMPONENT ACPI_NAMESPACE
|
||||
ACPI_MODULE_NAME("nsload")
|
||||
@ -78,6 +79,8 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
|
||||
|
||||
ACPI_FUNCTION_TRACE(ns_load_table);
|
||||
|
||||
acpi_ex_enter_interpreter();
|
||||
|
||||
/*
|
||||
* Parse the table and load the namespace with all named
|
||||
* objects found within. Control methods are NOT parsed
|
||||
@ -89,7 +92,7 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
|
||||
*/
|
||||
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
goto unlock_interp;
|
||||
}
|
||||
|
||||
/* If table already loaded into namespace, just return */
|
||||
@ -130,6 +133,8 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
|
||||
|
||||
unlock:
|
||||
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
|
||||
unlock_interp:
|
||||
(void)acpi_ex_exit_interpreter();
|
||||
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
|
@ -47,7 +47,6 @@
|
||||
#include "acparser.h"
|
||||
#include "acdispat.h"
|
||||
#include "actables.h"
|
||||
#include "acinterp.h"
|
||||
|
||||
#define _COMPONENT ACPI_NAMESPACE
|
||||
ACPI_MODULE_NAME("nsparse")
|
||||
@ -171,8 +170,6 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
|
||||
|
||||
ACPI_FUNCTION_TRACE(ns_parse_table);
|
||||
|
||||
acpi_ex_enter_interpreter();
|
||||
|
||||
/*
|
||||
* AML Parse, pass 1
|
||||
*
|
||||
@ -188,7 +185,7 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
|
||||
status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1,
|
||||
table_index, start_node);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto error_exit;
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -204,10 +201,8 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
|
||||
status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2,
|
||||
table_index, start_node);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto error_exit;
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
error_exit:
|
||||
acpi_ex_exit_interpreter();
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
@ -470,6 +470,7 @@ static int acpi_irq_pci_sharing_penalty(int irq)
|
||||
{
|
||||
struct acpi_pci_link *link;
|
||||
int penalty = 0;
|
||||
int i;
|
||||
|
||||
list_for_each_entry(link, &acpi_link_list, list) {
|
||||
/*
|
||||
@ -478,18 +479,14 @@ static int acpi_irq_pci_sharing_penalty(int irq)
|
||||
*/
|
||||
if (link->irq.active && link->irq.active == irq)
|
||||
penalty += PIRQ_PENALTY_PCI_USING;
|
||||
else {
|
||||
int i;
|
||||
|
||||
/*
|
||||
* If a link is inactive, penalize the IRQs it
|
||||
* might use, but not as severely.
|
||||
*/
|
||||
for (i = 0; i < link->irq.possible_count; i++)
|
||||
if (link->irq.possible[i] == irq)
|
||||
penalty += PIRQ_PENALTY_PCI_POSSIBLE /
|
||||
link->irq.possible_count;
|
||||
}
|
||||
/*
|
||||
* penalize the IRQs PCI might use, but not as severely.
|
||||
*/
|
||||
for (i = 0; i < link->irq.possible_count; i++)
|
||||
if (link->irq.possible[i] == irq)
|
||||
penalty += PIRQ_PENALTY_PCI_POSSIBLE /
|
||||
link->irq.possible_count;
|
||||
}
|
||||
|
||||
return penalty;
|
||||
@ -499,9 +496,6 @@ static int acpi_irq_get_penalty(int irq)
|
||||
{
|
||||
int penalty = 0;
|
||||
|
||||
if (irq < ACPI_MAX_ISA_IRQS)
|
||||
penalty += acpi_isa_irq_penalty[irq];
|
||||
|
||||
/*
|
||||
* Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
|
||||
* with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
|
||||
@ -516,10 +510,49 @@ static int acpi_irq_get_penalty(int irq)
|
||||
penalty += PIRQ_PENALTY_PCI_USING;
|
||||
}
|
||||
|
||||
if (irq < ACPI_MAX_ISA_IRQS)
|
||||
return penalty + acpi_isa_irq_penalty[irq];
|
||||
|
||||
penalty += acpi_irq_pci_sharing_penalty(irq);
|
||||
return penalty;
|
||||
}
|
||||
|
||||
int __init acpi_irq_penalty_init(void)
|
||||
{
|
||||
struct acpi_pci_link *link;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Update penalties to facilitate IRQ balancing.
|
||||
*/
|
||||
list_for_each_entry(link, &acpi_link_list, list) {
|
||||
|
||||
/*
|
||||
* reflect the possible and active irqs in the penalty table --
|
||||
* useful for breaking ties.
|
||||
*/
|
||||
if (link->irq.possible_count) {
|
||||
int penalty =
|
||||
PIRQ_PENALTY_PCI_POSSIBLE /
|
||||
link->irq.possible_count;
|
||||
|
||||
for (i = 0; i < link->irq.possible_count; i++) {
|
||||
if (link->irq.possible[i] < ACPI_MAX_ISA_IRQS)
|
||||
acpi_isa_irq_penalty[link->irq.
|
||||
possible[i]] +=
|
||||
penalty;
|
||||
}
|
||||
|
||||
} else if (link->irq.active &&
|
||||
(link->irq.active < ACPI_MAX_ISA_IRQS)) {
|
||||
acpi_isa_irq_penalty[link->irq.active] +=
|
||||
PIRQ_PENALTY_PCI_POSSIBLE;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int acpi_irq_balance = -1; /* 0: static, 1: balance */
|
||||
|
||||
static int acpi_pci_link_allocate(struct acpi_pci_link *link)
|
||||
|
@ -207,6 +207,9 @@ struct blkfront_info
|
||||
struct blk_mq_tag_set tag_set;
|
||||
struct blkfront_ring_info *rinfo;
|
||||
unsigned int nr_rings;
|
||||
/* Save uncomplete reqs and bios for migration. */
|
||||
struct list_head requests;
|
||||
struct bio_list bio_list;
|
||||
};
|
||||
|
||||
static unsigned int nr_minors;
|
||||
@ -2002,69 +2005,22 @@ static int blkif_recover(struct blkfront_info *info)
|
||||
{
|
||||
unsigned int i, r_index;
|
||||
struct request *req, *n;
|
||||
struct blk_shadow *copy;
|
||||
int rc;
|
||||
struct bio *bio, *cloned_bio;
|
||||
struct bio_list bio_list, merge_bio;
|
||||
unsigned int segs, offset;
|
||||
int pending, size;
|
||||
struct split_bio *split_bio;
|
||||
struct list_head requests;
|
||||
|
||||
blkfront_gather_backend_features(info);
|
||||
segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
|
||||
blk_queue_max_segments(info->rq, segs);
|
||||
bio_list_init(&bio_list);
|
||||
INIT_LIST_HEAD(&requests);
|
||||
|
||||
for (r_index = 0; r_index < info->nr_rings; r_index++) {
|
||||
struct blkfront_ring_info *rinfo;
|
||||
|
||||
rinfo = &info->rinfo[r_index];
|
||||
/* Stage 1: Make a safe copy of the shadow state. */
|
||||
copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
|
||||
GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
|
||||
if (!copy)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Stage 2: Set up free list. */
|
||||
memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
|
||||
for (i = 0; i < BLK_RING_SIZE(info); i++)
|
||||
rinfo->shadow[i].req.u.rw.id = i+1;
|
||||
rinfo->shadow_free = rinfo->ring.req_prod_pvt;
|
||||
rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
|
||||
struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
|
||||
|
||||
rc = blkfront_setup_indirect(rinfo);
|
||||
if (rc) {
|
||||
kfree(copy);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
for (i = 0; i < BLK_RING_SIZE(info); i++) {
|
||||
/* Not in use? */
|
||||
if (!copy[i].request)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Get the bios in the request so we can re-queue them.
|
||||
*/
|
||||
if (copy[i].request->cmd_flags &
|
||||
(REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
|
||||
/*
|
||||
* Flush operations don't contain bios, so
|
||||
* we need to requeue the whole request
|
||||
*/
|
||||
list_add(©[i].request->queuelist, &requests);
|
||||
continue;
|
||||
}
|
||||
merge_bio.head = copy[i].request->bio;
|
||||
merge_bio.tail = copy[i].request->biotail;
|
||||
bio_list_merge(&bio_list, &merge_bio);
|
||||
copy[i].request->bio = NULL;
|
||||
blk_end_request_all(copy[i].request, 0);
|
||||
}
|
||||
|
||||
kfree(copy);
|
||||
}
|
||||
xenbus_switch_state(info->xbdev, XenbusStateConnected);
|
||||
|
||||
@ -2079,7 +2035,7 @@ static int blkif_recover(struct blkfront_info *info)
|
||||
kick_pending_request_queues(rinfo);
|
||||
}
|
||||
|
||||
list_for_each_entry_safe(req, n, &requests, queuelist) {
|
||||
list_for_each_entry_safe(req, n, &info->requests, queuelist) {
|
||||
/* Requeue pending requests (flush or discard) */
|
||||
list_del_init(&req->queuelist);
|
||||
BUG_ON(req->nr_phys_segments > segs);
|
||||
@ -2087,7 +2043,7 @@ static int blkif_recover(struct blkfront_info *info)
|
||||
}
|
||||
blk_mq_kick_requeue_list(info->rq);
|
||||
|
||||
while ((bio = bio_list_pop(&bio_list)) != NULL) {
|
||||
while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
|
||||
/* Traverse the list of pending bios and re-queue them */
|
||||
if (bio_segments(bio) > segs) {
|
||||
/*
|
||||
@ -2133,9 +2089,42 @@ static int blkfront_resume(struct xenbus_device *dev)
|
||||
{
|
||||
struct blkfront_info *info = dev_get_drvdata(&dev->dev);
|
||||
int err = 0;
|
||||
unsigned int i, j;
|
||||
|
||||
dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
|
||||
|
||||
bio_list_init(&info->bio_list);
|
||||
INIT_LIST_HEAD(&info->requests);
|
||||
for (i = 0; i < info->nr_rings; i++) {
|
||||
struct blkfront_ring_info *rinfo = &info->rinfo[i];
|
||||
struct bio_list merge_bio;
|
||||
struct blk_shadow *shadow = rinfo->shadow;
|
||||
|
||||
for (j = 0; j < BLK_RING_SIZE(info); j++) {
|
||||
/* Not in use? */
|
||||
if (!shadow[j].request)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Get the bios in the request so we can re-queue them.
|
||||
*/
|
||||
if (shadow[j].request->cmd_flags &
|
||||
(REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
|
||||
/*
|
||||
* Flush operations don't contain bios, so
|
||||
* we need to requeue the whole request
|
||||
*/
|
||||
list_add(&shadow[j].request->queuelist, &info->requests);
|
||||
continue;
|
||||
}
|
||||
merge_bio.head = shadow[j].request->bio;
|
||||
merge_bio.tail = shadow[j].request->biotail;
|
||||
bio_list_merge(&info->bio_list, &merge_bio);
|
||||
shadow[j].request->bio = NULL;
|
||||
blk_mq_end_request(shadow[j].request, 0);
|
||||
}
|
||||
}
|
||||
|
||||
blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
|
||||
|
||||
err = negotiate_mq(info);
|
||||
|
@ -173,7 +173,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,

struct cpuidle_state *target_state = &drv->states[index];
bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
u64 time_start, time_end;
ktime_t time_start, time_end;
s64 diff;

/*
|
@ -195,13 +195,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
sched_idle_set_state(target_state);

trace_cpu_idle_rcuidle(index, dev->cpu);
time_start = local_clock();
time_start = ns_to_ktime(local_clock());

stop_critical_timings();
entered_state = target_state->enter(dev, drv, index);
start_critical_timings();

time_end = local_clock();
time_end = ns_to_ktime(local_clock());
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

/* The cpu is no longer idle or about to enter idle. */
|
@ -217,11 +217,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
if (!cpuidle_state_is_coupled(drv, index))
local_irq_enable();

/*
* local_clock() returns the time in nanosecond, let's shift
* by 10 (divide by 1024) to have microsecond based time.
*/
diff = (time_end - time_start) >> 10;
diff = ktime_us_delta(time_end, time_start);
if (diff > INT_MAX)
diff = INT_MAX;
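Aside (not from the patch): the shift that this hunk removes was an approximation, since dividing nanoseconds by 1024 instead of 1000 under-reports the idle residency by roughly 2.3%, while ktime_us_delta() performs an exact microsecond conversion. A small illustration, assuming a hypothetical 1 ms residency:

s64 ns = 1000000;        /* hypothetical 1 ms idle residency, in nanoseconds */
s64 approx = ns >> 10;   /* old code: 976 us, because >> 10 divides by 1024 */
s64 exact  = ns / 1000;  /* what ktime_us_delta() effectively reports: 1000 us */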
|
||||
|
||||
|
@ -49,7 +49,7 @@ config GPIO_DEVRES
|
||||
|
||||
config OF_GPIO
|
||||
def_bool y
|
||||
depends on OF || COMPILE_TEST
|
||||
depends on OF
|
||||
|
||||
config GPIO_ACPI
|
||||
def_bool y
|
||||
|
@ -61,9 +61,8 @@ static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio)
|
||||
return gpio % 8;
|
||||
}
|
||||
|
||||
static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
|
||||
static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg)
|
||||
{
|
||||
struct sch_gpio *sch = gpiochip_get_data(gc);
|
||||
unsigned short offset, bit;
|
||||
u8 reg_val;
|
||||
|
||||
@ -75,10 +74,9 @@ static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
|
||||
return reg_val;
|
||||
}
|
||||
|
||||
static void sch_gpio_reg_set(struct gpio_chip *gc, unsigned gpio, unsigned reg,
|
||||
static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned gpio, unsigned reg,
|
||||
int val)
|
||||
{
|
||||
struct sch_gpio *sch = gpiochip_get_data(gc);
|
||||
unsigned short offset, bit;
|
||||
u8 reg_val;
|
||||
|
||||
@ -98,14 +96,15 @@ static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
|
||||
struct sch_gpio *sch = gpiochip_get_data(gc);
|
||||
|
||||
spin_lock(&sch->lock);
|
||||
sch_gpio_reg_set(gc, gpio_num, GIO, 1);
|
||||
sch_gpio_reg_set(sch, gpio_num, GIO, 1);
|
||||
spin_unlock(&sch->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
|
||||
{
|
||||
return sch_gpio_reg_get(gc, gpio_num, GLV);
|
||||
struct sch_gpio *sch = gpiochip_get_data(gc);
|
||||
return sch_gpio_reg_get(sch, gpio_num, GLV);
|
||||
}
|
||||
|
||||
static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
|
||||
@ -113,7 +112,7 @@ static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
|
||||
struct sch_gpio *sch = gpiochip_get_data(gc);
|
||||
|
||||
spin_lock(&sch->lock);
|
||||
sch_gpio_reg_set(gc, gpio_num, GLV, val);
|
||||
sch_gpio_reg_set(sch, gpio_num, GLV, val);
|
||||
spin_unlock(&sch->lock);
|
||||
}
|
||||
|
||||
@ -123,7 +122,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
|
||||
struct sch_gpio *sch = gpiochip_get_data(gc);
|
||||
|
||||
spin_lock(&sch->lock);
|
||||
sch_gpio_reg_set(gc, gpio_num, GIO, 0);
|
||||
sch_gpio_reg_set(sch, gpio_num, GIO, 0);
|
||||
spin_unlock(&sch->lock);
|
||||
|
||||
/*
|
||||
@ -182,13 +181,13 @@ static int sch_gpio_probe(struct platform_device *pdev)
|
||||
* GPIO7 is configured by the CMC as SLPIOVR
|
||||
* Enable GPIO[9:8] core powered gpios explicitly
|
||||
*/
|
||||
sch_gpio_reg_set(&sch->chip, 8, GEN, 1);
|
||||
sch_gpio_reg_set(&sch->chip, 9, GEN, 1);
|
||||
sch_gpio_reg_set(sch, 8, GEN, 1);
|
||||
sch_gpio_reg_set(sch, 9, GEN, 1);
|
||||
/*
|
||||
* SUS_GPIO[2:0] enabled by default
|
||||
* Enable SUS_GPIO3 resume powered gpio explicitly
|
||||
*/
|
||||
sch_gpio_reg_set(&sch->chip, 13, GEN, 1);
|
||||
sch_gpio_reg_set(sch, 13, GEN, 1);
|
||||
break;
|
||||
|
||||
case PCI_DEVICE_ID_INTEL_ITC_LPC:
|
||||
|
@ -28,6 +28,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
|
||||
if (!desc && gpio_is_valid(gpio))
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
err = gpiod_request(desc, label);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (flags & GPIOF_OPEN_DRAIN)
|
||||
set_bit(FLAG_OPEN_DRAIN, &desc->flags);
|
||||
|
||||
@ -37,10 +41,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
|
||||
if (flags & GPIOF_ACTIVE_LOW)
|
||||
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
|
||||
|
||||
err = gpiod_request(desc, label);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (flags & GPIOF_DIR_IN)
|
||||
err = gpiod_direction_input(desc);
|
||||
else
|
||||
|
@ -1352,14 +1352,6 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label)
|
||||
spin_lock_irqsave(&gpio_lock, flags);
|
||||
}
|
||||
done:
|
||||
if (status < 0) {
|
||||
/* Clear flags that might have been set by the caller before
|
||||
* requesting the GPIO.
|
||||
*/
|
||||
clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
|
||||
clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
|
||||
clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
|
||||
}
|
||||
spin_unlock_irqrestore(&gpio_lock, flags);
|
||||
return status;
|
||||
}
|
||||
@ -2587,28 +2579,13 @@ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(gpiod_get_optional);
|
||||
|
||||
/**
|
||||
* gpiod_parse_flags - helper function to parse GPIO lookup flags
|
||||
* @desc: gpio to be setup
|
||||
* @lflags: gpio_lookup_flags - returned from of_find_gpio() or
|
||||
* of_get_gpio_hog()
|
||||
*
|
||||
* Set the GPIO descriptor flags based on the given GPIO lookup flags.
|
||||
*/
|
||||
static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
|
||||
{
|
||||
if (lflags & GPIO_ACTIVE_LOW)
|
||||
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
|
||||
if (lflags & GPIO_OPEN_DRAIN)
|
||||
set_bit(FLAG_OPEN_DRAIN, &desc->flags);
|
||||
if (lflags & GPIO_OPEN_SOURCE)
|
||||
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* gpiod_configure_flags - helper function to configure a given GPIO
|
||||
* @desc: gpio whose value will be assigned
|
||||
* @con_id: function within the GPIO consumer
|
||||
* @lflags: gpio_lookup_flags - returned from of_find_gpio() or
|
||||
* of_get_gpio_hog()
|
||||
* @dflags: gpiod_flags - optional GPIO initialization flags
|
||||
*
|
||||
* Return 0 on success, -ENOENT if no GPIO has been assigned to the
|
||||
@ -2616,10 +2593,17 @@ static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
|
||||
* occurred while trying to acquire the GPIO.
|
||||
*/
|
||||
static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
|
||||
enum gpiod_flags dflags)
|
||||
unsigned long lflags, enum gpiod_flags dflags)
|
||||
{
|
||||
int status;
|
||||
|
||||
if (lflags & GPIO_ACTIVE_LOW)
|
||||
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
|
||||
if (lflags & GPIO_OPEN_DRAIN)
|
||||
set_bit(FLAG_OPEN_DRAIN, &desc->flags);
|
||||
if (lflags & GPIO_OPEN_SOURCE)
|
||||
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
|
||||
|
||||
/* No particular flag request, return here... */
|
||||
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
|
||||
pr_debug("no flags found for %s\n", con_id);
|
||||
@ -2686,13 +2670,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
|
||||
return desc;
|
||||
}
|
||||
|
||||
gpiod_parse_flags(desc, lookupflags);
|
||||
|
||||
status = gpiod_request(desc, con_id);
|
||||
if (status < 0)
|
||||
return ERR_PTR(status);
|
||||
|
||||
status = gpiod_configure_flags(desc, con_id, flags);
|
||||
status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
|
||||
if (status < 0) {
|
||||
dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
|
||||
gpiod_put(desc);
|
||||
@ -2748,6 +2730,10 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
|
||||
if (IS_ERR(desc))
|
||||
return desc;
|
||||
|
||||
ret = gpiod_request(desc, NULL);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
if (active_low)
|
||||
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
|
||||
|
||||
@ -2758,10 +2744,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
|
||||
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
|
||||
}
|
||||
|
||||
ret = gpiod_request(desc, NULL);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
return desc;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
|
||||
@ -2814,8 +2796,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
|
||||
chip = gpiod_to_chip(desc);
|
||||
hwnum = gpio_chip_hwgpio(desc);
|
||||
|
||||
gpiod_parse_flags(desc, lflags);
|
||||
|
||||
local_desc = gpiochip_request_own_desc(chip, hwnum, name);
|
||||
if (IS_ERR(local_desc)) {
|
||||
status = PTR_ERR(local_desc);
|
||||
@ -2824,7 +2804,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
|
||||
return status;
|
||||
}
|
||||
|
||||
status = gpiod_configure_flags(desc, name, dflags);
|
||||
status = gpiod_configure_flags(desc, name, lflags, dflags);
|
||||
if (status < 0) {
|
||||
pr_err("setup of hog GPIO %s (chip %s, offset %d) failed, %d\n",
|
||||
name, chip->label, hwnum, status);
|
||||
|
@ -98,7 +98,6 @@
|
||||
#define PCIE_BUS_CLK 10000
|
||||
#define TCLK (PCIE_BUS_CLK / 10)
|
||||
|
||||
#define CEILING_UCHAR(double) ((double-(uint8_t)(double)) > 0 ? (uint8_t)(double+1) : (uint8_t)(double))
|
||||
|
||||
static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] =
|
||||
{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
|
||||
@ -733,7 +732,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
|
||||
table->Smio[level] |=
|
||||
data->mvdd_voltage_table.entries[level].smio_low;
|
||||
}
|
||||
table->SmioMask2 = data->vddci_voltage_table.mask_low;
|
||||
table->SmioMask2 = data->mvdd_voltage_table.mask_low;
|
||||
|
||||
table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
|
||||
}
|
||||
@ -1807,27 +1806,25 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
|
||||
|
||||
ro = efuse * (max -min)/255 + min;
|
||||
|
||||
/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset
|
||||
* there is a little difference in calculating
|
||||
* volt_with_cks with windows */
|
||||
/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
|
||||
for (i = 0; i < sclk_table->count; i++) {
|
||||
data->smc_state_table.Sclk_CKS_masterEn0_7 |=
|
||||
sclk_table->entries[i].cks_enable << i;
|
||||
if (hwmgr->chip_id == CHIP_POLARIS10) {
|
||||
volt_without_cks = (uint32_t)((2753594000 + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
|
||||
volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
|
||||
(2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
|
||||
volt_with_cks = (uint32_t)((279720200 + sclk_table->entries[i].clk * 3232 - (ro - 65) * 100000000) / \
|
||||
(252248000 - sclk_table->entries[i].clk/100 * 115764));
|
||||
volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
|
||||
(2522480 - sclk_table->entries[i].clk/100 * 115764/100));
|
||||
} else {
|
||||
volt_without_cks = (uint32_t)((2416794800 + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
|
||||
(2625416 - (sclk_table->entries[i].clk/100) * 12586807/10000));
|
||||
volt_with_cks = (uint32_t)((2999656000 + sclk_table->entries[i].clk * 392803/100 - (ro - 44) * 1000000) / \
|
||||
(3422454 - sclk_table->entries[i].clk/100 * 18886376/10000));
|
||||
volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
|
||||
(2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
|
||||
volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
|
||||
(3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
|
||||
}
|
||||
|
||||
if (volt_without_cks >= volt_with_cks)
|
||||
volt_offset = (uint8_t)CEILING_UCHAR((volt_without_cks - volt_with_cks +
|
||||
sclk_table->entries[i].cks_voffset) * 100 / 625);
|
||||
volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
|
||||
sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
|
||||
|
||||
data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
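Aside (not from this patch): the dropped CEILING_UCHAR macro is written for floating-point arguments and does nothing useful for the integer expression used here, so the rounding up is instead done with the standard integer ceiling idiom, ceil(a / b) == (a + b - 1) / b for positive integers; with b = 625 that is the "+ 624" above. A tiny example with made-up numbers:

/* ceil(a / b) for positive integers: (a + b - 1) / b */
unsigned int a = 1300, b = 625;           /* hypothetical voltage delta * 100, divisor */
unsigned int floor_div = a / b;           /* 2, plain division truncates */
unsigned int ceil_div  = (a + b - 1) / b; /* 3, (1300 + 624) / 625 rounds up */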
|
||||
}
|
||||
@ -2685,7 +2682,7 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
|
||||
{
|
||||
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
|
||||
uint16_t vv_id;
|
||||
uint16_t vddc = 0;
|
||||
uint32_t vddc = 0;
|
||||
uint16_t i, j;
|
||||
uint32_t sclk = 0;
|
||||
struct phm_ppt_v1_information *table_info =
|
||||
@ -2716,8 +2713,9 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
|
||||
continue);
|
||||
|
||||
|
||||
/* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
|
||||
PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
|
||||
/* need to make sure vddc is less than 2v or else, it could burn the ASIC.
|
||||
* real voltage level in unit of 0.01mv */
|
||||
PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
|
||||
"Invalid VDDC value", result = -EINVAL;);
|
||||
|
||||
/* the voltage should not be zero nor equal to leakage ID */
|
||||
|
@ -1256,7 +1256,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
|
||||
}
|
||||
|
||||
int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
|
||||
uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage)
|
||||
uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
|
||||
{
|
||||
|
||||
int result;
|
||||
@ -1274,7 +1274,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
|
||||
if (0 != result)
|
||||
return result;
|
||||
|
||||
*voltage = get_voltage_info_param_space.usVoltageLevel;
|
||||
*voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -305,7 +305,7 @@ extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t
|
||||
extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
|
||||
uint8_t level);
|
||||
extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
|
||||
uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
|
||||
uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage);
|
||||
extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
|
||||
|
||||
extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);
|
||||
|
@ -1302,7 +1302,7 @@ static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
|
||||
table->Smio[count] |=
|
||||
data->mvdd_voltage_table.entries[count].smio_low;
|
||||
}
|
||||
table->SmioMask2 = data->vddci_voltage_table.mask_low;
|
||||
table->SmioMask2 = data->mvdd_voltage_table.mask_low;
|
||||
|
||||
CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
|
||||
}
|
||||
|
@ -302,7 +302,7 @@ static int init_dpm_2_parameters(
|
||||
(((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
|
||||
|
||||
if (0 != powerplay_table->usPPMTableOffset) {
|
||||
if (1 == get_platform_power_management_table(hwmgr, atom_ppm_table)) {
|
||||
if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) {
|
||||
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_EnablePlatformPowerManagement);
|
||||
}
|
||||
|
@ -40,7 +40,8 @@ static int
|
||||
gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
|
||||
{
|
||||
struct nvkm_device *device = outp->base.disp->engine.subdev.device;
|
||||
nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern);
|
||||
const u32 soff = gf119_sor_soff(outp);
|
||||
nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -65,6 +65,14 @@ static void sun4i_crtc_disable(struct drm_crtc *crtc)
|
||||
DRM_DEBUG_DRIVER("Disabling the CRTC\n");
|
||||
|
||||
sun4i_tcon_disable(drv->tcon);
|
||||
|
||||
if (crtc->state->event && !crtc->state->active) {
|
||||
spin_lock_irq(&crtc->dev->event_lock);
|
||||
drm_crtc_send_vblank_event(crtc, crtc->state->event);
|
||||
spin_unlock_irq(&crtc->dev->event_lock);
|
||||
|
||||
crtc->state->event = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void sun4i_crtc_enable(struct drm_crtc *crtc)
|
||||
|
@ -92,7 +92,7 @@ static struct drm_driver sun4i_drv_driver = {
|
||||
/* Frame Buffer Operations */
|
||||
|
||||
/* VBlank Operations */
|
||||
.get_vblank_counter = drm_vblank_count,
|
||||
.get_vblank_counter = drm_vblank_no_hw_counter,
|
||||
.enable_vblank = sun4i_drv_enable_vblank,
|
||||
.disable_vblank = sun4i_drv_disable_vblank,
|
||||
};
|
||||
@ -310,6 +310,7 @@ static int sun4i_drv_probe(struct platform_device *pdev)
|
||||
|
||||
count += sun4i_drv_add_endpoints(&pdev->dev, &match,
|
||||
pipeline);
|
||||
of_node_put(pipeline);
|
||||
|
||||
DRM_DEBUG_DRIVER("Queued %d outputs on pipeline %d\n",
|
||||
count, i);
|
||||
|
@ -1568,13 +1568,23 @@ static int __init amd_iommu_init_pci(void)
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Order is important here to make sure any unity map requirements are
|
||||
* fulfilled. The unity mappings are created and written to the device
|
||||
* table during the amd_iommu_init_api() call.
|
||||
*
|
||||
* After that we call init_device_table_dma() to make sure any
|
||||
* uninitialized DTE will block DMA, and in the end we flush the caches
|
||||
* of all IOMMUs to make sure the changes to the device table are
|
||||
* active.
|
||||
*/
|
||||
ret = amd_iommu_init_api();
|
||||
|
||||
init_device_table_dma();
|
||||
|
||||
for_each_iommu(iommu)
|
||||
iommu_flush_all_caches(iommu);
|
||||
|
||||
ret = amd_iommu_init_api();
|
||||
|
||||
if (!ret)
|
||||
print_iommu_info();
|
||||
|
||||
|
@ -4602,13 +4602,13 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
|
||||
for (i = 0; i < g_num_of_iommus; i++) {
|
||||
struct intel_iommu *iommu = g_iommus[i];
|
||||
struct dmar_domain *domain;
|
||||
u16 did;
|
||||
int did;
|
||||
|
||||
if (!iommu)
|
||||
continue;
|
||||
|
||||
for (did = 0; did < cap_ndoms(iommu->cap); did++) {
|
||||
domain = get_iommu_domain(iommu, did);
|
||||
domain = get_iommu_domain(iommu, (u16)did);
|
||||
|
||||
if (!domain)
|
||||
continue;
|
||||
|
@ -718,7 +718,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
|
||||
|
||||
spin_lock_irqsave(&gic_lock, flags);
|
||||
gic_map_to_pin(intr, gic_cpu_pin);
|
||||
gic_map_to_vpe(intr, vpe);
|
||||
gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
|
||||
for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
|
||||
clear_bit(intr, pcpu_masks[i].pcpu_mask);
|
||||
set_bit(intr, pcpu_masks[vpe].pcpu_mask);
|
||||
@ -959,7 +959,7 @@ int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
|
||||
switch (bus_token) {
|
||||
case DOMAIN_BUS_IPI:
|
||||
is_ipi = d->bus_token == bus_token;
|
||||
return to_of_node(d->fwnode) == node && is_ipi;
|
||||
return (!node || to_of_node(d->fwnode) == node) && is_ipi;
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
|
@ -101,11 +101,14 @@ enum ad_link_speed_type {
|
||||
#define MAC_ADDRESS_EQUAL(A, B) \
|
||||
ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
|
||||
|
||||
static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
|
||||
static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
|
||||
0, 0, 0, 0, 0, 0
|
||||
};
|
||||
static u16 ad_ticks_per_sec;
|
||||
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
|
||||
|
||||
static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
|
||||
static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
|
||||
MULTICAST_LACPDU_ADDR;
|
||||
|
||||
/* ================= main 802.3ad protocol functions ================== */
|
||||
static int ad_lacpdu_send(struct port *port);
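Aside (not part of the change): MAC_ADDRESS_EQUAL() expands to ether_addr_equal_64bits(), which compares the addresses with a single 64-bit load, so the two bytes beyond the 6-byte address must be readable; that is why these constants are declared ETH_ALEN + 2 bytes long and __long_aligned. A minimal sketch of the layout assumption, with a made-up address:

/* ether_addr_equal_64bits() may read 8 bytes starting at the buffer, so
 * pad the 6-byte Ethernet address out to 8 bytes and align it. */
static const u8 example_addr[ETH_ALEN + 2] __long_aligned = {
	0x00, 0x11, 0x22, 0x33, 0x44, 0x55	/* trailing pad bytes are read but ignored */
};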
|
||||
@ -1739,7 +1742,7 @@ static void ad_clear_agg(struct aggregator *aggregator)
|
||||
aggregator->is_individual = false;
|
||||
aggregator->actor_admin_aggregator_key = 0;
|
||||
aggregator->actor_oper_aggregator_key = 0;
|
||||
aggregator->partner_system = null_mac_addr;
|
||||
eth_zero_addr(aggregator->partner_system.mac_addr_value);
|
||||
aggregator->partner_system_priority = 0;
|
||||
aggregator->partner_oper_aggregator_key = 0;
|
||||
aggregator->receive_state = 0;
|
||||
@ -1761,7 +1764,7 @@ static void ad_initialize_agg(struct aggregator *aggregator)
|
||||
if (aggregator) {
|
||||
ad_clear_agg(aggregator);
|
||||
|
||||
aggregator->aggregator_mac_address = null_mac_addr;
|
||||
eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value);
|
||||
aggregator->aggregator_identifier = 0;
|
||||
aggregator->slave = NULL;
|
||||
}
|
||||
|
@ -42,13 +42,10 @@
|
||||
|
||||
|
||||
|
||||
#ifndef __long_aligned
|
||||
#define __long_aligned __attribute__((aligned((sizeof(long)))))
|
||||
#endif
|
||||
static const u8 mac_bcast[ETH_ALEN] __long_aligned = {
|
||||
static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = {
|
||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
|
||||
};
|
||||
static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = {
|
||||
static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
|
||||
0x33, 0x33, 0x00, 0x00, 0x00, 0x01
|
||||
};
|
||||
static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
|
||||
|
@ -1584,6 +1584,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
|
||||
}
|
||||
|
||||
/* check for initial state */
|
||||
new_slave->link = BOND_LINK_NOCHANGE;
|
||||
if (bond->params.miimon) {
|
||||
if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
|
||||
if (bond->params.updelay) {
|
||||
|
@ -392,7 +392,7 @@ static void bcm_sysport_get_stats(struct net_device *dev,
|
||||
else
|
||||
p = (char *)priv;
|
||||
p += s->stat_offset;
|
||||
data[i] = *(u32 *)p;
|
||||
data[i] = *(unsigned long *)p;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -36,8 +36,8 @@
|
||||
#define __T4FW_VERSION_H__
|
||||
|
||||
#define T4FW_VERSION_MAJOR 0x01
|
||||
#define T4FW_VERSION_MINOR 0x0E
|
||||
#define T4FW_VERSION_MICRO 0x04
|
||||
#define T4FW_VERSION_MINOR 0x0F
|
||||
#define T4FW_VERSION_MICRO 0x25
|
||||
#define T4FW_VERSION_BUILD 0x00
|
||||
|
||||
#define T4FW_MIN_VERSION_MAJOR 0x01
|
||||
@ -45,8 +45,8 @@
|
||||
#define T4FW_MIN_VERSION_MICRO 0x00
|
||||
|
||||
#define T5FW_VERSION_MAJOR 0x01
|
||||
#define T5FW_VERSION_MINOR 0x0E
|
||||
#define T5FW_VERSION_MICRO 0x04
|
||||
#define T5FW_VERSION_MINOR 0x0F
|
||||
#define T5FW_VERSION_MICRO 0x25
|
||||
#define T5FW_VERSION_BUILD 0x00
|
||||
|
||||
#define T5FW_MIN_VERSION_MAJOR 0x00
|
||||
@ -54,8 +54,8 @@
|
||||
#define T5FW_MIN_VERSION_MICRO 0x00
|
||||
|
||||
#define T6FW_VERSION_MAJOR 0x01
|
||||
#define T6FW_VERSION_MINOR 0x0E
|
||||
#define T6FW_VERSION_MICRO 0x04
|
||||
#define T6FW_VERSION_MINOR 0x0F
|
||||
#define T6FW_VERSION_MICRO 0x25
|
||||
#define T6FW_VERSION_BUILD 0x00
|
||||
|
||||
#define T6FW_MIN_VERSION_MAJOR 0x00
|
||||
|
@ -154,16 +154,6 @@ void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
|
||||
writel(val, hw->hw_addr + reg);
|
||||
}
|
||||
|
||||
static bool e1000e_vlan_used(struct e1000_adapter *adapter)
|
||||
{
|
||||
u16 vid;
|
||||
|
||||
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_regdump - register printout routine
|
||||
* @hw: pointer to the HW structure
|
||||
@ -3453,8 +3443,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
|
||||
|
||||
ew32(RCTL, rctl);
|
||||
|
||||
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX ||
|
||||
e1000e_vlan_used(adapter))
|
||||
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
e1000e_vlan_strip_enable(adapter);
|
||||
else
|
||||
e1000e_vlan_strip_disable(adapter);
|
||||
@ -6926,6 +6915,14 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
|
||||
if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
|
||||
features &= ~NETIF_F_RXFCS;
|
||||
|
||||
/* Since there is no support for separate Rx/Tx vlan accel
|
||||
* enable/disable make sure Tx flag is always in same state as Rx.
|
||||
*/
|
||||
if (features & NETIF_F_HW_VLAN_CTAG_RX)
|
||||
features |= NETIF_F_HW_VLAN_CTAG_TX;
|
||||
else
|
||||
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
|
||||
|
||||
return features;
|
||||
}
|
||||
|
||||
|
@ -85,7 +85,7 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
|
||||
static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
|
||||
{
|
||||
struct ixgbe_mbx_info *mbx = &hw->mbx;
|
||||
s32 ret_val = -IXGBE_ERR_MBX;
|
||||
s32 ret_val = IXGBE_ERR_MBX;
|
||||
|
||||
if (!mbx->ops.read)
|
||||
goto out;
|
||||
@ -111,7 +111,7 @@ out:
|
||||
static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
|
||||
{
|
||||
struct ixgbe_mbx_info *mbx = &hw->mbx;
|
||||
s32 ret_val = -IXGBE_ERR_MBX;
|
||||
s32 ret_val = IXGBE_ERR_MBX;
|
||||
|
||||
/* exit if either we can't write or there isn't a defined timeout */
|
||||
if (!mbx->ops.write || !mbx->timeout)
|
||||
|
@ -3458,6 +3458,8 @@ static int mvneta_open(struct net_device *dev)
|
||||
return 0;
|
||||
|
||||
err_free_irq:
|
||||
unregister_cpu_notifier(&pp->cpu_notifier);
|
||||
on_each_cpu(mvneta_percpu_disable, pp, true);
|
||||
free_percpu_irq(pp->dev->irq, pp->ports);
|
||||
err_cleanup_txqs:
|
||||
mvneta_cleanup_txqs(pp);
|
||||
|
@ -295,6 +295,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
|
||||
case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
|
||||
case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
|
||||
case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
|
||||
case MLX5_CMD_OP_2ERR_QP:
|
||||
case MLX5_CMD_OP_2RST_QP:
|
||||
case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
|
||||
case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
|
||||
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
|
||||
case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
|
||||
return MLX5_CMD_STAT_OK;
|
||||
|
||||
case MLX5_CMD_OP_QUERY_HCA_CAP:
|
||||
@ -321,8 +327,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
|
||||
case MLX5_CMD_OP_RTR2RTS_QP:
|
||||
case MLX5_CMD_OP_RTS2RTS_QP:
|
||||
case MLX5_CMD_OP_SQERR2RTS_QP:
|
||||
case MLX5_CMD_OP_2ERR_QP:
|
||||
case MLX5_CMD_OP_2RST_QP:
|
||||
case MLX5_CMD_OP_QUERY_QP:
|
||||
case MLX5_CMD_OP_SQD_RTS_QP:
|
||||
case MLX5_CMD_OP_INIT2INIT_QP:
|
||||
@ -342,7 +346,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
|
||||
case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
|
||||
case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
|
||||
case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
|
||||
case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
|
||||
case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
|
||||
case MLX5_CMD_OP_SET_ROCE_ADDRESS:
|
||||
case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
|
||||
@ -390,11 +393,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
|
||||
case MLX5_CMD_OP_CREATE_RQT:
|
||||
case MLX5_CMD_OP_MODIFY_RQT:
|
||||
case MLX5_CMD_OP_QUERY_RQT:
|
||||
|
||||
case MLX5_CMD_OP_CREATE_FLOW_TABLE:
|
||||
case MLX5_CMD_OP_QUERY_FLOW_TABLE:
|
||||
case MLX5_CMD_OP_CREATE_FLOW_GROUP:
|
||||
case MLX5_CMD_OP_QUERY_FLOW_GROUP:
|
||||
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
|
||||
|
||||
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
|
||||
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
|
||||
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
|
||||
@ -602,11 +606,36 @@ static void dump_command(struct mlx5_core_dev *dev,
|
||||
pr_debug("\n");
|
||||
}
|
||||
|
||||
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
|
||||
{
|
||||
struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
|
||||
|
||||
return be16_to_cpu(hdr->opcode);
|
||||
}
|
||||
|
||||
static void cb_timeout_handler(struct work_struct *work)
|
||||
{
|
||||
struct delayed_work *dwork = container_of(work, struct delayed_work,
|
||||
work);
|
||||
struct mlx5_cmd_work_ent *ent = container_of(dwork,
|
||||
struct mlx5_cmd_work_ent,
|
||||
cb_timeout_work);
|
||||
struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
|
||||
cmd);
|
||||
|
||||
ent->ret = -ETIMEDOUT;
|
||||
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
|
||||
mlx5_command_str(msg_to_opcode(ent->in)),
|
||||
msg_to_opcode(ent->in));
|
||||
mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
|
||||
}
|
||||
|
||||
static void cmd_work_handler(struct work_struct *work)
|
||||
{
|
||||
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
|
||||
struct mlx5_cmd *cmd = ent->cmd;
|
||||
struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
|
||||
unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
|
||||
struct mlx5_cmd_layout *lay;
|
||||
struct semaphore *sem;
|
||||
unsigned long flags;
|
||||
@ -647,6 +676,9 @@ static void cmd_work_handler(struct work_struct *work)
|
||||
dump_command(dev, ent, 1);
|
||||
ent->ts1 = ktime_get_ns();
|
||||
|
||||
if (ent->callback)
|
||||
schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
|
||||
|
||||
/* ring doorbell after the descriptor is valid */
|
||||
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
|
||||
wmb();
|
||||
@ -691,13 +723,6 @@ static const char *deliv_status_to_str(u8 status)
|
||||
}
|
||||
}
|
||||
|
||||
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
|
||||
{
|
||||
struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
|
||||
|
||||
return be16_to_cpu(hdr->opcode);
|
||||
}
|
||||
|
||||
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
|
||||
{
|
||||
unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
|
||||
@ -706,13 +731,13 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
|
||||
|
||||
if (cmd->mode == CMD_MODE_POLLING) {
|
||||
wait_for_completion(&ent->done);
|
||||
err = ent->ret;
|
||||
} else {
|
||||
if (!wait_for_completion_timeout(&ent->done, timeout))
|
||||
err = -ETIMEDOUT;
|
||||
else
|
||||
err = 0;
|
||||
} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
|
||||
ent->ret = -ETIMEDOUT;
|
||||
mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
|
||||
}
|
||||
|
||||
err = ent->ret;
|
||||
|
||||
if (err == -ETIMEDOUT) {
|
||||
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
|
||||
mlx5_command_str(msg_to_opcode(ent->in)),
|
||||
@ -761,6 +786,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
|
||||
if (!callback)
|
||||
init_completion(&ent->done);
|
||||
|
||||
INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
|
||||
INIT_WORK(&ent->work, cmd_work_handler);
|
||||
if (page_queue) {
|
||||
cmd_work_handler(&ent->work);
|
||||
@ -770,28 +796,26 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
if (!callback) {
|
||||
err = wait_func(dev, ent);
|
||||
if (err == -ETIMEDOUT)
|
||||
goto out;
|
||||
if (callback)
|
||||
goto out;
|
||||
|
||||
ds = ent->ts2 - ent->ts1;
|
||||
op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
|
||||
if (op < ARRAY_SIZE(cmd->stats)) {
|
||||
stats = &cmd->stats[op];
|
||||
spin_lock_irq(&stats->lock);
|
||||
stats->sum += ds;
|
||||
++stats->n;
|
||||
spin_unlock_irq(&stats->lock);
|
||||
}
|
||||
mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
|
||||
"fw exec time for %s is %lld nsec\n",
|
||||
mlx5_command_str(op), ds);
|
||||
*status = ent->status;
|
||||
free_cmd(ent);
|
||||
err = wait_func(dev, ent);
|
||||
if (err == -ETIMEDOUT)
|
||||
goto out_free;
|
||||
|
||||
ds = ent->ts2 - ent->ts1;
|
||||
op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
|
||||
if (op < ARRAY_SIZE(cmd->stats)) {
|
||||
stats = &cmd->stats[op];
|
||||
spin_lock_irq(&stats->lock);
|
||||
stats->sum += ds;
|
||||
++stats->n;
|
||||
spin_unlock_irq(&stats->lock);
|
||||
}
|
||||
|
||||
return err;
|
||||
mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
|
||||
"fw exec time for %s is %lld nsec\n",
|
||||
mlx5_command_str(op), ds);
|
||||
*status = ent->status;
|
||||
|
||||
out_free:
|
||||
free_cmd(ent);
|
||||
@ -1181,41 +1205,30 @@ err_dbg:
|
||||
return err;
|
||||
}
|
||||
|
||||
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
|
||||
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
|
||||
{
|
||||
struct mlx5_cmd *cmd = &dev->cmd;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < cmd->max_reg_cmds; i++)
|
||||
down(&cmd->sem);
|
||||
|
||||
down(&cmd->pages_sem);
|
||||
|
||||
flush_workqueue(cmd->wq);
|
||||
|
||||
cmd->mode = CMD_MODE_EVENTS;
|
||||
cmd->mode = mode;
|
||||
|
||||
up(&cmd->pages_sem);
|
||||
for (i = 0; i < cmd->max_reg_cmds; i++)
|
||||
up(&cmd->sem);
|
||||
}
|
||||
|
||||
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
|
||||
{
|
||||
mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
|
||||
}
|
||||
|
||||
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
|
||||
{
|
||||
struct mlx5_cmd *cmd = &dev->cmd;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < cmd->max_reg_cmds; i++)
|
||||
down(&cmd->sem);
|
||||
|
||||
down(&cmd->pages_sem);
|
||||
|
||||
flush_workqueue(cmd->wq);
|
||||
cmd->mode = CMD_MODE_POLLING;
|
||||
|
||||
up(&cmd->pages_sem);
|
||||
for (i = 0; i < cmd->max_reg_cmds; i++)
|
||||
up(&cmd->sem);
|
||||
mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
|
||||
}
|
||||
|
||||
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
|
||||
@ -1251,6 +1264,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
|
||||
struct semaphore *sem;
|
||||
|
||||
ent = cmd->ent_arr[i];
|
||||
if (ent->callback)
|
||||
cancel_delayed_work(&ent->cb_timeout_work);
|
||||
if (ent->page_queue)
|
||||
sem = &cmd->pages_sem;
|
||||
else
|
||||
|
@ -145,7 +145,6 @@ struct mlx5e_umr_wqe {
|
||||
|
||||
#ifdef CONFIG_MLX5_CORE_EN_DCB
|
||||
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
|
||||
#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
#endif

struct mlx5e_params {
@ -191,6 +190,7 @@ struct mlx5e_tstamp {
enum {
MLX5E_RQ_STATE_POST_WQES_ENABLE,
MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
MLX5E_RQ_STATE_FLUSH_TIMEOUT,
};

struct mlx5e_cq {
@ -220,6 +220,8 @@ typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
u16 ix);

typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);

struct mlx5e_dma_info {
struct page *page;
dma_addr_t addr;
@ -241,6 +243,7 @@ struct mlx5e_rq {
struct mlx5e_cq cq;
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_alloc_wqe alloc_wqe;
mlx5e_fp_dealloc_wqe dealloc_wqe;

unsigned long state;
int ix;
@ -305,6 +308,7 @@ struct mlx5e_sq_dma {
enum {
MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
MLX5E_SQ_STATE_BF_ENABLE,
MLX5E_SQ_STATE_TX_TIMEOUT,
};

struct mlx5e_ico_wqe_info {
@ -538,6 +542,7 @@ struct mlx5e_priv {
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
struct work_struct tx_timeout_work;
struct delayed_work update_stats_work;

struct mlx5_core_dev *mdev;
@ -589,12 +594,16 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);

void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,

@ -96,7 +96,7 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
break;
case IEEE_8021QAZ_TSA_ETS:
tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC;
tc_tx_bw[i] = ets->tc_tx_bw[i];
break;
}
}
@ -140,8 +140,12 @@ static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)

/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
if (!ets->tc_tx_bw[i])
return -EINVAL;

bw_sum += ets->tc_tx_bw[i];
}
}

if (bw_sum != 0 && bw_sum != 100)

@ -39,6 +39,13 @@
#include "eswitch.h"
#include "vxlan.h"

enum {
MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000,
MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20,
MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
};

struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
struct mlx5_wq_param wq;
@ -74,10 +81,13 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
port_state = mlx5_query_vport_state(mdev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

if (port_state == VPORT_STATE_UP)
if (port_state == VPORT_STATE_UP) {
netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
else
} else {
netdev_info(priv->netdev, "Link down\n");
netif_carrier_off(priv->netdev);
}
}

static void mlx5e_update_carrier_work(struct work_struct *work)
@ -91,6 +101,26 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}

static void mlx5e_tx_timeout_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
tx_timeout_work);
int err;

rtnl_lock();
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
mlx5e_close_locked(priv->netdev);
err = mlx5e_open_locked(priv->netdev);
if (err)
netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
err);
unlock:
mutex_unlock(&priv->state_lock);
rtnl_unlock();
}

static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
@ -305,6 +335,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
}
rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
@ -320,6 +351,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
}
rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
rq->alloc_wqe = mlx5e_alloc_rx_wqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

rq->wqe_sz = (priv->params.lro_en) ?
priv->params.lro_wqe_sz :
@ -525,17 +557,25 @@ err_destroy_rq:

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
int tout = 0;
int err;

clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */

mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
while (!mlx5_wq_ll_is_empty(&rq->wq))
msleep(20);
err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);

if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);

/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
napi_synchronize(&rq->channel->napi);

mlx5e_disable_rq(rq);
mlx5e_free_rx_descs(rq);
mlx5e_destroy_rq(rq);
}

@ -782,6 +822,9 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)

static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
int tout = 0;
int err;

if (sq->txq) {
clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
/* prevent netif_tx_wake_queue */
@ -792,15 +835,24 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
if (mlx5e_sq_has_room_for(sq, 1))
mlx5e_send_nop(sq, true);

mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
MLX5_SQC_STATE_ERR);
if (err)
set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
}

while (sq->cc != sq->pc) /* wait till sq is empty */
msleep(20);
/* wait till sq is empty, unless a TX timeout occurred on this SQ */
while (sq->cc != sq->pc &&
!test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
}

/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
napi_synchronize(&sq->channel->napi);

mlx5e_free_tx_descs(sq);
mlx5e_disable_sq(sq);
mlx5e_destroy_sq(sq);
}
@ -1658,8 +1710,11 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)

netdev_set_num_tc(netdev, ntc);

/* Map netdev TCs to offset 0
* We have our own UP to TXQ mapping for QoS
*/
for (tc = 0; tc < ntc; tc++)
netdev_set_tc_queue(netdev, tc, nch, tc * nch);
netdev_set_tc_queue(netdev, tc, nch, 0);
}

int mlx5e_open_locked(struct net_device *netdev)
@ -2590,6 +2645,29 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
return features;
}

static void mlx5e_tx_timeout(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
bool sched_work = false;
int i;

netdev_err(dev, "TX timeout detected\n");

for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
struct mlx5e_sq *sq = priv->txq_to_sq_map[i];

if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
continue;
sched_work = true;
set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
}

if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
schedule_work(&priv->tx_timeout_work);
}

static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
@ -2607,6 +2685,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
.ndo_tx_timeout = mlx5e_tx_timeout,
};

static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@ -2636,6 +2715,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
.ndo_tx_timeout = mlx5e_tx_timeout,
};

static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@ -2838,6 +2918,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,

INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
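The mlx5e changes above add TX-timeout recovery in two stages: the .ndo_tx_timeout callback (mlx5e_tx_timeout) only flags the stuck send queues and schedules tx_timeout_work, and the work item later closes and reopens the device under rtnl_lock and the state mutex, where sleeping is allowed. The sketch below is a stand-alone, user-space illustration of that flag-and-defer pattern, not the driver's code; every name in it (fake_tx_timeout, recovery_worker, timeout_flagged) is invented for the example, and a polling thread stands in for the kernel workqueue.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-alone sketch of the pattern: the fast path only flags that a queue
 * looks stuck, and a separate worker performs the expensive close/reopen.
 * All names here are invented for the illustration.
 */
static atomic_bool timeout_flagged;

static void fake_tx_timeout(void)            /* plays the ndo_tx_timeout role */
{
	fprintf(stderr, "TX timeout detected\n");
	atomic_store(&timeout_flagged, true);    /* cheap: just request recovery */
}

static void *recovery_worker(void *arg)      /* plays the work-item role */
{
	(void)arg;
	while (!atomic_load(&timeout_flagged))
		usleep(1000);                        /* wait until recovery is requested */
	printf("recovering: close and reopen the device\n");
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, recovery_worker, NULL);
	fake_tx_timeout();
	pthread_join(worker, NULL);
	return 0;
}

Splitting the work this way keeps the timeout callback cheap and non-sleeping while the heavy reset runs later in process context.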
@ -212,6 +212,20 @@ err_free_skb:
return -ENOMEM;
}

void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
struct sk_buff *skb = rq->skb[ix];

if (skb) {
rq->skb[ix] = NULL;
dma_unmap_single(rq->pdev,
*((dma_addr_t *)skb->cb),
rq->wqe_sz,
DMA_FROM_DEVICE);
dev_kfree_skb(skb);
}
}

static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
{
return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
@ -574,6 +588,30 @@ int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
return 0;
}

void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];

wi->free_wqe(rq, wi);
}

void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->wq;
struct mlx5e_rx_wqe *wqe;
__be16 wqe_ix_be;
u16 wqe_ix;

while (!mlx5_wq_ll_is_empty(wq)) {
wqe_ix_be = *wq->tail_next;
wqe_ix = be16_to_cpu(wqe_ix_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
rq->dealloc_wqe(rq, wqe_ix);
mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
&wqe->next.next_wqe_index);
}
}

#define RQ_CANNOT_POST(rq) \
(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
@ -878,6 +916,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
int work_done = 0;

if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
return 0;

if (cq->decmprs_left)
work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);

@ -110,8 +110,20 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct mlx5e_priv *priv = netdev_priv(dev);
int channel_ix = fallback(dev, skb);
int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ?
skb->vlan_tci >> VLAN_PRIO_SHIFT : 0;
int up = 0;

if (!netdev_get_num_tc(dev))
return channel_ix;

if (skb_vlan_tag_present(skb))
up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

/* channel_ix can be larger than num_channels since
* dev->num_real_tx_queues = num_channels * num_tc
*/
if (channel_ix >= priv->params.num_channels)
channel_ix = reciprocal_scale(channel_ix,
priv->params.num_channels);

return priv->channeltc_to_txq_map[channel_ix][up];
}
@ -123,7 +135,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
* headers and occur before the data gather.
* Therefore these headers must be copied into the WQE
*/
#define MLX5E_MIN_INLINE ETH_HLEN
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

if (bf) {
u16 ihs = skb_headlen(skb);
@ -135,7 +147,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
return skb_headlen(skb);
}

return MLX5E_MIN_INLINE;
return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
@ -341,6 +353,35 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
return mlx5e_sq_xmit(sq, skb);
}

void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
{
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
u16 ci;
int i;

while (sq->cc != sq->pc) {
ci = sq->cc & sq->wq.sz_m1;
skb = sq->skb[ci];
wi = &sq->wqe_info[ci];

if (!skb) { /* nop */
sq->cc++;
continue;
}

for (i = 0; i < wi->num_dma; i++) {
struct mlx5e_sq_dma *dma =
mlx5e_dma_get(sq, sq->dma_fifo_cc++);

mlx5e_tx_dma_unmap(sq->pdev, dma);
}

dev_kfree_skb_any(skb);
sq->cc += wi->num_wqebbs;
}
}

bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq *sq;
@ -352,6 +393,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)

sq = container_of(cq, struct mlx5e_sq, cq);

if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
return false;

npkts = 0;
nbytes = 0;

@ -108,15 +108,21 @@ static int in_fatal(struct mlx5_core_dev *dev)

void mlx5_enter_error_state(struct mlx5_core_dev *dev)
{
mutex_lock(&dev->intf_state_mutex);
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
return;
goto unlock;

mlx5_core_err(dev, "start\n");
if (pci_channel_offline(dev->pdev) || in_fatal(dev))
if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
trigger_cmd_completions(dev);
}

mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
mlx5_core_err(dev, "end\n");

unlock:
mutex_unlock(&dev->intf_state_mutex);
}

static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
@ -245,7 +251,6 @@ static void poll_health(unsigned long data)
u32 count;

if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
trigger_cmd_completions(dev);
mod_timer(&health->timer, get_next_poll_jiffies());
return;
}

@ -1422,46 +1422,31 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
mlx5_pci_err_detected(dev->pdev, 0);
}

/* wait for the device to show vital signs. For now we check
* that we can read the device ID and that the health buffer
* shows a non zero value which is different than 0xffffffff
/* wait for the device to show vital signs by waiting
* for the health counter to start counting.
*/
static void wait_vital(struct pci_dev *pdev)
static int wait_vital(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_core_health *health = &dev->priv.health;
const int niter = 100;
u32 last_count = 0;
u32 count;
u16 did;
int i;

/* Wait for firmware to be ready after reset */
msleep(1000);
for (i = 0; i < niter; i++) {
if (pci_read_config_word(pdev, 2, &did)) {
dev_warn(&pdev->dev, "failed reading config word\n");
break;
}
if (did == pdev->device) {
dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
break;
}
msleep(50);
}
if (i == niter)
dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);

for (i = 0; i < niter; i++) {
count = ioread32be(health->health_counter);
if (count && count != 0xffffffff) {
dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
break;
if (last_count && last_count != count) {
dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
return 0;
}
last_count = count;
}
msleep(50);
}

if (i == niter)
dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
return -ETIMEDOUT;
}

static void mlx5_pci_resume(struct pci_dev *pdev)
@ -1473,7 +1458,11 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
dev_info(&pdev->dev, "%s was called\n", __func__);

pci_save_state(pdev);
wait_vital(pdev);
err = wait_vital(pdev);
if (err) {
dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
return;
}

err = mlx5_load_one(dev, priv);
if (err)
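The reworked wait_vital() above no longer probes the PCI device ID; it polls the firmware health counter and reports success once the counter is seen changing, or returns -ETIMEDOUT after a fixed number of 50 ms polls so mlx5_pci_resume() can bail out instead of loading a dead device. Below is a small, runnable user-space sketch of that bounded-polling idea; fake_counter and read_counter() are stand-ins invented for the example (the driver reads the real counter with ioread32be()).

#include <stdio.h>
#include <unistd.h>

static unsigned int fake_counter;

/* Stand-in for reading the device health counter; here it simply
 * increments so the example terminates quickly.
 */
static unsigned int read_counter(void)
{
	return ++fake_counter;
}

int main(void)
{
	const int niter = 100;          /* hard upper bound on the wait */
	unsigned int last = 0, cur;

	for (int i = 0; i < niter; i++) {
		cur = read_counter();
		if (last && cur != last) {   /* counter is alive: it changed */
			printf("counter alive after %d iterations\n", i);
			return 0;
		}
		last = cur;
		usleep(50 * 1000);           /* 50 ms, mirroring msleep(50) */
	}
	fprintf(stderr, "timed out waiting for counter\n");
	return 1;
}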
@ -345,7 +345,6 @@ retry:
func_id, npages, err);
goto out_4k;
}
dev->priv.fw_pages += npages;

err = mlx5_cmd_status_to_err(&out.hdr);
if (err) {
@ -373,6 +372,33 @@ out_free:
return err;
}

static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
struct mlx5_manage_pages_inbox *in, int in_size,
struct mlx5_manage_pages_outbox *out, int out_size)
{
struct fw_page *fwp;
struct rb_node *p;
u32 npages;
u32 i = 0;

if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
(u32 *)out, out_size);

npages = be32_to_cpu(in->num_entries);

p = rb_first(&dev->priv.page_root);
while (p && i < npages) {
fwp = rb_entry(p, struct fw_page, rb_node);
out->pas[i] = cpu_to_be64(fwp->addr);
p = rb_next(p);
i++;
}

out->num_entries = cpu_to_be32(i);
return 0;
}

static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed)
{
@ -398,15 +424,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
in.func_id = cpu_to_be16(func_id);
in.num_entries = cpu_to_be32(npages);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
if (err) {
mlx5_core_err(dev, "failed reclaiming pages\n");
goto out_free;
}
dev->priv.fw_pages -= npages;

if (out->hdr.status) {
err = mlx5_cmd_status_to_err(&out->hdr);
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}

@ -417,13 +437,15 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
err = -EINVAL;
goto out_free;
}
if (nclaimed)
*nclaimed = num_claimed;

for (i = 0; i < num_claimed; i++) {
addr = be64_to_cpu(out->pas[i]);
free_4k(dev, addr);
}

if (nclaimed)
*nclaimed = num_claimed;

dev->priv.fw_pages -= num_claimed;
if (func_id)
dev->priv.vfs_pages -= num_claimed;
@ -514,14 +536,10 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
p = rb_first(&dev->priv.page_root);
if (p) {
fwp = rb_entry(p, struct fw_page, rb_node);
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
free_4k(dev, fwp->addr);
nclaimed = 1;
} else {
err = reclaim_pages(dev, fwp->func_id,
optimal_reclaimed_pages(),
&nclaimed);
}
err = reclaim_pages(dev, fwp->func_id,
optimal_reclaimed_pages(),
&nclaimed);

if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
err);
@ -536,6 +554,13 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
}
} while (p);

WARN(dev->priv.fw_pages,
"FW pages counter is %d after reclaiming all pages\n",
dev->priv.fw_pages);
WARN(dev->priv.vfs_pages,
"VFs FW pages counter is %d after reclaiming all pages\n",
dev->priv.vfs_pages);

return 0;
}

@ -513,7 +513,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
{
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
void *nic_vport_context;
u8 *guid;
void *in;
int err;

@ -535,8 +534,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,

nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context,
node_guid);
MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);

err = mlx5_modify_nic_vport_context(mdev, in, inlen);

@ -1151,7 +1151,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
enc28j60_phy_read(priv, PHIR);
}
/* TX complete handler */
if ((intflags & EIR_TXIF) != 0) {
if (((intflags & EIR_TXIF) != 0) &&
((intflags & EIR_TXERIF) == 0)) {
bool err = false;
loop++;
if (netif_msg_intr(priv))
@ -1203,7 +1204,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
enc28j60_tx_clear(ndev, true);
} else
enc28j60_tx_clear(ndev, true);
locked_reg_bfclr(priv, EIR, EIR_TXERIF);
locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
}
/* RX Error handler */
if ((intflags & EIR_RXERIF) != 0) {
@ -1238,6 +1239,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
*/
static void enc28j60_hw_tx(struct enc28j60_net *priv)
{
BUG_ON(!priv->tx_skb);

if (netif_msg_tx_queued(priv))
printk(KERN_DEBUG DRV_NAME
": Tx Packet Len:%d\n", priv->tx_skb->len);

@ -772,6 +772,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring->tx_stats.xmit_called++;

/* Ensure writes are complete before HW fetches Tx descriptors */
wmb();
qlcnic_update_cmd_producer(tx_ring);

return NETDEV_TX_OK;

@ -2804,7 +2804,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
priv->tx_path_in_lpi_mode = true;
if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
priv->tx_path_in_lpi_mode = false;
if (status & CORE_IRQ_MTL_RX_OVERFLOW)
if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
priv->rx_tail_addr,
STMMAC_CHAN0);

@ -1072,12 +1072,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)

static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
struct geneve_dev *geneve = netdev_priv(dev);
/* The max_mtu calculation does not take account of GENEVE
* options, to avoid excluding potentially valid
* configurations.
*/
int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
- dev->hard_header_len;
int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;

if (geneve->remote.sa.sa_family == AF_INET6)
max_mtu -= sizeof(struct ipv6hdr);
else
max_mtu -= sizeof(struct iphdr);

if (new_mtu < 68)
return -EINVAL;

@ -2640,6 +2640,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.OutPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
skb->dev = macsec->real_dev;
len = skb->len;
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);

@ -57,6 +57,7 @@

/* PHY CTRL bits */
#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)

/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
@ -133,8 +134,8 @@ static int dp83867_of_init(struct phy_device *phydev)
static int dp83867_config_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867;
int ret;
u16 val, delay;
int ret, val;
u16 delay;

if (!phydev->priv) {
dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
@ -151,8 +152,12 @@ static int dp83867_config_init(struct phy_device *phydev)
}

if (phy_interface_is_rgmii(phydev)) {
ret = phy_write(phydev, MII_DP83867_PHYCTRL,
(dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
val = phy_read(phydev, MII_DP83867_PHYCTRL);
if (val < 0)
return val;
val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
if (ret)
return ret;
}
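The dp83867 change above replaces a blind phy_write() of MII_DP83867_PHYCTRL with a read-modify-write: the FIFO-depth field is masked out and refilled, so the other PHYCTRL bits survive. The snippet below shows the same read-modify-write idiom on a plain variable; the initial register value and the new field value are made-up numbers for illustration, and phyctrl is just a local stand-in for the phy_read()/phy_write() pair.

#include <stdint.h>
#include <stdio.h>

/* Field layout mirrors the DP83867_PHYCR_FIFO_DEPTH_* defines in the patch. */
#define FIFO_DEPTH_SHIFT 14
#define FIFO_DEPTH_MASK  (3u << FIFO_DEPTH_SHIFT)

static uint16_t phyctrl = 0x5c1f;        /* pretend current register contents */

int main(void)
{
	uint16_t fifo_depth = 2;             /* hypothetical new field value */
	uint16_t val = phyctrl;              /* stands in for phy_read(...) */

	val &= ~FIFO_DEPTH_MASK;             /* clear only the field being changed */
	val |= (uint16_t)(fifo_depth << FIFO_DEPTH_SHIFT);
	phyctrl = val;                       /* stands in for phy_write(...) */

	printf("PHYCTRL: 0x%04x\n", phyctrl);
	return 0;
}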
@ -854,6 +854,13 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
if (cdc_ncm_init(dev))
goto error2;

/* Some firmwares need a pause here or they will silently fail
* to set up the interface properly. This value was decided
* empirically on a Sierra Wireless MC7455 running 02.08.02.00
* firmware.
*/
usleep_range(10000, 20000);

/* configure data interface */
temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
if (temp) {

@ -31,7 +31,7 @@
#define NETNEXT_VERSION "08"

/* Information for net */
#define NET_VERSION "4"
#define NET_VERSION "5"

#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@ -624,6 +624,7 @@ struct r8152 {
int (*eee_get)(struct r8152 *, struct ethtool_eee *);
int (*eee_set)(struct r8152 *, struct ethtool_eee *);
bool (*in_nway)(struct r8152 *);
void (*autosuspend_en)(struct r8152 *tp, bool enable);
} rtl_ops;

int intr_interval;
@ -2408,9 +2409,6 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
if (enable) {
u32 ocp_data;

r8153_u1u2en(tp, false);
r8153_u2p3en(tp, false);

__rtl_set_wol(tp, WAKE_ANY);

ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@ -2421,7 +2419,28 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)

ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
} else {
u32 ocp_data;

__rtl_set_wol(tp, tp->saved_wolopts);

ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);

ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
ocp_data &= ~LINK_OFF_WAKE_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);

ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
}
}

static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
{
rtl_runtime_suspend_enable(tp, enable);

if (enable) {
r8153_u1u2en(tp, false);
r8153_u2p3en(tp, false);
} else {
r8153_u2p3en(tp, true);
r8153_u1u2en(tp, true);
}
@ -3512,7 +3531,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
napi_disable(&tp->napi);
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_stop_rx(tp);
rtl_runtime_suspend_enable(tp, true);
tp->rtl_ops.autosuspend_en(tp, true);
} else {
cancel_delayed_work_sync(&tp->schedule);
tp->rtl_ops.down(tp);
@ -3538,7 +3557,7 @@ static int rtl8152_resume(struct usb_interface *intf)

if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_runtime_suspend_enable(tp, false);
tp->rtl_ops.autosuspend_en(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
@ -3557,7 +3576,7 @@ static int rtl8152_resume(struct usb_interface *intf)
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
} else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
if (tp->netdev->flags & IFF_UP)
rtl_runtime_suspend_enable(tp, false);
tp->rtl_ops.autosuspend_en(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
}

@ -4137,6 +4156,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->eee_get = r8152_get_eee;
ops->eee_set = r8152_set_eee;
ops->in_nway = rtl8152_in_nway;
ops->autosuspend_en = rtl_runtime_suspend_enable;
break;

case RTL_VER_03:
@ -4152,6 +4172,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->eee_get = r8153_get_eee;
ops->eee_set = r8153_set_eee;
ops->in_nway = rtl8153_in_nway;
ops->autosuspend_en = rtl8153_runtime_enable;
break;

default:

@ -395,8 +395,11 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
dev->hard_mtu = net->mtu + net->hard_header_len;
if (dev->rx_urb_size == old_hard_mtu) {
dev->rx_urb_size = dev->hard_mtu;
if (dev->rx_urb_size > old_rx_urb_size)
if (dev->rx_urb_size > old_rx_urb_size) {
usbnet_pause_rx(dev);
usbnet_unlink_rx_urbs(dev);
usbnet_resume_rx(dev);
}
}

/* max qlen depend on hard_mtu and rx_urb_size */
@ -1508,8 +1511,9 @@ static void usbnet_bh (unsigned long param)
} else if (netif_running (dev->net) &&
netif_device_present (dev->net) &&
netif_carrier_ok(dev->net) &&
!timer_pending (&dev->delay) &&
!test_bit (EVENT_RX_HALT, &dev->flags)) {
!timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_PAUSED, &dev->flags) &&
!test_bit(EVENT_RX_HALT, &dev->flags)) {
int temp = dev->rxq.qlen;

if (temp < RX_QLEN(dev)) {

@ -151,13 +151,19 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
goto exit;
}

if (u_cmd.outsize != s_cmd->outsize ||
u_cmd.insize != s_cmd->insize) {
ret = -EINVAL;
goto exit;
}

s_cmd->command += ec->cmd_offset;
ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
/* Only copy data to userland if data was received. */
if (ret < 0)
goto exit;

if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize))
if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
ret = -EFAULT;
exit:
kfree(s_cmd);

@ -1051,6 +1051,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
qeth_l2_set_offline(cgdev);

if (card->dev) {
netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}

@ -3226,6 +3226,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_set_offline(cgdev);

if (card->dev) {
netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}
Some files were not shown because too many files have changed in this diff.