Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

A set of overlapping changes in macvlan and the rocker
driver, nothing serious.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-06-30 12:43:08 -04:00
commit b079115937
200 changed files with 1391 additions and 880 deletions


@@ -41,9 +41,9 @@ Required properties:
 Optional properties:
 
 In order to use the GPIO lines in PWM mode, some additional optional
-properties are required. Only Armada 370 and XP support these properties.
+properties are required.
 
-- compatible: Must contain "marvell,armada-370-xp-gpio"
+- compatible: Must contain "marvell,armada-370-gpio"
 
 - reg: an additional register set is needed, for the GPIO Blink
   Counter on/off registers.
@@ -71,7 +71,7 @@ Example:
 		};
 
 		gpio1: gpio@18140 {
-			compatible = "marvell,armada-370-xp-gpio";
+			compatible = "marvell,armada-370-gpio";
 			reg = <0x18140 0x40>, <0x181c8 0x08>;
 			reg-names = "gpio", "pwm";
 			ngpios = <17>;


@@ -31,7 +31,7 @@ Example:
 		compatible = "st,stm32-timers";
 		reg = <0x40010000 0x400>;
 		clocks = <&rcc 0 160>;
-		clock-names = "clk_int";
+		clock-names = "int";
 
 		pwm {
 			compatible = "st,stm32-pwm";


@@ -2964,7 +2964,7 @@ F:	sound/pci/oxygen/
 
 C6X ARCHITECTURE
 M:	Mark Salter <msalter@redhat.com>
-M:	Aurelien Jacquiot <a-jacquiot@ti.com>
+M:	Aurelien Jacquiot <jacquiot.aurelien@gmail.com>
 L:	linux-c6x-dev@linux-c6x.org
 W:	http://www.linux-c6x.org/wiki/index.php/Main_Page
 S:	Maintained


@ -1,7 +1,7 @@
VERSION = 4 VERSION = 4
PATCHLEVEL = 12 PATCHLEVEL = 12
SUBLEVEL = 0 SUBLEVEL = 0
EXTRAVERSION = -rc6 EXTRAVERSION = -rc7
NAME = Fearless Coyote NAME = Fearless Coyote
# *DOCUMENTATION* # *DOCUMENTATION*
@ -1437,7 +1437,7 @@ help:
@echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build' @echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
@echo ' make V=2 [targets] 2 => give reason for rebuild of target' @echo ' make V=2 [targets] 2 => give reason for rebuild of target'
@echo ' make O=dir [targets] Locate all output files in "dir", including .config' @echo ' make O=dir [targets] Locate all output files in "dir", including .config'
@echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)' @echo ' make C=1 [targets] Check re-compiled c source with $$CHECK (sparse by default)'
@echo ' make C=2 [targets] Force check of all c source with $$CHECK' @echo ' make C=2 [targets] Force check of all c source with $$CHECK'
@echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections' @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
@echo ' make W=n [targets] Enable extra gcc checks, n=1,2,3 where' @echo ' make W=n [targets] Enable extra gcc checks, n=1,2,3 where'


@@ -86,8 +86,6 @@ struct task_struct;
 #define TSK_K_BLINK(tsk)	TSK_K_REG(tsk, 4)
 #define TSK_K_FP(tsk)		TSK_K_REG(tsk, 0)
 
-#define thread_saved_pc(tsk)	TSK_K_BLINK(tsk)
-
 extern void start_thread(struct pt_regs * regs, unsigned long pc,
 			 unsigned long usp);


@@ -1416,6 +1416,7 @@ choice
 	config VMSPLIT_3G
 		bool "3G/1G user/kernel split"
 	config VMSPLIT_3G_OPT
+		depends on !ARM_LPAE
 		bool "3G/1G user/kernel split (for full 1G low memory)"
 	config VMSPLIT_2G
 		bool "2G/2G user/kernel split"


@@ -17,7 +17,8 @@
 		@ there.
 		.inst	'M' | ('Z' << 8) | (0x1310 << 16)	@ tstne r0, #0x4d000
 #else
-		W(mov)	r0, r0
+		AR_CLASS( mov	r0, r0 )
+		M_CLASS( nop.w )
 #endif
 		.endm


@@ -315,7 +315,7 @@ static void __init cacheid_init(void)
 	if (arch >= CPU_ARCH_ARMv6) {
 		unsigned int cachetype = read_cpuid_cachetype();
 
-		if ((arch == CPU_ARCH_ARMv7M) && !cachetype) {
+		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
 			cacheid = 0;
 		} else if ((cachetype & (7 << 29)) == 4 << 29) {
 			/* ARMv7 register format */


@@ -221,10 +221,11 @@ void update_vsyscall(struct timekeeper *tk)
 	/* tkr_mono.cycle_last == tkr_raw.cycle_last */
 	vdso_data->cs_cycle_last	= tk->tkr_mono.cycle_last;
 	vdso_data->raw_time_sec		= tk->raw_time.tv_sec;
-	vdso_data->raw_time_nsec	= tk->raw_time.tv_nsec;
+	vdso_data->raw_time_nsec	= (tk->raw_time.tv_nsec <<
+					   tk->tkr_raw.shift) +
+					  tk->tkr_raw.xtime_nsec;
 	vdso_data->xtime_clock_sec	= tk->xtime_sec;
 	vdso_data->xtime_clock_nsec	= tk->tkr_mono.xtime_nsec;
-	/* tkr_raw.xtime_nsec == 0 */
 	vdso_data->cs_mono_mult		= tk->tkr_mono.mult;
 	vdso_data->cs_raw_mult		= tk->tkr_raw.mult;
 	/* tkr_mono.shift == tkr_raw.shift */


@@ -256,7 +256,6 @@ monotonic_raw:
 	seqcnt_check fail=monotonic_raw
 
 	/* All computations are done with left-shifted nsecs. */
-	lsl	x14, x14, x12
 	get_nsec_per_sec res=x9
 	lsl	x9, x9, x12


@@ -75,11 +75,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/*
- * Return saved PC of a blocked thread.
- */
-#define thread_saved_pc(tsk)	(tsk->thread.pc)
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define KSTK_EIP(tsk)	\


@@ -95,11 +95,6 @@ static inline void release_thread(struct task_struct *dead_task)
 #define copy_segments(tsk, mm)		do { } while (0)
 #define release_segments(mm)		do { } while (0)
 
-/*
- * saved PC of a blocked thread.
- */
-#define thread_saved_pc(tsk)	(task_pt_regs(tsk)->pc)
-
 /*
  * saved kernel SP and DP of a blocked thread.
  */


@@ -69,14 +69,6 @@ void hard_reset_now (void)
 	while(1) /* waiting for RETRIBUTION! */ ;
 }
 
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	return task_pt_regs(t)->irp;
-}
-
 /* setup the child's kernel stack with a pt_regs and switch_stack on it.
  * it will be un-nested during _resume and _ret_from_sys_call when the
  * new thread is scheduled.


@@ -84,14 +84,6 @@ hard_reset_now(void)
 		; /* Wait for reset. */
 }
 
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	return task_pt_regs(t)->erp;
-}
-
 /*
  * Setup the child's kernel stack with a pt_regs and call switch_stack() on it.
  * It will be unnested during _resume and _ret_from_sys_call when the new thread


@@ -52,8 +52,6 @@ unsigned long get_wchan(struct task_struct *p);
 
 #define KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 /* Free all resources held by a thread. */
 static inline void release_thread(struct task_struct *dead_task)
 {


@@ -96,11 +96,6 @@ extern asmlinkage void *restore_user_regs(const struct user_context *target, ...
 #define release_segments(mm)		do { } while (0)
 #define forget_segments()		do { } while (0)
 
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define	KSTK_EIP(tsk)	((tsk)->thread.frame0->pc)


@@ -198,15 +198,6 @@ unsigned long get_wchan(struct task_struct *p)
 	return 0;
 }
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	/* Check whether the thread is blocked in resume() */
-	if (in_sched_functions(tsk->thread.pc))
-		return ((unsigned long *)tsk->thread.fp)[2];
-	else
-		return tsk->thread.pc;
-}
-
 int elf_check_arch(const struct elf32_hdr *hdr)
 {
 	unsigned long hsr0 = __get_HSR(0);


@@ -110,10 +110,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk);
 unsigned long get_wchan(struct task_struct *p);
 
 #define	KSTK_EIP(tsk)	\


@@ -129,11 +129,6 @@ int copy_thread(unsigned long clone_flags,
 	return 0;
 }
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return ((struct pt_regs *)tsk->thread.esp0)->pc;
-}
-
 unsigned long get_wchan(struct task_struct *p)
 {
 	unsigned long fp, pc;


@@ -33,9 +33,6 @@
 /*  task_struct, defined elsewhere, is the "process descriptor" */
 struct task_struct;
 
-/*  this is defined in arch/process.c  */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
 
 /*


@@ -60,14 +60,6 @@ void arch_cpu_idle(void)
 	local_irq_enable();
 }
 
-/*
- *  Return saved PC of a blocked thread
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return 0;
-}
-
 /*
  * Copy architecture-specific thread state
  */


@@ -601,23 +601,6 @@ ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
 	*unat = (*unat & ~mask) | (nat << bit);
 }
 
-/*
- * Return saved PC of a blocked thread.
- * Note that the only way T can block is through a call to schedule() -> switch_to().
- */
-static inline unsigned long
-thread_saved_pc (struct task_struct *t)
-{
-	struct unw_frame_info info;
-	unsigned long ip;
-
-	unw_init_from_blocked_task(&info, t);
-	if (unw_unwind(&info) < 0)
-		return 0;
-	unw_get_ip(&info, &ip);
-	return ip;
-}
-
 /*
  * Get the current instruction/program counter value.
  */


@@ -122,8 +122,6 @@ extern void release_thread(struct task_struct *);
 extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
 extern void release_segments(struct mm_struct * mm);
 
-extern unsigned long thread_saved_pc(struct task_struct *);
-
 /* Copy and release all segment info associated with a VM */
 #define copy_segments(p, mm)		do { } while (0)
 #define release_segments(mm)		do { } while (0)


@@ -39,14 +39,6 @@
 
 #include <linux/err.h>
 
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return tsk->thread.lr;
-}
-
 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);


@@ -130,8 +130,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define	KSTK_EIP(tsk)	\


@@ -40,20 +40,6 @@
 asmlinkage void ret_from_fork(void);
 asmlinkage void ret_from_kernel_thread(void);
 
-
-/*
- * Return saved PC from a blocked thread
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
-	/* Check whether the thread is blocked in resume() */
-	if (in_sched_functions(sw->retpc))
-		return ((unsigned long *)sw->a6)[1];
-	else
-		return sw->retpc;
-}
-
 void arch_cpu_idle(void)
 {
 #if defined(MACH_ATARI_ONLY)


@@ -69,8 +69,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 extern unsigned long get_wchan(struct task_struct *p);
 
 # define KSTK_EIP(tsk)	(0)
@@ -121,10 +119,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/* Return saved (kernel) PC of a blocked thread.  */
-#  define thread_saved_pc(tsk)	\
-	((tsk)->thread.regs ? (tsk)->thread.regs->r15 : 0)
-
 unsigned long get_wchan(struct task_struct *p);
 
 /* The size allocated for kernel stacks. This _must_ be a power of two! */


@@ -119,23 +119,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	return 0;
 }
 
-#ifndef CONFIG_MMU
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct cpu_context *ctx =
-		&(((struct thread_info *)(tsk->stack))->cpu_context);
-
-	/* Check whether the thread is blocked in resume() */
-	if (in_sched_functions(ctx->r15))
-		return (unsigned long)ctx->r15;
-	else
-		return ctx->r14;
-}
-#endif
-
 unsigned long get_wchan(struct task_struct *p)
 {
 	/* TBD (used by procfs) */


@@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
 int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
 			  bool user, bool kernel)
 {
-	int idx_user, idx_kernel;
+	/*
+	 * Initialize idx_user and idx_kernel to workaround bogus
+	 * maybe-initialized warning when using GCC 6.
+	 */
+	int idx_user = 0, idx_kernel = 0;
 	unsigned long flags, old_entryhi;
 
 	local_irq_save(flags);


@@ -132,11 +132,6 @@ static inline void start_thread(struct pt_regs *regs,
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define task_pt_regs(task) ((task)->thread.uregs)


@@ -39,14 +39,6 @@
 #include <asm/gdb-stub.h>
 #include "internal.h"
 
-/*
- * return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return ((unsigned long *) tsk->thread.sp)[3];
-}
-
 /*
  * power off function, if any
 */
*/ */


@@ -75,9 +75,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/* Return saved PC of a blocked thread. */
-#define thread_saved_pc(tsk)	((tsk)->thread.kregs->ea)
-
 extern unsigned long get_wchan(struct task_struct *p);
 
 #define task_pt_regs(p) \


@@ -84,11 +84,6 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
 void release_thread(struct task_struct *);
 unsigned long get_wchan(struct task_struct *p);
 
-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 #define init_stack      (init_thread_union.stack)
 
 #define cpu_relax()     barrier()


@@ -110,11 +110,6 @@ void show_regs(struct pt_regs *regs)
 	show_registers(regs);
 }
 
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	return (unsigned long)user_regs(t->stack)->pc;
-}
-
 void release_thread(struct task_struct *dead_task)
 {
 }


@@ -163,12 +163,7 @@ struct thread_struct {
 	.flags			= 0 \
 }
 
-/*
- * Return saved PC of a blocked thread. This is used by ps mostly.
- */
-
 struct task_struct;
-unsigned long thread_saved_pc(struct task_struct *t);
 void show_trace(struct task_struct *task, unsigned long *stack);
 
 /*


@@ -239,11 +239,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 	return 0;
 }
 
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	return t->thread.regs.kpc;
-}
-
 unsigned long
 get_wchan(struct task_struct *p)
 {


@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
+extern int is_current_kprobe_addr(unsigned long addr);
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
 			   struct kprobe_ctlblk *kcb);


@@ -378,12 +378,6 @@ struct thread_struct {
 }
 #endif
 
-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-#define thread_saved_pc(tsk)    \
-	((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
-
 #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.regs)
 
 unsigned long get_wchan(struct task_struct *p);


@@ -1411,10 +1411,8 @@ USE_TEXT_SECTION()
 	.balign	IFETCH_ALIGN_BYTES
 do_hash_page:
 #ifdef CONFIG_PPC_STD_MMU_64
-	andis.	r0,r4,0xa410		/* weird error? */
+	andis.	r0,r4,0xa450		/* weird error? */
 	bne-	handle_page_fault	/* if not, try to insert a HPTE */
-	andis.	r0,r4,DSISR_DABRMATCH@h
-	bne-	handle_dabr_fault
 	CURRENT_THREAD_INFO(r11, r1)
 	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
 	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
@@ -1438,11 +1436,16 @@ do_hash_page:
 
 	/* Error */
 	blt-	13f
+
+	/* Reload DSISR into r4 for the DABR check below */
+	ld	r4,_DSISR(r1)
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
-11:	ld	r4,_DAR(r1)
+11:	andis.	r0,r4,DSISR_DABRMATCH@h
+	bne-	handle_dabr_fault
+	ld	r4,_DAR(r1)
 	ld	r5,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_page_fault


@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
 
+int is_current_kprobe_addr(unsigned long addr)
+{
+	struct kprobe *p = kprobe_running();
+	return (p && (unsigned long)p->addr == addr) ? 1 : 0;
+}
+
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
 	return  (addr >= (unsigned long)__kprobes_text_start &&
@@ -617,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
 #endif
 
+	/*
+	 * jprobes use jprobe_return() which skips the normal return
+	 * path of the function, and this messes up the accounting of the
+	 * function graph tracer.
+	 *
+	 * Pause function graph tracing while performing the jprobe function.
+	 */
+	pause_graph_tracing();
+
 	return 1;
 }
 NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -642,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	 * saved regs...
 	 */
 	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+	/* It's OK to start function graph tracing again */
+	unpause_graph_tracing();
 	preempt_enable_no_resched();
 	return 1;
 }


@@ -615,6 +615,24 @@ void __init exc_lvl_early_init(void)
 }
 #endif
 
+/*
+ * Emergency stacks are used for a range of things, from asynchronous
+ * NMIs (system reset, machine check) to synchronous, process context.
+ * We set preempt_count to zero, even though that isn't necessarily correct. To
+ * get the right value we'd need to copy it from the previous thread_info, but
+ * doing that might fault causing more problems.
+ * TODO: what to do with accounting?
+ */
+static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
+{
+	ti->task = NULL;
+	ti->cpu = cpu;
+	ti->preempt_count = 0;
+	ti->local_flags = 0;
+	ti->flags = 0;
+	klp_init_thread_info(ti);
+}
+
 /*
  * Stack space used when we detect a bad kernel stack pointer, and
  * early in SMP boots before relocation is enabled. Exclusive emergency
@@ -633,24 +651,31 @@ void __init emergency_stack_init(void)
 	 * Since we use these as temporary stacks during secondary CPU
 	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
+	 *
+	 * The IRQ stacks allocated elsewhere in this file are zeroed and
+	 * initialized in kernel/irq.c. These are initialized here in order
+	 * to have emergency stacks available as early as possible.
 	 */
 	limit = min(safe_stack_limit(), ppc64_rma_size);
 
 	for_each_possible_cpu(i) {
 		struct thread_info *ti;
 		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
 		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
 
 #ifdef CONFIG_PPC_BOOK3S_64
 		/* emergency stack for NMI exception handling. */
 		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
 		paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;
 
 		/* emergency stack for machine check exception handling. */
 		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
 		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
 	}


@@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller)
 	stdu	r1,-SWITCH_FRAME_SIZE(r1)
 
 	/* Save all gprs to pt_regs */
-	SAVE_8GPRS(0,r1)
-	SAVE_8GPRS(8,r1)
-	SAVE_8GPRS(16,r1)
-	SAVE_8GPRS(24,r1)
+	SAVE_GPR(0, r1)
+	SAVE_10GPRS(2, r1)
+	SAVE_10GPRS(12, r1)
+	SAVE_10GPRS(22, r1)
+
+	/* Save previous stack pointer (r1) */
+	addi	r8, r1, SWITCH_FRAME_SIZE
+	std	r8, GPR1(r1)
 
 	/* Load special regs for save below */
 	mfmsr	r8
@@ -95,18 +99,44 @@ ftrace_call:
 	bl	ftrace_stub
 	nop
 
-	/* Load ctr with the possibly modified NIP */
-	ld	r3, _NIP(r1)
-	mtctr	r3
+	/* Load the possibly modified NIP */
+	ld	r15, _NIP(r1)
+
 #ifdef CONFIG_LIVEPATCH
-	cmpd	r14,r3		/* has NIP been altered? */
+	cmpd	r14, r15	/* has NIP been altered? */
 #endif
 
+#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
+	/* NIP has not been altered, skip over further checks */
+	beq	1f
+
+	/* Check if there is an active kprobe on us */
+	subi	r3, r14, 4
+	bl	is_current_kprobe_addr
+	nop
+
+	/*
+	 * If r3 == 1, then this is a kprobe/jprobe.
+	 * else, this is livepatched function.
+	 *
+	 * The conditional branch for livepatch_handler below will use the
+	 * result of this comparison. For kprobe/jprobe, we just need to branch to
+	 * the new NIP, not call livepatch_handler. The branch below is bne, so we
+	 * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
+	 * CR0[EQ] = (r3 == 1).
+	 */
+	cmpdi	r3, 1
+1:
+#endif
+
+	/* Load CTR with the possibly modified NIP */
+	mtctr	r15
+
 	/* Restore gprs */
-	REST_8GPRS(0,r1)
-	REST_8GPRS(8,r1)
-	REST_8GPRS(16,r1)
-	REST_8GPRS(24,r1)
+	REST_GPR(0,r1)
+	REST_10GPRS(2,r1)
+	REST_10GPRS(12,r1)
+	REST_10GPRS(22,r1)
 
 	/* Restore possibly modified LR */
 	ld	r0, _LINK(r1)
@@ -119,7 +149,10 @@ ftrace_call:
 	addi r1, r1, SWITCH_FRAME_SIZE
 
 #ifdef CONFIG_LIVEPATCH
-	/* Based on the cmpd above, if the NIP was altered handle livepatch */
+	/*
+	 * Based on the cmpd or cmpdi above, if the NIP was altered and we're
+	 * not on a kprobe/jprobe, then handle livepatch.
+	 */
 	bne-	livepatch_handler
 #endif


@@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
 		break;
 	case KVM_REG_PPC_TB_OFFSET:
+		/*
+		 * POWER9 DD1 has an erratum where writing TBU40 causes
+		 * the timebase to lose ticks. So we don't let the
+		 * timebase offset be changed on P9 DD1. (It is
+		 * initialized to zero.)
+		 */
+		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			break;
 		/* round up to multiple of 2^24 */
 		vcpu->arch.vcore->tb_offset =
 			ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	int r;
 	int srcu_idx;
+	unsigned long ebb_regs[3] = {};	/* shut up GCC */
+	unsigned long user_tar = 0;
+	unsigned int user_vrsave;
 
 	if (!vcpu->arch.sane) {
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
 	}
 
+	/*
+	 * Don't allow entry with a suspended transaction, because
+	 * the guest entry/exit code will lose it.
+	 * If the guest has TM enabled, save away their TM-related SPRs
+	 * (they will get restored by the TM unavailable interrupt).
+	 */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+	    (current->thread.regs->msr & MSR_TM)) {
+		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+			run->fail_entry.hardware_entry_failure_reason = 0;
+			return -EINVAL;
+		}
+		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+		current->thread.regs->msr &= ~MSR_TM;
+	}
+#endif
+
 	kvmppc_core_prepare_to_enter(vcpu);
 
 	/* No need to go into the guest when all we'll do is come back out */
@@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	flush_all_to_thread(current);
 
+	/* Save userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		ebb_regs[0] = mfspr(SPRN_EBBHR);
+		ebb_regs[1] = mfspr(SPRN_EBBRR);
+		ebb_regs[2] = mfspr(SPRN_BESCR);
+		user_tar = mfspr(SPRN_TAR);
+	}
+	user_vrsave = mfspr(SPRN_VRSAVE);
+
 	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
 	vcpu->arch.pgdir = current->mm->pgd;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		}
 	} while (is_kvmppc_resume_guest(r));
 
+	/* Restore userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		mtspr(SPRN_EBBHR, ebb_regs[0]);
+		mtspr(SPRN_EBBRR, ebb_regs[1]);
+		mtspr(SPRN_BESCR, ebb_regs[2]);
+		mtspr(SPRN_TAR, user_tar);
+		mtspr(SPRN_FSCR, current->thread.fscr);
+	}
+	mtspr(SPRN_VRSAVE, user_vrsave);
+
  out:
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	atomic_dec(&vcpu->kvm->arch.vcpus_running);


@@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	 * Put whatever is in the decrementer into the
 	 * hypervisor decrementer.
 	 */
+BEGIN_FTR_SECTION
+	ld	r5, HSTATE_KVM_VCORE(r13)
+	ld	r6, VCORE_KVM(r5)
+	ld	r9, KVM_HOST_LPCR(r6)
+	andis.	r9, r9, LPCR_LD@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	mfspr	r8,SPRN_DEC
 	mftb	r7
-	mtspr	SPRN_HDEC,r8
+BEGIN_FTR_SECTION
+	/* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
+	bne	32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	extsw	r8,r8
+32:	mtspr	SPRN_HDEC,r8
 	add	r8,r8,r7
 	std	r8,HSTATE_DECEXP(r13)


@@ -32,12 +32,29 @@
 #include <asm/opal.h>
 #include <asm/xive-regs.h>
 
+/* Sign-extend HDEC if not on POWER9 */
+#define EXTEND_HDEC(reg)			\
+BEGIN_FTR_SECTION;				\
+	extsw	reg, reg;			\
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+
 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
 
 /* Values in HSTATE_NAPPING(r13) */
 #define NAPPING_CEDE	1
 #define NAPPING_NOVCPU	2
 
+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS			144
+#define STACK_SLOT_TRAP		(SFS-4)
+#define STACK_SLOT_TID		(SFS-16)
+#define STACK_SLOT_PSSCR	(SFS-24)
+#define STACK_SLOT_PID		(SFS-32)
+#define STACK_SLOT_IAMR		(SFS-40)
+#define STACK_SLOT_CIABR	(SFS-48)
+#define STACK_SLOT_DAWR		(SFS-56)
+#define STACK_SLOT_DAWRX	(SFS-64)
+
 /*
  * Call kvmppc_hv_entry in real mode.
  * Must be called with interrupts hard-disabled.
@@ -214,6 +231,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 kvmppc_primary_no_guest:
 	/* We handle this much like a ceded vcpu */
 	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
+	/* HDEC value came from DEC in the first place, it will fit */
 	mfspr	r3, SPRN_HDEC
 	mtspr	SPRN_DEC, r3
 	/*
@@ -295,8 +314,9 @@ kvm_novcpu_wakeup:
 
 	/* See if our timeslice has expired (HDEC is negative) */
 	mfspr	r0, SPRN_HDEC
+	EXTEND_HDEC(r0)
 	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
-	cmpwi	r0, 0
+	cmpdi	r0, 0
 	blt	kvm_novcpu_exit
 
 	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
@@ -319,10 +339,10 @@ kvm_novcpu_exit:
 	bl	kvmhv_accumulate_time
 #endif
 13:	mr	r3, r12
-	stw	r12, 112-4(r1)
+	stw	r12, STACK_SLOT_TRAP(r1)
 	bl	kvmhv_commence_exit
 	nop
-	lwz	r12, 112-4(r1)
+	lwz	r12, STACK_SLOT_TRAP(r1)
 	b	kvmhv_switch_to_host
 
 /*
@@ -390,8 +410,8 @@ kvm_secondary_got_guest:
 	lbz	r4, HSTATE_PTID(r13)
 	cmpwi	r4, 0
 	bne	63f
-	lis	r6, 0x7fff
-	ori	r6, r6, 0xffff
+	LOAD_REG_ADDR(r6, decrementer_max)
+	ld	r6, 0(r6)
 	mtspr	SPRN_HDEC, r6
 	/* and set per-LPAR registers, if doing dynamic micro-threading */
 	ld	r6, HSTATE_SPLIT_MODE(r13)
@@ -545,11 +565,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  *                                                                            *
  *****************************************************************************/
 
-/* Stack frame offsets */
-#define STACK_SLOT_TID		(112-16)
-#define STACK_SLOT_PSSCR	(112-24)
-#define STACK_SLOT_PID		(112-32)
-
 .global kvmppc_hv_entry
 kvmppc_hv_entry:
@@ -565,7 +580,7 @@ kvmppc_hv_entry:
 	 */
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
-	stdu	r1, -112(r1)
+	stdu	r1, -SFS(r1)
 
 	/* Save R1 in the PACA */
 	std	r1, HSTATE_HOST_R1(r13)
@@ -749,10 +764,20 @@ BEGIN_FTR_SECTION
 	mfspr	r5, SPRN_TIDR
 	mfspr	r6, SPRN_PSSCR
 	mfspr	r7, SPRN_PID
+	mfspr	r8, SPRN_IAMR
 	std	r5, STACK_SLOT_TID(r1)
 	std	r6, STACK_SLOT_PSSCR(r1)
 	std	r7, STACK_SLOT_PID(r1)
+	std	r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+	mfspr	r5, SPRN_CIABR
+	mfspr	r6, SPRN_DAWR
+	mfspr	r7, SPRN_DAWRX
+	std	r5, STACK_SLOT_CIABR(r1)
+	std	r6, STACK_SLOT_DAWR(r1)
+	std	r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 BEGIN_FTR_SECTION
 	/* Set partition DABR */
@@ -968,7 +993,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 
 	/* Check if HDEC expires soon */
 	mfspr	r3, SPRN_HDEC
-	cmpwi	r3, 512		/* 1 microsecond */
+	EXTEND_HDEC(r3)
+	cmpdi	r3, 512		/* 1 microsecond */
 	blt	hdec_soon
 
 #ifdef CONFIG_KVM_XICS
@@ -1505,11 +1531,10 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 	 * set by the guest could disrupt the host.
 	 */
 	li	r0, 0
-	mtspr	SPRN_IAMR, r0
-	mtspr	SPRN_CIABR, r0
-	mtspr	SPRN_DAWRX, r0
+	mtspr	SPRN_PSPB, r0
 	mtspr	SPRN_WORT, r0
 BEGIN_FTR_SECTION
+	mtspr	SPRN_IAMR, r0
 	mtspr	SPRN_TCSCR, r0
 	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
 	li	r0, 1
@@ -1525,6 +1550,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 	std	r6,VCPU_UAMOR(r9)
 	li	r6,0
 	mtspr	SPRN_AMR,r6
+	mtspr	SPRN_UAMOR, r6
 
 	/* Switch DSCR back to host value */
 	mfspr	r8, SPRN_DSCR
@@ -1669,13 +1695,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	ptesync
 
 	/* Restore host values of some registers */
+BEGIN_FTR_SECTION
+	ld	r5, STACK_SLOT_CIABR(r1)
+	ld	r6, STACK_SLOT_DAWR(r1)
+	ld	r7, STACK_SLOT_DAWRX(r1)
+	mtspr	SPRN_CIABR, r5
+	mtspr	SPRN_DAWR, r6
+	mtspr	SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 BEGIN_FTR_SECTION
 	ld	r5, STACK_SLOT_TID(r1)
 	ld	r6, STACK_SLOT_PSSCR(r1)
 	ld	r7, STACK_SLOT_PID(r1)
+	ld	r8, STACK_SLOT_IAMR(r1)
 	mtspr	SPRN_TIDR, r5
 	mtspr	SPRN_PSSCR, r6
 	mtspr	SPRN_PID, r7
+	mtspr	SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 BEGIN_FTR_SECTION
 	PPC_INVALIDATE_ERAT
@@ -1819,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
 
-	ld	r0, 112+PPC_LR_STKOFF(r1)
-	addi	r1, r1, 112
+	ld	r0, SFS+PPC_LR_STKOFF(r1)
+	addi	r1, r1, SFS
 	mtlr	r0
 	blr
@@ -2366,12 +2402,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
 	mfspr	r3, SPRN_DEC
 	mfspr	r4, SPRN_HDEC
 	mftb	r5
-	cmpw	r3, r4
+	extsw	r3, r3
+	EXTEND_HDEC(r4)
+	cmpd	r3, r4
 	ble	67f
 	mtspr	SPRN_DEC, r4
 67:
 	/* save expiry time of guest decrementer */
-	extsw	r3, r3
 	add	r3, r3, r5
 	ld	r4, HSTATE_KVM_VCPU(r13)
 	ld	r5, HSTATE_KVM_VCORE(r13)


@@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
 			struct pt_regs *regs_user_copy)
 {
 	regs_user->regs = task_pt_regs(current);
-	regs_user->abi = perf_reg_abi(current);
+	regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
+			 PERF_SAMPLE_REGS_ABI_NONE;
 }


@@ -449,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
 	return mmio_atsd_reg;
 }
 
-static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
+static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
 {
 	unsigned long launch;
 
@@ -465,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
 	/* PID */
 	launch |= pid << PPC_BITLSHIFT(38);
 
+	/* No flush */
+	launch |= !flush << PPC_BITLSHIFT(39);
+
 	/* Invalidating the entire process doesn't use a va */
 	return mmio_launch_invalidate(npu, launch, 0);
 }
 
 static int mmio_invalidate_va(struct npu *npu, unsigned long va,
-			unsigned long pid)
+			unsigned long pid, bool flush)
 {
 	unsigned long launch;
 
@@ -486,26 +489,60 @@ static int mmio_invalidate_va(struct npu *npu, unsigned long va,
 	/* PID */
 	launch |= pid << PPC_BITLSHIFT(38);
 
+	/* No flush */
+	launch |= !flush << PPC_BITLSHIFT(39);
+
 	return mmio_launch_invalidate(npu, launch, va);
 }
 
 #define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
 
+struct mmio_atsd_reg {
+	struct npu *npu;
+	int reg;
+};
+
+static void mmio_invalidate_wait(
+	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
+{
+	struct npu *npu;
+	int i, reg;
+
+	/* Wait for all invalidations to complete */
+	for (i = 0; i <= max_npu2_index; i++) {
+		if (mmio_atsd_reg[i].reg < 0)
+			continue;
+
+		/* Wait for completion */
+		npu = mmio_atsd_reg[i].npu;
+		reg = mmio_atsd_reg[i].reg;
+		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
+			cpu_relax();
+
+		put_mmio_atsd_reg(npu, reg);
+
+		/*
+		 * The GPU requires two flush ATSDs to ensure all entries have
+		 * been flushed. We use PID 0 as it will never be used for a
+		 * process on the GPU.
+		 */
+		if (flush)
+			mmio_invalidate_pid(npu, 0, true);
+	}
+}
+
 /*
  * Invalidate either a single address or an entire PID depending on
  * the value of va.
 */
 static void mmio_invalidate(struct npu_context *npu_context, int va,
-			unsigned long address)
+			unsigned long address, bool flush)
 {
-	int i, j, reg;
+	int i, j;
 	struct npu *npu;
 	struct pnv_phb *nphb;
 	struct pci_dev *npdev;
-	struct {
-		struct npu *npu;
-		int reg;
-	} mmio_atsd_reg[NV_MAX_NPUS];
+	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
 	unsigned long pid = npu_context->mm->context.id;
 
 	/*
@@ -525,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
 			if (va)
 				mmio_atsd_reg[i].reg =
-					mmio_invalidate_va(npu, address, pid);
+					mmio_invalidate_va(npu, address, pid,
+							flush);
 			else
 				mmio_atsd_reg[i].reg =
-					mmio_invalidate_pid(npu, pid);
+					mmio_invalidate_pid(npu, pid, flush);
 
 		/*
		 * The NPU hardware forwards the shootdown to all GPUs
@@ -544,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
 	 */
 	flush_tlb_mm(npu_context->mm);
 
-	/* Wait for all invalidations to complete */
-	for (i = 0; i <= max_npu2_index; i++) {
-		if (mmio_atsd_reg[i].reg < 0)
-			continue;
-
-		/* Wait for completion */
-		npu = mmio_atsd_reg[i].npu;
-		reg = mmio_atsd_reg[i].reg;
-		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
-			cpu_relax();
-
-		put_mmio_atsd_reg(npu, reg);
-	}
+	mmio_invalidate_wait(mmio_atsd_reg, flush);
+	if (flush)
+		/* Wait for the flush to complete */
+		mmio_invalidate_wait(mmio_atsd_reg, false);
 }
 
@@ -571,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
 	 * There should be no more translation requests for this PID, but we
	 * need to ensure any entries for it are removed from the TLB.
 	 */
-	mmio_invalidate(npu_context, 0, 0);
+	mmio_invalidate(npu_context, 0, 0, true);
 }
 
 static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
@@ -581,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
 {
 	struct npu_context *npu_context = mn_to_npu_context(mn);
 
-	mmio_invalidate(npu_context, 1, address);
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
@@ -590,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
 {
 	struct npu_context *npu_context = mn_to_npu_context(mn);
 
-	mmio_invalidate(npu_context, 1, address);
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
@@ -600,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
 	struct npu_context *npu_context = mn_to_npu_context(mn);
 	unsigned long address;
 
-	for (address = start; address <= end; address += PAGE_SIZE)
-		mmio_invalidate(npu_context, 1, address);
+	for (address = start; address < end; address += PAGE_SIZE)
+		mmio_invalidate(npu_context, 1, address, false);
+
+	/* Do the flush only on the final address == end */
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -651,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 		/* No nvlink associated with this GPU device */
 		return ERR_PTR(-ENODEV);
 
-	if (!mm) {
-		/* kernel thread contexts are not supported */
+	if (!mm || mm->context.id == 0) {
+		/*
+		 * Kernel thread contexts are not supported and context id 0 is
+		 * reserved on the GPU.
+		 */
 		return ERR_PTR(-EINVAL);
 	}


@@ -221,11 +221,6 @@ extern void release_thread(struct task_struct *);
 /* Free guarded storage control block for current */
 void exit_thread_gs(void);
 
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *) \
         (task_stack_page(tsk) + THREAD_SIZE) - 1)


@@ -564,8 +564,6 @@ static struct kset *ipl_kset;
 
 static void __ipl_run(void *unused)
 {
-	if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW)
-		diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
 	diag308(DIAG308_LOAD_CLEAR, NULL);
 	if (MACHINE_IS_VM)
 		__cpcmd("IPL", NULL, 0, NULL);
@@ -1088,10 +1086,7 @@ static void __reipl_run(void *unused)
 		break;
 	case REIPL_METHOD_CCW_DIAG:
 		diag308(DIAG308_SET, reipl_block_ccw);
-		if (MACHINE_IS_LPAR)
-			diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
-		else
-			diag308(DIAG308_LOAD_CLEAR, NULL);
+		diag308(DIAG308_LOAD_CLEAR, NULL);
 		break;
 	case REIPL_METHOD_FCP_RW_DIAG:
 		diag308(DIAG308_SET, reipl_block_fcp);


@@ -41,31 +41,6 @@
 
 asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
 
-/*
- * Return saved PC of a blocked thread. used in kernel/sched.
- * resume in entry.S does not create a new stack frame, it
- * just stores the registers %r6-%r15 to the frame given by
- * schedule. We want to return the address of the caller of
- * schedule, so we have to walk the backchain one time to
- * find the frame schedule() store its return address.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct stack_frame *sf, *low, *high;
-
-	if (!tsk || !task_stack_page(tsk))
-		return 0;
-	low = task_stack_page(tsk);
-	high = (struct stack_frame *) task_pt_regs(tsk);
-	sf = (struct stack_frame *) tsk->thread.ksp;
-	if (sf <= low || sf > high)
-		return 0;
-	sf = (struct stack_frame *) sf->back_chain;
-	if (sf <= low || sf > high)
-		return 0;
-	return sf->gprs[8];
-}
-
 extern void kernel_thread_starter(void);
 
 /*


@@ -977,11 +977,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 	ptr = asce.origin * 4096;
 	if (asce.r) {
 		*fake = 1;
+		ptr = 0;
 		asce.dt = ASCE_TYPE_REGION1;
 	}
 	switch (asce.dt) {
 	case ASCE_TYPE_REGION1:
-		if (vaddr.rfx01 > asce.tl && !asce.r)
+		if (vaddr.rfx01 > asce.tl && !*fake)
 			return PGM_REGION_FIRST_TRANS;
 		break;
 	case ASCE_TYPE_REGION2:
@@ -1009,8 +1010,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 		union region1_table_entry rfte;
 
 		if (*fake) {
-			/* offset in 16EB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
+			ptr += (unsigned long) vaddr.rfx << 53;
 			rfte.val = ptr;
 			goto shadow_r2t;
 		}
@@ -1036,8 +1036,7 @@ shadow_r2t:
 		union region2_table_entry rste;
 
 		if (*fake) {
-			/* offset in 8PB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
+			ptr += (unsigned long) vaddr.rsx << 42;
 			rste.val = ptr;
 			goto shadow_r3t;
 		}
@@ -1064,8 +1063,7 @@ shadow_r3t:
 		union region3_table_entry rtte;
 
 		if (*fake) {
-			/* offset in 4TB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
+			ptr += (unsigned long) vaddr.rtx << 31;
 			rtte.val = ptr;
 			goto shadow_sgt;
 		}
@@ -1101,8 +1099,7 @@ shadow_sgt:
 		union segment_table_entry ste;
 
 		if (*fake) {
-			/* offset in 2G guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
+			ptr += (unsigned long) vaddr.sx << 20;
 			ste.val = ptr;
 			goto shadow_pgt;
 		}


@@ -13,7 +13,6 @@ struct task_struct;
  */
 extern void (*cpu_wait)(void);
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
 extern void start_thread(struct pt_regs *regs,
 			unsigned long pc, unsigned long sp);
 extern unsigned long get_wchan(struct task_struct *p);


@@ -101,11 +101,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
 	return 1;
 }
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return task_pt_regs(tsk)->cp0_epc;
-}
-
 unsigned long get_wchan(struct task_struct *task)
 {
 	if (!task || task == current || task->state == TASK_RUNNING)


@@ -67,9 +67,6 @@ struct thread_struct {
 	.current_ds = KERNEL_DS, \
 }
 
-/* Return saved PC of a blocked thread. */
-unsigned long thread_saved_pc(struct task_struct *t);
-
 /* Do necessary setup to start up a newly executed thread. */
 static inline void start_thread(struct pt_regs * regs, unsigned long pc,
 				    unsigned long sp)


@@ -89,9 +89,7 @@ struct thread_struct {
 #include <linux/types.h>
 #include <asm/fpumacro.h>
 
-/* Return saved PC of a blocked thread. */
 struct task_struct;
-unsigned long thread_saved_pc(struct task_struct *);
 
 /* On Uniprocessor, even in RMO processes see TSO semantics */
 #ifdef CONFIG_SMP


@@ -176,14 +176,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
 	printk("\n");
 }
 
-/*
- * Note: sparc64 has a pretty intricated thread_saved_pc, check it out.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return task_thread_info(tsk)->kpc;
-}
-
 /*
  * Free current thread data structures etc..
 */


@@ -400,25 +400,6 @@ core_initcall(sparc_sysrq_init);
 
 #endif
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct thread_info *ti = task_thread_info(tsk);
-	unsigned long ret = 0xdeadbeefUL;
-
-	if (ti && ti->ksp) {
-		unsigned long *sp;
-		sp = (unsigned long *)(ti->ksp + STACK_BIAS);
-		if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
-		    sp[14]) {
-			unsigned long *fp;
-			fp = (unsigned long *)(sp[14] + STACK_BIAS);
-			if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
-				ret = fp[15];
-		}
-	}
-	return ret;
-}
-
 /* Free current thread data structures etc.. */
 void exit_thread(struct task_struct *tsk)
 {


@@ -214,13 +214,6 @@ static inline void release_thread(struct task_struct *dead_task)
 
 extern void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags);
 
-
-/*
- * Return saved (kernel) PC of a blocked thread.
- * Only used in a printk() in kernel/sched/core.c, so don't work too hard.
- */
-#define thread_saved_pc(t)   ((t)->thread.pc)
-
 unsigned long get_wchan(struct task_struct *p);
 
 /* Return initial ksp value for given task. */


@@ -58,8 +58,6 @@ static inline void release_thread(struct task_struct *task)
 {
 }
 
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 static inline void mm_copy_segments(struct mm_struct *from_mm,
 				    struct mm_struct *new_mm)
 {


@@ -56,12 +56,6 @@ union thread_union cpu0_irqstack
 	__attribute__((__section__(".data..init_irqstack"))) =
 		{ INIT_THREAD_INFO(init_task) };
 
-unsigned long thread_saved_pc(struct task_struct *task)
-{
-	/* FIXME: Need to look up userspace_pid by cpu */
-	return os_process_pc(userspace_pid[0]);
-}
-
 /* Changed in setup_arch, which is called in early boot */
 static char host_info[(__NEW_UTS_LEN + 1) * 5];


@@ -431,11 +431,11 @@ static __initconst const u64 skl_hw_cache_event_ids
  [ C(DTLB) ] = {
 	[ C(OP_READ) ] = {
 		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
-		[ C(RESULT_MISS)   ] = 0x608,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
+		[ C(RESULT_MISS)   ] = 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
 	},
 	[ C(OP_WRITE) ] = {
 		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
-		[ C(RESULT_MISS)   ] = 0x649,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
+		[ C(RESULT_MISS)   ] = 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
 	},
 	[ C(OP_PREFETCH) ] = {
 		[ C(RESULT_ACCESS) ] = 0x0,


@@ -296,6 +296,7 @@ struct x86_emulate_ctxt {
 
 	bool perm_ok; /* do not check permissions if true */
 	bool ud;	/* inject an #UD if host doesn't support insn */
+	bool tf;	/* TF value before instruction (after for syscall/sysret) */
 
 	bool have_exception;
 	struct x86_exception exception;


@@ -2,8 +2,7 @@
 #define _ASM_X86_MSHYPER_H
 
 #include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/clocksource.h>
+#include <linux/atomic.h>
 #include <asm/hyperv.h>
 
 /*

View File

@ -860,8 +860,6 @@ extern unsigned long KSTK_ESP(struct task_struct *task);
#endif	/* CONFIG_X86_64 */

-extern unsigned long thread_saved_pc(struct task_struct *tsk);

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
			 unsigned long new_sp);

View File

@ -544,17 +544,6 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
	return randomize_page(mm->brk, 0x02000000);
}

-/*
- * Return saved PC of a blocked thread.
- * What is this good for? it will be always the scheduler or ret_from_fork.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct inactive_task_frame *frame =
-		(struct inactive_task_frame *) READ_ONCE(tsk->thread.sp);
-	return READ_ONCE_NOCHECK(frame->ret_addr);
-}

/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully

View File

@ -2742,6 +2742,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	}

+	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
	return X86EMUL_CONTINUE;
}

View File

@ -5313,6 +5313,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

	ctxt->eflags = kvm_get_rflags(vcpu);
+	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
	ctxt->eip = kvm_rip_read(vcpu);
	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
@ -5528,36 +5530,25 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
	return dr6;
}

-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
+static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
{
	struct kvm_run *kvm_run = vcpu->run;

-	/*
-	 * rflags is the old, "raw" value of the flags.  The new value has
-	 * not been saved yet.
-	 *
-	 * This is correct even for TF set by the guest, because "the
-	 * processor will not generate this exception after the instruction
-	 * that sets the TF flag".
-	 */
-	if (unlikely(rflags & X86_EFLAGS_TF)) {
-		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-			kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
-						  DR6_RTM;
-			kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
-			kvm_run->debug.arch.exception = DB_VECTOR;
-			kvm_run->exit_reason = KVM_EXIT_DEBUG;
-			*r = EMULATE_USER_EXIT;
-		} else {
-			/*
-			 * "Certain debug exceptions may clear bit 0-3.  The
-			 * remaining contents of the DR6 register are never
-			 * cleared by the processor".
-			 */
-			vcpu->arch.dr6 &= ~15;
-			vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
-			kvm_queue_exception(vcpu, DB_VECTOR);
-		}
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
+		kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
+		kvm_run->debug.arch.exception = DB_VECTOR;
+		kvm_run->exit_reason = KVM_EXIT_DEBUG;
+		*r = EMULATE_USER_EXIT;
+	} else {
+		/*
+		 * "Certain debug exceptions may clear bit 0-3.  The
+		 * remaining contents of the DR6 register are never
+		 * cleared by the processor".
+		 */
+		vcpu->arch.dr6 &= ~15;
+		vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
+		kvm_queue_exception(vcpu, DB_VECTOR);
	}
}
@ -5567,7 +5558,17 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
	int r = EMULATE_DONE;

	kvm_x86_ops->skip_emulated_instruction(vcpu);
-	kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+
+	/*
+	 * rflags is the old, "raw" value of the flags.  The new value has
+	 * not been saved yet.
+	 *
+	 * This is correct even for TF set by the guest, because "the
+	 * processor will not generate this exception after the instruction
+	 * that sets the TF flag".
+	 */
+	if (unlikely(rflags & X86_EFLAGS_TF))
+		kvm_vcpu_do_singlestep(vcpu, &r);
+
	return r == EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
@ -5726,8 +5727,9 @@ restart:
	toggle_interruptibility(vcpu, ctxt->interruptibility);
	vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
	kvm_rip_write(vcpu, ctxt->eip);
-	if (r == EMULATE_DONE)
-		kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+	if (r == EMULATE_DONE &&
+	    (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+		kvm_vcpu_do_singlestep(vcpu, &r);
+
	if (!ctxt->have_exception ||
	    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
		__kvm_set_rflags(vcpu, ctxt->eflags);
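The net effect of this series of hunks: TF is latched into ctxt->tf before emulation runs, because instructions such as syscall clear TF in EFLAGS as a side effect, while the single-step #DB architecturally belongs to the instruction that *began* execution with TF set. A compressed illustration of that ordering (own sketch, not KVM code; 0x100 is the TF bit):

#include <stdbool.h>

struct emu_ctx {
	unsigned long eflags;
	bool tf;	/* TF as sampled *before* the instruction ran */
};

static void emulate_one(struct emu_ctx *ctxt)
{
	ctxt->tf = (ctxt->eflags & 0x100) != 0;	/* X86_EFLAGS_TF is bit 8 */

	/* ... emulate; a syscall clears TF in ctxt->eflags here ... */

	if (ctxt->tf) {
		/* still report the single-step trap, despite TF now clear */
	}
}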

View File

@ -213,8 +213,6 @@ struct mm_struct;
#define release_segments(mm)		do { } while(0)
#define forget_segments()		do { } while (0)

-#define thread_saved_pc(tsk)	(task_pt_regs(tsk)->pc)

extern unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->pc)

View File

@ -240,20 +240,21 @@ fallback:
	return bvl;
}

-static void __bio_free(struct bio *bio)
+void bio_uninit(struct bio *bio)
{
	bio_disassociate_task(bio);
	if (bio_integrity(bio))
		bio_integrity_free(bio);
}
+EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

-	__bio_free(bio);
+	bio_uninit(bio);

	if (bs) {
		bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
@ -271,6 +272,11 @@ static void bio_free(struct bio *bio)
	}
}

+/*
+ * Users of this function have their own bio allocation. Subsequently,
+ * they must remember to pair any call to bio_init() with bio_uninit()
+ * when IO has completed, or when the bio is released.
+ */
void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
@ -297,7 +303,7 @@ void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

-	__bio_free(bio);
+	bio_uninit(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
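The comment added above bio_init() is the contract that motivated exporting bio_uninit(): callers that embed a bio in their own structure must now tear it down themselves. A minimal sketch of the expected pairing (hypothetical driver type, assuming <linux/bio.h>):

#include <linux/bio.h>

struct my_cmd {			/* hypothetical caller-owned bio */
	struct bio bio;
	struct bio_vec bvec;
};

static void my_cmd_setup(struct my_cmd *cmd)
{
	bio_init(&cmd->bio, &cmd->bvec, 1);
	/* ... fill in the bio and submit it ... */
}

static void my_cmd_complete(struct my_cmd *cmd)
{
	bio_uninit(&cmd->bio);	/* pairs with bio_init() above */
}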

View File

@ -68,6 +68,45 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
		__blk_mq_sched_assign_ioc(q, rq, bio, ioc);
}

+/*
+ * Mark a hardware queue as needing a restart. For shared queues, maintain
+ * a count of how many hardware queues are marked for restart.
+ */
+static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+		return;
+
+	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+		struct request_queue *q = hctx->queue;
+
+		if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+			atomic_inc(&q->shared_hctx_restart);
+	} else
+		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+}
+
+static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+		return false;
+
+	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+		struct request_queue *q = hctx->queue;
+
+		if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+			atomic_dec(&q->shared_hctx_restart);
+	} else
+		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+
+	if (blk_mq_hctx_has_pending(hctx)) {
+		blk_mq_run_hw_queue(hctx, true);
+		return true;
+	}
+
+	return false;
+}
+
struct request *blk_mq_sched_get_request(struct request_queue *q,
					 struct bio *bio,
					 unsigned int op,
@ -266,18 +305,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
	return true;
}

-static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
-		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-		if (blk_mq_hctx_has_pending(hctx)) {
-			blk_mq_run_hw_queue(hctx, true);
-			return true;
-		}
-	}
-	return false;
-}
-
/**
 * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
 * @pos:    loop cursor.
@ -309,6 +336,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
	unsigned int i, j;

	if (set->flags & BLK_MQ_F_TAG_SHARED) {
+		/*
+		 * If this is 0, then we know that no hardware queues
+		 * have RESTART marked. We're done.
+		 */
+		if (!atomic_read(&queue->shared_hctx_restart))
+			return;
+
		rcu_read_lock();
		list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
					   tag_set_list) {
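The counter introduced above exists so the restart path can bail out without touching any queue when nothing is marked; the inc/dec are guarded by test_and_set/test_and_clear so the count stays exact under concurrent markers. The invariant, reduced to portable atomics (simplified sketch, not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>

struct queue { atomic_int shared_hctx_restart; };
struct hctx  { struct queue *q; atomic_bool restart; };

/* shared_hctx_restart always equals the number of hctxs whose restart
 * flag is set, so a zero read means the whole scan can be skipped.
 */
static void mark_restart(struct hctx *h)
{
	if (!atomic_exchange(&h->restart, true))
		atomic_fetch_add(&h->q->shared_hctx_restart, 1);
}

static bool clear_restart(struct hctx *h)
{
	if (!atomic_exchange(&h->restart, false))
		return false;
	atomic_fetch_sub(&h->q->shared_hctx_restart, 1);
	return true;
}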

View File

@ -115,15 +115,6 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
	return false;
}

-/*
- * Mark a hardware queue as needing a restart.
- */
-static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-}
-
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

View File

@ -2103,20 +2103,30 @@ static void blk_mq_map_swqueue(struct request_queue *q,
	}
}

+/*
+ * Caller needs to ensure that we're either frozen/quiesced, or that
+ * the queue isn't live yet.
+ */
static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
-		if (shared)
+		if (shared) {
+			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+				atomic_inc(&q->shared_hctx_restart);
			hctx->flags |= BLK_MQ_F_TAG_SHARED;
-		else
+		} else {
+			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+				atomic_dec(&q->shared_hctx_restart);
			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+		}
	}
}

-static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
+					bool shared)
{
	struct request_queue *q;

View File

@ -1428,6 +1428,37 @@ static void acpi_init_coherency(struct acpi_device *adev)
	adev->flags.coherent_dma = cca;
}

+static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
+{
+	bool *is_spi_i2c_slave_p = data;
+
+	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+		return 1;
+
+	/*
+	 * devices that are connected to UART still need to be enumerated to
+	 * platform bus
+	 */
+	if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
+		*is_spi_i2c_slave_p = true;
+
+	/* no need to do more checking */
+	return -1;
+}
+
+static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
+{
+	struct list_head resource_list;
+	bool is_spi_i2c_slave = false;
+
+	INIT_LIST_HEAD(&resource_list);
+	acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
+			       &is_spi_i2c_slave);
+	acpi_dev_free_resource_list(&resource_list);
+
+	return is_spi_i2c_slave;
+}
+
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
			     int type, unsigned long long sta)
{
@ -1443,6 +1474,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
	acpi_bus_get_flags(device);
	device->flags.match_driver = false;
	device->flags.initialized = true;
+	device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device);
	acpi_device_clear_enumerated(device);
	device_initialize(&device->dev);
	dev_set_uevent_suppress(&device->dev, true);
@ -1727,38 +1759,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
	return AE_OK;
}

-static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
-{
-	bool *is_spi_i2c_slave_p = data;
-
-	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
-		return 1;
-
-	/*
-	 * devices that are connected to UART still need to be enumerated to
-	 * platform bus
-	 */
-	if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
-		*is_spi_i2c_slave_p = true;
-
-	/* no need to do more checking */
-	return -1;
-}
-
static void acpi_default_enumeration(struct acpi_device *device)
{
-	struct list_head resource_list;
-	bool is_spi_i2c_slave = false;
-
	/*
	 * Do not enumerate SPI/I2C slaves as they will be enumerated by their
	 * respective parents.
	 */
-	INIT_LIST_HEAD(&resource_list);
-	acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
-			       &is_spi_i2c_slave);
-	acpi_dev_free_resource_list(&resource_list);
-
-	if (!is_spi_i2c_slave) {
+	if (!device->flags.spi_i2c_slave) {
		acpi_create_platform_device(device, NULL);
		acpi_device_set_enumerated(device);
	} else {
@ -1854,7 +1861,7 @@ static void acpi_bus_attach(struct acpi_device *device)
		return;

	device->flags.match_driver = true;
-	if (ret > 0) {
+	if (ret > 0 && !device->flags.spi_i2c_slave) {
		acpi_device_set_enumerated(device);
		goto ok;
	}
@ -1863,10 +1870,10 @@ static void acpi_bus_attach(struct acpi_device *device)
	if (ret < 0)
		return;

-	if (device->pnp.type.platform_id)
-		acpi_default_enumeration(device);
-	else
+	if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave)
		acpi_device_set_enumerated(device);
+	else
+		acpi_default_enumeration(device);

ok:
	list_for_each_entry(child, &device->children, node)

View File

@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
	unsigned long timeout;
	int ret;

-	xen_blkif_get(blkif);
-
	set_freezable();
	while (!kthread_should_stop()) {
		if (try_to_freeze())
@ -665,7 +663,6 @@ purge_gnt_list:
		print_stats(ring);

	ring->xenblkd = NULL;
-	xen_blkif_put(blkif);

	return 0;
}
@ -1436,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
static void make_response(struct xen_blkif_ring *ring, u64 id,
			  unsigned short op, int st)
{
-	struct blkif_response  resp;
+	struct blkif_response *resp;
	unsigned long     flags;
	union blkif_back_rings *blk_rings;
	int notify;

-	resp.id        = id;
-	resp.operation = op;
-	resp.status    = st;
-
	spin_lock_irqsave(&ring->blk_ring_lock, flags);
	blk_rings = &ring->blk_rings;
	/* Place on the response ring for the relevant domain. */
	switch (ring->blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
-		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->native,
+					 blk_rings->native.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_32:
-		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->x86_32,
+					 blk_rings->x86_32.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_64:
-		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->x86_64,
+					 blk_rings->x86_64.rsp_prod_pvt);
		break;
	default:
		BUG();
	}
+
+	resp->id        = id;
+	resp->operation = op;
+	resp->status    = st;
+
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&ring->blk_ring_lock, flags);

View File

@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues;
struct blkif_common_request {
	char dummy;
};

-struct blkif_common_response {
-	char dummy;
-};
+/* i386 protocol version */

struct blkif_x86_32_request_rw {
	uint8_t        nr_segments;  /* number of segments */
@ -129,14 +128,6 @@ struct blkif_x86_32_request {
	} u;
} __attribute__((__packed__));

-/* i386 protocol version */
-#pragma pack(push, 4)
-
-struct blkif_x86_32_response {
-	uint64_t        id;              /* copied from request */
-	uint8_t         operation;       /* copied from request */
-	int16_t         status;          /* BLKIF_RSP_??? */
-};
-#pragma pack(pop)
-
/* x86_64 protocol version */

struct blkif_x86_64_request_rw {
@ -193,18 +184,12 @@ struct blkif_x86_64_request {
	} u;
} __attribute__((__packed__));

-struct blkif_x86_64_response {
-	uint64_t       __attribute__((__aligned__(8))) id;
-	uint8_t        operation;       /* copied from request */
-	int16_t        status;          /* BLKIF_RSP_??? */
-};
-
DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
-		  struct blkif_common_response);
+		  struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
-		  struct blkif_x86_32_response);
+		  struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
-		  struct blkif_x86_64_response);
+		  struct blkif_response);

union blkif_back_rings {
	struct blkif_back_ring        native;
@ -281,6 +266,7 @@ struct xen_blkif_ring {
	wait_queue_head_t       wq;
	atomic_t                inflight;
+	bool                    active;
	/* One thread per blkif ring. */
	struct task_struct      *xenblkd;
	unsigned int            waiting_reqs;
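Collapsing the three response structs into one works because they only ever differed in how the 64-bit id field is aligned: the i386 ABI aligns u64 to 4 bytes, hence the __packed on the 32-bit ring. A host-side illustration of that layout difference (standalone sketch; sizes typical for x86):

#include <stdio.h>
#include <stdint.h>

struct resp {				/* natural, x86_64-style layout */
	uint64_t id;
	uint8_t  operation;
	int16_t  status;
};

struct __attribute__((packed, aligned(4))) resp32 {	/* i386-style */
	uint64_t id;
	uint8_t  operation;
	int16_t  status;
};

int main(void)
{
	/* typically prints "16 12": the i386 ABI packs the u64 to 4 bytes */
	printf("%zu %zu\n", sizeof(struct resp), sizeof(struct resp32));
	return 0;
}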

View File

@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
		init_waitqueue_head(&ring->shutdown_wq);
		ring->blkif = blkif;
		ring->st_print = jiffies;
-		xen_blkif_get(blkif);
+		ring->active = true;
	}

	return 0;
@ -249,10 +249,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
		struct xen_blkif_ring *ring = &blkif->rings[r];
		unsigned int i = 0;

+		if (!ring->active)
+			continue;
+
		if (ring->xenblkd) {
			kthread_stop(ring->xenblkd);
			wake_up(&ring->shutdown_wq);
+			ring->xenblkd = NULL;
		}

		/* The above kthread_stop() guarantees that at this point we
@ -296,7 +298,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
		BUG_ON(ring->free_pages_num != 0);
		BUG_ON(ring->persistent_gnt_c != 0);
		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
-		xen_blkif_put(blkif);
+		ring->active = false;
	}
	blkif->nr_ring_pages = 0;
	/*
@ -312,9 +314,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
static void xen_blkif_free(struct xen_blkif *blkif)
{
-	xen_blkif_disconnect(blkif);
+	WARN_ON(xen_blkif_disconnect(blkif));
	xen_vbd_free(&blkif->vbd);
+	kfree(blkif->be->mode);
+	kfree(blkif->be);

	/* Make sure everything is drained before shutting down */
	kmem_cache_free(xen_blkif_cachep, blkif);
@ -511,8 +514,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
		xen_blkif_put(be->blkif);
	}

-	kfree(be->mode);
-	kfree(be);
-
	return 0;
}

View File

@ -803,13 +803,13 @@ static int crng_fast_load(const char *cp, size_t len)
		p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
		cp++; crng_init_cnt++; len--;
	}
+	spin_unlock_irqrestore(&primary_crng.lock, flags);
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		invalidate_batched_entropy();
		crng_init = 1;
		wake_up_interruptible(&crng_init_wait);
		pr_notice("random: fast init done\n");
	}
-	spin_unlock_irqrestore(&primary_crng.lock, flags);
	return 1;
}
@ -841,6 +841,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
	}
	memzero_explicit(&buf, sizeof(buf));
	crng->init_time = jiffies;
+	spin_unlock_irqrestore(&primary_crng.lock, flags);
	if (crng == &primary_crng && crng_init < 2) {
		invalidate_batched_entropy();
		crng_init = 2;
@ -848,7 +849,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
		wake_up_interruptible(&crng_init_wait);
		pr_notice("random: crng init done\n");
	}
-	spin_unlock_irqrestore(&primary_crng.lock, flags);
}
@ -2041,8 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
u64 get_random_u64(void)
{
	u64 ret;
-	bool use_lock = crng_init < 2;
-	unsigned long flags;
+	bool use_lock = READ_ONCE(crng_init) < 2;
+	unsigned long flags = 0;
	struct batched_entropy *batch;

#if BITS_PER_LONG == 64
@ -2073,8 +2073,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
u32 get_random_u32(void)
{
	u32 ret;
-	bool use_lock = crng_init < 2;
-	unsigned long flags;
+	bool use_lock = READ_ONCE(crng_init) < 2;
+	unsigned long flags = 0;
	struct batched_entropy *batch;

	if (arch_get_random_int(&ret))
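Two details in the get_random_u32/u64 change are easy to miss: crng_init is sampled once with READ_ONCE() so the lock and unlock decisions cannot disagree if it changes mid-call, and flags is zero-initialized because the compiler cannot see that the unlock path only runs when the lock path did. The skeleton of the pattern (condensed from the surrounding functions):

	bool use_lock = READ_ONCE(crng_init) < 2;	/* sample exactly once */
	unsigned long flags = 0;			/* defined even when unused */

	if (use_lock)
		read_lock_irqsave(&batched_entropy_reset_lock, flags);
	/* ... consume the per-CPU entropy batch ... */
	if (use_lock)
		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);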

View File

@ -1209,9 +1209,9 @@ arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
		return 0;
	}

-	rate = readl_relaxed(frame + CNTFRQ);
+	rate = readl_relaxed(base + CNTFRQ);

-	iounmap(frame);
+	iounmap(base);

	return rate;
}

View File

@ -18,6 +18,7 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
+#include <linux/clocksource.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>

View File

@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/clockchips.h>
+#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

View File

@ -721,7 +721,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
	u32 set;

	if (!of_device_is_compatible(mvchip->chip.of_node,
-				     "marvell,armada-370-xp-gpio"))
+				     "marvell,armada-370-gpio"))
		return 0;

	if (IS_ERR(mvchip->clk))
@ -852,7 +852,7 @@ static const struct of_device_id mvebu_gpio_of_match[] = {
		.data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
	},
	{
-		.compatible = "marvell,armada-370-xp-gpio",
+		.compatible = "marvell,armada-370-gpio",
		.data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
	},
	{
@ -1128,7 +1128,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
				 mvchip);
	}

-	/* Armada 370/XP has simple PWM support for GPIO lines */
+	/* Some MVEBU SoCs have simple PWM support for GPIO lines */
	if (IS_ENABLED(CONFIG_PWM))
		return mvebu_pwm_probe(pdev, mvchip, id);

View File

@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
adev->clock.default_dispclk / 100); adev->clock.default_dispclk / 100);
adev->clock.default_dispclk = 60000; adev->clock.default_dispclk = 60000;
} else if (adev->clock.default_dispclk <= 60000) {
DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
adev->clock.default_dispclk / 100);
adev->clock.default_dispclk = 62500;
} }
adev->clock.dp_extclk = adev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
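The magic numbers make sense once you know these clock fields are stored in units of 10 kHz, which is why the message divides by 100 to print MHz and why 62500 means 625 MHz (worked arithmetic, standalone):

#include <stdio.h>

int main(void)
{
	int default_dispclk = 62500;	/* stored in units of 10 kHz */

	printf("%d MHz\n", default_dispclk / 100);	/* prints 625 MHz */
	return 0;
}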

View File

@ -449,6 +449,7 @@ static const struct pci_device_id pciidlist[] = {
	{0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
	{0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
	{0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+	{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
	{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
	/* Vega 10 */
	{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},

View File

@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
-	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+	ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;

	memset(&args, 0, sizeof(args));
@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
{
	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
-	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+	ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;

	memset(&args, 0, sizeof(args));

View File

@ -1229,21 +1229,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
	if (!connector)
		return -ENOENT;

-	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-	encoder = drm_connector_get_encoder(connector);
-	if (encoder)
-		out_resp->encoder_id = encoder->base.id;
-	else
-		out_resp->encoder_id = 0;
-
-	ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
-			(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
-			(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
-			&out_resp->count_props);
-	drm_modeset_unlock(&dev->mode_config.connection_mutex);
-	if (ret)
-		goto out_unref;
-
	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
		if (connector->encoder_ids[i] != 0)
			encoders_count++;
@ -1256,7 +1241,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
			if (put_user(connector->encoder_ids[i],
				     encoder_ptr + copied)) {
				ret = -EFAULT;
-				goto out_unref;
+				goto out;
			}
			copied++;
		}
@ -1300,15 +1285,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
			if (copy_to_user(mode_ptr + copied,
					 &u_mode, sizeof(u_mode))) {
				ret = -EFAULT;
+				mutex_unlock(&dev->mode_config.mutex);
+
				goto out;
			}
			copied++;
		}
	}
	out_resp->count_modes = mode_count;
-out:
	mutex_unlock(&dev->mode_config.mutex);
-out_unref:
+
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+	encoder = drm_connector_get_encoder(connector);
+	if (encoder)
+		out_resp->encoder_id = encoder->base.id;
+	else
+		out_resp->encoder_id = 0;
+
+	/* Only grab properties after probing, to make sure EDID and other
+	 * properties reflect the latest status. */
+	ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
+			(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
+			(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
+			&out_resp->count_props);
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+out:
	drm_connector_put(connector);

	return ret;

View File

@ -106,9 +106,10 @@ struct etnaviv_gem_submit {
	struct etnaviv_gpu *gpu;
	struct ww_acquire_ctx ticket;
	struct dma_fence *fence;
+	u32 flags;
	unsigned int nr_bos;
	struct etnaviv_gem_submit_bo bos[0];
-	u32 flags;
+	/* No new members here, the previous one is variable-length! */
};
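The moved member and the new comment guard against a genuine layout bug: anything declared after a trailing zero-length (or flexible) array aliases the array elements appended at allocation time. A compile-time illustration (standalone, hypothetical struct):

#include <stddef.h>

struct demo {
	unsigned int nr;
	int items[0];	/* GNU zero-length array: must stay last */
	/* u32 flags;	   placed here it would overlap items[] */
};

/* the array contributes no size, so a member after it would share
 * storage with items[0]
 */
_Static_assert(offsetof(struct demo, items) == sizeof(struct demo),
	       "trailing array carries no storage");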
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,

View File

@ -172,7 +172,7 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
-		bool explicit = !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
+		bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);

		ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
						 explicit);
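The one-character fix inverts the test's meaning: the old expression was true precisely when the userspace flag was absent. A worked truth table (illustrative flag value, not the real uapi constant):

#include <assert.h>

#define NO_IMPLICIT 0x4		/* illustrative value for the flag bit */

int main(void)
{
	unsigned int flags = NO_IMPLICIT;	/* userspace opted out */

	assert(!(flags & NO_IMPLICIT) == 0);	/* old code: inverted result */
	assert(!!(flags & NO_IMPLICIT) == 1);	/* fixed: flag set => explicit */
	return 0;
}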

View File

@ -292,6 +292,8 @@ static int per_file_stats(int id, void *ptr, void *data)
	struct file_stats *stats = data;
	struct i915_vma *vma;

+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
@ -476,6 +478,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
		struct drm_i915_gem_request *request;
		struct task_struct *task;

+		mutex_lock(&dev->struct_mutex);
+
		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
@ -487,7 +491,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
-		mutex_lock(&dev->struct_mutex);
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   client_link);
@ -497,6 +500,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
				   PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();
+
		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

View File

@ -2285,8 +2285,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	unsigned int max_segment;
+	gfp_t noreclaim;
	int ret;
-	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
@ -2315,22 +2315,31 @@ rebuild_st:
	 * Fail silently without starting the shrinker
	 */
	mapping = obj->base.filp->f_mapping;
-	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
-	gfp |= __GFP_NORETRY | __GFP_NOWARN;
+	noreclaim = mapping_gfp_constraint(mapping,
+					   ~(__GFP_IO | __GFP_RECLAIM));
+	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
+
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
-		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
-		if (unlikely(IS_ERR(page))) {
-			i915_gem_shrink(dev_priv,
-					page_count,
-					I915_SHRINK_BOUND |
-					I915_SHRINK_UNBOUND |
-					I915_SHRINK_PURGEABLE);
-			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
-		}
-		if (unlikely(IS_ERR(page))) {
-			gfp_t reclaim;
+		const unsigned int shrink[] = {
+			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
+			0,
+		}, *s = shrink;
+		gfp_t gfp = noreclaim;
+
+		do {
+			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+			if (likely(!IS_ERR(page)))
+				break;
+
+			if (!*s) {
+				ret = PTR_ERR(page);
+				goto err_sg;
+			}
+
+			i915_gem_shrink(dev_priv, 2 * page_count, *s++);
+			cond_resched();

			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
@ -2340,15 +2349,26 @@ rebuild_st:
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
-			reclaim = mapping_gfp_mask(mapping);
-			reclaim |= __GFP_NORETRY; /* reclaim, but no oom */
-
-			page = shmem_read_mapping_page_gfp(mapping, i, reclaim);
-			if (IS_ERR(page)) {
-				ret = PTR_ERR(page);
-				goto err_sg;
+			if (!*s) {
+				/* reclaim and warn, but no oom */
+				gfp = mapping_gfp_mask(mapping);
+
+				/* Our bo are always dirty and so we require
+				 * kswapd to reclaim our pages (direct reclaim
+				 * does not effectively begin pageout of our
+				 * buffers on its own). However, direct reclaim
+				 * only waits for kswapd when under allocation
+				 * congestion. So as a result __GFP_RECLAIM is
+				 * unreliable and fails to actually reclaim our
+				 * dirty pages -- unless you try over and over
+				 * again with !__GFP_NORETRY. However, we still
+				 * want to fail this allocation rather than
+				 * trigger the out-of-memory killer and for
+				 * this we want the future __GFP_MAYFAIL.
+				 */
			}
-		}
+		} while (1);
+
		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
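The rewritten loop is an escalation ladder: attempt the allocation with the cheap no-reclaim mask, shrink the driver's own buffers on failure, and only once the shrink list is exhausted retry with the mapping's full gfp mask, reporting ENOMEM instead of invoking the OOM killer. The shape of the pattern, stripped of the i915 specifics (illustrative sketch):

#include <stddef.h>

/* levels[] ends in 0 as a sentinel, mirroring shrink[] above */
static void *alloc_with_fallbacks(void *(*try_alloc)(int aggressive),
				  void (*reclaim)(int level))
{
	static const int levels[] = { 1, 0 };
	const int *s = levels;
	int aggressive = 0;

	for (;;) {
		void *p = try_alloc(aggressive);
		if (p)
			return p;
		if (!*s)
			return NULL;	/* fail politely, no OOM kill */
		reclaim(*s++);
		if (!*s)
			aggressive = 1;	/* final attempt: full reclaim */
	}
}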
@ -4222,6 +4242,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
+	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_object_ops);

View File

@ -546,11 +546,12 @@ repeat:
}

static int
-i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+i915_gem_execbuffer_relocate_entry(struct i915_vma *vma,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc,
				   struct reloc_cache *cache)
{
+	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
@ -628,6 +629,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
		return -EINVAL;
	}

+	/*
+	 * If we write into the object, we need to force the synchronisation
+	 * barrier, either with an asynchronous clflush or if we executed the
+	 * patching using the GPU (though that should be serialised by the
+	 * timeline). To be completely sure, and since we are required to
+	 * do relocations we are already stalling, disable the user's opt
+	 * out of our synchronisation.
+	 */
+	vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC;
+
	ret = relocate_entry(obj, reloc, cache, target_offset);
	if (ret)
		return ret;
@ -678,7 +689,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
	do {
		u64 offset = r->presumed_offset;

-		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
+		ret = i915_gem_execbuffer_relocate_entry(vma, eb, r, &cache);
		if (ret)
			goto out;
@ -726,7 +737,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
	reloc_cache_init(&cache, eb->i915);

	for (i = 0; i < entry->relocation_count; i++) {
-		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
+		ret = i915_gem_execbuffer_relocate_entry(vma, eb, &relocs[i], &cache);
		if (ret)
			break;
	}

View File

@ -623,7 +623,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
-	req->head = req->ring->tail;
+	req->head = req->ring->emit;

	/* Check that we didn't interrupt ourselves with a new request */
	GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);

View File

@ -480,9 +480,7 @@ static void guc_wq_item_append(struct i915_guc_client *client,
	GEM_BUG_ON(freespace < wqi_size);

	/* The GuC firmware wants the tail index in QWords, not bytes */
-	tail = rq->tail;
-	assert_ring_tail_valid(rq->ring, rq->tail);
-	tail >>= 3;
+	tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3;
	GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);

	/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we

View File

@ -650,6 +650,11 @@ int i915_vma_unbind(struct i915_vma *vma)
			break;
	}

+	if (!ret) {
+		ret = i915_gem_active_retire(&vma->last_fence,
+					     &vma->vm->i915->drm.struct_mutex);
+	}
+
	__i915_vma_unpin(vma);
	if (ret)
		return ret;

View File

@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
-static void intel_modeset_setup_hw_state(struct drm_device *dev);
+static void intel_modeset_setup_hw_state(struct drm_device *dev,
+					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

struct intel_limit {
@ -3449,7 +3450,7 @@ __intel_display_resume(struct drm_device *dev,
	struct drm_crtc *crtc;
	int i, ret;

-	intel_modeset_setup_hw_state(dev);
+	intel_modeset_setup_hw_state(dev, ctx);
	i915_redisable_vga(to_i915(dev));

	if (!state)
@ -5825,7 +5826,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
	intel_update_watermarks(intel_crtc);
}

-static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
+static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
+					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@ -5855,7 +5857,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
		return;
	}

-	state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
@ -15030,7 +15032,7 @@ int intel_modeset_init(struct drm_device *dev)
	intel_setup_outputs(dev_priv);

	drm_modeset_lock_all(dev);
-	intel_modeset_setup_hw_state(dev);
+	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
@ -15067,13 +15069,13 @@ int intel_modeset_init(struct drm_device *dev)
	return 0;
}

-static void intel_enable_pipe_a(struct drm_device *dev)
+static void intel_enable_pipe_a(struct drm_device *dev,
+				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *crt = NULL;
	struct intel_load_detect_pipe load_detect_temp;
-	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
	int ret;

	/* We can't just switch on the pipe A, we need to set things up with a
@ -15145,7 +15147,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
	       (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
}

-static void intel_sanitize_crtc(struct intel_crtc *crtc)
+static void intel_sanitize_crtc(struct intel_crtc *crtc,
+				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
@ -15191,7 +15194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
		plane = crtc->plane;
		crtc->base.primary->state->visible = true;
		crtc->plane = !plane;
-		intel_crtc_disable_noatomic(&crtc->base);
+		intel_crtc_disable_noatomic(&crtc->base, ctx);
		crtc->plane = plane;
	}
@ -15201,13 +15204,13 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
-		intel_enable_pipe_a(dev);
+		intel_enable_pipe_a(dev, ctx);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
-		intel_crtc_disable_noatomic(&crtc->base);
+		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
		/*
@ -15505,7 +15508,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
 * and sanitizes it to the current state
 */
static void
-intel_modeset_setup_hw_state(struct drm_device *dev)
+intel_modeset_setup_hw_state(struct drm_device *dev,
+			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
@ -15525,7 +15529,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
	for_each_pipe(dev_priv, pipe) {
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

-		intel_sanitize_crtc(crtc);
+		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

View File

@ -119,8 +119,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
	struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
	struct intel_panel *panel = &connector->panel;

-	intel_dp_aux_enable_backlight(connector);
-
	if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
		panel->backlight.max = 0xFFFF;
	else

View File

@ -326,8 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
		rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
	u32 *reg_state = ce->lrc_reg_state;

-	assert_ring_tail_valid(rq->ring, rq->tail);
-	reg_state[CTX_RING_TAIL+1] = rq->tail;
+	reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);

	/* True 32b PPGTT with dynamic page allocation: update PDP
	 * registers and point the unallocated PDPs to scratch page.
@ -2036,8 +2035,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
			ce->state->obj->mm.dirty = true;
			i915_gem_object_unpin_map(ce->state->obj);

-			ce->ring->head = ce->ring->tail = 0;
-			intel_ring_update_space(ce->ring);
+			intel_ring_reset(ce->ring, 0);
		}
	}
}

View File

@ -49,7 +49,7 @@ static int __intel_ring_space(int head, int tail, int size)
void intel_ring_update_space(struct intel_ring *ring)
{
-	ring->space = __intel_ring_space(ring->head, ring->tail, ring->size);
+	ring->space = __intel_ring_space(ring->head, ring->emit, ring->size);
}

static int
@ -774,8 +774,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
	i915_gem_request_submit(request);

-	assert_ring_tail_valid(request->ring, request->tail);
-	I915_WRITE_TAIL(request->engine, request->tail);
+	I915_WRITE_TAIL(request->engine,
+			intel_ring_set_tail(request->ring, request->tail));
}

static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
@ -1316,11 +1316,23 @@ err:
	return PTR_ERR(addr);
}

+void intel_ring_reset(struct intel_ring *ring, u32 tail)
+{
+	GEM_BUG_ON(!list_empty(&ring->request_list));
+	ring->tail = tail;
+	ring->head = tail;
+	ring->emit = tail;
+	intel_ring_update_space(ring);
+}
+
void intel_ring_unpin(struct intel_ring *ring)
{
	GEM_BUG_ON(!ring->vma);
	GEM_BUG_ON(!ring->vaddr);

+	/* Discard any unused bytes beyond that submitted to hw. */
+	intel_ring_reset(ring, ring->tail);
+
	if (i915_vma_is_map_and_fenceable(ring->vma))
		i915_vma_unpin_iomap(ring->vma);
	else
@ -1562,8 +1574,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

+	/* Restart from the beginning of the rings for convenience */
	for_each_engine(engine, dev_priv, id)
-		engine->buffer->head = engine->buffer->tail;
+		intel_ring_reset(engine->buffer, 0);
}

static int ring_request_alloc(struct drm_i915_gem_request *request)
@ -1616,7 +1629,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
		unsigned space;

		/* Would completion of this request free enough space? */
-		space = __intel_ring_space(target->postfix, ring->tail,
+		space = __intel_ring_space(target->postfix, ring->emit,
					   ring->size);
		if (space >= bytes)
			break;
@ -1641,8 +1654,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
	struct intel_ring *ring = req->ring;
-	int remain_actual = ring->size - ring->tail;
-	int remain_usable = ring->effective_size - ring->tail;
+	int remain_actual = ring->size - ring->emit;
+	int remain_usable = ring->effective_size - ring->emit;
	int bytes = num_dwords * sizeof(u32);
	int total_bytes, wait_bytes;
	bool need_wrap = false;
@ -1678,17 +1691,17 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
	if (unlikely(need_wrap)) {
		GEM_BUG_ON(remain_actual > ring->space);
-		GEM_BUG_ON(ring->tail + remain_actual > ring->size);
+		GEM_BUG_ON(ring->emit + remain_actual > ring->size);

		/* Fill the tail with MI_NOOP */
-		memset(ring->vaddr + ring->tail, 0, remain_actual);
-		ring->tail = 0;
+		memset(ring->vaddr + ring->emit, 0, remain_actual);
+		ring->emit = 0;
		ring->space -= remain_actual;
	}

-	GEM_BUG_ON(ring->tail > ring->size - bytes);
-	cs = ring->vaddr + ring->tail;
-	ring->tail += bytes;
+	GEM_BUG_ON(ring->emit > ring->size - bytes);
+	cs = ring->vaddr + ring->emit;
+	ring->emit += bytes;
	ring->space -= bytes;
	GEM_BUG_ON(ring->space < 0);
@ -1699,7 +1712,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
	int num_dwords =
-		(req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+		(req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
	u32 *cs;

	if (num_dwords == 0)

View File

@ -145,6 +145,7 @@ struct intel_ring {
	u32 head;
	u32 tail;
+	u32 emit;

	int space;
	int size;
@ -488,6 +489,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
+void intel_ring_reset(struct intel_ring *ring, u32 tail);
+void intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);
@ -511,7 +514,7 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
-	GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
+	GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}

static inline u32
@ -540,7 +543,19 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
	GEM_BUG_ON(tail >= ring->size);
}

-void intel_ring_update_space(struct intel_ring *ring);
+static inline unsigned int
+intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
+{
+	/* Whilst writes to the tail are strictly ordered, there is no
+	 * serialisation between readers and the writers. The tail may be
+	 * read by i915_gem_request_retire() just as it is being updated
+	 * by execlists, as although the breadcrumb is complete, the context
+	 * switch hasn't been seen.
+	 */
+	assert_ring_tail_valid(ring, tail);
+	ring->tail = tail;
+	return tail;
+}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
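With emit (bytes the CPU has prepared) now distinct from tail (what the GPU has been told about), free-space accounting runs against emit, while intel_ring_set_tail() above is the single point where tail advances. The wrap arithmetic, worked with concrete numbers (hypothetical values, assuming the usual head-minus-tail ring convention; the real helper additionally reserves a cacheline):

/* 4 KiB ring, consumer (head) at 1024, producer (emit) at 3072 */
static int ring_space(int head, int emit, int size)
{
	int space = head - emit;	/* negative: producer is ahead */

	if (space <= 0)
		space += size;		/* wrap: 1024 - 3072 + 4096 = 2048 */
	return space;
}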
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno); void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

View File

@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev)
	    rdev->pdev->subsystem_vendor == 0x103c &&
	    rdev->pdev->subsystem_device == 0x280a)
		return;
+	/* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS400 &&
+	    rdev->pdev->subsystem_vendor == 0x1179 &&
+	    rdev->pdev->subsystem_device == 0xff31)
+		return;

	/* DYN CLK 1 */
	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);

View File

@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
+	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
+	 */
+	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* macbook pro 8.2 */
	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
	{ 0, 0, 0, 0, 0 },

Some files were not shown because too many files have changed in this diff.