Merge branch 'topic/pci-cleanup' into for-next

Takashi Iwai 2015-01-07 15:54:25 +01:00
commit 3b6fe95a07
237 changed files with 2500 additions and 2509 deletions

@@ -10,12 +10,13 @@ Optional properties:
Each button (key) is represented as a sub-node of "gpio-keys":
Subnode properties:
- gpios: OF device-tree gpio specification.
- interrupts: the interrupt line for that input.
- label: Descriptive name of the key.
- linux,code: Keycode to emit.
Required mutual exclusive subnode-properties:
- gpios: OF device-tree gpio specification.
- interrupts: the interrupt line for that input
Note that either "interrupts" or "gpios" properties can be omitted, but not
both at the same time. Specifying both properties is allowed.
Optional subnode-properties:
- linux,input-type: Specify event type this button/key generates.
@@ -23,6 +24,9 @@ Optional subnode-properties:
- debounce-interval: Debouncing interval time in milliseconds.
If not specified defaults to 5.
- gpio-key,wakeup: Boolean, button can wake-up the system.
- linux,can-disable: Boolean, indicates that button is connected
to dedicated (not shared) interrupt which can be disabled to
suppress events from the button.
Example nodes:
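(The example itself falls outside the quoted hunk; the sketch below shows one GPIO-backed key and one interrupt-only key. Controller phandles, cell values and keycodes are illustrative, not taken from this commit.)

gpio_keys {
	compatible = "gpio-keys";
	autorepeat;

	button-up {
		label = "GPIO Key UP";
		linux,code = <103>;
		gpios = <&gpio1 0 1>;
	};

	button-down {
		label = "IRQ Key DOWN";
		linux,code = <108>;
		interrupts = <1 2>;
		linux,can-disable;
	};
};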

@@ -8,6 +8,8 @@ Optional properties:
- debounce-interval : Debouncing interval time in milliseconds
- st,scan-count : Scanning cycles elapsed before key data is updated
- st,no-autorepeat : If specified device will not autorepeat
- keypad,num-rows : See ./matrix-keymap.txt
- keypad,num-columns : See ./matrix-keymap.txt
Example:
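(The example was trimmed at the hunk boundary. Assuming this hunk is from the STMPE keypad binding, which the st,* properties suggest, an illustrative node using the properties above might be:)

stmpe_keypad {
	compatible = "st,stmpe-keypad";
	debounce-interval = <64>;
	st,scan-count = <8>;
	st,no-autorepeat;
	keypad,num-rows = <8>;
	keypad,num-columns = <8>;
	/* keymap entries encode (row << 24) | (column << 16) | keycode */
	linux,keymap = <0x00000074	/* row 0, col 0: KEY_POWER */
			0x01000066>;	/* row 1, col 0: KEY_HOME */
};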

@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 19
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc3
NAME = Diseased Newt
# *DOCUMENTATION*

@@ -203,27 +203,3 @@
compatible = "linux,spdif-dir";
};
};
&pinctrl {
/*
* These pins might be muxed as I2S by
* the bootloader, but it conflicts
* with the real I2S pins that are
* muxed using i2s_pins. We must mux
* those pins to a function other than
* I2S.
*/
pinctrl-0 = <&hog_pins1 &hog_pins2>;
pinctrl-names = "default";
hog_pins1: hog-pins1 {
marvell,pins = "mpp6", "mpp8", "mpp10",
"mpp12", "mpp13";
marvell,function = "gpio";
};
hog_pins2: hog-pins2 {
marvell,pins = "mpp5", "mpp7", "mpp9";
marvell,function = "gpo";
};
};

@@ -338,6 +338,7 @@ CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_MVEBU=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_EXYNOS=y
CONFIG_USB_EHCI_TEGRA=y
CONFIG_USB_EHCI_HCD_STI=y
CONFIG_USB_EHCI_HCD_PLATFORM=y

@@ -1046,6 +1046,15 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "model name\t: %s rev %d (%s)\n",
cpu_name, cpuid & 15, elf_platform);
#if defined(CONFIG_SMP)
seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
(per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
loops_per_jiffy / (500000/HZ),
(loops_per_jiffy / (5000/HZ)) % 100);
#endif
/* dump out the processor features */
seq_puts(m, "Features\t: ");
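(For reference, the value printed above is BogoMIPS = loops_per_jiffy * HZ / 500000: the first division gives the integer part, and the second, taken modulo 100, the two decimal digits. With HZ = 100 and loops_per_jiffy = 4980736, for example, this prints "996.14".)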

@@ -387,6 +387,18 @@ asmlinkage void secondary_start_kernel(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
int cpu;
unsigned long bogosum = 0;
for_each_online_cpu(cpu)
bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
printk(KERN_INFO "SMP: Total of %d processors activated "
"(%lu.%02lu BogoMIPS).\n",
num_online_cpus(),
bogosum / (500000/HZ),
(bogosum / (5000/HZ)) % 100);
hyp_mode_check();
}

@@ -1,6 +1,7 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
@@ -13,14 +14,12 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RESOURCE_COUNTERS=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_MEMCG_KMEM=y
CONFIG_CGROUP_HUGETLB=y
# CONFIG_UTS_NS is not set
# CONFIG_IPC_NS is not set
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
@@ -92,7 +91,6 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_VIRTIO_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HMC_DRV is not set
CONFIG_SPI=y
CONFIG_SPI_PL022=y
CONFIG_GPIO_PL061=y
@@ -133,6 +131,8 @@ CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA=y
CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=y
CONFIG_CUSE=y
CONFIG_VFAT_FS=y
@@ -152,14 +152,15 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_LOCKUP_DETECTOR=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set
CONFIG_KEYS=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
CONFIG_CRYPTO_GHASH_ARM64_CE=y
CONFIG_CRYPTO_AES_ARM64_CE=y
CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y

@@ -52,13 +52,14 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
dev->archdata.dma_ops = ops;
}
static inline int set_arch_dma_coherent_ops(struct device *dev)
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
struct iommu_ops *iommu, bool coherent)
{
dev->archdata.dma_coherent = true;
set_dma_ops(dev, &coherent_swiotlb_dma_ops);
return 0;
dev->archdata.dma_coherent = coherent;
if (coherent)
set_dma_ops(dev, &coherent_swiotlb_dma_ops);
}
#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops
#define arch_setup_dma_ops arch_setup_dma_ops
/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)

@@ -298,7 +298,6 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
#define pud_write(pud) pte_write(pud_pte(pud))
#define pud_pfn(pud) (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
@@ -401,7 +400,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}
#define pud_page(pud) pmd_page(pud_pmd(pud))
#define pud_page(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
@@ -437,6 +436,8 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
}
#define pgd_page(pgd) pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))

@@ -5,6 +5,7 @@
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
@@ -98,7 +99,18 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
*/
ret = __cpu_suspend_enter(arg, fn);
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
/*
* We are resuming from reset with TTBR0_EL1 set to the
* idmap to enable the MMU; restore the active_mm mappings in
* TTBR0_EL1 unless the active_mm == &init_mm, in which case
* the thread entered __cpu_suspend with TTBR0_EL1 set to
* reserved TTBR0 page tables and should be restored as such.
*/
if (mm == &init_mm)
cpu_set_reserved_ttbr0();
else
cpu_switch_mm(mm->pgd, mm);
flush_tlb_all();
/*

@@ -11,7 +11,7 @@
#define NR_syscalls 318 /* length of syscall table */
#define NR_syscalls 319 /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about

@@ -331,5 +331,6 @@
#define __NR_getrandom 1339
#define __NR_memfd_create 1340
#define __NR_bpf 1341
#define __NR_execveat 1342
#endif /* _UAPI_ASM_IA64_UNISTD_H */

@@ -1779,6 +1779,7 @@ sys_call_table:
data8 sys_getrandom
data8 sys_memfd_create // 1340
data8 sys_bpf
data8 sys_execveat
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */

@@ -72,6 +72,7 @@ void __init setup_cpuinfo(void)
cpuinfo.has_div = fcpu_has(cpu, "altr,has-div");
cpuinfo.has_mul = fcpu_has(cpu, "altr,has-mul");
cpuinfo.has_mulx = fcpu_has(cpu, "altr,has-mulx");
cpuinfo.mmu = fcpu_has(cpu, "altr,has-mmu");
if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div)
err_cpu("DIV");

@@ -365,30 +365,14 @@ ENTRY(ret_from_interrupt)
GET_THREAD_INFO r1
ldw r4, TI_PREEMPT_COUNT(r1)
bne r4, r0, restore_all
need_resched:
ldw r4, TI_FLAGS(r1) /* ? Need resched set */
BTBZ r10, r4, TIF_NEED_RESCHED, restore_all
ldw r4, PT_ESTATUS(sp) /* ? Interrupts off */
andi r10, r4, ESTATUS_EPIE
beq r10, r0, restore_all
movia r4, PREEMPT_ACTIVE
stw r4, TI_PREEMPT_COUNT(r1)
rdctl r10, status /* enable intrs again */
ori r10, r10 ,STATUS_PIE
wrctl status, r10
PUSH r1
call schedule
POP r1
mov r4, r0
stw r4, TI_PREEMPT_COUNT(r1)
rdctl r10, status /* disable intrs */
andi r10, r10, %lo(~STATUS_PIE)
wrctl status, r10
br need_resched
#else
br restore_all
call preempt_schedule_irq
#endif
br restore_all
/***********************************************************************
* A few syscall wrappers

@@ -33,11 +33,18 @@
#endif /*!CONFIG_PA20*/
/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
We don't explicitly expose that "*a" may be written as reload
fails to find a register in class R1_REGS when "a" needs to be
reloaded when generating 64-bit PIC code. Instead, we clobber
memory to indicate to the compiler that the assembly code reads
or writes to items other than those listed in the input and output
operands. This may pessimize the code somewhat but __ldcw is
usually used within code blocks surrounded by memory barriors. */
#define __ldcw(a) ({ \
unsigned __ret; \
__asm__ __volatile__(__LDCW " 0(%2),%0" \
: "=r" (__ret), "+m" (*(a)) : "r" (a)); \
__asm__ __volatile__(__LDCW " 0(%1),%0" \
: "=r" (__ret) : "r" (a) : "memory"); \
__ret; \
})

@@ -86,6 +86,11 @@ extern int overlaps_crashkernel(unsigned long start, unsigned long size);
extern void reserve_crashkernel(void);
extern void machine_kexec_mask_interrupts(void);
static inline bool kdump_in_progress(void)
{
return crashing_cpu >= 0;
}
#else /* !CONFIG_KEXEC */
static inline void crash_kexec_secondary(struct pt_regs *regs) { }
@@ -106,6 +111,11 @@ static inline int crash_shutdown_unregister(crash_shutdown_t handler)
return 0;
}
static inline bool kdump_in_progress(void)
{
return false;
}
#endif /* CONFIG_KEXEC */
#endif /* ! __ASSEMBLY__ */
#endif /* __KERNEL__ */

@@ -366,3 +366,4 @@ SYSCALL_SPU(seccomp)
SYSCALL_SPU(getrandom)
SYSCALL_SPU(memfd_create)
SYSCALL_SPU(bpf)
COMPAT_SYS(execveat)

@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
#define __NR_syscalls 362
#define __NR_syscalls 363
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls

@@ -384,5 +384,6 @@
#define __NR_getrandom 359
#define __NR_memfd_create 360
#define __NR_bpf 361
#define __NR_execveat 362
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */

@@ -330,7 +330,7 @@ void default_machine_kexec(struct kimage *image)
* using debugger IPI.
*/
if (crashing_cpu == -1)
if (!kdump_in_progress())
kexec_prepare_cpus();
pr_debug("kexec: Starting switchover sequence.\n");

@@ -700,6 +700,7 @@ void start_secondary(void *unused)
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
preempt_disable();
cpu_callin_map[cpu] = 1;
if (smp_ops->setup_cpu)
smp_ops->setup_cpu(cpu);
@@ -738,14 +739,6 @@ void start_secondary(void *unused)
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
/*
* CPU must be marked active and online before we signal back to the
* master, because the scheduler needs to see the cpu_online and
* cpu_active bits set.
*/
smp_wmb();
cpu_callin_map[cpu] = 1;
local_irq_enable();
cpu_startup_entry(CPUHP_ONLINE);

@@ -43,6 +43,7 @@
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
#include "pseries.h"
@@ -267,8 +268,13 @@ static void pSeries_lpar_hptab_clear(void)
* out to the user, but at least this will stop us from
* continuing on further and creating an even more
* difficult to debug situation.
*
* There is a known problem when kdump'ing, if cpus are offline
* the above call will fail. Rather than panicking again, keep
* going and hope the kdump kernel is also little endian, which
* it usually is.
*/
if (rc)
if (rc && !kdump_in_progress())
panic("Could not enable big endian exceptions");
}
#endif

@@ -3,6 +3,7 @@ config UML
default y
select HAVE_ARCH_AUDITSYSCALL
select HAVE_UID16
select HAVE_FUTEX_CMPXCHG if FUTEX
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select GENERIC_IO

@@ -4448,7 +4448,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
* zap all shadow pages.
*/
if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
kvm_mmu_invalidate_zap_all_pages(kvm);
}
}

@@ -5840,53 +5840,10 @@ static __init int hardware_setup(void)
memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
memcpy(vmx_msr_bitmap_legacy_x2apic,
vmx_msr_bitmap_legacy, PAGE_SIZE);
memcpy(vmx_msr_bitmap_longmode_x2apic,
vmx_msr_bitmap_longmode, PAGE_SIZE);
if (enable_apicv) {
for (msr = 0x800; msr <= 0x8ff; msr++)
vmx_disable_intercept_msr_read_x2apic(msr);
/* According SDM, in x2apic mode, the whole id reg is used.
* But in KVM, it only use the highest eight bits. Need to
* intercept it */
vmx_enable_intercept_msr_read_x2apic(0x802);
/* TMCCT */
vmx_enable_intercept_msr_read_x2apic(0x839);
/* TPR */
vmx_disable_intercept_msr_write_x2apic(0x808);
/* EOI */
vmx_disable_intercept_msr_write_x2apic(0x80b);
/* SELF-IPI */
vmx_disable_intercept_msr_write_x2apic(0x83f);
}
if (enable_ept) {
kvm_mmu_set_mask_ptes(0ull,
(enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
(enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
0ull, VMX_EPT_EXECUTABLE_MASK);
ept_set_mmio_spte_mask();
kvm_enable_tdp();
} else
kvm_disable_tdp();
update_ple_window_actual_max();
if (setup_vmcs_config(&vmcs_config) < 0) {
r = -EIO;
goto out7;
}
}
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
@@ -5945,6 +5902,49 @@ static __init int hardware_setup(void)
if (nested)
nested_vmx_setup_ctls_msrs();
vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
memcpy(vmx_msr_bitmap_legacy_x2apic,
vmx_msr_bitmap_legacy, PAGE_SIZE);
memcpy(vmx_msr_bitmap_longmode_x2apic,
vmx_msr_bitmap_longmode, PAGE_SIZE);
if (enable_apicv) {
for (msr = 0x800; msr <= 0x8ff; msr++)
vmx_disable_intercept_msr_read_x2apic(msr);
/* According SDM, in x2apic mode, the whole id reg is used.
* But in KVM, it only use the highest eight bits. Need to
* intercept it */
vmx_enable_intercept_msr_read_x2apic(0x802);
/* TMCCT */
vmx_enable_intercept_msr_read_x2apic(0x839);
/* TPR */
vmx_disable_intercept_msr_write_x2apic(0x808);
/* EOI */
vmx_disable_intercept_msr_write_x2apic(0x80b);
/* SELF-IPI */
vmx_disable_intercept_msr_write_x2apic(0x83f);
}
if (enable_ept) {
kvm_mmu_set_mask_ptes(0ull,
(enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
(enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
0ull, VMX_EPT_EXECUTABLE_MASK);
ept_set_mmio_spte_mask();
kvm_enable_tdp();
} else
kvm_disable_tdp();
update_ple_window_actual_max();
return alloc_kvm_area();
out7:

@@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
extern asmlinkage void sys_ni_syscall(void);
const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.

@@ -47,7 +47,7 @@ typedef void (*sys_call_ptr_t)(void);
extern void sys_ni_syscall(void);
const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.

@@ -455,6 +455,9 @@ void af_alg_complete(struct crypto_async_request *req, int err)
{
struct af_alg_completion *completion = req->data;
if (err == -EINPROGRESS)
return;
completion->err = err;
complete(&completion->completion);
}

@@ -985,8 +985,6 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
state->flags = 0;
switch (cx->type) {
case ACPI_STATE_C1:
if (cx->entry_method != ACPI_CSTATE_FFH)
state->flags |= CPUIDLE_FLAG_TIME_INVALID;
state->enter = acpi_idle_enter_c1;
state->enter_dead = acpi_idle_play_dead;

@@ -505,6 +505,23 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"),
},
},
{
.callback = video_disable_native_backlight,
.ident = "SAMSUNG 870Z5E/880Z5E/680Z5E",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"),
},
},
{
.callback = video_disable_native_backlight,
.ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
},
},
{}
};

@@ -2088,7 +2088,7 @@ EXPORT_SYMBOL_GPL(of_genpd_del_provider);
* Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
* on failure.
*/
static struct generic_pm_domain *of_genpd_get_from_provider(
struct generic_pm_domain *of_genpd_get_from_provider(
struct of_phandle_args *genpdspec)
{
struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
@@ -2108,6 +2108,7 @@ static struct generic_pm_domain *of_genpd_get_from_provider(
return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
/**
* genpd_dev_pm_detach - Detach a device from its PM domain.

@@ -108,6 +108,14 @@ static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);
#define opp_rcu_lockdep_assert() \
do { \
rcu_lockdep_assert(rcu_read_lock_held() || \
lockdep_is_held(&dev_opp_list_lock), \
"Missing rcu_read_lock() or " \
"dev_opp_list_lock protection"); \
} while (0)
/**
* find_device_opp() - find device_opp struct using device pointer
* @dev: device pointer used to lookup device OPPs
@@ -208,9 +216,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
* This function returns the number of available opps if there are any,
* else returns 0 if none or the corresponding error value.
*
* Locking: This function must be called under rcu_read_lock(). This function
* internally references two RCU protected structures: device_opp and opp which
* are safe as long as we are under a common RCU locked section.
* Locking: This function takes rcu_read_lock().
*/
int dev_pm_opp_get_opp_count(struct device *dev)
{
@@ -218,11 +224,14 @@ int dev_pm_opp_get_opp_count(struct device *dev)
struct dev_pm_opp *temp_opp;
int count = 0;
rcu_read_lock();
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
int r = PTR_ERR(dev_opp);
dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
return r;
count = PTR_ERR(dev_opp);
dev_err(dev, "%s: device OPP not found (%d)\n",
__func__, count);
goto out_unlock;
}
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
@@ -230,6 +239,8 @@
count++;
}
out_unlock:
rcu_read_unlock();
return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
@@ -267,6 +278,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
int r = PTR_ERR(dev_opp);
@@ -313,6 +326,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
return ERR_PTR(-EINVAL);
@@ -361,6 +376,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
return ERR_PTR(-EINVAL);
@@ -783,9 +800,15 @@ void of_free_opp_table(struct device *dev)
/* Check for existing list for 'dev' */
dev_opp = find_device_opp(dev);
if (WARN(IS_ERR(dev_opp), "%s: dev_opp: %ld\n", dev_name(dev),
PTR_ERR(dev_opp)))
if (IS_ERR(dev_opp)) {
int error = PTR_ERR(dev_opp);
if (error != -ENODEV)
WARN(1, "%s: dev_opp: %d\n",
IS_ERR_OR_NULL(dev) ?
"Invalid device" : dev_name(dev),
error);
return;
}
/* Hold our list modification lock here */
mutex_lock(&dev_opp_list_lock);

@@ -417,6 +417,6 @@ static void __exit agp_ali_cleanup(void)
module_init(agp_ali_init);
module_exit(agp_ali_cleanup);
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_AUTHOR("Dave Jones");
MODULE_LICENSE("GPL and additional rights");

@@ -813,6 +813,6 @@ static void __exit agp_amd64_cleanup(void)
module_init(agp_amd64_mod_init);
module_exit(agp_amd64_cleanup);
MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen");
MODULE_AUTHOR("Dave Jones, Andi Kleen");
module_param(agp_try_unsupported, bool, 0);
MODULE_LICENSE("GPL");

@@ -579,6 +579,6 @@ static void __exit agp_ati_cleanup(void)
module_init(agp_ati_init);
module_exit(agp_ati_cleanup);
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_AUTHOR("Dave Jones");
MODULE_LICENSE("GPL and additional rights");

@@ -356,7 +356,7 @@ static __init int agp_setup(char *s)
__setup("agp=", agp_setup);
#endif
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_AUTHOR("Dave Jones, Jeff Hartmann");
MODULE_DESCRIPTION("AGP GART driver");
MODULE_LICENSE("GPL and additional rights");
MODULE_ALIAS_MISCDEV(AGPGART_MINOR);

@@ -920,5 +920,5 @@ static void __exit agp_intel_cleanup(void)
module_init(agp_intel_init);
module_exit(agp_intel_cleanup);
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");

@@ -1438,5 +1438,5 @@ void intel_gmch_remove(void)
}
EXPORT_SYMBOL(intel_gmch_remove);
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");

@@ -1,7 +1,7 @@
/*
* Nvidia AGPGART routines.
* Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
* to work in 2.5 by Dave Jones <davej@redhat.com>
* to work in 2.5 by Dave Jones.
*/
#include <linux/module.h>

@@ -595,4 +595,4 @@ module_init(agp_via_init);
module_exit(agp_via_cleanup);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_AUTHOR("Dave Jones");

@@ -199,18 +199,6 @@ struct bmc_device {
int guid_set;
char name[16];
struct kref usecount;
/* bmc device attributes */
struct device_attribute device_id_attr;
struct device_attribute provides_dev_sdrs_attr;
struct device_attribute revision_attr;
struct device_attribute firmware_rev_attr;
struct device_attribute version_attr;
struct device_attribute add_dev_support_attr;
struct device_attribute manufacturer_id_attr;
struct device_attribute product_id_attr;
struct device_attribute guid_attr;
struct device_attribute aux_firmware_rev_attr;
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
@@ -2252,7 +2240,7 @@ static ssize_t device_id_show(struct device *dev,
return snprintf(buf, 10, "%u\n", bmc->id.device_id);
}
DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
static ssize_t provides_device_sdrs_show(struct device *dev,
struct device_attribute *attr,
@@ -2263,7 +2251,8 @@ static ssize_t provides_device_sdrs_show(struct device *dev,
return snprintf(buf, 10, "%u\n",
(bmc->id.device_revision & 0x80) >> 7);
}
DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show, NULL);
static DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show,
NULL);
static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2273,7 +2262,7 @@ static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 20, "%u\n",
bmc->id.device_revision & 0x0F);
}
DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
static ssize_t firmware_revision_show(struct device *dev,
struct device_attribute *attr,
@@ -2284,7 +2273,7 @@ static ssize_t firmware_revision_show(struct device *dev,
return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
bmc->id.firmware_revision_2);
}
DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
static DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
static ssize_t ipmi_version_show(struct device *dev,
struct device_attribute *attr,
@@ -2296,7 +2285,7 @@ static ssize_t ipmi_version_show(struct device *dev,
ipmi_version_major(&bmc->id),
ipmi_version_minor(&bmc->id));
}
DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
static DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
static ssize_t add_dev_support_show(struct device *dev,
struct device_attribute *attr,
@@ -2307,7 +2296,8 @@ static ssize_t add_dev_support_show(struct device *dev,
return snprintf(buf, 10, "0x%02x\n",
bmc->id.additional_device_support);
}
DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, NULL);
static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
NULL);
static ssize_t manufacturer_id_show(struct device *dev,
struct device_attribute *attr,
@@ -2317,7 +2307,7 @@ static ssize_t manufacturer_id_show(struct device *dev,
return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
}
DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
static DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
static ssize_t product_id_show(struct device *dev,
struct device_attribute *attr,
@@ -2327,7 +2317,7 @@ static ssize_t product_id_show(struct device *dev,
return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
}
DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
static DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
static ssize_t aux_firmware_rev_show(struct device *dev,
struct device_attribute *attr,
@@ -2341,7 +2331,7 @@ static ssize_t aux_firmware_rev_show(struct device *dev,
bmc->id.aux_firmware_revision[1],
bmc->id.aux_firmware_revision[0]);
}
DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2352,7 +2342,7 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
(long long) bmc->guid[0],
(long long) bmc->guid[8]);
}
DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
static DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
static struct attribute *bmc_dev_attrs[] = {
&dev_attr_device_id.attr,
@@ -2392,10 +2382,10 @@ cleanup_bmc_device(struct kref *ref)
if (bmc->id.aux_firmware_revision_set)
device_remove_file(&bmc->pdev.dev,
&bmc->aux_firmware_rev_attr);
&dev_attr_aux_firmware_revision);
if (bmc->guid_set)
device_remove_file(&bmc->pdev.dev,
&bmc->guid_attr);
&dev_attr_guid);
platform_device_unregister(&bmc->pdev);
}
@@ -2422,16 +2412,14 @@ static int create_bmc_files(struct bmc_device *bmc)
int err;
if (bmc->id.aux_firmware_revision_set) {
bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
err = device_create_file(&bmc->pdev.dev,
&bmc->aux_firmware_rev_attr);
&dev_attr_aux_firmware_revision);
if (err)
goto out;
}
if (bmc->guid_set) {
bmc->guid_attr.attr.name = "guid";
err = device_create_file(&bmc->pdev.dev,
&bmc->guid_attr);
&dev_attr_guid);
if (err)
goto out_aux_firm;
}
@@ -2441,7 +2429,7 @@ static int create_bmc_files(struct bmc_device *bmc)
out_aux_firm:
if (bmc->id.aux_firmware_revision_set)
device_remove_file(&bmc->pdev.dev,
&bmc->aux_firmware_rev_attr);
&dev_attr_aux_firmware_revision);
out:
return err;
}

@@ -52,6 +52,7 @@
#include <linux/dmi.h>
#include <linux/kthread.h>
#include <linux/acpi.h>
#include <linux/ctype.h>
#define PFX "ipmi_ssif: "
#define DEVICE_NAME "ipmi_ssif"

@@ -462,7 +462,7 @@ static void __init arch_counter_register(unsigned type)
/* Register the CP15 based counter if we have one */
if (type & ARCH_CP15_TIMER) {
if (arch_timer_use_virtual)
if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
arch_timer_read_counter = arch_counter_get_cntvct;
else
arch_timer_read_counter = arch_counter_get_cntpct;

@@ -211,6 +211,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
/* OPPs might be populated at runtime, don't check for error here */
of_init_opp_table(cpu_dev);
/*
* But we need OPP table to function so if it is not there let's
* give platform code chance to provide it for us.
*/
ret = dev_pm_opp_get_opp_count(cpu_dev);
if (ret <= 0) {
pr_debug("OPP table is not ready, deferring probe\n");
ret = -EPROBE_DEFER;
goto out_free_opp;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;

@@ -2028,6 +2028,12 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
/* Don't start any governor operations if we are entering suspend */
if (cpufreq_suspended)
return 0;
/*
* Governor might not be initiated here if ACPI _PPC changed
* notification happened, so check it.
*/
if (!policy->governor)
return -EINVAL;
if (policy->governor->max_transition_latency &&
policy->cpuinfo.transition_latency >

@@ -79,12 +79,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
last_state = &ldev->states[last_idx];
if (!(drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_INVALID)) {
last_residency = cpuidle_get_last_residency(dev) - \
drv->states[last_idx].exit_latency;
}
else
last_residency = last_state->threshold.promotion_time + 1;
last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
/* consider promotion */
if (last_idx < drv->state_count - 1 &&

@@ -396,8 +396,8 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* power state and occurrence of the wakeup event.
*
* If the entered idle state didn't support residency measurements,
* we are basically lost in the dark how much time passed.
* As a compromise, assume we slept for the whole expected time.
* we use them anyway if they are short, and if long,
* truncate to the whole expected time.
*
* Any measured amount of time will include the exit latency.
* Since we are interested in when the wakeup begun, not when it
@@ -405,23 +405,18 @@
* the measured amount of time is less than the exit latency,
* assume the state was never reached and the exit latency is 0.
*/
if (unlikely(target->flags & CPUIDLE_FLAG_TIME_INVALID)) {
/* Use timer value as is */
/* measured value */
measured_us = cpuidle_get_last_residency(dev);
/* Deduct exit latency */
if (measured_us > target->exit_latency)
measured_us -= target->exit_latency;
/* Make sure our coefficients do not exceed unity */
if (measured_us > data->next_timer_us)
measured_us = data->next_timer_us;
} else {
/* Use measured value */
measured_us = cpuidle_get_last_residency(dev);
/* Deduct exit latency */
if (measured_us > target->exit_latency)
measured_us -= target->exit_latency;
/* Make sure our coefficients do not exceed unity */
if (measured_us > data->next_timer_us)
measured_us = data->next_timer_us;
}
/* Update our correction ratio */
new_factor = data->correction_factor[data->bucket];
new_factor -= new_factor / DECAY;

@@ -121,13 +121,9 @@ static int kfd_open(struct inode *inode, struct file *filep)
if (IS_ERR(process))
return PTR_ERR(process);
process->is_32bit_user_mode = is_32bit_user_mode;
dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
process->pasid, process->is_32bit_user_mode);
kfd_init_apertures(process);
return 0;
}

@@ -299,13 +299,13 @@ int kfd_init_apertures(struct kfd_process *process)
struct kfd_dev *dev;
struct kfd_process_device *pdd;
mutex_lock(&process->mutex);
/*Iterating over all devices*/
while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
id < NUM_OF_SUPPORTED_GPUS) {
pdd = kfd_get_process_device_data(dev, process, 1);
if (!pdd)
return -1;
/*
* For 64 bit process aperture will be statically reserved in
@@ -348,8 +348,6 @@ int kfd_init_apertures(struct kfd_process *process)
id++;
}
mutex_unlock(&process->mutex);
return 0;
}

@@ -26,6 +26,8 @@
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
struct mm_struct;
#include "kfd_priv.h"
@@ -285,8 +287,15 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (err != 0)
goto err_process_pqm_init;
/* init process apertures*/
process->is_32bit_user_mode = is_compat_task();
if (kfd_init_apertures(process) != 0)
goto err_init_apretures;
return process;
err_init_apretures:
pqm_uninit(&process->pqm);
err_process_pqm_init:
hash_del_rcu(&process->kfd_processes);
synchronize_rcu();

@@ -700,8 +700,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.simd_per_cu);
sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
dev->node_props.max_slots_scratch_cu);
sysfs_show_32bit_prop(buffer, "engine_id",
dev->node_props.engine_id);
sysfs_show_32bit_prop(buffer, "vendor_id",
dev->node_props.vendor_id);
sysfs_show_32bit_prop(buffer, "device_id",
@@ -715,6 +713,12 @@
dev->gpu->kgd));
sysfs_show_64bit_prop(buffer, "local_mem_size",
kfd2kgd->get_vmem_size(dev->gpu->kgd));
sysfs_show_32bit_prop(buffer, "fw_version",
kfd2kgd->get_fw_version(
dev->gpu->kgd,
KGD_ENGINE_MEC1));
}
ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",

@@ -45,6 +45,17 @@ enum kgd_memory_pool {
KGD_POOL_FRAMEBUFFER = 3,
};
enum kgd_engine_type {
KGD_ENGINE_PFP = 1,
KGD_ENGINE_ME,
KGD_ENGINE_CE,
KGD_ENGINE_MEC1,
KGD_ENGINE_MEC2,
KGD_ENGINE_RLC,
KGD_ENGINE_SDMA,
KGD_ENGINE_MAX
};
struct kgd2kfd_shared_resources {
/* Bit n == 1 means VMID n is available for KFD. */
unsigned int compute_vmid_bitmap;
@@ -137,6 +148,8 @@ struct kgd2kfd_calls {
*
* @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
*
* @get_fw_version: Returns FW versions from the header
*
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -176,6 +189,8 @@ struct kfd2kgd_calls {
int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id);
uint16_t (*get_fw_version)(struct kgd_dev *kgd,
enum kgd_engine_type type);
};
bool kgd2kfd_init(unsigned interface_version,

@@ -61,7 +61,7 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
if (plane->state->crtc) {
crtc_state = state->crtc_states[drm_crtc_index(plane->crtc)];
crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
if (WARN_ON(!crtc_state))
return;

@@ -830,6 +830,8 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
* vblank events since the system was booted, including lost events due to
* modesetting activity.
*
* This is the legacy version of drm_crtc_vblank_count().
*
* Returns:
* The software vblank counter.
*/
@@ -843,6 +845,25 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_count);
/**
* drm_crtc_vblank_count - retrieve "cooked" vblank counter value
* @crtc: which counter to retrieve
*
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity.
*
* This is the native KMS version of drm_vblank_count().
*
* Returns:
* The software vblank counter.
*/
u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
{
return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_count);
/**
* drm_vblank_count_and_time - retrieve "cooked" vblank counter value
* and the system timestamp corresponding to that vblank counter value.
@@ -904,6 +925,8 @@ static void send_vblank_event(struct drm_device *dev,
*
* Updates sequence # and timestamp on event, and sends it to userspace.
* Caller must hold event lock.
*
* This is the legacy version of drm_crtc_send_vblank_event().
*/
void drm_send_vblank_event(struct drm_device *dev, int crtc,
struct drm_pending_vblank_event *e)
@@ -922,6 +945,23 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc,
}
EXPORT_SYMBOL(drm_send_vblank_event);
/**
* drm_crtc_send_vblank_event - helper to send vblank event after pageflip
* @crtc: the source CRTC of the vblank event
* @e: the event to send
*
* Updates sequence # and timestamp on event, and sends it to userspace.
* Caller must hold event lock.
*
* This is the native KMS version of drm_send_vblank_event().
*/
void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e)
{
drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
}
EXPORT_SYMBOL(drm_crtc_send_vblank_event);
/**
* drm_vblank_enable - enable the vblank interrupt on a CRTC
* @dev: DRM device
@@ -1594,6 +1634,8 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
*
* Drivers should call this routine in their vblank interrupt handlers to
* update the vblank counter and send any signals that may be pending.
*
* This is the legacy version of drm_crtc_handle_vblank().
*/
bool drm_handle_vblank(struct drm_device *dev, int crtc)
{
@@ -1670,3 +1712,21 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
return true;
}
EXPORT_SYMBOL(drm_handle_vblank);
/**
* drm_crtc_handle_vblank - handle a vblank event
* @crtc: where this event occurred
*
* Drivers should call this routine in their vblank interrupt handlers to
* update the vblank counter and send any signals that may be pending.
*
* This is the native KMS version of drm_handle_vblank().
*
* Returns:
* True if the event was successfully handled, false on failure.
*/
bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
{
return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_handle_vblank);

@@ -811,6 +811,8 @@ int i915_reset(struct drm_device *dev)
if (!i915.reset)
return 0;
intel_reset_gt_powersave(dev);
mutex_lock(&dev->struct_mutex);
i915_gem_reset(dev);
@@ -880,7 +882,7 @@
* of re-init after reset.
*/
if (INTEL_INFO(dev)->gen > 5)
intel_reset_gt_powersave(dev);
intel_enable_gt_powersave(dev);
} else {
mutex_unlock(&dev->struct_mutex);
}
@@ -1584,7 +1586,7 @@ static struct drm_driver driver = {
.gem_prime_import = i915_gem_prime_import,
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_dumb_map_offset,
.dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = &i915_driver_fops,

@@ -2501,9 +2501,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
int i915_gem_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
/**
* Returns true if seq1 is later than seq2.
*/

@@ -401,7 +401,6 @@ static int
i915_gem_create(struct drm_file *file,
struct drm_device *dev,
uint64_t size,
bool dumb,
uint32_t *handle_p)
{
struct drm_i915_gem_object *obj;
@@ -417,7 +416,6 @@ i915_gem_create(struct drm_file *file,
if (obj == NULL)
return -ENOMEM;
obj->base.dumb = dumb;
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(&obj->base);
@@ -437,7 +435,7 @@ i915_gem_dumb_create(struct drm_file *file,
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, dev,
args->size, true, &args->handle);
args->size, &args->handle);
}
/**
@@ -450,7 +448,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_create *args = data;
return i915_gem_create(file, dev,
args->size, false, &args->handle);
args->size, &args->handle);
}
static inline int
@@ -1840,10 +1838,10 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
drm_gem_free_mmap_offset(&obj->base);
}
static int
int
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
uint32_t handle, bool dumb,
uint32_t handle,
uint64_t *offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1860,13 +1858,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto unlock;
}
/*
* We don't allow dumb mmaps on objects created using another
* interface.
*/
WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
"Illegal dumb map of accelerated buffer.\n");
if (obj->base.size > dev_priv->gtt.mappable_end) {
ret = -E2BIG;
goto out;
@@ -1891,15 +1882,6 @@ unlock:
return ret;
}
int
i915_gem_dumb_map_offset(struct drm_file *file,
struct drm_device *dev,
uint32_t handle,
uint64_t *offset)
{
return i915_gem_mmap_gtt(file, dev, handle, true, offset);
}
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
@@ -1921,7 +1903,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_mmap_gtt *args = data;
return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
static inline int

@@ -473,7 +473,12 @@ mi_set_context(struct intel_engine_cs *ring,
u32 hw_flags)
{
u32 flags = hw_flags | MI_MM_SPACE_GTT;
int ret;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
i915_semaphore_is_enabled(ring->dev) ?
hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
0;
int len, i, ret;
/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
* invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
@@ -490,15 +495,31 @@
if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
ret = intel_ring_begin(ring, 6);
len = 4;
if (INTEL_INFO(ring->dev)->gen >= 7)
len += 2 + (num_rings ? 4*num_rings + 2 : 0);
ret = intel_ring_begin(ring, len);
if (ret)
return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (INTEL_INFO(ring->dev)->gen >= 7)
if (INTEL_INFO(ring->dev)->gen >= 7) {
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
else
intel_ring_emit(ring, MI_NOOP);
if (num_rings) {
struct intel_engine_cs *signaller;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
for_each_ring(signaller, to_i915(ring->dev), i) {
if (signaller == ring)
continue;
intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
}
}
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
@@ -510,10 +531,21 @@
*/
intel_ring_emit(ring, MI_NOOP);
if (INTEL_INFO(ring->dev)->gen >= 7)
if (INTEL_INFO(ring->dev)->gen >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
for_each_ring(signaller, to_i915(ring->dev), i) {
if (signaller == ring)
continue;
intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
}
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
else
intel_ring_emit(ring, MI_NOOP);
}
intel_ring_advance(ring);

@@ -121,9 +121,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
goto err;
}
WARN_ONCE(obj->base.dumb,
"GPU use of dumb buffer is illegal.\n");
drm_gem_object_reference(&obj->base);
list_add_tail(&obj->obj_exec_link, &objects);
}

@@ -281,10 +281,14 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
dev_priv->rps.interrupts_enabled = true;
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
dev_priv->pm_rps_events);
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -3307,8 +3311,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
if (INTEL_INFO(dev)->gen >= 6) {
pm_irqs |= dev_priv->pm_rps_events;
/*
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
*/
if (HAS_VEBOX(dev))
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
@@ -3520,7 +3526,11 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->pm_irq_mask = 0xffffffff;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
* is enabled/disabled.
*/
GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
@@ -3609,7 +3619,7 @@ static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
vlv_display_irq_reset(dev_priv);
dev_priv->irq_mask = 0;
dev_priv->irq_mask = ~0;
}
static void valleyview_irq_uninstall(struct drm_device *dev)

@@ -395,6 +395,7 @@
#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
@@ -1128,6 +1129,7 @@ enum punit_power_well {
#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
#define GEN6_NOSYNC 0
#define RING_PSMI_CTL(base) ((base)+0x50)
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
@@ -1458,6 +1460,7 @@ enum punit_power_well {
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)

@@ -6191,6 +6191,20 @@ void intel_cleanup_gt_powersave(struct drm_device *dev)
valleyview_cleanup_gt_powersave(dev);
}
static void gen6_suspend_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
/*
* TODO: disable RPS interrupts on GEN9+ too once RPS support
* is added for it.
*/
if (INTEL_INFO(dev)->gen < 9)
gen6_disable_rps_interrupts(dev);
}
/**
* intel_suspend_gt_powersave - suspend PM work and helper threads
* @dev: drm device
@@ -6206,14 +6220,7 @@
if (INTEL_INFO(dev)->gen < 6)
return;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
/*
* TODO: disable RPS interrupts on GEN9+ too once RPS support
* is added for it.
*/
if (INTEL_INFO(dev)->gen < 9)
gen6_disable_rps_interrupts(dev);
gen6_suspend_rps(dev);
/* Force GPU to min freq during suspend */
gen6_rps_idle(dev_priv);
@@ -6316,8 +6323,11 @@ void intel_reset_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen < 6)
return;
gen6_suspend_rps(dev);
dev_priv->rps.enabled = false;
intel_enable_gt_powersave(dev);
}
static void ibx_init_clock_gating(struct drm_device *dev)

@@ -362,12 +362,15 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
/*
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set. */

@@ -386,9 +386,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
drm_gem_object_unreference(gpu->memptrs_bo);
}
if (gpu->pm4)
release_firmware(gpu->pm4);
if (gpu->pfp)
release_firmware(gpu->pfp);
release_firmware(gpu->pm4);
release_firmware(gpu->pfp);
msm_gpu_cleanup(&gpu->base);
}

@@ -141,6 +141,15 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
uint32_t hpd_ctrl;
int i, ret;
for (i = 0; i < config->hpd_reg_cnt; i++) {
ret = regulator_enable(hdmi->hpd_regs[i]);
if (ret) {
dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
goto fail;
}
}
ret = gpio_config(hdmi, true);
if (ret) {
dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
@@ -164,15 +173,6 @@
}
}
for (i = 0; i < config->hpd_reg_cnt; i++) {
ret = regulator_enable(hdmi->hpd_regs[i]);
if (ret) {
dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
goto fail;
}
}
hdmi_set_mode(hdmi, false);
phy->funcs->reset(phy);
hdmi_set_mode(hdmi, true);
@@ -200,7 +200,7 @@ fail:
return ret;
}
static int hdp_disable(struct hdmi_connector *hdmi_connector)
static void hdp_disable(struct hdmi_connector *hdmi_connector)
{
struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
@@ -212,28 +212,19 @@ static int hdp_disable(struct hdmi_connector *hdmi_connector)
hdmi_set_mode(hdmi, false);
for (i = 0; i < config->hpd_reg_cnt; i++) {
ret = regulator_disable(hdmi->hpd_regs[i]);
if (ret) {
dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
goto fail;
}
}
for (i = 0; i < config->hpd_clk_cnt; i++)
clk_disable_unprepare(hdmi->hpd_clks[i]);
ret = gpio_config(hdmi, false);
if (ret) {
dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
goto fail;
if (ret)
dev_warn(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
for (i = 0; i < config->hpd_reg_cnt; i++) {
ret = regulator_disable(hdmi->hpd_regs[i]);
if (ret)
dev_warn(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
}
return 0;
fail:
return ret;
}
static void
@@ -260,11 +251,11 @@ void hdmi_connector_irq(struct drm_connector *connector)
(hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
/* ack the irq: */
/* ack & disable (temporarily) HPD events: */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
HDMI_HPD_INT_CTRL_INT_ACK);
DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
/* detect disconnect if we are connected or visa versa: */
hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;

@@ -331,17 +331,8 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_device *dev = crtc->dev;
DBG("%s: check", mdp4_crtc->name);
if (mdp4_crtc->event) {
dev_err(dev->dev, "already pending flip!\n");
return -EBUSY;
}
// TODO anything else to check?
return 0;
}
@@ -357,7 +348,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
unsigned long flags;
DBG("%s: flush", mdp4_crtc->name);
DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);
WARN_ON(mdp4_crtc->event);

@@ -303,11 +303,6 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
DBG("%s: check", mdp5_crtc->name);
if (mdp5_crtc->event) {
dev_err(dev->dev, "already pending flip!\n");
return -EBUSY;
}
/* request a free CTL, if none is already allocated for this CRTC */
if (state->enable && !mdp5_crtc->ctl) {
mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
@@ -364,7 +359,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
unsigned long flags;
DBG("%s: flush", mdp5_crtc->name);
DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);
WARN_ON(mdp5_crtc->event);
@@ -460,10 +455,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
/* now that we know what irq's we want: */
mdp5_crtc->err.irqmask = intf2err(intf);
mdp5_crtc->vblank.irqmask = intf2vblank(intf);
/* when called from modeset_init(), skip the rest until later: */
if (!mdp5_kms)
return;
mdp_irq_update(&mdp5_kms->base);
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

@@ -216,17 +216,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
goto fail;
}
/* NOTE: the vsync and error irq's are actually associated with
* the INTF/encoder.. the easiest way to deal with this (ie. what
* we do now) is assume a fixed relationship between crtc's and
* encoders. I'm not sure if there is ever a need to more freely
* assign crtcs to encoders, but if there is then we need to take
* care of error and vblank irq's that the crtc has registered,
* and also update user-requested vblank_mask.
*/
encoder->possible_crtcs = BIT(0);
mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
priv->encoders[priv->num_encoders++] = encoder;
/* Construct bridge/connector for HDMI: */

@@ -42,7 +42,10 @@ static void update_irq(struct mdp_kms *mdp_kms)
mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
}
static void update_irq_unlocked(struct mdp_kms *mdp_kms)
/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
* link changes, this must be called to figure out the new global irqmask
*/
void mdp_irq_update(struct mdp_kms *mdp_kms)
{
unsigned long flags;
spin_lock_irqsave(&list_lock, flags);
@@ -122,7 +125,7 @@ void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
spin_unlock_irqrestore(&list_lock, flags);
if (needs_update)
update_irq_unlocked(mdp_kms);
mdp_irq_update(mdp_kms);
}
void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
@@ -141,5 +144,5 @@ void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
spin_unlock_irqrestore(&list_lock, flags);
if (needs_update)
update_irq_unlocked(mdp_kms);
mdp_irq_update(mdp_kms);
}

@@ -75,7 +75,7 @@ void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
void mdp_irq_update(struct mdp_kms *mdp_kms);
/*
* pixel format helpers:

@@ -23,10 +23,41 @@ struct msm_commit {
struct drm_atomic_state *state;
uint32_t fence;
struct msm_fence_cb fence_cb;
uint32_t crtc_mask;
};
static void fence_cb(struct msm_fence_cb *cb);
/* block until specified crtcs are no longer pending update, and
* atomically mark them as pending update
*/
static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
int ret;
spin_lock(&priv->pending_crtcs_event.lock);
ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
!(priv->pending_crtcs & crtc_mask));
if (ret == 0) {
DBG("start: %08x", crtc_mask);
priv->pending_crtcs |= crtc_mask;
}
spin_unlock(&priv->pending_crtcs_event.lock);
return ret;
}
/* clear specified crtcs (no longer pending update)
*/
static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
spin_lock(&priv->pending_crtcs_event.lock);
DBG("end: %08x", crtc_mask);
priv->pending_crtcs &= ~crtc_mask;
wake_up_all_locked(&priv->pending_crtcs_event);
spin_unlock(&priv->pending_crtcs_event.lock);
}
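
As an aside, wait_event_interruptible_locked() must be entered with the waitqueue's internal spinlock held; it drops and retakes that lock around the actual sleep. A condensed sketch of the claim/release idiom used above, with generic names rather than the driver's own:

#include <linux/spinlock.h>
#include <linux/wait.h>

/* Sleep until none of the bits in 'mask' are busy, then claim them. */
static int claim_mask(wait_queue_head_t *wq, unsigned long *busy,
		      unsigned long mask)
{
	int ret;

	spin_lock(&wq->lock);
	ret = wait_event_interruptible_locked(*wq, !(*busy & mask));
	if (ret == 0)
		*busy |= mask;
	spin_unlock(&wq->lock);
	return ret;
}

/* Clear the bits and wake any sleepers waiting to claim them. */
static void release_mask(wait_queue_head_t *wq, unsigned long *busy,
			 unsigned long mask)
{
	spin_lock(&wq->lock);
	*busy &= ~mask;
	wake_up_all_locked(wq);
	spin_unlock(&wq->lock);
}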
static struct msm_commit *new_commit(struct drm_atomic_state *state)
{
struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
@ -58,12 +89,27 @@ static void complete_commit(struct msm_commit *c)
drm_atomic_helper_commit_post_planes(dev, state);
/* NOTE: _wait_for_vblanks() only waits for vblank on
* enabled CRTCs. So we end up faulting when disabling
* due to (potentially) unref'ing the outgoing fb's
* before the vblank when the disable has latched.
*
* But if it did wait on disabled (or newly disabled)
* CRTCs, that would be racy (ie. we could have missed
* the irq). We need some way to poll for pipe shut
* down. Or just live with occasionally hitting the
* timeout in the CRTC disable path (which really should
* not be a critical path)
*/
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_state_free(state);
end_atomic(dev->dev_private, c->crtc_mask);
kfree(c);
}
@ -97,8 +143,9 @@ static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async)
{
struct msm_commit *c;
int nplanes = dev->mode_config.num_total_plane;
int ncrtcs = dev->mode_config.num_crtc;
struct msm_commit *c;
int i, ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
@ -106,6 +153,18 @@ int msm_atomic_commit(struct drm_device *dev,
return ret;
c = new_commit(state);
if (!c)
return -ENOMEM;
/*
* Figure out what crtcs we have:
*/
for (i = 0; i < ncrtcs; i++) {
struct drm_crtc *crtc = state->crtcs[i];
if (!crtc)
continue;
c->crtc_mask |= (1 << drm_crtc_index(crtc));
}
/*
* Figure out what fence to wait for:
@ -121,6 +180,14 @@ int msm_atomic_commit(struct drm_device *dev,
add_fb(c, new_state->fb);
}
/*
* Wait for pending updates on any of the same crtcs and then
* mark our set of crtcs as busy:
*/
ret = start_atomic(dev->dev_private, c->crtc_mask);
if (ret)
return ret;
/*
* This is the point of no return - everything below never fails except
* when the hw goes bonghits. Which means we can commit the new state on

View File

@ -193,6 +193,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
priv->wq = alloc_ordered_workqueue("msm", 0);
init_waitqueue_head(&priv->fence_event);
init_waitqueue_head(&priv->pending_crtcs_event);
INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->fence_cbs);

View File

@ -96,6 +96,10 @@ struct msm_drm_private {
/* callbacks deferred until bo is inactive: */
struct list_head fence_cbs;
/* crtcs pending async atomic updates: */
uint32_t pending_crtcs;
wait_queue_head_t pending_crtcs_event;
/* registered MMUs: */
unsigned int num_mmus;
struct msm_mmu *mmus[NUM_DOMAINS];

View File

@ -190,8 +190,7 @@ fail_unlock:
fail:
if (ret) {
if (fbi)
framebuffer_release(fbi);
framebuffer_release(fbi);
if (fb) {
drm_framebuffer_unregister_private(fb);
drm_framebuffer_remove(fb);

View File

@ -535,8 +535,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
drm_free_large(msm_obj->pages);
} else {
if (msm_obj->vaddr)
vunmap(msm_obj->vaddr);
vunmap(msm_obj->vaddr);
put_pages(obj);
}

View File

@ -876,7 +876,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
if (ret)
return ret;
bo->gem.dumb = true;
ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
drm_gem_object_unreference_unlocked(&bo->gem);
return ret;
@ -892,14 +891,6 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
gem = drm_gem_object_lookup(dev, file_priv, handle);
if (gem) {
struct nouveau_bo *bo = nouveau_gem_object(gem);
/*
* We don't allow dumb mmaps on objects created using another
* interface.
*/
WARN_ONCE(!(gem->dumb || gem->import_attach),
"Illegal dumb map of accelerated buffer.\n");
*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
drm_gem_object_unreference_unlocked(gem);
return 0;

View File

@ -444,9 +444,6 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
WARN_ONCE(nvbo->gem.dumb,
"GPU use of dumb buffer is illegal.\n");
ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
b->write_domains,
b->valid_domains);

View File

@ -28,6 +28,7 @@
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "drm_legacy.h"
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
@ -281,7 +282,7 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
return -EINVAL;
return drm_legacy_mmap(filp, vma);
return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

View File

@ -394,10 +394,9 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return r;
}
static int radeon_mode_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, bool dumb,
uint64_t *offset_p)
int radeon_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p)
{
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@ -406,14 +405,6 @@ static int radeon_mode_mmap(struct drm_file *filp,
if (gobj == NULL) {
return -ENOENT;
}
/*
* We don't allow dumb mmaps on objects created using another
* interface.
*/
WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
"Illegal dumb map of GPU buffer.\n");
robj = gem_to_radeon_bo(gobj);
if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
drm_gem_object_unreference_unlocked(gobj);
@ -424,20 +415,12 @@ static int radeon_mode_mmap(struct drm_file *filp,
return 0;
}
int radeon_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p)
{
return radeon_mode_mmap(filp, dev, handle, true, offset_p);
}
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct drm_radeon_gem_mmap *args = data;
return radeon_mode_mmap(filp, dev, args->handle, false,
&args->addr_ptr);
return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@ -763,7 +746,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
return -ENOMEM;
r = drm_gem_handle_create(file_priv, gobj, &handle);
gobj->dumb = true;
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {

View File

@ -28,6 +28,8 @@
#include "cikd.h"
#include "cik_reg.h"
#include "radeon_kfd.h"
#include "radeon_ucode.h"
#include <linux/firmware.h>
#define CIK_PIPE_PER_MEC (4)
@ -49,6 +51,7 @@ static uint64_t get_vmem_size(struct kgd_dev *kgd);
static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
/*
* Register access functions
@ -91,6 +94,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
.hqd_load = kgd_hqd_load,
.hqd_is_occupies = kgd_hqd_is_occupies,
.hqd_destroy = kgd_hqd_destroy,
.get_fw_version = get_fw_version
};
static const struct kgd2kfd_calls *kgd2kfd;
@ -561,3 +565,52 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
release_queue(kgd);
return 0;
}
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
struct radeon_device *rdev = (struct radeon_device *) kgd;
const union radeon_firmware_header *hdr;
BUG_ON(kgd == NULL || rdev->mec_fw == NULL);
switch (type) {
case KGD_ENGINE_PFP:
hdr = (const union radeon_firmware_header *) rdev->pfp_fw->data;
break;
case KGD_ENGINE_ME:
hdr = (const union radeon_firmware_header *) rdev->me_fw->data;
break;
case KGD_ENGINE_CE:
hdr = (const union radeon_firmware_header *) rdev->ce_fw->data;
break;
case KGD_ENGINE_MEC1:
hdr = (const union radeon_firmware_header *) rdev->mec_fw->data;
break;
case KGD_ENGINE_MEC2:
hdr = (const union radeon_firmware_header *)
rdev->mec2_fw->data;
break;
case KGD_ENGINE_RLC:
hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
break;
case KGD_ENGINE_SDMA:
hdr = (const union radeon_firmware_header *)
rdev->sdma_fw->data;
break;
default:
return 0;
}
if (hdr == NULL)
return 0;
/* Only 12 bits in use */
return hdr->common.ucode_version;
}
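
For orientation, a minimal sketch of how the amdkfd side could consume this new callback (the kfd2kgd_calls table and kgd_dev handle come from the radeon<->amdkfd interface; the helper name here is made up):

/* Query the 12-bit MEC firmware version through the kfd2kgd table. */
static uint16_t query_mec_fw_version(const struct kfd2kgd_calls *funcs,
				     struct kgd_dev *kgd)
{
	return funcs->get_fw_version(kgd, KGD_ENGINE_MEC1);
}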

View File

@ -529,9 +529,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u32 current_domain =
radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
WARN_ONCE(bo->gem_base.dumb,
"GPU use of dumb buffer is illegal.\n");
/* Check if this buffer will be moved and don't move it
* if we have moved too many buffers for this IB already.
*

View File

@ -168,7 +168,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
const struct tegra_dc_window *window)
{
unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
unsigned long value;
unsigned long value, flags;
bool yuv, planar;
/*
@ -181,6 +181,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
else
bpp = planar ? 1 : 2;
spin_lock_irqsave(&dc->lock, flags);
value = WINDOW_A_SELECT << index;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
@ -273,6 +275,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
case TEGRA_BO_TILING_MODE_BLOCK:
DRM_ERROR("hardware doesn't support block linear mode\n");
spin_unlock_irqrestore(&dc->lock, flags);
return -EINVAL;
}
@ -331,6 +334,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
tegra_dc_window_commit(dc, index);
spin_unlock_irqrestore(&dc->lock, flags);
return 0;
}
@ -338,11 +343,14 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
{
struct tegra_dc *dc = to_tegra_dc(plane->crtc);
struct tegra_plane *p = to_tegra_plane(plane);
unsigned long flags;
u32 value;
if (!plane->crtc)
return 0;
spin_lock_irqsave(&dc->lock, flags);
value = WINDOW_A_SELECT << p->index;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
@ -352,6 +360,8 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
tegra_dc_window_commit(dc, p->index);
spin_unlock_irqrestore(&dc->lock, flags);
return 0;
}
@ -699,14 +709,16 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
unsigned int h_offset = 0, v_offset = 0;
struct tegra_bo_tiling tiling;
unsigned long value, flags;
unsigned int format, swap;
unsigned long value;
int err;
err = tegra_fb_get_tiling(fb, &tiling);
if (err < 0)
return err;
spin_lock_irqsave(&dc->lock, flags);
tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
value = fb->offsets[0] + y * fb->pitches[0] +
@ -752,6 +764,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
case TEGRA_BO_TILING_MODE_BLOCK:
DRM_ERROR("hardware doesn't support block linear mode\n");
spin_unlock_irqrestore(&dc->lock, flags);
return -EINVAL;
}
@ -778,6 +791,8 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
spin_unlock_irqrestore(&dc->lock, flags);
return 0;
}
@ -814,23 +829,32 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
unsigned long flags, base;
struct tegra_bo *bo;
if (!dc->event)
spin_lock_irqsave(&drm->event_lock, flags);
if (!dc->event) {
spin_unlock_irqrestore(&drm->event_lock, flags);
return;
}
bo = tegra_fb_get_plane(crtc->primary->fb, 0);
spin_lock_irqsave(&dc->lock, flags);
/* check if new start address has been latched */
tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
spin_unlock_irqrestore(&dc->lock, flags);
if (base == bo->paddr + crtc->primary->fb->offsets[0]) {
spin_lock_irqsave(&drm->event_lock, flags);
drm_send_vblank_event(drm, dc->pipe, dc->event);
drm_vblank_put(drm, dc->pipe);
drm_crtc_send_vblank_event(crtc, dc->event);
drm_crtc_vblank_put(crtc);
dc->event = NULL;
spin_unlock_irqrestore(&drm->event_lock, flags);
}
spin_unlock_irqrestore(&drm->event_lock, flags);
}
void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
@ -843,7 +867,7 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
if (dc->event && dc->event->base.file_priv == file) {
dc->event->base.destroy(&dc->event->base);
drm_vblank_put(drm, dc->pipe);
drm_crtc_vblank_put(crtc);
dc->event = NULL;
}
@ -853,16 +877,16 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
{
unsigned int pipe = drm_crtc_index(crtc);
struct tegra_dc *dc = to_tegra_dc(crtc);
struct drm_device *drm = crtc->dev;
if (dc->event)
return -EBUSY;
if (event) {
event->pipe = dc->pipe;
event->pipe = pipe;
dc->event = event;
drm_vblank_get(drm, dc->pipe);
drm_crtc_vblank_get(crtc);
}
tegra_dc_set_base(dc, 0, 0, fb);
@ -1127,7 +1151,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
/*
dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
*/
drm_handle_vblank(dc->base.dev, dc->pipe);
drm_crtc_handle_vblank(&dc->base);
tegra_dc_finish_page_flip(dc);
}

View File

@ -694,24 +694,28 @@ static const struct file_operations tegra_drm_fops = {
.llseek = noop_llseek,
};
static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
unsigned int pipe)
{
struct drm_crtc *crtc;
list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
struct tegra_dc *dc = to_tegra_dc(crtc);
if (dc->pipe == pipe)
if (pipe == drm_crtc_index(crtc))
return crtc;
}
return NULL;
}
static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
{
struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
if (!crtc)
return 0;
/* TODO: implement real hardware counter using syncpoints */
return drm_vblank_count(dev, crtc);
return drm_crtc_vblank_count(crtc);
}
static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)

View File

@ -216,32 +216,58 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
}
}
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
size_t size)
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
struct scatterlist *s;
struct sg_table *sgt;
unsigned int i;
bo->pages = drm_gem_get_pages(&bo->gem);
if (IS_ERR(bo->pages))
return PTR_ERR(bo->pages);
bo->num_pages = size >> PAGE_SHIFT;
bo->num_pages = bo->gem.size >> PAGE_SHIFT;
bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
if (IS_ERR(bo->sgt)) {
drm_gem_put_pages(&bo->gem, bo->pages, false, false);
return PTR_ERR(bo->sgt);
sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
if (IS_ERR(sgt))
goto put_pages;
/*
* Fake up the SG table so that dma_map_sg() can be used to flush the
* pages associated with it. Note that this relies on the fact that
* the DMA API doesn't hook into the IOMMU on Tegra, so mapping is
* only cache maintenance.
*
* TODO: Replace this with drm_clflush_sg() once it can be implemented
* without relying on symbols that are not exported.
*/
for_each_sg(sgt->sgl, s, sgt->nents, i)
sg_dma_address(s) = sg_phys(s);
if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
sgt = ERR_PTR(-ENOMEM);
goto release_sgt;
}
bo->sgt = sgt;
return 0;
release_sgt:
sg_free_table(sgt);
kfree(sgt);
put_pages:
drm_gem_put_pages(&bo->gem, bo->pages, false, false);
return PTR_ERR(sgt);
}
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
size_t size)
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
struct tegra_drm *tegra = drm->dev_private;
int err;
if (tegra->domain) {
err = tegra_bo_get_pages(drm, bo, size);
err = tegra_bo_get_pages(drm, bo);
if (err < 0)
return err;
@ -251,6 +277,8 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
return err;
}
} else {
size_t size = bo->gem.size;
bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) {
@ -274,7 +302,7 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
if (IS_ERR(bo))
return bo;
err = tegra_bo_alloc(drm, bo, size);
err = tegra_bo_alloc(drm, bo);
if (err < 0)
goto release;

View File

@ -28,6 +28,13 @@
#include <linux/cdev.h>
#include "input-compat.h"
enum evdev_clock_type {
EV_CLK_REAL = 0,
EV_CLK_MONO,
EV_CLK_BOOT,
EV_CLK_MAX
};
struct evdev {
int open;
struct input_handle handle;
@ -49,12 +56,32 @@ struct evdev_client {
struct fasync_struct *fasync;
struct evdev *evdev;
struct list_head node;
int clkid;
int clk_type;
bool revoked;
unsigned int bufsize;
struct input_event buffer[];
};
static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
{
switch (clkid) {
case CLOCK_REALTIME:
client->clk_type = EV_CLK_REAL;
break;
case CLOCK_MONOTONIC:
client->clk_type = EV_CLK_MONO;
break;
case CLOCK_BOOTTIME:
client->clk_type = EV_CLK_BOOT;
break;
default:
return -EINVAL;
}
return 0;
}
/* flush queued events of type @type, caller must hold client->buffer_lock */
static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
{
@ -108,8 +135,11 @@ static void evdev_queue_syn_dropped(struct evdev_client *client)
struct input_event ev;
ktime_t time;
time = (client->clkid == CLOCK_MONOTONIC) ?
ktime_get() : ktime_get_real();
time = client->clk_type == EV_CLK_REAL ?
ktime_get_real() :
client->clk_type == EV_CLK_MONO ?
ktime_get() :
ktime_get_boottime();
ev.time = ktime_to_timeval(time);
ev.type = EV_SYN;
@ -159,7 +189,7 @@ static void __pass_event(struct evdev_client *client,
static void evdev_pass_values(struct evdev_client *client,
const struct input_value *vals, unsigned int count,
ktime_t mono, ktime_t real)
ktime_t *ev_time)
{
struct evdev *evdev = client->evdev;
const struct input_value *v;
@ -169,8 +199,7 @@ static void evdev_pass_values(struct evdev_client *client,
if (client->revoked)
return;
event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ?
mono : real);
event.time = ktime_to_timeval(ev_time[client->clk_type]);
/* Interrupts are disabled, just acquire the lock. */
spin_lock(&client->buffer_lock);
@ -198,21 +227,22 @@ static void evdev_events(struct input_handle *handle,
{
struct evdev *evdev = handle->private;
struct evdev_client *client;
ktime_t time_mono, time_real;
ktime_t ev_time[EV_CLK_MAX];
time_mono = ktime_get();
time_real = ktime_mono_to_real(time_mono);
ev_time[EV_CLK_MONO] = ktime_get();
ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]);
ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO],
TK_OFFS_BOOT);
rcu_read_lock();
client = rcu_dereference(evdev->grab);
if (client)
evdev_pass_values(client, vals, count, time_mono, time_real);
evdev_pass_values(client, vals, count, ev_time);
else
list_for_each_entry_rcu(client, &evdev->client_list, node)
evdev_pass_values(client, vals, count,
time_mono, time_real);
evdev_pass_values(client, vals, count, ev_time);
rcu_read_unlock();
}
@ -877,10 +907,8 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
case EVIOCSCLOCKID:
if (copy_from_user(&i, p, sizeof(unsigned int)))
return -EFAULT;
if (i != CLOCK_MONOTONIC && i != CLOCK_REALTIME)
return -EINVAL;
client->clkid = i;
return 0;
return evdev_set_clk_type(client, i);
case EVIOCGKEYCODE:
return evdev_handle_get_keycode(dev, p);
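
For context, a small userspace sketch of the new capability (the device node path is hypothetical): it selects the boot-time clock with EVIOCSCLOCKID, which older kernels reject with EINVAL since they only accept CLOCK_REALTIME and CLOCK_MONOTONIC.

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	int fd = open("/dev/input/event0", O_RDONLY); /* hypothetical node */
	int clkid = CLOCK_BOOTTIME;
	struct input_event ev;

	if (fd < 0)
		return 1;

	/* Timestamps of all subsequent events use the chosen clock. */
	if (ioctl(fd, EVIOCSCLOCKID, &clkid))
		perror("EVIOCSCLOCKID");

	if (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev))
		printf("event time: %ld.%06ld\n",
		       (long)ev.time.tv_sec, (long)ev.time.tv_usec);

	close(fd);
	return 0;
}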

View File

@ -1974,18 +1974,22 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
for (i = 0; i < ABS_CNT; i++) {
if (test_bit(i, dev->absbit)) {
if (input_is_mt_axis(i))
events += mt_slots;
else
events++;
if (test_bit(EV_ABS, dev->evbit)) {
for (i = 0; i < ABS_CNT; i++) {
if (test_bit(i, dev->absbit)) {
if (input_is_mt_axis(i))
events += mt_slots;
else
events++;
}
}
}
for (i = 0; i < REL_CNT; i++)
if (test_bit(i, dev->relbit))
events++;
if (test_bit(EV_REL, dev->evbit)) {
for (i = 0; i < REL_CNT; i++)
if (test_bit(i, dev->relbit))
events++;
}
/* Make room for KEY and MSC events */
events += 7;

View File

@ -559,6 +559,7 @@ config KEYBOARD_SH_KEYSC
config KEYBOARD_STMPE
tristate "STMPE keypad support"
depends on MFD_STMPE
depends on OF
select INPUT_MATRIXKMAP
help
Say Y here if you want to use the keypad controller on STMPE I/O

View File

@ -35,9 +35,13 @@
struct gpio_button_data {
const struct gpio_keys_button *button;
struct input_dev *input;
struct timer_list timer;
struct work_struct work;
unsigned int timer_debounce; /* in msecs */
struct timer_list release_timer;
unsigned int release_delay; /* in msecs, for IRQ-only buttons */
struct delayed_work work;
unsigned int software_debounce; /* in msecs, for GPIO-driven buttons */
unsigned int irq;
spinlock_t lock;
bool disabled;
@ -116,11 +120,14 @@ static void gpio_keys_disable_button(struct gpio_button_data *bdata)
{
if (!bdata->disabled) {
/*
* Disable IRQ and possible debouncing timer.
* Disable IRQ and associated timer/work structure.
*/
disable_irq(bdata->irq);
if (bdata->timer_debounce)
del_timer_sync(&bdata->timer);
if (gpio_is_valid(bdata->button->gpio))
cancel_delayed_work_sync(&bdata->work);
else
del_timer_sync(&bdata->release_timer);
bdata->disabled = true;
}
@ -343,7 +350,7 @@ static void gpio_keys_gpio_report_event(struct gpio_button_data *bdata)
static void gpio_keys_gpio_work_func(struct work_struct *work)
{
struct gpio_button_data *bdata =
container_of(work, struct gpio_button_data, work);
container_of(work, struct gpio_button_data, work.work);
gpio_keys_gpio_report_event(bdata);
@ -351,13 +358,6 @@ static void gpio_keys_gpio_work_func(struct work_struct *work)
pm_relax(bdata->input->dev.parent);
}
static void gpio_keys_gpio_timer(unsigned long _data)
{
struct gpio_button_data *bdata = (struct gpio_button_data *)_data;
schedule_work(&bdata->work);
}
static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
{
struct gpio_button_data *bdata = dev_id;
@ -366,11 +366,10 @@ static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
if (bdata->button->wakeup)
pm_stay_awake(bdata->input->dev.parent);
if (bdata->timer_debounce)
mod_timer(&bdata->timer,
jiffies + msecs_to_jiffies(bdata->timer_debounce));
else
schedule_work(&bdata->work);
mod_delayed_work(system_wq,
&bdata->work,
msecs_to_jiffies(bdata->software_debounce));
return IRQ_HANDLED;
}
@ -408,7 +407,7 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
input_event(input, EV_KEY, button->code, 1);
input_sync(input);
if (!bdata->timer_debounce) {
if (!bdata->release_delay) {
input_event(input, EV_KEY, button->code, 0);
input_sync(input);
goto out;
@ -417,9 +416,9 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
bdata->key_pressed = true;
}
if (bdata->timer_debounce)
mod_timer(&bdata->timer,
jiffies + msecs_to_jiffies(bdata->timer_debounce));
if (bdata->release_delay)
mod_timer(&bdata->release_timer,
jiffies + msecs_to_jiffies(bdata->release_delay));
out:
spin_unlock_irqrestore(&bdata->lock, flags);
return IRQ_HANDLED;
@ -429,10 +428,10 @@ static void gpio_keys_quiesce_key(void *data)
{
struct gpio_button_data *bdata = data;
if (bdata->timer_debounce)
del_timer_sync(&bdata->timer);
cancel_work_sync(&bdata->work);
if (gpio_is_valid(bdata->button->gpio))
cancel_delayed_work_sync(&bdata->work);
else
del_timer_sync(&bdata->release_timer);
}
static int gpio_keys_setup_key(struct platform_device *pdev,
@ -466,23 +465,25 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
button->debounce_interval * 1000);
/* use timer if gpiolib doesn't provide debounce */
if (error < 0)
bdata->timer_debounce =
bdata->software_debounce =
button->debounce_interval;
}
irq = gpio_to_irq(button->gpio);
if (irq < 0) {
error = irq;
dev_err(dev,
"Unable to get irq number for GPIO %d, error %d\n",
button->gpio, error);
return error;
if (button->irq) {
bdata->irq = button->irq;
} else {
irq = gpio_to_irq(button->gpio);
if (irq < 0) {
error = irq;
dev_err(dev,
"Unable to get irq number for GPIO %d, error %d\n",
button->gpio, error);
return error;
}
bdata->irq = irq;
}
bdata->irq = irq;
INIT_WORK(&bdata->work, gpio_keys_gpio_work_func);
setup_timer(&bdata->timer,
gpio_keys_gpio_timer, (unsigned long)bdata);
INIT_DELAYED_WORK(&bdata->work, gpio_keys_gpio_work_func);
isr = gpio_keys_gpio_isr;
irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
@ -499,8 +500,8 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
return -EINVAL;
}
bdata->timer_debounce = button->debounce_interval;
setup_timer(&bdata->timer,
bdata->release_delay = button->debounce_interval;
setup_timer(&bdata->release_timer,
gpio_keys_irq_timer, (unsigned long)bdata);
isr = gpio_keys_irq_isr;
@ -510,7 +511,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
input_set_capability(input, button->type ?: EV_KEY, button->code);
/*
* Install custom action to cancel debounce timer and
* Install custom action to cancel release timer and
* workqueue item.
*/
error = devm_add_action(&pdev->dev, gpio_keys_quiesce_key, bdata);
@ -618,33 +619,30 @@ gpio_keys_get_devtree_pdata(struct device *dev)
i = 0;
for_each_child_of_node(node, pp) {
int gpio = -1;
enum of_gpio_flags flags;
button = &pdata->buttons[i++];
if (!of_find_property(pp, "gpios", NULL)) {
button->irq = irq_of_parse_and_map(pp, 0);
if (button->irq == 0) {
i--;
pdata->nbuttons--;
dev_warn(dev, "Found button without gpios or irqs\n");
continue;
}
} else {
gpio = of_get_gpio_flags(pp, 0, &flags);
if (gpio < 0) {
error = gpio;
button->gpio = of_get_gpio_flags(pp, 0, &flags);
if (button->gpio < 0) {
error = button->gpio;
if (error != -ENOENT) {
if (error != -EPROBE_DEFER)
dev_err(dev,
"Failed to get gpio flags, error: %d\n",
error);
return ERR_PTR(error);
}
} else {
button->active_low = flags & OF_GPIO_ACTIVE_LOW;
}
button->gpio = gpio;
button->active_low = flags & OF_GPIO_ACTIVE_LOW;
button->irq = irq_of_parse_and_map(pp, 0);
if (!gpio_is_valid(button->gpio) && !button->irq) {
dev_err(dev, "Found button without gpios or irqs\n");
return ERR_PTR(-EINVAL);
}
if (of_property_read_u32(pp, "linux,code", &button->code)) {
dev_err(dev, "Button without keycode: 0x%x\n",
@ -659,6 +657,8 @@ gpio_keys_get_devtree_pdata(struct device *dev)
button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL);
if (of_property_read_u32(pp, "debounce-interval",
&button->debounce_interval))
button->debounce_interval = 5;
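
To illustrate the new interrupt-only path, a hedged board-file sketch (the IRQ number and key code are made up; on DT systems the equivalent is a child node with an "interrupts" property and no "gpios"):

#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/kernel.h>

/* A button wired straight to an interrupt line, with no GPIO to read
 * back. For such buttons the driver arms release_timer: the release
 * event is synthesized debounce_interval ms after the interrupt. */
static struct gpio_keys_button board_buttons[] = {
	{
		.code = KEY_POWER,
		.gpio = -1,		/* invalid: no GPIO backs this key */
		.irq = 42,		/* assumed interrupt number */
		.type = EV_KEY,
		.debounce_interval = 50,
	},
};

static struct gpio_keys_platform_data board_button_data = {
	.buttons = board_buttons,
	.nbuttons = ARRAY_SIZE(board_buttons),
};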

View File

@ -473,7 +473,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
if (error)
goto bail1;
init_completion(&dev->cmd_done);
reinit_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
@ -482,7 +482,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
if (error)
goto bail1;
init_completion(&dev->cmd_done);
reinit_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
@ -491,7 +491,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
if (error)
goto bail1;
init_completion(&dev->cmd_done);
reinit_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);

View File

@ -45,13 +45,14 @@
#define STMPE_KEYPAD_MAX_ROWS 8
#define STMPE_KEYPAD_MAX_COLS 8
#define STMPE_KEYPAD_ROW_SHIFT 3
#define STMPE_KEYPAD_KEYMAP_SIZE \
#define STMPE_KEYPAD_KEYMAP_MAX_SIZE \
(STMPE_KEYPAD_MAX_ROWS * STMPE_KEYPAD_MAX_COLS)
/**
* struct stmpe_keypad_variant - model-specific attributes
* @auto_increment: whether the KPC_DATA_BYTE register address
* auto-increments on multiple reads
* @set_pullup: whether the pins need to have their pull-ups set
* @num_data: number of data bytes
* @num_normal_data: number of normal keys' data bytes
* @max_cols: maximum number of columns supported
@ -61,6 +62,7 @@
*/
struct stmpe_keypad_variant {
bool auto_increment;
bool set_pullup;
int num_data;
int num_normal_data;
int max_cols;
@ -81,6 +83,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
},
[STMPE2401] = {
.auto_increment = false,
.set_pullup = true,
.num_data = 3,
.num_normal_data = 2,
.max_cols = 8,
@ -90,6 +93,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
},
[STMPE2403] = {
.auto_increment = true,
.set_pullup = true,
.num_data = 5,
.num_normal_data = 3,
.max_cols = 8,
@ -99,16 +103,30 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
},
};
/**
* struct stmpe_keypad - STMPE keypad state container
* @stmpe: pointer to parent STMPE device
* @input: spawned input device
* @variant: STMPE variant
* @debounce_ms: debounce interval, in ms. Maximum is
* %STMPE_KEYPAD_MAX_DEBOUNCE.
* @scan_count: number of key scanning cycles to confirm key data.
* Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT.
* @no_autorepeat: disable key autorepeat
* @rows: bitmask for the rows
* @cols: bitmask for the columns
* @keymap: the keymap
*/
struct stmpe_keypad {
struct stmpe *stmpe;
struct input_dev *input;
const struct stmpe_keypad_variant *variant;
const struct stmpe_keypad_platform_data *plat;
unsigned int debounce_ms;
unsigned int scan_count;
bool no_autorepeat;
unsigned int rows;
unsigned int cols;
unsigned short keymap[STMPE_KEYPAD_KEYMAP_SIZE];
unsigned short keymap[STMPE_KEYPAD_KEYMAP_MAX_SIZE];
};
static int stmpe_keypad_read_data(struct stmpe_keypad *keypad, u8 *data)
@ -171,7 +189,10 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
unsigned int col_gpios = variant->col_gpios;
unsigned int row_gpios = variant->row_gpios;
struct stmpe *stmpe = keypad->stmpe;
u8 pureg = stmpe->regs[STMPE_IDX_GPPUR_LSB];
unsigned int pins = 0;
unsigned int pu_pins = 0;
int ret;
int i;
/*
@ -188,8 +209,10 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
for (i = 0; i < variant->max_cols; i++) {
int num = __ffs(col_gpios);
if (keypad->cols & (1 << i))
if (keypad->cols & (1 << i)) {
pins |= 1 << num;
pu_pins |= 1 << num;
}
col_gpios &= ~(1 << num);
}
@ -203,20 +226,43 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
row_gpios &= ~(1 << num);
}
return stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
ret = stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
if (ret)
return ret;
/*
* On STMPE24xx, set pin bias to pull-up on all keypad input
* pins (columns); these incidentally happen to be at most 8 pins,
* placed at GPIO0-7, so only the LSB of the pull-up register
* ever needs to be written.
*/
if (variant->set_pullup) {
u8 val;
ret = stmpe_reg_read(stmpe, pureg);
if (ret)
return ret;
/* Do not touch unused pins; they may be used for GPIO */
val = ret & ~pu_pins;
val |= pu_pins;
ret = stmpe_reg_write(stmpe, pureg, val);
}
return 0;
}
static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
{
const struct stmpe_keypad_platform_data *plat = keypad->plat;
const struct stmpe_keypad_variant *variant = keypad->variant;
struct stmpe *stmpe = keypad->stmpe;
int ret;
if (plat->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
if (keypad->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
return -EINVAL;
if (plat->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
if (keypad->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
return -EINVAL;
ret = stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD);
@ -245,7 +291,7 @@ static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
ret = stmpe_set_bits(stmpe, STMPE_KPC_CTRL_MSB,
STMPE_KPC_CTRL_MSB_SCAN_COUNT,
plat->scan_count << 4);
keypad->scan_count << 4);
if (ret < 0)
return ret;
@ -253,17 +299,18 @@ static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
STMPE_KPC_CTRL_LSB_SCAN |
STMPE_KPC_CTRL_LSB_DEBOUNCE,
STMPE_KPC_CTRL_LSB_SCAN |
(plat->debounce_ms << 1));
(keypad->debounce_ms << 1));
}
static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad,
u32 used_rows, u32 used_cols)
{
int row, col;
for (row = 0; row < STMPE_KEYPAD_MAX_ROWS; row++) {
for (col = 0; col < STMPE_KEYPAD_MAX_COLS; col++) {
for (row = 0; row < used_rows; row++) {
for (col = 0; col < used_cols; col++) {
int code = MATRIX_SCAN_CODE(row, col,
STMPE_KEYPAD_ROW_SHIFT);
STMPE_KEYPAD_ROW_SHIFT);
if (keypad->keymap[code] != KEY_RESERVED) {
keypad->rows |= 1 << row;
keypad->cols |= 1 << col;
@ -272,51 +319,17 @@ static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
}
}
#ifdef CONFIG_OF
static const struct stmpe_keypad_platform_data *
stmpe_keypad_of_probe(struct device *dev)
{
struct device_node *np = dev->of_node;
struct stmpe_keypad_platform_data *plat;
if (!np)
return ERR_PTR(-ENODEV);
plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
return ERR_PTR(-ENOMEM);
of_property_read_u32(np, "debounce-interval", &plat->debounce_ms);
of_property_read_u32(np, "st,scan-count", &plat->scan_count);
plat->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
return plat;
}
#else
static inline const struct stmpe_keypad_platform_data *
stmpe_keypad_of_probe(struct device *dev)
{
return ERR_PTR(-EINVAL);
}
#endif
static int stmpe_keypad_probe(struct platform_device *pdev)
{
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
const struct stmpe_keypad_platform_data *plat;
struct device_node *np = pdev->dev.of_node;
struct stmpe_keypad *keypad;
struct input_dev *input;
u32 rows;
u32 cols;
int error;
int irq;
plat = stmpe->pdata->keypad;
if (!plat) {
plat = stmpe_keypad_of_probe(&pdev->dev);
if (IS_ERR(plat))
return PTR_ERR(plat);
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@ -326,6 +339,13 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
if (!keypad)
return -ENOMEM;
keypad->stmpe = stmpe;
keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
of_property_read_u32(np, "debounce-interval", &keypad->debounce_ms);
of_property_read_u32(np, "st,scan-count", &keypad->scan_count);
keypad->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
input = devm_input_allocate_device(&pdev->dev);
if (!input)
return -ENOMEM;
@ -334,23 +354,22 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
input->id.bustype = BUS_I2C;
input->dev.parent = &pdev->dev;
error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
STMPE_KEYPAD_MAX_ROWS,
STMPE_KEYPAD_MAX_COLS,
error = matrix_keypad_parse_of_params(&pdev->dev, &rows, &cols);
if (error)
return error;
error = matrix_keypad_build_keymap(NULL, NULL, rows, cols,
keypad->keymap, input);
if (error)
return error;
input_set_capability(input, EV_MSC, MSC_SCAN);
if (!plat->no_autorepeat)
if (!keypad->no_autorepeat)
__set_bit(EV_REP, input->evbit);
stmpe_keypad_fill_used_pins(keypad);
stmpe_keypad_fill_used_pins(keypad, rows, cols);
keypad->stmpe = stmpe;
keypad->plat = plat;
keypad->input = input;
keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
error = stmpe_keypad_chip_init(keypad);
if (error < 0)

View File

@ -881,6 +881,34 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
unsigned char *pkt,
unsigned char pkt_id)
{
/*
*       packet-fmt    b7   b6    b5   b4   b3   b2   b1   b0
* Byte0 TWO & MULTI    L    1     R    M    1 Y0-2 Y0-1 Y0-0
* Byte0 NEW            L    1  X1-5    1    1 Y0-2 Y0-1 Y0-0
* Byte1            Y0-10 Y0-9  Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3
* Byte2            X0-11    1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5
* Byte3            X1-11    1  X0-4 X0-3    1 X0-2 X0-1 X0-0
* Byte4 TWO        X1-10  TWO  X1-9 X1-8 X1-7 X1-6 X1-5 X1-4
* Byte4 MULTI      X1-10  TWO  X1-9 X1-8 X1-7 X1-6 Y1-5    1
* Byte4 NEW        X1-10  TWO  X1-9 X1-8 X1-7 X1-6    0    0
* Byte5 TWO & NEW  Y1-10    0  Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4
* Byte5 MULTI      Y1-10    0  Y1-9 Y1-8 Y1-7 Y1-6  F-1  F-0
* L:         Left button
* R / M:     Non-clickpads: Right / Middle button
*            Clickpads: When > 2 fingers are down, and some fingers
*            are in the button area, then the 2 coordinates reported
*            are for fingers outside the button area and these report
*            extra fingers being present in the right / left button
*            area. Note these fingers are not added to the F field,
*            so if a TWO packet is received and R = 1 then there are
*            3 fingers down, etc.
* TWO:       1: Two touches present, byte 0/4/5 are in TWO fmt
*            0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
*               otherwise byte 0 bit 4 must be set and byte 0/4/5 are
*               in NEW fmt
* F:         Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
*/
mt[0].x = ((pkt[2] & 0x80) << 4);
mt[0].x |= ((pkt[2] & 0x3F) << 5);
mt[0].x |= ((pkt[3] & 0x30) >> 1);
@ -919,18 +947,21 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
static int alps_get_mt_count(struct input_mt_pos *mt)
{
int i;
int i, fingers = 0;
for (i = 0; i < MAX_TOUCHES && mt[i].x != 0 && mt[i].y != 0; i++)
/* empty */;
for (i = 0; i < MAX_TOUCHES; i++) {
if (mt[i].x != 0 || mt[i].y != 0)
fingers++;
}
return i;
return fingers;
}
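
A quick worked case shows why the count must scan every slot: with mt[0] = (0, 0) and mt[1] = (500, 400), the old early-exit loop stopped at slot 0 and reported zero fingers, while the new loop reports one; the fixup at the end of alps_decode_packet_v7() below then moves that lone touch into mt[0].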
static int alps_decode_packet_v7(struct alps_fields *f,
unsigned char *p,
struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
unsigned char pkt_id;
pkt_id = alps_get_packet_id_v7(p);
@ -938,19 +969,52 @@ static int alps_decode_packet_v7(struct alps_fields *f,
return 0;
if (pkt_id == V7_PACKET_ID_UNKNOWN)
return -1;
/*
* NEW packets are sent to indicate a discontinuity in the finger
* coordinate reporting. Specifically a finger may have moved from
* slot 0 to 1 or vice versa. INPUT_MT_TRACK takes care of this for
* us.
*
* NEW packets have 3 problems:
* 1) They do not contain middle / right button info (on non-clickpads);
*    this can be worked around by preserving the old button state
* 2) They do not contain an accurate fingercount, and they are
*    typically sent when the number of fingers changes. We cannot use
*    the old finger count as that may not match the number of
*    touch coordinates available in the NEW packet
* 3) Their x data for the second touch is inaccurate, leading to
*    a possible jump of the x coordinate by 16 units when the first
*    non-NEW packet comes in
* Since problems 2 & 3 cannot be worked around, just ignore them.
*/
if (pkt_id == V7_PACKET_ID_NEW)
return 1;
alps_get_finger_coordinate_v7(f->mt, p, pkt_id);
if (pkt_id == V7_PACKET_ID_TWO || pkt_id == V7_PACKET_ID_MULTI) {
f->left = (p[0] & 0x80) >> 7;
if (pkt_id == V7_PACKET_ID_TWO)
f->fingers = alps_get_mt_count(f->mt);
else /* pkt_id == V7_PACKET_ID_MULTI */
f->fingers = 3 + (p[5] & 0x03);
f->left = (p[0] & 0x80) >> 7;
if (priv->flags & ALPS_BUTTONPAD) {
if (p[0] & 0x20)
f->fingers++;
if (p[0] & 0x10)
f->fingers++;
} else {
f->right = (p[0] & 0x20) >> 5;
f->middle = (p[0] & 0x10) >> 4;
}
if (pkt_id == V7_PACKET_ID_TWO)
f->fingers = alps_get_mt_count(f->mt);
else if (pkt_id == V7_PACKET_ID_MULTI)
f->fingers = 3 + (p[5] & 0x03);
/* Sometimes a single touch is reported in mt[1] rather than mt[0] */
if (f->fingers == 1 && f->mt[0].x == 0 && f->mt[0].y == 0) {
f->mt[0].x = f->mt[1].x;
f->mt[0].y = f->mt[1].y;
f->mt[1].x = 0;
f->mt[1].y = 0;
}
return 0;
}

View File

@ -227,6 +227,7 @@ TRACKPOINT_INT_ATTR(thresh, TP_THRESH, TP_DEF_THRESH);
TRACKPOINT_INT_ATTR(upthresh, TP_UP_THRESH, TP_DEF_UP_THRESH);
TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME);
TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV);
TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME);
TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0,
TP_DEF_PTSON);
@ -246,6 +247,7 @@ static struct attribute *trackpoint_attrs[] = {
&psmouse_attr_upthresh.dattr.attr,
&psmouse_attr_ztime.dattr.attr,
&psmouse_attr_jenks.dattr.attr,
&psmouse_attr_drift_time.dattr.attr,
&psmouse_attr_press_to_select.dattr.attr,
&psmouse_attr_skipback.dattr.attr,
&psmouse_attr_ext_dev.dattr.attr,
@ -312,6 +314,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state)
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, upthresh);
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, ztime);
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, jenks);
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, drift_time);
/* toggles */
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, press_to_select);
@ -332,6 +335,7 @@ static void trackpoint_defaults(struct trackpoint_data *tp)
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, upthresh);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, ztime);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, jenks);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, drift_time);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, inertia);
/* toggles */

View File

@ -70,6 +70,9 @@
#define TP_UP_THRESH 0x5A /* Used to generate a 'click' on Z-axis */
#define TP_Z_TIME 0x5E /* How sharp of a press */
#define TP_JENKS_CURV 0x5D /* Minimum curvature for double click */
#define TP_DRIFT_TIME 0x5F /* How long a 'hands off' condition */
/* must last (x*107ms) for drift */
/* correction to occur */
/*
* Toggling Flag bits
@ -120,6 +123,7 @@
#define TP_DEF_UP_THRESH 0xFF
#define TP_DEF_Z_TIME 0x26
#define TP_DEF_JENKS_CURV 0x87
#define TP_DEF_DRIFT_TIME 0x05
/* Toggles */
#define TP_DEF_MB 0x00
@ -137,6 +141,7 @@ struct trackpoint_data
unsigned char draghys, mindrag;
unsigned char thresh, upthresh;
unsigned char ztime, jenks;
unsigned char drift_time;
/* toggles */
unsigned char press_to_select;

View File

@ -99,13 +99,9 @@
#define MXT_T6_STATUS_COMSERR (1 << 2)
/* MXT_GEN_POWER_T7 field */
struct t7_config {
u8 idle;
u8 active;
} __packed;
#define MXT_POWER_CFG_RUN 0
#define MXT_POWER_CFG_DEEPSLEEP 1
#define MXT_POWER_IDLEACQINT 0
#define MXT_POWER_ACTVACQINT 1
#define MXT_POWER_ACTV2IDLETO 2
/* MXT_GEN_ACQUIRE_T8 field */
#define MXT_ACQUIRE_CHRGTIME 0
@ -117,6 +113,7 @@ struct t7_config {
#define MXT_ACQUIRE_ATCHCALSTHR 7
/* MXT_TOUCH_MULTI_T9 field */
#define MXT_TOUCH_CTRL 0
#define MXT_T9_ORIENT 9
#define MXT_T9_RANGE 18
@ -256,7 +253,6 @@ struct mxt_data {
bool update_input;
u8 last_message_count;
u8 num_touchids;
struct t7_config t7_cfg;
/* Cached parameters from object table */
u16 T5_address;
@ -672,6 +668,20 @@ static void mxt_proc_t6_messages(struct mxt_data *data, u8 *msg)
data->t6_status = status;
}
static int mxt_write_object(struct mxt_data *data,
u8 type, u8 offset, u8 val)
{
struct mxt_object *object;
u16 reg;
object = mxt_get_object(data, type);
if (!object || offset >= mxt_obj_size(object))
return -EINVAL;
reg = object->start_address;
return mxt_write_reg(data->client, reg + offset, val);
}
static void mxt_input_button(struct mxt_data *data, u8 *message)
{
struct input_dev *input = data->input_dev;
@ -1742,60 +1752,6 @@ err_free_object_table:
return error;
}
static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
{
struct device *dev = &data->client->dev;
int error;
struct t7_config *new_config;
struct t7_config deepsleep = { .active = 0, .idle = 0 };
if (sleep == MXT_POWER_CFG_DEEPSLEEP)
new_config = &deepsleep;
else
new_config = &data->t7_cfg;
error = __mxt_write_reg(data->client, data->T7_address,
sizeof(data->t7_cfg), new_config);
if (error)
return error;
dev_dbg(dev, "Set T7 ACTV:%d IDLE:%d\n",
new_config->active, new_config->idle);
return 0;
}
static int mxt_init_t7_power_cfg(struct mxt_data *data)
{
struct device *dev = &data->client->dev;
int error;
bool retry = false;
recheck:
error = __mxt_read_reg(data->client, data->T7_address,
sizeof(data->t7_cfg), &data->t7_cfg);
if (error)
return error;
if (data->t7_cfg.active == 0 || data->t7_cfg.idle == 0) {
if (!retry) {
dev_dbg(dev, "T7 cfg zero, resetting\n");
mxt_soft_reset(data);
retry = true;
goto recheck;
} else {
dev_dbg(dev, "T7 cfg zero after reset, overriding\n");
data->t7_cfg.active = 20;
data->t7_cfg.idle = 100;
return mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
}
}
dev_dbg(dev, "Initialized power cfg: ACTV %d, IDLE %d\n",
data->t7_cfg.active, data->t7_cfg.idle);
return 0;
}
static int mxt_configure_objects(struct mxt_data *data,
const struct firmware *cfg)
{
@ -1809,12 +1765,6 @@ static int mxt_configure_objects(struct mxt_data *data,
dev_warn(dev, "Error %d updating config\n", error);
}
error = mxt_init_t7_power_cfg(data);
if (error) {
dev_err(dev, "Failed to initialize power cfg\n");
return error;
}
error = mxt_initialize_t9_input_device(data);
if (error)
return error;
@ -2093,15 +2043,16 @@ static const struct attribute_group mxt_attr_group = {
static void mxt_start(struct mxt_data *data)
{
mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
/* Recalibrate since chip has been in deep sleep */
mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false);
/* Touch enable */
mxt_write_object(data,
MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0x83);
}
static void mxt_stop(struct mxt_data *data)
{
mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP);
/* Touch disable */
mxt_write_object(data,
MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0);
}
static int mxt_input_open(struct input_dev *dev)
@ -2266,6 +2217,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
struct mxt_data *data = i2c_get_clientdata(client);
struct input_dev *input_dev = data->input_dev;
mxt_soft_reset(data);
mutex_lock(&input_dev->mutex);
if (input_dev->users)

View File

@ -850,9 +850,11 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
}
#define EDT_ATTR_CHECKSET(name, reg) \
do { \
if (pdata->name >= edt_ft5x06_attr_##name.limit_low && \
pdata->name <= edt_ft5x06_attr_##name.limit_high) \
edt_ft5x06_register_write(tsdata, reg, pdata->name)
edt_ft5x06_register_write(tsdata, reg, pdata->name); \
} while (0)
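
The do { ... } while (0) wrapper added here matters because the old macro body was a bare brace-less if. A standalone illustration of the hazard, with hypothetical helper names:

void use(int x);
void other(void);

/* Shaped like the old EDT_ATTR_CHECKSET: an if with no braces. */
#define CHECKSET_BROKEN(cond, x) \
	if (cond)		 \
		use(x)

void caller(int a, int b)
{
	if (a)
		CHECKSET_BROKEN(b, a);
	else		/* silently binds to the if inside the macro */
		other();
}

With the do/while form the macro expands to a single statement, so the caller's else binds where it was written.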
#define EDT_GET_PROP(name, reg) { \
u32 val; \

View File

@ -1127,6 +1127,24 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
schedule_zero(tc, virt_block, data_dest, cell, bio);
}
static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
static void check_for_space(struct pool *pool)
{
int r;
dm_block_t nr_free;
if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
return;
r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
if (r)
return;
if (nr_free)
set_pool_mode(pool, PM_WRITE);
}
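
Concretely: if the pool fell into PM_OUT_OF_DATA_SPACE and blocks are later freed (by a discard, say), the next successful metadata commit observes a non-zero free-block count and flips the pool back to PM_WRITE, instead of staying stuck in out-of-space mode until a manual resize.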
/*
* A non-zero return indicates read_only or fail_io mode.
* Many callers don't care about the return value.
@ -1141,6 +1159,8 @@ static int commit(struct pool *pool)
r = dm_pool_commit_metadata(pool->pmd);
if (r)
metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
else
check_for_space(pool);
return r;
}
@ -1159,8 +1179,6 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
}
}
static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
int r;
@ -2155,7 +2173,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
pool->process_cell = process_cell_read_only;
pool->process_discard_cell = process_discard_cell;
pool->process_prepared_mapping = process_prepared_mapping;
pool->process_prepared_discard = process_prepared_discard_passdown;
pool->process_prepared_discard = process_prepared_discard;
if (!pool->pf.error_if_no_space && no_space_timeout)
queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
@ -3814,6 +3832,8 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = -EINVAL;
goto bad;
}
atomic_set(&tc->refcount, 1);
init_completion(&tc->can_destroy);
list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
spin_unlock_irqrestore(&tc->pool->lock, flags);
/*
@ -3826,9 +3846,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
dm_put(pool_md);
atomic_set(&tc->refcount, 1);
init_completion(&tc->can_destroy);
return 0;
bad:

View File

@ -899,7 +899,7 @@ static void disable_write_same(struct mapped_device *md)
static void clone_endio(struct bio *bio, int error)
{
int r = 0;
int r = error;
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md;

View File

@ -519,6 +519,7 @@ static const u8 stmpe1601_regs[] = {
[STMPE_IDX_GPDR_LSB] = STMPE1601_REG_GPIO_SET_DIR_LSB,
[STMPE_IDX_GPRER_LSB] = STMPE1601_REG_GPIO_RE_LSB,
[STMPE_IDX_GPFER_LSB] = STMPE1601_REG_GPIO_FE_LSB,
[STMPE_IDX_GPPUR_LSB] = STMPE1601_REG_GPIO_PU_LSB,
[STMPE_IDX_GPAFR_U_MSB] = STMPE1601_REG_GPIO_AF_U_MSB,
[STMPE_IDX_IEGPIOR_LSB] = STMPE1601_REG_INT_EN_GPIO_MASK_LSB,
[STMPE_IDX_ISGPIOR_MSB] = STMPE1601_REG_INT_STA_GPIO_MSB,
@ -667,6 +668,7 @@ static const u8 stmpe1801_regs[] = {
[STMPE_IDX_GPDR_LSB] = STMPE1801_REG_GPIO_SET_DIR_LOW,
[STMPE_IDX_GPRER_LSB] = STMPE1801_REG_GPIO_RE_LOW,
[STMPE_IDX_GPFER_LSB] = STMPE1801_REG_GPIO_FE_LOW,
[STMPE_IDX_GPPUR_LSB] = STMPE1801_REG_GPIO_PULL_UP_LOW,
[STMPE_IDX_IEGPIOR_LSB] = STMPE1801_REG_INT_EN_GPIO_MASK_LOW,
[STMPE_IDX_ISGPIOR_LSB] = STMPE1801_REG_INT_STA_GPIO_LOW,
};
@ -750,6 +752,8 @@ static const u8 stmpe24xx_regs[] = {
[STMPE_IDX_GPDR_LSB] = STMPE24XX_REG_GPDR_LSB,
[STMPE_IDX_GPRER_LSB] = STMPE24XX_REG_GPRER_LSB,
[STMPE_IDX_GPFER_LSB] = STMPE24XX_REG_GPFER_LSB,
[STMPE_IDX_GPPUR_LSB] = STMPE24XX_REG_GPPUR_LSB,
[STMPE_IDX_GPPDR_LSB] = STMPE24XX_REG_GPPDR_LSB,
[STMPE_IDX_GPAFR_U_MSB] = STMPE24XX_REG_GPAFR_U_MSB,
[STMPE_IDX_IEGPIOR_LSB] = STMPE24XX_REG_IEGPIOR_LSB,
[STMPE_IDX_ISGPIOR_MSB] = STMPE24XX_REG_ISGPIOR_MSB,

Some files were not shown because too many files have changed in this diff.