Mirror of https://github.com/torvalds/linux.git (synced 2024-11-18 01:51:53 +00:00)
Merge 3.10-rc7 into usb-next

We want the USB fixes and other good stuff in this branch as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

This commit is contained in: commit f797d37ead
@@ -1,18 +1,27 @@
 <title>Codec Interface</title>

-<note>
-<title>Suspended</title>
-
-<para>This interface has been be suspended from the V4L2 API
-implemented in Linux 2.6 until we have more experience with codec
-device interfaces.</para>
-</note>
-
 <para>A V4L2 codec can compress, decompress, transform, or otherwise
-convert video data from one format into another format, in memory.
-Applications send data to be converted to the driver through a
-&func-write; call, and receive the converted data through a
-&func-read; call. For efficiency a driver may also support streaming
-I/O.</para>
+convert video data from one format into another format, in memory. Typically
+such devices are memory-to-memory devices (i.e. devices with the
+<constant>V4L2_CAP_VIDEO_M2M</constant> or <constant>V4L2_CAP_VIDEO_M2M_MPLANE</constant>
+capability set).
+</para>

-<para>[to do]</para>
+<para>A memory-to-memory video node acts just like a normal video node, but it
+supports both output (sending frames from memory to the codec hardware) and
+capture (receiving the processed frames from the codec hardware into memory)
+stream I/O. An application will have to setup the stream
+I/O for both sides and finally call &VIDIOC-STREAMON; for both capture and output
+to start the codec.</para>
+
+<para>Video compression codecs use the MPEG controls to setup their codec parameters
+(note that the MPEG controls actually support many more codecs than just MPEG).
+See <xref linkend="mpeg-controls"></xref>.</para>
+
+<para>Memory-to-memory devices can often be used as a shared resource: you can
+open the video node multiple times, each application setting up their own codec properties
+that are local to the file handle, and each can use it independently from the others.
+The driver will arbitrate access to the codec and reprogram it whenever another file
+handler gets access. This is different from the usual video node behavior where the video properties
+are global to the device (i.e. changing something through one file handle is visible
+through another file handle).</para>
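The DocBook text above describes the memory-to-memory flow only in prose. As a rough user-space sketch (not part of this commit) of what it implies, the following configures stream I/O on both the OUTPUT (raw frames to the codec) and CAPTURE (converted data back from the codec) sides of one m2m node and then starts both queues; the device path, pixel formats and buffer count are illustrative assumptions.

/*
 * Minimal sketch, assuming /dev/video0 is an m2m codec node and that
 * YUV420 in / MPEG out is an acceptable format pair. Buffer mmap and
 * queueing are omitted for brevity.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int start_m2m_codec(void)
{
	int fd = open("/dev/video0", O_RDWR);	/* assumed m2m codec node */
	if (fd < 0)
		return -1;

	struct v4l2_format out = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT };
	out.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;	/* raw frames sent to the codec */
	ioctl(fd, VIDIOC_S_FMT, &out);

	struct v4l2_format cap = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
	cap.fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;	/* converted data read back */
	ioctl(fd, VIDIOC_S_FMT, &cap);

	struct v4l2_requestbuffers req = {
		.count  = 4,
		.type   = V4L2_BUF_TYPE_VIDEO_OUTPUT,
		.memory = V4L2_MEMORY_MMAP,
	};
	ioctl(fd, VIDIOC_REQBUFS, &req);	/* mmap and queue buffers omitted */
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	ioctl(fd, VIDIOC_REQBUFS, &req);

	/* The codec only runs once both sides are streaming. */
	int type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	ioctl(fd, VIDIOC_STREAMON, &type);
	type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	ioctl(fd, VIDIOC_STREAMON, &type);
	return fd;
}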
@@ -493,7 +493,7 @@ and discussions on the V4L mailing list.</revremark>
 </partinfo>

 <title>Video for Linux Two API Specification</title>
-<subtitle>Revision 3.9</subtitle>
+<subtitle>Revision 3.10</subtitle>

 <chapter id="common">
 &sub-common;

@@ -2,7 +2,7 @@ Exynos4x12/Exynos5 SoC series camera host interface (FIMC-LITE)

 Required properties:

-- compatible : should be "samsung,exynos4212-fimc" for Exynos4212 and
+- compatible : should be "samsung,exynos4212-fimc-lite" for Exynos4212 and
   Exynos4412 SoCs;
 - reg : physical base address and size of the device memory mapped
   registers;

@@ -29,6 +29,8 @@ ALC269/270/275/276/280/282
 alc271-dmic Enable ALC271X digital mic workaround
 inv-dmic Inverted internal mic workaround
 lenovo-dock Enables docking station I/O for some Lenovos
+dell-headset-multi Headset jack, which can also be used as mic-in
+dell-headset-dock Headset jack (without mic-in), and also dock I/O

 ALC662/663/272
 ==============

@@ -42,6 +44,7 @@ ALC662/663/272
 asus-mode7 ASUS
 asus-mode8 ASUS
 inv-dmic Inverted internal mic workaround
+dell-headset-multi Headset jack, which can also be used as mic-in

 ALC680
 ======

Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Unicycling Gorilla

 # *DOCUMENTATION*
@@ -1189,6 +1189,16 @@ config PL310_ERRATA_588369
 is not correctly implemented in PL310 as clean lines are not
 invalidated as a result of these operations.

+config ARM_ERRATA_643719
+bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
+depends on CPU_V7 && SMP
+help
+This option enables the workaround for the 643719 Cortex-A9 (prior to
+r1p0) erratum. On affected cores the LoUIS bit field of the CLIDR
+register returns zero when it should return one. The workaround
+corrects this value, ensuring cache maintenance operations which use
+it behave as intended and avoiding data corruption.
+
 config ARM_ERRATA_720789
 bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
 depends on CPU_V7

@@ -2006,7 +2016,7 @@ config XIP_PHYS_ADDR

 config KEXEC
 bool "Kexec system call (EXPERIMENTAL)"
-depends on (!SMP || HOTPLUG_CPU)
+depends on (!SMP || PM_SLEEP_SMP)
 help
 kexec is a system call that implements the ability to shutdown your
 current kernel, and to start another kernel. It is like a reboot

@@ -116,7 +116,8 @@ targets := vmlinux vmlinux.lds \

 # Make sure files are removed during clean
 extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \
-lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs)
+lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \
+hyp-stub.S

 ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
@@ -763,7 +763,7 @@
 };
 };

-pinctrl@03680000 {
+pinctrl@03860000 {
 gpz: gpz {
 gpio-controller;
 #gpio-cells = <2>;

@@ -161,9 +161,9 @@
 interrupts = <0 50 0>;
 };

-pinctrl_3: pinctrl@03680000 {
+pinctrl_3: pinctrl@03860000 {
 compatible = "samsung,exynos5250-pinctrl";
-reg = <0x0368000 0x1000>;
+reg = <0x03860000 0x1000>;
 interrupts = <0 47 0>;
 };
@@ -320,9 +320,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 }

 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+extern void flush_kernel_dcache_page(struct page *);

 #define flush_dcache_mmap_lock(mapping) \
 spin_lock_irq(&(mapping)->tree_lock)

@@ -134,6 +134,10 @@ void machine_kexec(struct kimage *image)
 unsigned long reboot_code_buffer_phys;
 void *reboot_code_buffer;

+if (num_online_cpus() > 1) {
+pr_err("kexec: error: multiple CPUs still online\n");
+return;
+}
+
 page_list = image->head & PAGE_MASK;
@@ -184,30 +184,61 @@ int __init reboot_setup(char *str)

 __setup("reboot=", reboot_setup);

+/*
+ * Called by kexec, immediately prior to machine_kexec().
+ *
+ * This must completely disable all secondary CPUs; simply causing those CPUs
+ * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
+ * kexec'd kernel to use any and all RAM as it sees fit, without having to
+ * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
+ * functionality embodied in disable_nonboot_cpus() to achieve this.
+ */
 void machine_shutdown(void)
 {
-#ifdef CONFIG_SMP
-smp_send_stop();
-#endif
+disable_nonboot_cpus();
 }

+/*
+ * Halting simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this.
+ */
 void machine_halt(void)
 {
-machine_shutdown();
+smp_send_stop();

 local_irq_disable();
 while (1);
 }

+/*
+ * Power-off simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this. When the system power is turned off, it will take all CPUs
+ * with it.
+ */
 void machine_power_off(void)
 {
-machine_shutdown();
+smp_send_stop();

 if (pm_power_off)
 pm_power_off();
 }

+/*
+ * Restart requires that the secondary CPUs stop performing any activity
+ * while the primary CPU resets the system. Systems with a single CPU can
+ * use soft_restart() as their machine descriptor's .restart hook, since that
+ * will cause the only available CPU to reset. Systems with multiple CPUs must
+ * provide a HW restart implementation, to ensure that all CPUs reset at once.
+ * This is required so that any code running after reset on the primary CPU
+ * doesn't have to co-ordinate with other CPUs to ensure they aren't still
+ * executing pre-reset code, and using RAM that the primary CPU's code wishes
+ * to use. Implementing such co-ordination would be essentially impossible.
+ */
 void machine_restart(char *cmd)
 {
-machine_shutdown();
+smp_send_stop();

 arm_pm_restart(reboot_mode, cmd);
@@ -651,17 +651,6 @@ void smp_send_reschedule(int cpu)
 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }

-#ifdef CONFIG_HOTPLUG_CPU
-static void smp_kill_cpus(cpumask_t *mask)
-{
-unsigned int cpu;
-for_each_cpu(cpu, mask)
-platform_cpu_kill(cpu);
-}
-#else
-static void smp_kill_cpus(cpumask_t *mask) { }
-#endif
-
 void smp_send_stop(void)
 {
 unsigned long timeout;

@@ -679,8 +668,6 @@ void smp_send_stop(void)

 if (num_online_cpus() > 1)
 pr_warning("SMP: failed to stop secondary CPUs\n");

-smp_kill_cpus(&mask);
 }

 /*
@@ -92,6 +92,14 @@ ENTRY(v7_flush_dcache_louis)
 mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr
 ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr
 ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr
+#ifdef CONFIG_ARM_ERRATA_643719
+ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register
+ALT_UP(moveq pc, lr) @ LoUU is zero, so nothing to do
+ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p?
+biceq r2, r2, #0x0000000f @ clear minor revision number
+teqeq r2, r1 @ test for errata affected core and if so...
+orreqs r3, #(1 << 21) @ fix LoUIS value (and set flags state to 'ne')
+#endif
 ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2
 ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2
 moveq pc, lr @ return if level == 0
@@ -300,6 +300,39 @@ void flush_dcache_page(struct page *page)
 }
 EXPORT_SYMBOL(flush_dcache_page);

+/*
+ * Ensure cache coherency for the kernel mapping of this page. We can
+ * assume that the page is pinned via kmap.
+ *
+ * If the page only exists in the page cache and there are no user
+ * space mappings, this is a no-op since the page was already marked
+ * dirty at creation. Otherwise, we need to flush the dirty kernel
+ * cache lines directly.
+ */
+void flush_kernel_dcache_page(struct page *page)
+{
+if (cache_is_vivt() || cache_is_vipt_aliasing()) {
+struct address_space *mapping;
+
+mapping = page_mapping(page);
+
+if (!mapping || mapping_mapped(mapping)) {
+void *addr;
+
+addr = page_address(page);
+/*
+ * kmap_atomic() doesn't set the page virtual
+ * address for highmem pages, and
+ * kunmap_atomic() takes care of cache
+ * flushing already.
+ */
+if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
+__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+}
+}
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
 /*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data. The expected sequence is:
@@ -616,10 +616,12 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 } while (pte++, addr += PAGE_SIZE, addr != end);
 }

-static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
 unsigned long end, phys_addr_t phys,
 const struct mem_type *type)
 {
+pmd_t *p = pmd;
+
 #ifndef CONFIG_ARM_LPAE
 /*
 * In classic MMU format, puds and pmds are folded in to

@@ -638,7 +640,7 @@ static void __init map_init_section(pmd_t *pmd, unsigned long addr,
 phys += SECTION_SIZE;
 } while (pmd++, addr += SECTION_SIZE, addr != end);

-flush_pmd_entry(pmd);
+flush_pmd_entry(p);
 }

 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,

@@ -661,7 +663,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 */
 if (type->prot_sect &&
 ((addr | next | phys) & ~SECTION_MASK) == 0) {
-map_init_section(pmd, addr, next, phys, type);
+__map_init_section(pmd, addr, next, phys, type);
 } else {
 alloc_init_pte(pmd, addr, next,
 __phys_to_pfn(phys), type);
@@ -409,8 +409,8 @@ __v7_ca9mp_proc_info:
 */
 .type __v7_pj4b_proc_info, #object
 __v7_pj4b_proc_info:
-.long 0x562f5840
-.long 0xfffffff0
+.long 0x560f5800
+.long 0xff0fff00
 __v7_proc __v7_pj4b_setup
 .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info

@@ -1336,6 +1336,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
 return;
 }

+perf_callchain_store(entry, regs->pc);
 tail = (struct frame_tail __user *)regs->regs[29];

 while (entry->nr < PERF_MAX_STACK_DEPTH &&
@@ -11,6 +11,7 @@
 #define _ASM_IA64_IRQFLAGS_H

 #include <asm/pal.h>
+#include <asm/kregs.h>

 #ifdef CONFIG_IA64_DEBUG_IRQ
 extern unsigned long last_cli_ip;

@@ -2,6 +2,7 @@
 #define _ASM_METAG_HUGETLB_H

 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>


 static inline int is_hugepage_only_range(struct mm_struct *mm,

@@ -13,9 +13,8 @@
 #define _ASM_IRQFLAGS_H

 #include <asm/cpu-regs.h>
-#ifndef __ASSEMBLY__
-#include <linux/smp.h>
-#endif
+/* linux/smp.h <- linux/irqflags.h needs asm/smp.h first */
+#include <asm/smp.h>

 /*
 * interrupt control

@@ -24,6 +24,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/threads.h>
 #include <linux/cpumask.h>
+#include <linux/thread_info.h>
 #endif

 #ifdef CONFIG_SMP
@@ -85,7 +86,7 @@ extern cpumask_t cpu_boot_map;
 extern void smp_init_cpus(void);
 extern void smp_cache_interrupt(void);
 extern void send_IPI_allbutself(int irq);
-extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait);
+extern int smp_nmi_call_function(void (*func)(void *), void *info, int wait);

 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

@@ -100,6 +101,7 @@ extern void __cpu_die(unsigned int cpu);
 #ifndef __ASSEMBLY__

 static inline void smp_init_cpus(void) {}
+#define raw_smp_processor_id() 0

 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_SMP */

@@ -27,7 +27,7 @@ extern struct node_map_data node_data[];

 #define PFNNID_SHIFT (30 - PAGE_SHIFT)
 #define PFNNID_MAP_MAX 512 /* support 512GB */
-extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
+extern signed char pfnnid_map[PFNNID_MAP_MAX];

 #ifndef CONFIG_64BIT
 #define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))

@@ -46,7 +46,7 @@ static inline int pfn_to_nid(unsigned long pfn)
 i = pfn >> PFNNID_SHIFT;
 BUG_ON(i >= ARRAY_SIZE(pfnnid_map));

-return (int)pfnnid_map[i];
+return pfnnid_map[i];
 }

 static inline int pfn_valid(int pfn)
@@ -225,4 +225,9 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 return channel ? 15 : 14;
 }

+#define HAVE_PCI_MMAP
+
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+enum pci_mmap_state mmap_state, int write_combine);
+
 #endif /* __ASM_PARISC_PCI_H */

@@ -1205,6 +1205,7 @@ static struct hp_hardware hp_hardware_list[] = {
 {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"},
 {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"},
 {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"},
+{HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"},
 {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"},
 {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"},
 {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"},
@@ -860,7 +860,7 @@ ENTRY(flush_dcache_page_asm)
 #endif

 ldil L%dcache_stride, %r1
-ldw R%dcache_stride(%r1), %r1
+ldw R%dcache_stride(%r1), r31

 #ifdef CONFIG_64BIT
 depdi,z 1, 63-PAGE_SHIFT,1, %r25

@@ -868,26 +868,26 @@ ENTRY(flush_dcache_page_asm)
 depwi,z 1, 31-PAGE_SHIFT,1, %r25
 #endif
 add %r28, %r25, %r25
-sub %r25, %r1, %r25
+sub %r25, r31, %r25


-1: fdc,m %r1(%r28)
-fdc,m %r1(%r28)
-fdc,m %r1(%r28)
-fdc,m %r1(%r28)
-fdc,m %r1(%r28)
-fdc,m %r1(%r28)
-fdc,m %r1(%r28)
-fdc,m %r1(%r28)
-fdc,m %r1(%r28)
-fdc,m %r1(%r28)
-fdc,m %r1(%r28)
-fdc,m %r1(%r28)
-fdc,m %r1(%r28)
-fdc,m %r1(%r28)
-fdc,m %r1(%r28)
+1: fdc,m r31(%r28)
+fdc,m r31(%r28)
+fdc,m r31(%r28)
+fdc,m r31(%r28)
+fdc,m r31(%r28)
+fdc,m r31(%r28)
+fdc,m r31(%r28)
+fdc,m r31(%r28)
+fdc,m r31(%r28)
+fdc,m r31(%r28)
+fdc,m r31(%r28)
+fdc,m r31(%r28)
+fdc,m r31(%r28)
+fdc,m r31(%r28)
+fdc,m r31(%r28)
 cmpb,COND(<<) %r28, %r25,1b
-fdc,m %r1(%r28)
+fdc,m r31(%r28)

 sync

@@ -936,7 +936,7 @@ ENTRY(flush_icache_page_asm)
 #endif

 ldil L%icache_stride, %r1
-ldw R%icache_stride(%r1), %r1
+ldw R%icache_stride(%r1), %r31

 #ifdef CONFIG_64BIT
 depdi,z 1, 63-PAGE_SHIFT,1, %r25

@@ -944,28 +944,28 @@ ENTRY(flush_icache_page_asm)
 depwi,z 1, 31-PAGE_SHIFT,1, %r25
 #endif
 add %r28, %r25, %r25
-sub %r25, %r1, %r25
+sub %r25, %r31, %r25


 /* fic only has the type 26 form on PA1.1, requiring an
 * explicit space specification, so use %sr4 */
-1: fic,m %r1(%sr4,%r28)
-fic,m %r1(%sr4,%r28)
-fic,m %r1(%sr4,%r28)
-fic,m %r1(%sr4,%r28)
-fic,m %r1(%sr4,%r28)
-fic,m %r1(%sr4,%r28)
-fic,m %r1(%sr4,%r28)
-fic,m %r1(%sr4,%r28)
-fic,m %r1(%sr4,%r28)
-fic,m %r1(%sr4,%r28)
-fic,m %r1(%sr4,%r28)
-fic,m %r1(%sr4,%r28)
-fic,m %r1(%sr4,%r28)
-fic,m %r1(%sr4,%r28)
-fic,m %r1(%sr4,%r28)
+1: fic,m %r31(%sr4,%r28)
+fic,m %r31(%sr4,%r28)
+fic,m %r31(%sr4,%r28)
+fic,m %r31(%sr4,%r28)
+fic,m %r31(%sr4,%r28)
+fic,m %r31(%sr4,%r28)
+fic,m %r31(%sr4,%r28)
+fic,m %r31(%sr4,%r28)
+fic,m %r31(%sr4,%r28)
+fic,m %r31(%sr4,%r28)
+fic,m %r31(%sr4,%r28)
+fic,m %r31(%sr4,%r28)
+fic,m %r31(%sr4,%r28)
+fic,m %r31(%sr4,%r28)
+fic,m %r31(%sr4,%r28)
 cmpb,COND(<<) %r28, %r25,1b
-fic,m %r1(%sr4,%r28)
+fic,m %r31(%sr4,%r28)

 sync
@@ -220,6 +220,33 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 }


+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+enum pci_mmap_state mmap_state, int write_combine)
+{
+unsigned long prot;
+
+/*
+ * I/O space can be accessed via normal processor loads and stores on
+ * this platform but for now we elect not to do this and portable
+ * drivers should not do this anyway.
+ */
+if (mmap_state == pci_mmap_io)
+return -EINVAL;
+
+if (write_combine)
+return -EINVAL;
+
+/*
+ * Ignore write-combine; for now only return uncached mappings.
+ */
+prot = pgprot_val(vma->vm_page_prot);
+prot |= _PAGE_NO_CACHE;
+vma->vm_page_prot = __pgprot(prot);
+
+return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
 /*
 * A driver is enabling the device. We make sure that all the appropriate
 * bits are set to allow the device to operate as the driver is expecting.
@@ -47,7 +47,7 @@ pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pt

 #ifdef CONFIG_DISCONTIGMEM
 struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
-unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
+signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
 #endif

 static struct resource data_resource = {

@@ -673,7 +673,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 ret = s;
 goto out;
 }
-kvmppc_lazy_ee_enable();

 kvm_guest_enter();

@@ -699,6 +698,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 kvmppc_load_guest_fp(vcpu);
 #endif

+kvmppc_lazy_ee_enable();
+
 ret = __kvmppc_vcpu_run(kvm_run, vcpu);

 /* No need for kvm_guest_exit. It's done in handle_exit.
@@ -592,8 +592,14 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 do {
 pmd = pmd_offset(pud, addr);
 next = pmd_addr_end(addr, end);
-if (pmd_none_or_clear_bad(pmd))
+if (!is_hugepd(pmd)) {
+/*
+ * if it is not hugepd pointer, we should already find
+ * it cleared.
+ */
+WARN_ON(!pmd_none_or_clear_bad(pmd));
 continue;
+}
 #ifdef CONFIG_PPC_FSL_BOOK3E
 /*
 * Increment next by the size of the huge mapping since

@@ -6,6 +6,7 @@ generic-y += cputime.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += exec.h
+generic-y += linkage.h
 generic-y += local64.h
 generic-y += mutex.h
 generic-y += irq_regs.h
|
@ -135,7 +135,7 @@ static inline int sparc_leon3_cpuid(void)
|
|||||||
|
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
# define LEON3_IRQ_IPI_DEFAULT 13
|
# define LEON3_IRQ_IPI_DEFAULT 13
|
||||||
# define LEON3_IRQ_TICKER (leon3_ticker_irq)
|
# define LEON3_IRQ_TICKER (leon3_gptimer_irq)
|
||||||
# define LEON3_IRQ_CROSS_CALL 15
|
# define LEON3_IRQ_CROSS_CALL 15
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@ -47,6 +47,7 @@ struct amba_prom_registers {
|
|||||||
#define LEON3_GPTIMER_LD 4
|
#define LEON3_GPTIMER_LD 4
|
||||||
#define LEON3_GPTIMER_IRQEN 8
|
#define LEON3_GPTIMER_IRQEN 8
|
||||||
#define LEON3_GPTIMER_SEPIRQ 8
|
#define LEON3_GPTIMER_SEPIRQ 8
|
||||||
|
#define LEON3_GPTIMER_TIMERS 0x7
|
||||||
|
|
||||||
#define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */
|
#define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */
|
||||||
/* 0 = hold scalar and counter */
|
/* 0 = hold scalar and counter */
|
||||||
|
@ -1,6 +0,0 @@
|
|||||||
#ifndef __ASM_LINKAGE_H
|
|
||||||
#define __ASM_LINKAGE_H
|
|
||||||
|
|
||||||
/* Nothing to see here... */
|
|
||||||
|
|
||||||
#endif
|
|
@@ -843,7 +843,8 @@ void ldom_reboot(const char *boot_command)
 unsigned long len;

 strcpy(full_boot_str, "boot ");
-strcpy(full_boot_str + strlen("boot "), boot_command);
+strlcpy(full_boot_str + strlen("boot "), boot_command,
+sizeof(full_boot_str + strlen("boot ")));
 len = strlen(full_boot_str);

 if (reboot_data_supported) {

@@ -38,7 +38,6 @@ static DEFINE_SPINLOCK(leon_irq_lock);

 unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
 unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
-int leon3_ticker_irq; /* Timer ticker IRQ */
 unsigned int sparc_leon_eirq;
 #define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
 #define LEON_IACK (&leon3_irqctrl_regs->iclear)

@@ -278,6 +277,9 @@ irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)

 leon_clear_profile_irq(cpu);

+if (cpu == boot_cpu_id)
+timer_interrupt(irq, NULL);
+
 ce = &per_cpu(sparc32_clockevent, cpu);

 irq_enter();
@@ -299,6 +301,7 @@ void __init leon_init_timers(void)
 int icsel;
 int ampopts;
 int err;
+u32 config;

 sparc_config.get_cycles_offset = leon_cycles_offset;
 sparc_config.cs_period = 1000000 / HZ;

@@ -377,23 +380,6 @@
 LEON3_BYPASS_STORE_PA(
 &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);

-#ifdef CONFIG_SMP
-leon3_ticker_irq = leon3_gptimer_irq + 1 + leon3_gptimer_idx;
-
-if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) &
-(1<<LEON3_GPTIMER_SEPIRQ))) {
-printk(KERN_ERR "timer not configured with separate irqs\n");
-BUG();
-}
-
-LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].val,
-0);
-LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld,
-(((1000000/HZ) - 1)));
-LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
-0);
-#endif
-
 /*
 * The IRQ controller may (if implemented) consist of multiple
 * IRQ controllers, each mapped on a 4Kb boundary.

@@ -416,13 +402,6 @@ void __init leon_init_timers(void)
 if (eirq != 0)
 leon_eirq_setup(eirq);

-irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx);
-err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
-if (err) {
-printk(KERN_ERR "unable to attach timer IRQ%d\n", irq);
-prom_halt();
-}
-
 #ifdef CONFIG_SMP
 {
 unsigned long flags;

@@ -439,30 +418,31 @@ void __init leon_init_timers(void)
 }
 #endif

+config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config);
+if (config & (1 << LEON3_GPTIMER_SEPIRQ))
+leon3_gptimer_irq += leon3_gptimer_idx;
+else if ((config & LEON3_GPTIMER_TIMERS) > 1)
+pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n");
+
+#ifdef CONFIG_SMP
+/* Install per-cpu IRQ handler for broadcasted ticker */
+irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq,
+"per-cpu", 0);
+err = request_irq(irq, leon_percpu_timer_ce_interrupt,
+IRQF_PERCPU | IRQF_TIMER, "timer", NULL);
+#else
+irq = _leon_build_device_irq(NULL, leon3_gptimer_irq);
+err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
+#endif
+if (err) {
+pr_err("Unable to attach timer IRQ%d\n", irq);
+prom_halt();
+}
 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
 LEON3_GPTIMER_EN |
 LEON3_GPTIMER_RL |
 LEON3_GPTIMER_LD |
 LEON3_GPTIMER_IRQEN);

-#ifdef CONFIG_SMP
-/* Install per-cpu IRQ handler for broadcasted ticker */
-irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq,
-"per-cpu", 0);
-err = request_irq(irq, leon_percpu_timer_ce_interrupt,
-IRQF_PERCPU | IRQF_TIMER, "ticker",
-NULL);
-if (err) {
-printk(KERN_ERR "unable to attach ticker IRQ%d\n", irq);
-prom_halt();
-}
-
-LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
-LEON3_GPTIMER_EN |
-LEON3_GPTIMER_RL |
-LEON3_GPTIMER_LD |
-LEON3_GPTIMER_IRQEN);
-#endif
 return;
 bad:
 printk(KERN_ERR "No Timer/irqctrl found\n");
@@ -536,11 +536,9 @@ static int grpci1_of_probe(struct platform_device *ofdev)

 /* find device register base address */
 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
-regs = devm_request_and_ioremap(&ofdev->dev, res);
-if (!regs) {
-dev_err(&ofdev->dev, "io-regs mapping failed\n");
-return -EADDRNOTAVAIL;
-}
+regs = devm_ioremap_resource(&ofdev->dev, res);
+if (IS_ERR(regs))
+return PTR_ERR(regs);

 /*
 * check that we're in Host Slot and that we can act as a Host Bridge

@@ -47,6 +47,10 @@ void pmc_leon_idle_fixup(void)
 * MMU does not get a TLB miss here by using the MMU BYPASS ASI.
 */
 register unsigned int address = (unsigned int)leon3_irqctrl_regs;
+
+/* Interrupts need to be enabled to not hang the CPU */
+local_irq_enable();
+
 __asm__ __volatile__ (
 "wr %%g0, %%asr19\n"
 "lda [%0] %1, %%g0\n"

@@ -60,6 +64,9 @@ void pmc_leon_idle_fixup(void)
 */
 void pmc_leon_idle(void)
 {
+/* Interrupts need to be enabled to not hang the CPU */
+local_irq_enable();
+
 /* For systems without power-down, this will be no-op */
 __asm__ __volatile__ ("wr %g0, %asr19\n\t");
 }
@@ -304,7 +304,7 @@ void __init setup_arch(char **cmdline_p)

 /* Initialize PROM console and command line. */
 *cmdline_p = prom_getbootargs();
-strcpy(boot_command_line, *cmdline_p);
+strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
 parse_early_param();

 boot_flags_init(*cmdline_p);

@@ -555,7 +555,7 @@ void __init setup_arch(char **cmdline_p)
 {
 /* Initialize PROM console and command line. */
 *cmdline_p = prom_getbootargs();
-strcpy(boot_command_line, *cmdline_p);
+strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
 parse_early_param();

 boot_flags_init(*cmdline_p);

@@ -1098,7 +1098,14 @@ static int __init grab_mblocks(struct mdesc_handle *md)
 m->size = *val;
 val = mdesc_get_property(md, node,
 "address-congruence-offset", NULL);
-m->offset = *val;
+
+/* The address-congruence-offset property is optional.
+ * Explicity zero it be identifty this.
+ */
+if (val)
+m->offset = *val;
+else
+m->offset = 0UL;

 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
 count - 1, m->base, m->size, m->offset);

@@ -85,8 +85,8 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 }

 if (!tb->active) {
-global_flush_tlb_page(mm, vaddr);
 flush_tsb_user_page(mm, vaddr);
+global_flush_tlb_page(mm, vaddr);
 goto out;
 }
@@ -23,23 +23,25 @@ prom_getbootargs(void)
 return barg_buf;
 }

-switch(prom_vers) {
+switch (prom_vers) {
 case PROM_V0:
 cp = barg_buf;
 /* Start from 1 and go over fd(0,0,0)kernel */
-for(iter = 1; iter < 8; iter++) {
+for (iter = 1; iter < 8; iter++) {
 arg = (*(romvec->pv_v0bootargs))->argv[iter];
 if (arg == NULL)
 break;
-while(*arg != 0) {
+while (*arg != 0) {
 /* Leave place for space and null. */
-if(cp >= barg_buf + BARG_LEN-2){
+if (cp >= barg_buf + BARG_LEN - 2)
 /* We might issue a warning here. */
 break;
-}
 *cp++ = *arg++;
 }
 *cp++ = ' ';
+if (cp >= barg_buf + BARG_LEN - 1)
+/* We might issue a warning here. */
+break;
 }
 *cp = 0;
 break;
@@ -39,7 +39,7 @@ inline phandle __prom_getchild(phandle node)
 return prom_node_to_node("child", node);
 }

-inline phandle prom_getchild(phandle node)
+phandle prom_getchild(phandle node)
 {
 phandle cnode;

@@ -72,7 +72,7 @@ inline phandle __prom_getsibling(phandle node)
 return prom_node_to_node(prom_peer_name, node);
 }

-inline phandle prom_getsibling(phandle node)
+phandle prom_getsibling(phandle node)
 {
 phandle sibnode;

@@ -89,7 +89,7 @@ EXPORT_SYMBOL(prom_getsibling);
 /* Return the length in bytes of property 'prop' at node 'node'.
 * Return -1 on error.
 */
-inline int prom_getproplen(phandle node, const char *prop)
+int prom_getproplen(phandle node, const char *prop)
 {
 unsigned long args[6];

@@ -113,8 +113,8 @@ EXPORT_SYMBOL(prom_getproplen);
 * 'buffer' which has a size of 'bufsize'. If the acquisition
 * was successful the length will be returned, else -1 is returned.
 */
-inline int prom_getproperty(phandle node, const char *prop,
+int prom_getproperty(phandle node, const char *prop,
 char *buffer, int bufsize)
 {
 unsigned long args[8];
 int plen;

@@ -141,7 +141,7 @@ EXPORT_SYMBOL(prom_getproperty);
 /* Acquire an integer property and return its value. Returns -1
 * on failure.
 */
-inline int prom_getint(phandle node, const char *prop)
+int prom_getint(phandle node, const char *prop)
 {
 int intprop;

@@ -235,7 +235,7 @@ static const char *prom_nextprop_name = "nextprop";
 /* Return the first property type for node 'node'.
 * buffer should be at least 32B in length
 */
-inline char *prom_firstprop(phandle node, char *buffer)
+char *prom_firstprop(phandle node, char *buffer)
 {
 unsigned long args[7];

@@ -261,7 +261,7 @@ EXPORT_SYMBOL(prom_firstprop);
 * at node 'node' . Returns NULL string if no more
 * property types for this node.
 */
-inline char *prom_nextprop(phandle node, const char *oprop, char *buffer)
+char *prom_nextprop(phandle node, const char *oprop, char *buffer)
 {
 unsigned long args[7];
 char buf[32];
@@ -84,4 +84,6 @@ uint64_t __ashrdi3(uint64_t, unsigned int);
 EXPORT_SYMBOL(__ashrdi3);
 uint64_t __ashldi3(uint64_t, unsigned int);
 EXPORT_SYMBOL(__ashldi3);
+int __ffsdi2(uint64_t);
+EXPORT_SYMBOL(__ffsdi2);
 #endif

@@ -147,7 +147,7 @@ void mconsole_proc(struct mc_request *req)
 }

 do {
-loff_t pos;
+loff_t pos = file->f_pos;
 mm_segment_t old_fs = get_fs();
 set_fs(KERNEL_DS);
 len = vfs_read(file, buf, PAGE_SIZE - 1, &pos);

@@ -2265,6 +2265,7 @@ source "fs/Kconfig.binfmt"
 config IA32_EMULATION
 bool "IA32 Emulation"
 depends on X86_64
+select BINFMT_ELF
 select COMPAT_BINFMT_ELF
 select HAVE_UID16
 ---help---
|
@ -2681,56 +2681,68 @@ ENTRY(aesni_xts_crypt8)
|
|||||||
addq %rcx, KEYP
|
addq %rcx, KEYP
|
||||||
|
|
||||||
movdqa IV, STATE1
|
movdqa IV, STATE1
|
||||||
pxor 0x00(INP), STATE1
|
movdqu 0x00(INP), INC
|
||||||
|
pxor INC, STATE1
|
||||||
movdqu IV, 0x00(OUTP)
|
movdqu IV, 0x00(OUTP)
|
||||||
|
|
||||||
_aesni_gf128mul_x_ble()
|
_aesni_gf128mul_x_ble()
|
||||||
movdqa IV, STATE2
|
movdqa IV, STATE2
|
||||||
pxor 0x10(INP), STATE2
|
movdqu 0x10(INP), INC
|
||||||
|
pxor INC, STATE2
|
||||||
movdqu IV, 0x10(OUTP)
|
movdqu IV, 0x10(OUTP)
|
||||||
|
|
||||||
_aesni_gf128mul_x_ble()
|
_aesni_gf128mul_x_ble()
|
||||||
movdqa IV, STATE3
|
movdqa IV, STATE3
|
||||||
pxor 0x20(INP), STATE3
|
movdqu 0x20(INP), INC
|
||||||
|
pxor INC, STATE3
|
||||||
movdqu IV, 0x20(OUTP)
|
movdqu IV, 0x20(OUTP)
|
||||||
|
|
||||||
_aesni_gf128mul_x_ble()
|
_aesni_gf128mul_x_ble()
|
||||||
movdqa IV, STATE4
|
movdqa IV, STATE4
|
||||||
pxor 0x30(INP), STATE4
|
movdqu 0x30(INP), INC
|
||||||
|
pxor INC, STATE4
|
||||||
movdqu IV, 0x30(OUTP)
|
movdqu IV, 0x30(OUTP)
|
||||||
|
|
||||||
call *%r11
|
call *%r11
|
||||||
|
|
||||||
pxor 0x00(OUTP), STATE1
|
movdqu 0x00(OUTP), INC
|
||||||
|
pxor INC, STATE1
|
||||||
movdqu STATE1, 0x00(OUTP)
|
movdqu STATE1, 0x00(OUTP)
|
||||||
|
|
||||||
_aesni_gf128mul_x_ble()
|
_aesni_gf128mul_x_ble()
|
||||||
movdqa IV, STATE1
|
movdqa IV, STATE1
|
||||||
pxor 0x40(INP), STATE1
|
movdqu 0x40(INP), INC
|
||||||
|
pxor INC, STATE1
|
||||||
movdqu IV, 0x40(OUTP)
|
movdqu IV, 0x40(OUTP)
|
||||||
|
|
||||||
pxor 0x10(OUTP), STATE2
|
movdqu 0x10(OUTP), INC
|
||||||
|
pxor INC, STATE2
|
||||||
movdqu STATE2, 0x10(OUTP)
|
movdqu STATE2, 0x10(OUTP)
|
||||||
|
|
||||||
_aesni_gf128mul_x_ble()
|
_aesni_gf128mul_x_ble()
|
||||||
movdqa IV, STATE2
|
movdqa IV, STATE2
|
||||||
pxor 0x50(INP), STATE2
|
movdqu 0x50(INP), INC
|
||||||
|
pxor INC, STATE2
|
||||||
movdqu IV, 0x50(OUTP)
|
movdqu IV, 0x50(OUTP)
|
||||||
|
|
||||||
pxor 0x20(OUTP), STATE3
|
movdqu 0x20(OUTP), INC
|
||||||
|
pxor INC, STATE3
|
||||||
movdqu STATE3, 0x20(OUTP)
|
movdqu STATE3, 0x20(OUTP)
|
||||||
|
|
||||||
_aesni_gf128mul_x_ble()
|
_aesni_gf128mul_x_ble()
|
||||||
movdqa IV, STATE3
|
movdqa IV, STATE3
|
||||||
pxor 0x60(INP), STATE3
|
movdqu 0x60(INP), INC
|
||||||
|
pxor INC, STATE3
|
||||||
movdqu IV, 0x60(OUTP)
|
movdqu IV, 0x60(OUTP)
|
||||||
|
|
||||||
pxor 0x30(OUTP), STATE4
|
movdqu 0x30(OUTP), INC
|
||||||
|
pxor INC, STATE4
|
||||||
movdqu STATE4, 0x30(OUTP)
|
movdqu STATE4, 0x30(OUTP)
|
||||||
|
|
||||||
_aesni_gf128mul_x_ble()
|
_aesni_gf128mul_x_ble()
|
||||||
movdqa IV, STATE4
|
movdqa IV, STATE4
|
||||||
pxor 0x70(INP), STATE4
|
movdqu 0x70(INP), INC
|
||||||
|
pxor INC, STATE4
|
||||||
movdqu IV, 0x70(OUTP)
|
movdqu IV, 0x70(OUTP)
|
||||||
|
|
||||||
_aesni_gf128mul_x_ble()
|
_aesni_gf128mul_x_ble()
|
||||||
@ -2738,16 +2750,20 @@ ENTRY(aesni_xts_crypt8)
|
|||||||
|
|
||||||
call *%r11
|
call *%r11
|
||||||
|
|
||||||
pxor 0x40(OUTP), STATE1
|
movdqu 0x40(OUTP), INC
|
||||||
|
pxor INC, STATE1
|
||||||
movdqu STATE1, 0x40(OUTP)
|
movdqu STATE1, 0x40(OUTP)
|
||||||
|
|
||||||
pxor 0x50(OUTP), STATE2
|
movdqu 0x50(OUTP), INC
|
||||||
|
pxor INC, STATE2
|
||||||
movdqu STATE2, 0x50(OUTP)
|
movdqu STATE2, 0x50(OUTP)
|
||||||
|
|
||||||
pxor 0x60(OUTP), STATE3
|
movdqu 0x60(OUTP), INC
|
||||||
|
pxor INC, STATE3
|
||||||
movdqu STATE3, 0x60(OUTP)
|
movdqu STATE3, 0x60(OUTP)
|
||||||
|
|
||||||
pxor 0x70(OUTP), STATE4
|
movdqu 0x70(OUTP), INC
|
||||||
|
pxor INC, STATE4
|
||||||
movdqu STATE4, 0x70(OUTP)
|
movdqu STATE4, 0x70(OUTP)
|
||||||
|
|
||||||
ret
|
ret
|
||||||
|
@@ -192,7 +192,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
 /* struct user */
 DUMP_WRITE(&dump, sizeof(dump));
 /* Now dump all of the user data. Include malloced stuff as well */
-DUMP_SEEK(PAGE_SIZE);
+DUMP_SEEK(PAGE_SIZE - sizeof(dump));
 /* now we start writing out the user space info */
 set_fs(USER_DS);
 /* Dump the data area */

@@ -41,4 +41,9 @@ extern int vector_used_by_percpu_irq(unsigned int vector);

 extern void init_ISA_irqs(void);

+#ifdef CONFIG_X86_LOCAL_APIC
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+#endif
+
 #endif /* _ASM_X86_IRQ_H */

@@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {}
 #ifdef CONFIG_MICROCODE_EARLY
 #define MAX_UCODE_COUNT 128
 extern void __init load_ucode_bsp(void);
-extern __init void load_ucode_ap(void);
+extern void __cpuinit load_ucode_ap(void);
 extern int __init save_microcode_in_initrd(void);
 #else
 static inline void __init load_ucode_bsp(void) {}
-static inline __init void load_ucode_ap(void) {}
+static inline void __cpuinit load_ucode_ap(void) {}
 static inline int __init save_microcode_in_initrd(void)
 {
 return 0;
@@ -18,9 +18,7 @@ extern int proc_nmi_enabled(struct ctl_table *, int ,
 void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;

-void arch_trigger_all_cpu_backtrace(void);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
-#endif
+#endif /* CONFIG_X86_LOCAL_APIC */

 #define NMI_FLAG_FIRST 1

@@ -9,6 +9,7 @@
 *
 */
 #include <asm/apic.h>
+#include <asm/nmi.h>

 #include <linux/cpumask.h>
 #include <linux/kdebug.h>
@@ -714,15 +714,15 @@ int __init mtrr_cleanup(unsigned address_bits)
 if (mtrr_tom2)
 x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;

-nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size);
 /*
 * [0, 1M) should always be covered by var mtrr with WB
 * and fixed mtrrs should take effect before var mtrr for it:
 */
-nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0,
+nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0,
 1ULL<<(20 - PAGE_SHIFT));
-/* Sort the ranges: */
-sort_range(range, nr_range);
+/* add from var mtrr at last */
+nr_range = x86_get_mtrr_mem_range(range, nr_range,
+x_remove_base, x_remove_size);

 range_sums = sum_ranges(range, nr_range);
 printk(KERN_INFO "total RAM covered: %ldM\n",

@@ -165,13 +165,13 @@ static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
-INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 EVENT_EXTRA_END
 };

 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
+INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 EVENT_EXTRA_END
 };
@ -242,6 +242,7 @@ void __init kvmclock_init(void)
|
|||||||
if (!mem)
|
if (!mem)
|
||||||
return;
|
return;
|
||||||
hv_clock = __va(mem);
|
hv_clock = __va(mem);
|
||||||
|
memset(hv_clock, 0, size);
|
||||||
|
|
||||||
if (kvm_register_clock("boot clock")) {
|
if (kvm_register_clock("boot clock")) {
|
||||||
hv_clock = NULL;
|
hv_clock = NULL;
|
||||||
|
@ -277,18 +277,6 @@ void exit_idle(void)
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
void arch_cpu_idle_prepare(void)
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
* If we're the non-boot CPU, nothing set the stack canary up
|
|
||||||
* for us. CPU0 already has it initialized but no harm in
|
|
||||||
* doing it again. This is a good place for updating it, as
|
|
||||||
* we wont ever return from this function (so the invalid
|
|
||||||
* canaries already on the stack wont ever trigger).
|
|
||||||
*/
|
|
||||||
boot_init_stack_canary();
|
|
||||||
}
|
|
||||||
|
|
||||||
void arch_cpu_idle_enter(void)
|
void arch_cpu_idle_enter(void)
|
||||||
{
|
{
|
||||||
local_touch_nmi();
|
local_touch_nmi();
|
||||||
|
@ -372,15 +372,15 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
|
|||||||
|
|
||||||
void __cpuinit set_cpu_sibling_map(int cpu)
|
void __cpuinit set_cpu_sibling_map(int cpu)
|
||||||
{
|
{
|
||||||
bool has_mc = boot_cpu_data.x86_max_cores > 1;
|
|
||||||
bool has_smt = smp_num_siblings > 1;
|
bool has_smt = smp_num_siblings > 1;
|
||||||
|
bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
|
||||||
struct cpuinfo_x86 *c = &cpu_data(cpu);
|
struct cpuinfo_x86 *c = &cpu_data(cpu);
|
||||||
struct cpuinfo_x86 *o;
|
struct cpuinfo_x86 *o;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
|
cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
|
||||||
|
|
||||||
if (!has_smt && !has_mc) {
|
if (!has_mp) {
|
||||||
cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
|
cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
|
||||||
cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
|
cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
|
||||||
cpumask_set_cpu(cpu, cpu_core_mask(cpu));
|
cpumask_set_cpu(cpu, cpu_core_mask(cpu));
|
||||||
@ -394,7 +394,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
|
|||||||
if ((i == cpu) || (has_smt && match_smt(c, o)))
|
if ((i == cpu) || (has_smt && match_smt(c, o)))
|
||||||
link_mask(sibling, cpu, i);
|
link_mask(sibling, cpu, i);
|
||||||
|
|
||||||
if ((i == cpu) || (has_mc && match_llc(c, o)))
|
if ((i == cpu) || (has_mp && match_llc(c, o)))
|
||||||
link_mask(llc_shared, cpu, i);
|
link_mask(llc_shared, cpu, i);
|
||||||
|
|
||||||
}
|
}
|
||||||
@ -406,7 +406,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
|
|||||||
for_each_cpu(i, cpu_sibling_setup_mask) {
|
for_each_cpu(i, cpu_sibling_setup_mask) {
|
||||||
o = &cpu_data(i);
|
o = &cpu_data(i);
|
||||||
|
|
||||||
if ((i == cpu) || (has_mc && match_mc(c, o))) {
|
if ((i == cpu) || (has_mp && match_mc(c, o))) {
|
||||||
link_mask(core, cpu, i);
|
link_mask(core, cpu, i);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -582,8 +582,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
|
|||||||
if (index != XCR_XFEATURE_ENABLED_MASK)
|
if (index != XCR_XFEATURE_ENABLED_MASK)
|
||||||
return 1;
|
return 1;
|
||||||
xcr0 = xcr;
|
xcr0 = xcr;
|
||||||
if (kvm_x86_ops->get_cpl(vcpu) != 0)
|
|
||||||
return 1;
|
|
||||||
if (!(xcr0 & XSTATE_FP))
|
if (!(xcr0 & XSTATE_FP))
|
||||||
return 1;
|
return 1;
|
||||||
if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
|
if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
|
||||||
@ -597,7 +595,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
|
|||||||
|
|
||||||
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
|
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
|
||||||
{
|
{
|
||||||
if (__kvm_set_xcr(vcpu, index, xcr)) {
|
if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
|
||||||
|
__kvm_set_xcr(vcpu, index, xcr)) {
|
||||||
kvm_inject_gp(vcpu, 0);
|
kvm_inject_gp(vcpu, 0);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
@ -1069,7 +1069,10 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
|
|||||||
* that by attempting to use more space than is available.
|
* that by attempting to use more space than is available.
|
||||||
*/
|
*/
|
||||||
unsigned long dummy_size = remaining_size + 1024;
|
unsigned long dummy_size = remaining_size + 1024;
|
||||||
void *dummy = kmalloc(dummy_size, GFP_ATOMIC);
|
void *dummy = kzalloc(dummy_size, GFP_ATOMIC);
|
||||||
|
|
||||||
|
if (!dummy)
|
||||||
|
return EFI_OUT_OF_RESOURCES;
|
||||||
|
|
||||||
status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
|
status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
|
||||||
EFI_VARIABLE_NON_VOLATILE |
|
EFI_VARIABLE_NON_VOLATILE |
|
||||||
@ -1089,6 +1092,8 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
|
|||||||
0, dummy);
|
0, dummy);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
kfree(dummy);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The runtime code may now have triggered a garbage collection
|
* The runtime code may now have triggered a garbage collection
|
||||||
* run, so check the variable info again
|
* run, so check the variable info again
|
||||||
|
@@ -164,15 +164,24 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
 if (dev_desc->clk_required) {
 ret = register_device_clock(adev, pdata);
 if (ret) {
-/*
-* Skip the device, but don't terminate the namespace
-* scan.
-*/
-kfree(pdata);
-return 0;
+/* Skip the device, but continue the namespace scan. */
+ret = 0;
+goto err_out;
 }
 }
 
+/*
+* This works around a known issue in ACPI tables where LPSS devices
+* have _PS0 and _PS3 without _PSC (and no power resources), so
+* acpi_bus_init_power() will assume that the BIOS has put them into D0.
+*/
+ret = acpi_device_fix_up_power(adev);
+if (ret) {
+/* Skip the device, but continue the namespace scan. */
+ret = 0;
+goto err_out;
+}
+
 adev->driver_data = pdata;
 ret = acpi_create_platform_device(adev, id);
 if (ret > 0)
@@ -290,6 +290,26 @@ int acpi_bus_init_power(struct acpi_device *device)
 return 0;
 }
 
+/**
+* acpi_device_fix_up_power - Force device with missing _PSC into D0.
+* @device: Device object whose power state is to be fixed up.
+*
+* Devices without power resources and _PSC, but having _PS0 and _PS3 defined,
+* are assumed to be put into D0 by the BIOS. However, in some cases that may
+* not be the case and this function should be used then.
+*/
+int acpi_device_fix_up_power(struct acpi_device *device)
+{
+int ret = 0;
+
+if (!device->power.flags.power_resources
+&& !device->power.flags.explicit_get
+&& device->power.state == ACPI_STATE_D0)
+ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0);
+
+return ret;
+}
+
 int acpi_bus_update_power(acpi_handle handle, int *state_p)
 {
 struct acpi_device *device;
@@ -868,8 +868,10 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
 if (!count)
 return -EINVAL;
 
+acpi_scan_lock_acquire();
 begin_undock(dock_station);
 ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST);
+acpi_scan_lock_release();
 return ret ? ret: count;
 }
 static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
@@ -885,6 +885,7 @@ int acpi_add_power_resource(acpi_handle handle)
 ACPI_STA_DEFAULT);
 mutex_init(&resource->resource_lock);
 INIT_LIST_HEAD(&resource->dependent);
+INIT_LIST_HEAD(&resource->list_node);
 resource->name = device->pnp.bus_id;
 strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
 strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
@@ -304,7 +304,8 @@ static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi)
 }
 
 static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
-u8 triggering, u8 polarity, u8 shareable)
+u8 triggering, u8 polarity, u8 shareable,
+bool legacy)
 {
 int irq, p, t;
 
@@ -317,14 +318,19 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
 * In IO-APIC mode, use overrided attribute. Two reasons:
 * 1. BIOS bug in DSDT
 * 2. BIOS uses IO-APIC mode Interrupt Source Override
+*
+* We do this only if we are dealing with IRQ() or IRQNoFlags()
+* resource (the legacy ISA resources). With modern ACPI 5 devices
+* using extended IRQ descriptors we take the IRQ configuration
+* from _CRS directly.
 */
-if (!acpi_get_override_irq(gsi, &t, &p)) {
+if (legacy && !acpi_get_override_irq(gsi, &t, &p)) {
 u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
 u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
 
 if (triggering != trig || polarity != pol) {
 pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi,
-t ? "edge" : "level", p ? "low" : "high");
+t ? "level" : "edge", p ? "low" : "high");
 triggering = trig;
 polarity = pol;
 }
@@ -373,7 +379,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
 }
 acpi_dev_get_irqresource(res, irq->interrupts[index],
 irq->triggering, irq->polarity,
-irq->sharable);
+irq->sharable, true);
 break;
 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
 ext_irq = &ares->data.extended_irq;
@@ -383,7 +389,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
 }
 acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
 ext_irq->triggering, ext_irq->polarity,
-ext_irq->sharable);
+ext_irq->sharable, false);
 break;
 default:
 return false;
@@ -450,8 +450,18 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
 {
 struct firmware_buf *buf = fw_priv->buf;
 
+/*
+* There is a small window in which user can write to 'loading'
+* between loading done and disappearance of 'loading'
+*/
+if (test_bit(FW_STATUS_DONE, &buf->status))
+return;
+
 set_bit(FW_STATUS_ABORT, &buf->status);
 complete_all(&buf->completion);
+
+/* avoid user action after loading abort */
+fw_priv->buf = NULL;
 }
 
 #define is_fw_load_aborted(buf) \
@@ -528,7 +538,12 @@ static ssize_t firmware_loading_show(struct device *dev,
 struct device_attribute *attr, char *buf)
 {
 struct firmware_priv *fw_priv = to_firmware_priv(dev);
-int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+int loading = 0;
 
+mutex_lock(&fw_lock);
+if (fw_priv->buf)
+loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+mutex_unlock(&fw_lock);
+
 return sprintf(buf, "%d\n", loading);
 }
@@ -570,12 +585,12 @@ static ssize_t firmware_loading_store(struct device *dev,
 const char *buf, size_t count)
 {
 struct firmware_priv *fw_priv = to_firmware_priv(dev);
-struct firmware_buf *fw_buf = fw_priv->buf;
+struct firmware_buf *fw_buf;
 int loading = simple_strtol(buf, NULL, 10);
 int i;
 
 mutex_lock(&fw_lock);
+fw_buf = fw_priv->buf;
 if (!fw_buf)
 goto out;
 
@@ -777,10 +792,6 @@ static void firmware_class_timeout_work(struct work_struct *work)
 struct firmware_priv, timeout_work.work);
 
 mutex_lock(&fw_lock);
-if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) {
-mutex_unlock(&fw_lock);
-return;
-}
 fw_load_abort(fw_priv);
 mutex_unlock(&fw_lock);
 }
@@ -861,8 +872,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
 
 cancel_delayed_work_sync(&fw_priv->timeout_work);
 
-fw_priv->buf = NULL;
-
 device_remove_file(f_dev, &dev_attr_loading);
 err_del_bin_attr:
 device_remove_bin_file(f_dev, &firmware_attr_data);
@@ -1036,12 +1036,16 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
 char *name;
 u64 segment;
 int ret;
+char *name_format;
 
 name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
 if (!name)
 return NULL;
 segment = offset >> rbd_dev->header.obj_order;
-ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
+name_format = "%s.%012llx";
+if (rbd_dev->image_format == 2)
+name_format = "%s.%016llx";
+ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
 rbd_dev->header.object_prefix, segment);
 if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
 pr_err("error formatting segment name for #%llu (%d)\n",
@@ -1955,6 +1955,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 /* XXX the notifier code should handle this better */
 if (!cn->notifier_head.head) {
 srcu_cleanup_notifier_head(&cn->notifier_head);
+list_del(&cn->node);
 kfree(cn);
 }
 
@@ -155,7 +155,7 @@ static __initdata unsigned long exynos5250_clk_regs[] = {
 
 /* list of all parent clock list */
 PNAME(mout_apll_p) = { "fin_pll", "fout_apll", };
-PNAME(mout_cpu_p) = { "mout_apll", "mout_mpll", };
+PNAME(mout_cpu_p) = { "mout_apll", "sclk_mpll", };
 PNAME(mout_mpll_fout_p) = { "fout_mplldiv2", "fout_mpll" };
 PNAME(mout_mpll_p) = { "fin_pll", "mout_mpll_fout" };
 PNAME(mout_bpll_fout_p) = { "fout_bplldiv2", "fout_bpll" };
@@ -208,10 +208,10 @@ struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
 };
 
 struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
-MUX(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1),
-MUX(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1),
+MUX_A(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, "mout_apll"),
+MUX_A(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
 MUX(none, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1),
-MUX(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1),
+MUX_A(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
 MUX(none, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1),
 MUX(none, "sclk_bpll", mout_bpll_p, SRC_CDREX, 0, 1),
 MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
@@ -378,7 +378,7 @@ struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
 GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
 GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
 GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
-GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, 0, 0),
+GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
 GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
 GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
 GATE(tzpc2, "tzpc2", "aclk66", GATE_IP_PERIS, 8, 0, 0),
@@ -111,7 +111,8 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
 unsigned long parent_rate)
 {
 struct samsung_clk_pll36xx *pll = to_clk_pll36xx(hw);
-u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1;
+u32 mdiv, pdiv, sdiv, pll_con0, pll_con1;
+s16 kdiv;
 u64 fvco = parent_rate;
 
 pll_con0 = __raw_readl(pll->con_reg);
@@ -119,7 +120,7 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
 mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK;
 pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK;
 sdiv = (pll_con0 >> PLL36XX_SDIV_SHIFT) & PLL36XX_SDIV_MASK;
-kdiv = pll_con1 & PLL36XX_KDIV_MASK;
+kdiv = (s16)(pll_con1 & PLL36XX_KDIV_MASK);
 
 fvco *= (mdiv << 16) + kdiv;
 do_div(fvco, (pdiv << sdiv));
@@ -369,7 +369,7 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
 clk_register_clkdev(clk, NULL, "60100000.serial");
 }
 #else
-static inline void spear320_clk_init(void) { }
+static inline void spear320_clk_init(void __iomem *soc_config_base) { }
 #endif
 
 void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base)
@@ -1598,6 +1598,12 @@ static void __init tegra30_periph_clk_init(void)
 clk_register_clkdev(clk, "afi", "tegra-pcie");
 clks[afi] = clk;
 
+/* pciex */
+clk = tegra_clk_register_periph_gate("pciex", "pll_e", 0, clk_base, 0,
+74, &periph_u_regs, periph_clk_enb_refcnt);
+clk_register_clkdev(clk, "pciex", "tegra-pcie");
+clks[pciex] = clk;
+
 /* kfuse */
 clk = tegra_clk_register_periph_gate("kfuse", "clk_m",
 TEGRA_PERIPH_ON_APB,
@@ -1716,11 +1722,6 @@ static void __init tegra30_fixed_clk_init(void)
 1, 0, &cml_lock);
 clk_register_clkdev(clk, "cml1", NULL);
 clks[cml1] = clk;
-
-/* pciex */
-clk = clk_register_fixed_rate(NULL, "pciex", "pll_e", 0, 100000000);
-clk_register_clkdev(clk, "pciex", NULL);
-clks[pciex] = clk;
 }
 
 static void __init tegra30_osc_clk_init(void)
@@ -190,8 +190,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
 if (ret)
 return ERR_PTR(ret);
 }
-return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
-0600);
+return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
 }
 EXPORT_SYMBOL(drm_gem_prime_export);
 
@@ -2687,6 +2687,9 @@ void r600_uvd_rbc_stop(struct radeon_device *rdev)
 int r600_uvd_init(struct radeon_device *rdev)
 {
 int i, j, r;
+/* disable byte swapping */
+u32 lmi_swap_cntl = 0;
+u32 mp_swap_cntl = 0;
 
 /* raise clocks while booting up the VCPU */
 radeon_set_uvd_clocks(rdev, 53300, 40000);
@@ -2711,9 +2714,13 @@ int r600_uvd_init(struct radeon_device *rdev)
 WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
 (1 << 21) | (1 << 9) | (1 << 20));
 
-/* disable byte swapping */
-WREG32(UVD_LMI_SWAP_CNTL, 0);
-WREG32(UVD_MP_SWAP_CNTL, 0);
+#ifdef __BIG_ENDIAN
+/* swap (8 in 32) RB and IB */
+lmi_swap_cntl = 0xa;
+mp_swap_cntl = 0;
+#endif
+WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
 
 WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
 WREG32(UVD_MPC_SET_MUXA1, 0x0);
@@ -244,16 +244,6 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
 */
 void radeon_wb_disable(struct radeon_device *rdev)
 {
-int r;
-
-if (rdev->wb.wb_obj) {
-r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-if (unlikely(r != 0))
-return;
-radeon_bo_kunmap(rdev->wb.wb_obj);
-radeon_bo_unpin(rdev->wb.wb_obj);
-radeon_bo_unreserve(rdev->wb.wb_obj);
-}
 rdev->wb.enabled = false;
 }
 
@@ -269,6 +259,11 @@ void radeon_wb_fini(struct radeon_device *rdev)
 {
 radeon_wb_disable(rdev);
 if (rdev->wb.wb_obj) {
+if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
+radeon_bo_kunmap(rdev->wb.wb_obj);
+radeon_bo_unpin(rdev->wb.wb_obj);
+radeon_bo_unreserve(rdev->wb.wb_obj);
+}
 radeon_bo_unref(&rdev->wb.wb_obj);
 rdev->wb.wb = NULL;
 rdev->wb.wb_obj = NULL;
@@ -295,26 +290,26 @@ int radeon_wb_init(struct radeon_device *rdev)
 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
 return r;
 }
-}
-r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-if (unlikely(r != 0)) {
-radeon_wb_fini(rdev);
-return r;
-}
-r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
-&rdev->wb.gpu_addr);
-if (r) {
+r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+if (unlikely(r != 0)) {
+radeon_wb_fini(rdev);
+return r;
+}
+r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+&rdev->wb.gpu_addr);
+if (r) {
+radeon_bo_unreserve(rdev->wb.wb_obj);
+dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+radeon_wb_fini(rdev);
+return r;
+}
+r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
 radeon_bo_unreserve(rdev->wb.wb_obj);
-dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
-radeon_wb_fini(rdev);
-return r;
-}
-r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
-radeon_bo_unreserve(rdev->wb.wb_obj);
-if (r) {
-dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
-radeon_wb_fini(rdev);
-return r;
+if (r) {
+dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+radeon_wb_fini(rdev);
+return r;
+}
 }
 
 /* clear wb memory */
@@ -63,7 +63,9 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
 {
 struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
 if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-*drv->cpu_addr = cpu_to_le32(seq);
+if (drv->cpu_addr) {
+*drv->cpu_addr = cpu_to_le32(seq);
+}
 } else {
 WREG32(drv->scratch_reg, seq);
 }
@@ -84,7 +86,11 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
 u32 seq = 0;
 
 if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-seq = le32_to_cpu(*drv->cpu_addr);
+if (drv->cpu_addr) {
+seq = le32_to_cpu(*drv->cpu_addr);
+} else {
+seq = lower_32_bits(atomic64_read(&drv->last_seq));
+}
 } else {
 seq = RREG32(drv->scratch_reg);
 }
@@ -1197,11 +1197,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 int radeon_vm_bo_rmv(struct radeon_device *rdev,
 struct radeon_bo_va *bo_va)
 {
-int r;
+int r = 0;
 
 mutex_lock(&rdev->vm_manager.lock);
 mutex_lock(&bo_va->vm->mutex);
-r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+if (bo_va->soffset) {
+r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+}
 mutex_unlock(&rdev->vm_manager.lock);
 list_del(&bo_va->vm_list);
 mutex_unlock(&bo_va->vm->mutex);
@@ -402,6 +402,13 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
 return -ENOMEM;
 /* Align requested size with padding so unlock_commit can
 * pad safely */
+radeon_ring_free_size(rdev, ring);
+if (ring->ring_free_dw == (ring->ring_size / 4)) {
+/* This is an empty ring update lockup info to avoid
+* false positive.
+*/
+radeon_ring_lockup_update(ring);
+}
 ndw = (ndw + ring->align_mask) & ~ring->align_mask;
 while (ndw > (ring->ring_free_dw - 1)) {
 radeon_ring_free_size(rdev, ring);
@@ -159,7 +159,17 @@ int radeon_uvd_suspend(struct radeon_device *rdev)
 if (!r) {
 radeon_bo_kunmap(rdev->uvd.vcpu_bo);
 radeon_bo_unpin(rdev->uvd.vcpu_bo);
+rdev->uvd.cpu_addr = NULL;
+if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
+radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
+}
 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+
+if (rdev->uvd.cpu_addr) {
+radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+} else {
+rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
+}
 }
 return r;
 }
@@ -178,6 +188,10 @@ int radeon_uvd_resume(struct radeon_device *rdev)
 return r;
 }
 
+/* Have been pin in cpu unmap unpin */
+radeon_bo_kunmap(rdev->uvd.vcpu_bo);
+radeon_bo_unpin(rdev->uvd.vcpu_bo);
+
 r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
 &rdev->uvd.gpu_addr);
 if (r) {
@@ -613,19 +627,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
 }
 
 /* stitch together an UVD create msg */
-msg[0] = 0x00000de4;
-msg[1] = 0x00000000;
-msg[2] = handle;
-msg[3] = 0x00000000;
-msg[4] = 0x00000000;
-msg[5] = 0x00000000;
-msg[6] = 0x00000000;
-msg[7] = 0x00000780;
-msg[8] = 0x00000440;
-msg[9] = 0x00000000;
-msg[10] = 0x01b37000;
+msg[0] = cpu_to_le32(0x00000de4);
+msg[1] = cpu_to_le32(0x00000000);
+msg[2] = cpu_to_le32(handle);
+msg[3] = cpu_to_le32(0x00000000);
+msg[4] = cpu_to_le32(0x00000000);
+msg[5] = cpu_to_le32(0x00000000);
+msg[6] = cpu_to_le32(0x00000000);
+msg[7] = cpu_to_le32(0x00000780);
+msg[8] = cpu_to_le32(0x00000440);
+msg[9] = cpu_to_le32(0x00000000);
+msg[10] = cpu_to_le32(0x01b37000);
 for (i = 11; i < 1024; ++i)
-msg[i] = 0x0;
+msg[i] = cpu_to_le32(0x0);
 
 radeon_bo_kunmap(bo);
 radeon_bo_unreserve(bo);
@@ -659,12 +673,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
 }
 
 /* stitch together an UVD destroy msg */
-msg[0] = 0x00000de4;
-msg[1] = 0x00000002;
-msg[2] = handle;
-msg[3] = 0x00000000;
+msg[0] = cpu_to_le32(0x00000de4);
+msg[1] = cpu_to_le32(0x00000002);
+msg[2] = cpu_to_le32(handle);
+msg[3] = cpu_to_le32(0x00000000);
 for (i = 4; i < 1024; ++i)
-msg[i] = 0x0;
+msg[i] = cpu_to_le32(0x0);
 
 radeon_bo_kunmap(bo);
 radeon_bo_unreserve(bo);
@@ -705,7 +705,7 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
 static int __cpuinit gic_secondary_init(struct notifier_block *nfb,
 unsigned long action, void *hcpu)
 {
-if (action == CPU_STARTING)
+if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
 gic_cpu_init(&gic_data[0]);
 return NOTIFY_OK;
 }
@@ -136,9 +136,9 @@ config DVB_NET
 
 # This Kconfig option is used by both PCI and USB drivers
 config TTPCI_EEPROM
 tristate
 depends on I2C
 default n
 
 source "drivers/media/dvb-core/Kconfig"
 
@@ -189,6 +189,12 @@ config MEDIA_SUBDRV_AUTOSELECT
 
 If unsure say Y.
 
+config MEDIA_ATTACH
+bool
+depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT
+depends on MODULES
+default MODULES
+
 source "drivers/media/i2c/Kconfig"
 source "drivers/media/tuners/Kconfig"
 source "drivers/media/dvb-frontends/Kconfig"
@@ -956,7 +956,7 @@ static int s5c73m3_oif_enum_frame_interval(struct v4l2_subdev *sd,
 
 if (fie->pad != OIF_SOURCE_PAD)
 return -EINVAL;
-if (fie->index > ARRAY_SIZE(s5c73m3_intervals))
+if (fie->index >= ARRAY_SIZE(s5c73m3_intervals))
 return -EINVAL;
 
 mutex_lock(&state->lock);
@@ -615,7 +615,7 @@ static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol,
 int changed = 0;
 u32 old;
 
-if (core->board.audio_chip == V4L2_IDENT_WM8775)
+if (core->sd_wm8775)
 snd_cx88_wm8775_volume_put(kcontrol, value);
 
 left = value->value.integer.value[0] & 0x3f;
@@ -682,8 +682,7 @@ static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
 vol ^= bit;
 cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol);
 /* Pass mute onto any WM8775 */
-if ((core->board.audio_chip == V4L2_IDENT_WM8775) &&
-((1<<6) == bit))
+if (core->sd_wm8775 && ((1<<6) == bit))
 wm8775_s_ctrl(core, V4L2_CID_AUDIO_MUTE, 0 != (vol & bit));
 ret = 1;
 }
@@ -903,7 +902,7 @@ static int cx88_audio_initdev(struct pci_dev *pci,
 goto error;
 
 /* If there's a wm8775 then add a Line-In ALC switch */
-if (core->board.audio_chip == V4L2_IDENT_WM8775)
+if (core->sd_wm8775)
 snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip));
 
 strcpy (card->driver, "CX88x");
@@ -385,8 +385,7 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input)
 /* The wm8775 module has the "2" route hardwired into
 the initialization. Some boards may use different
 routes for different inputs. HVR-1300 surely does */
-if (core->board.audio_chip &&
-core->board.audio_chip == V4L2_IDENT_WM8775) {
+if (core->sd_wm8775) {
 call_all(core, audio, s_routing,
 INPUT(input).audioroute, 0, 0);
 }
@@ -771,8 +770,7 @@ static int video_open(struct file *file)
 cx_write(MO_GP1_IO, core->board.radio.gpio1);
 cx_write(MO_GP2_IO, core->board.radio.gpio2);
 if (core->board.radio.audioroute) {
-if(core->board.audio_chip &&
-core->board.audio_chip == V4L2_IDENT_WM8775) {
+if (core->sd_wm8775) {
 call_all(core, audio, s_routing,
 core->board.radio.audioroute, 0, 0);
 }
@@ -959,7 +957,7 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
 u32 value,mask;
 
 /* Pass changes onto any WM8775 */
-if (core->board.audio_chip == V4L2_IDENT_WM8775) {
+if (core->sd_wm8775) {
 switch (ctrl->id) {
 case V4L2_CID_AUDIO_MUTE:
 wm8775_s_ctrl(core, ctrl->id, ctrl->val);
@@ -576,6 +576,14 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
 return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
 }
 
+static int vidioc_create_bufs(struct file *file, void *priv,
+struct v4l2_create_buffers *create)
+{
+struct coda_ctx *ctx = fh_to_ctx(priv);
+
+return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create);
+}
+
 static int vidioc_streamon(struct file *file, void *priv,
 enum v4l2_buf_type type)
 {
@@ -610,6 +618,7 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = {
 
 .vidioc_qbuf = vidioc_qbuf,
 .vidioc_dqbuf = vidioc_dqbuf,
+.vidioc_create_bufs = vidioc_create_bufs,
 
 .vidioc_streamon = vidioc_streamon,
 .vidioc_streamoff = vidioc_streamoff,
@@ -916,6 +916,21 @@ static int vpbe_display_s_fmt(struct file *file, void *priv,
 other video window */
 
 layer->pix_fmt = *pixfmt;
+if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12) {
+struct vpbe_layer *otherlayer;
+
+otherlayer = _vpbe_display_get_other_win_layer(disp_dev, layer);
+/* if other layer is available, only
+* claim it, do not configure it
+*/
+ret = osd_device->ops.request_layer(osd_device,
+otherlayer->layer_info.id);
+if (ret < 0) {
+v4l2_err(&vpbe_dev->v4l2_dev,
+"Display Manager failed to allocate layer\n");
+return -EBUSY;
+}
+}
 
 /* Get osd layer config */
 osd_device->ops.get_layer_config(osd_device,
@@ -1837,7 +1837,7 @@ static int vpfe_probe(struct platform_device *pdev)
 if (NULL == ccdc_cfg) {
 v4l2_err(pdev->dev.driver,
 "Memory allocation failed for ccdc_cfg\n");
-goto probe_free_lock;
+goto probe_free_dev_mem;
 }
 
 mutex_lock(&ccdc_lock);
@@ -1991,7 +1991,6 @@ probe_out_release_irq:
 free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
 probe_free_ccdc_cfg_mem:
 kfree(ccdc_cfg);
-probe_free_lock:
 mutex_unlock(&ccdc_lock);
 probe_free_dev_mem:
 kfree(vpfe_dev);
@@ -174,7 +174,7 @@ int fimc_is_hw_change_mode(struct fimc_is *is)
 HIC_CAPTURE_STILL, HIC_CAPTURE_VIDEO,
 };
 
-if (WARN_ON(is->config_index > ARRAY_SIZE(cmd)))
+if (WARN_ON(is->config_index >= ARRAY_SIZE(cmd)))
 return -EINVAL;
 
 mcuctl_write(cmd[is->config_index], is, MCUCTL_REG_ISSR(0));
@@ -48,7 +48,6 @@ static char *fimc_is_clocks[ISS_CLKS_MAX] = {
 [ISS_CLK_LITE0] = "lite0",
 [ISS_CLK_LITE1] = "lite1",
 [ISS_CLK_MPLL] = "mpll",
-[ISS_CLK_SYSREG] = "sysreg",
 [ISS_CLK_ISP] = "isp",
 [ISS_CLK_DRC] = "drc",
 [ISS_CLK_FD] = "fd",
@@ -71,7 +70,6 @@ static void fimc_is_put_clocks(struct fimc_is *is)
 for (i = 0; i < ISS_CLKS_MAX; i++) {
 if (IS_ERR(is->clocks[i]))
 continue;
-clk_unprepare(is->clocks[i]);
 clk_put(is->clocks[i]);
 is->clocks[i] = ERR_PTR(-EINVAL);
 }
@@ -90,12 +88,6 @@ static int fimc_is_get_clocks(struct fimc_is *is)
 ret = PTR_ERR(is->clocks[i]);
 goto err;
 }
-ret = clk_prepare(is->clocks[i]);
-if (ret < 0) {
-clk_put(is->clocks[i]);
-is->clocks[i] = ERR_PTR(-EINVAL);
-goto err;
-}
 }
 
 return 0;
@@ -103,7 +95,7 @@ err:
 fimc_is_put_clocks(is);
 dev_err(&is->pdev->dev, "failed to get clock: %s\n",
 fimc_is_clocks[i]);
-return -ENXIO;
+return ret;
 }
 
 static int fimc_is_setup_clocks(struct fimc_is *is)
@@ -144,7 +136,7 @@ int fimc_is_enable_clocks(struct fimc_is *is)
 for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
 if (IS_ERR(is->clocks[i]))
 continue;
-ret = clk_enable(is->clocks[i]);
+ret = clk_prepare_enable(is->clocks[i]);
 if (ret < 0) {
 dev_err(&is->pdev->dev, "clock %s enable failed\n",
 fimc_is_clocks[i]);
@@ -163,7 +155,7 @@ void fimc_is_disable_clocks(struct fimc_is *is)
 
 for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
 if (!IS_ERR(is->clocks[i])) {
-clk_disable(is->clocks[i]);
+clk_disable_unprepare(is->clocks[i]);
 pr_debug("disabled clock: %s\n", fimc_is_clocks[i]);
 }
 }
@@ -326,6 +318,11 @@ int fimc_is_start_firmware(struct fimc_is *is)
 struct device *dev = &is->pdev->dev;
 int ret;
 
+if (is->fw.f_w == NULL) {
+dev_err(dev, "firmware is not loaded\n");
+return -EINVAL;
+}
+
 memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size);
 wmb();
 
@ -837,23 +834,11 @@ static int fimc_is_probe(struct platform_device *pdev)
|
|||||||
goto err_clk;
|
goto err_clk;
|
||||||
}
|
}
|
||||||
pm_runtime_enable(dev);
|
pm_runtime_enable(dev);
|
||||||
/*
|
|
||||||
* Enable only the ISP power domain, keep FIMC-IS clocks off until
|
|
||||||
* the whole clock tree is configured. The ISP power domain needs
|
|
||||||
* be active in order to acces any CMU_ISP clock registers.
|
|
||||||
*/
|
|
||||||
ret = pm_runtime_get_sync(dev);
|
ret = pm_runtime_get_sync(dev);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto err_irq;
|
goto err_irq;
|
||||||
|
|
||||||
ret = fimc_is_setup_clocks(is);
|
|
||||||
pm_runtime_put_sync(dev);
|
|
||||||
|
|
||||||
if (ret < 0)
|
|
||||||
goto err_irq;
|
|
||||||
|
|
||||||
is->clk_init = true;
|
|
||||||
|
|
||||||
is->alloc_ctx = vb2_dma_contig_init_ctx(dev);
|
is->alloc_ctx = vb2_dma_contig_init_ctx(dev);
|
||||||
if (IS_ERR(is->alloc_ctx)) {
|
if (IS_ERR(is->alloc_ctx)) {
|
||||||
ret = PTR_ERR(is->alloc_ctx);
|
ret = PTR_ERR(is->alloc_ctx);
|
||||||
@ -875,6 +860,8 @@ static int fimc_is_probe(struct platform_device *pdev)
|
|||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto err_dfs;
|
goto err_dfs;
|
||||||
|
|
||||||
|
pm_runtime_put_sync(dev);
|
||||||
|
|
||||||
dev_dbg(dev, "FIMC-IS registered successfully\n");
|
dev_dbg(dev, "FIMC-IS registered successfully\n");
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
@ -894,9 +881,11 @@ err_clk:
|
|||||||
static int fimc_is_runtime_resume(struct device *dev)
|
static int fimc_is_runtime_resume(struct device *dev)
|
||||||
{
|
{
|
||||||
struct fimc_is *is = dev_get_drvdata(dev);
|
struct fimc_is *is = dev_get_drvdata(dev);
|
||||||
|
int ret;
|
||||||
|
|
||||||
if (!is->clk_init)
|
ret = fimc_is_setup_clocks(is);
|
||||||
return 0;
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
return fimc_is_enable_clocks(is);
|
return fimc_is_enable_clocks(is);
|
||||||
}
|
}
|
||||||
@ -905,9 +894,7 @@ static int fimc_is_runtime_suspend(struct device *dev)
|
|||||||
{
|
{
|
||||||
struct fimc_is *is = dev_get_drvdata(dev);
|
struct fimc_is *is = dev_get_drvdata(dev);
|
||||||
|
|
||||||
if (is->clk_init)
|
fimc_is_disable_clocks(is);
|
||||||
fimc_is_disable_clocks(is);
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -941,7 +928,8 @@ static int fimc_is_remove(struct platform_device *pdev)
|
|||||||
vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
|
vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
|
||||||
fimc_is_put_clocks(is);
|
fimc_is_put_clocks(is);
|
||||||
fimc_is_debugfs_remove(is);
|
fimc_is_debugfs_remove(is);
|
||||||
release_firmware(is->fw.f_w);
|
if (is->fw.f_w)
|
||||||
|
release_firmware(is->fw.f_w);
|
||||||
fimc_is_free_cpu_memory(is);
|
fimc_is_free_cpu_memory(is);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -73,7 +73,6 @@ enum {
|
|||||||
ISS_CLK_LITE0,
|
ISS_CLK_LITE0,
|
||||||
ISS_CLK_LITE1,
|
ISS_CLK_LITE1,
|
||||||
ISS_CLK_MPLL,
|
ISS_CLK_MPLL,
|
||||||
ISS_CLK_SYSREG,
|
|
||||||
ISS_CLK_ISP,
|
ISS_CLK_ISP,
|
||||||
ISS_CLK_DRC,
|
ISS_CLK_DRC,
|
||||||
ISS_CLK_FD,
|
ISS_CLK_FD,
|
||||||
@ -265,7 +264,6 @@ struct fimc_is {
|
|||||||
spinlock_t slock;
|
spinlock_t slock;
|
||||||
|
|
||||||
struct clk *clocks[ISS_CLKS_MAX];
|
struct clk *clocks[ISS_CLKS_MAX];
|
||||||
bool clk_init;
|
|
||||||
void __iomem *regs;
|
void __iomem *regs;
|
||||||
void __iomem *pmu_regs;
|
void __iomem *pmu_regs;
|
||||||
int irq;
|
int irq;
|
||||||
|
@ -138,7 +138,7 @@ static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
mf->colorspace = V4L2_COLORSPACE_JPEG;
|
mf->colorspace = V4L2_COLORSPACE_SRGB;
|
||||||
|
|
||||||
mutex_lock(&isp->subdev_lock);
|
mutex_lock(&isp->subdev_lock);
|
||||||
__is_get_frame_size(is, &cur_fmt);
|
__is_get_frame_size(is, &cur_fmt);
|
||||||
@ -194,7 +194,7 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
|
|||||||
v4l2_dbg(1, debug, sd, "%s: pad%d: code: 0x%x, %dx%d\n",
|
v4l2_dbg(1, debug, sd, "%s: pad%d: code: 0x%x, %dx%d\n",
|
||||||
__func__, fmt->pad, mf->code, mf->width, mf->height);
|
__func__, fmt->pad, mf->code, mf->width, mf->height);
|
||||||
|
|
||||||
mf->colorspace = V4L2_COLORSPACE_JPEG;
|
mf->colorspace = V4L2_COLORSPACE_SRGB;
|
||||||
|
|
||||||
mutex_lock(&isp->subdev_lock);
|
mutex_lock(&isp->subdev_lock);
|
||||||
__isp_subdev_try_format(isp, fmt);
|
__isp_subdev_try_format(isp, fmt);
|
||||||
|
@ -746,7 +746,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
|
|||||||
node = v4l2_of_get_next_endpoint(node, NULL);
|
node = v4l2_of_get_next_endpoint(node, NULL);
|
||||||
if (!node) {
|
if (!node) {
|
||||||
dev_err(&pdev->dev, "No port node at %s\n",
|
dev_err(&pdev->dev, "No port node at %s\n",
|
||||||
node->full_name);
|
pdev->dev.of_node->full_name);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
/* Get port node and validate MIPI-CSI channel id. */
|
/* Get port node and validate MIPI-CSI channel id. */
|
||||||
|
@ -229,7 +229,7 @@ struct camif_vp {
|
|||||||
unsigned int state;
|
unsigned int state;
|
||||||
u16 fmt_flags;
|
u16 fmt_flags;
|
||||||
u8 id;
|
u8 id;
|
||||||
u8 rotation;
|
u16 rotation;
|
||||||
u8 hflip;
|
u8 hflip;
|
||||||
u8 vflip;
|
u8 vflip;
|
||||||
unsigned int offset;
|
unsigned int offset;
|
||||||
|
@ -1,2 +1,2 @@
|
|||||||
s5p-jpeg-objs := jpeg-core.o
|
s5p-jpeg-objs := jpeg-core.o
|
||||||
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) := s5p-jpeg.o
|
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg.o
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o
|
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc.o
|
||||||
s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o
|
s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o
|
||||||
s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o
|
s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o
|
||||||
s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_pm.o
|
s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_pm.o
|
||||||
|
@ -397,7 +397,7 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
|
|||||||
leave_handle_frame:
|
leave_handle_frame:
|
||||||
spin_unlock_irqrestore(&dev->irqlock, flags);
|
spin_unlock_irqrestore(&dev->irqlock, flags);
|
||||||
if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
|
if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
|
||||||
|| ctx->dst_queue_cnt < ctx->dpb_count)
|
|| ctx->dst_queue_cnt < ctx->pb_count)
|
||||||
clear_work_bit(ctx);
|
clear_work_bit(ctx);
|
||||||
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
|
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
|
||||||
wake_up_ctx(ctx, reason, err);
|
wake_up_ctx(ctx, reason, err);
|
||||||
@ -473,7 +473,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
|
|||||||
|
|
||||||
s5p_mfc_hw_call(dev->mfc_ops, dec_calc_dpb_size, ctx);
|
s5p_mfc_hw_call(dev->mfc_ops, dec_calc_dpb_size, ctx);
|
||||||
|
|
||||||
ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
|
ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
|
||||||
dev);
|
dev);
|
||||||
ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
|
ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
|
||||||
dev);
|
dev);
|
||||||
@ -562,7 +562,7 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx,
|
|||||||
struct s5p_mfc_dev *dev = ctx->dev;
|
struct s5p_mfc_dev *dev = ctx->dev;
|
||||||
struct s5p_mfc_buf *mb_entry;
|
struct s5p_mfc_buf *mb_entry;
|
||||||
|
|
||||||
mfc_debug(2, "Stream completed");
|
mfc_debug(2, "Stream completed\n");
|
||||||
|
|
||||||
s5p_mfc_clear_int_flags(dev);
|
s5p_mfc_clear_int_flags(dev);
|
||||||
ctx->int_type = reason;
|
ctx->int_type = reason;
|
||||||
@ -1362,7 +1362,6 @@ static struct s5p_mfc_variant mfc_drvdata_v5 = {
|
|||||||
.port_num = MFC_NUM_PORTS,
|
.port_num = MFC_NUM_PORTS,
|
||||||
.buf_size = &buf_size_v5,
|
.buf_size = &buf_size_v5,
|
||||||
.buf_align = &mfc_buf_align_v5,
|
.buf_align = &mfc_buf_align_v5,
|
||||||
.mclk_name = "sclk_mfc",
|
|
||||||
.fw_name = "s5p-mfc.fw",
|
.fw_name = "s5p-mfc.fw",
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -1389,7 +1388,6 @@ static struct s5p_mfc_variant mfc_drvdata_v6 = {
|
|||||||
.port_num = MFC_NUM_PORTS_V6,
|
.port_num = MFC_NUM_PORTS_V6,
|
||||||
.buf_size = &buf_size_v6,
|
.buf_size = &buf_size_v6,
|
||||||
.buf_align = &mfc_buf_align_v6,
|
.buf_align = &mfc_buf_align_v6,
|
||||||
.mclk_name = "aclk_333",
|
|
||||||
.fw_name = "s5p-mfc-v6.fw",
|
.fw_name = "s5p-mfc-v6.fw",
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@@ -138,6 +138,7 @@ enum s5p_mfc_inst_state {
 	MFCINST_INIT = 100,
 	MFCINST_GOT_INST,
 	MFCINST_HEAD_PARSED,
+	MFCINST_HEAD_PRODUCED,
 	MFCINST_BUFS_SET,
 	MFCINST_RUNNING,
 	MFCINST_FINISHING,
@@ -231,7 +232,6 @@ struct s5p_mfc_variant {
 	unsigned int port_num;
 	struct s5p_mfc_buf_size *buf_size;
 	struct s5p_mfc_buf_align *buf_align;
-	char *mclk_name;
 	char *fw_name;
 };
 
@@ -438,7 +438,7 @@ struct s5p_mfc_enc_params {
 	u32 rc_framerate_num;
 	u32 rc_framerate_denom;
 
-	union {
+	struct {
 		struct s5p_mfc_h264_enc_params h264;
 		struct s5p_mfc_mpeg4_enc_params mpeg4;
 	} codec;
@@ -602,7 +602,7 @@ struct s5p_mfc_ctx {
 	int after_packed_pb;
 	int sei_fp_parse;
 
-	int dpb_count;
+	int pb_count;
 	int total_dpb_count;
 	int mv_count;
 	/* Buffers */
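Note (illustration, not part of the commit): the union-to-struct change in s5p_mfc_enc_params is the one semantic tweak in this header hunk. A minimal standalone C sketch of the difference, using hypothetical field names:

/* Hypothetical illustration only -- not driver code.  With a union the
 * per-codec parameter blocks share storage, so writing the MPEG-4 block
 * overwrites any cached H.264 settings; with a struct each codec keeps
 * its own copy inside the same context. */
struct enc_params_as_union  { union  { int h264_gop_size; int mpeg4_gop_size; } codec; };
struct enc_params_as_struct { struct { int h264_gop_size; int mpeg4_gop_size; } codec; };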
@@ -38,7 +38,7 @@ int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev)
 	dev->fw_virt_addr = dma_alloc_coherent(dev->mem_dev_l, dev->fw_size,
 					&dev->bank1, GFP_KERNEL);
 
-	if (IS_ERR(dev->fw_virt_addr)) {
+	if (IS_ERR_OR_NULL(dev->fw_virt_addr)) {
 		dev->fw_virt_addr = NULL;
 		mfc_err("Allocating bitprocessor buffer failed\n");
 		return -ENOMEM;
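Note (not from the commit itself): dma_alloc_coherent() returns NULL rather than an ERR_PTR on failure, so widening the check to IS_ERR_OR_NULL() also covers that case. A minimal sketch of the resulting pattern, with a hypothetical helper name:

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gfp.h>

/* Hypothetical helper, for illustration only: allocate a DMA-coherent
 * buffer and treat both a NULL result and an ERR_PTR-encoded value as
 * failure, mirroring the IS_ERR_OR_NULL() check in this hunk. */
static void *alloc_coherent_or_null(struct device *dev, size_t size,
				    dma_addr_t *handle)
{
	void *buf = dma_alloc_coherent(dev, size, handle, GFP_KERNEL);

	if (IS_ERR_OR_NULL(buf))
		return NULL;	/* caller maps this to -ENOMEM */
	return buf;
}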
@@ -30,8 +30,8 @@ extern int debug;
 #define mfc_debug(level, fmt, args...)
 #endif
 
-#define mfc_debug_enter() mfc_debug(5, "enter")
-#define mfc_debug_leave() mfc_debug(5, "leave")
+#define mfc_debug_enter() mfc_debug(5, "enter\n")
+#define mfc_debug_leave() mfc_debug(5, "leave\n")
 
 #define mfc_err(fmt, args...) \
 	do { \
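Note (illustration, not driver code): the only change in these macros is the trailing \n; printk-style output without a newline may be held and merged with the next message, so "enter"/"leave" would not each appear as a complete log line. A userspace-compilable sketch of the same macro shape, with printf() standing in for the driver's logger:

#include <stdio.h>

/* Simplified stand-in for the driver's mfc_debug(): the real macro also
 * checks a runtime debug level and uses printk(), but the point is the
 * same -- the format string should end in '\n' so each call prints a
 * complete line. */
#define mfc_debug(level, fmt, ...) \
	printf("%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__)

#define mfc_debug_enter() mfc_debug(5, "enter\n")
#define mfc_debug_leave() mfc_debug(5, "leave\n")

int main(void)
{
	mfc_debug_enter();
	mfc_debug_leave();
	return 0;
}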
@@ -210,11 +210,11 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
 	/* Context is to decode a frame */
 	if (ctx->src_queue_cnt >= 1 &&
 	    ctx->state == MFCINST_RUNNING &&
-	    ctx->dst_queue_cnt >= ctx->dpb_count)
+	    ctx->dst_queue_cnt >= ctx->pb_count)
 		return 1;
 	/* Context is to return last frame */
 	if (ctx->state == MFCINST_FINISHING &&
-	    ctx->dst_queue_cnt >= ctx->dpb_count)
+	    ctx->dst_queue_cnt >= ctx->pb_count)
 		return 1;
 	/* Context is to set buffers */
 	if (ctx->src_queue_cnt >= 1 &&
@@ -224,7 +224,7 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
 	/* Resolution change */
 	if ((ctx->state == MFCINST_RES_CHANGE_INIT ||
 	    ctx->state == MFCINST_RES_CHANGE_FLUSH) &&
-	    ctx->dst_queue_cnt >= ctx->dpb_count)
+	    ctx->dst_queue_cnt >= ctx->pb_count)
 		return 1;
 	if (ctx->state == MFCINST_RES_CHANGE_END &&
 	    ctx->src_queue_cnt >= 1)
@@ -537,7 +537,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
 			mfc_err("vb2_reqbufs on capture failed\n");
 			return ret;
 		}
-		if (reqbufs->count < ctx->dpb_count) {
+		if (reqbufs->count < ctx->pb_count) {
 			mfc_err("Not enough buffers allocated\n");
 			reqbufs->count = 0;
 			s5p_mfc_clock_on();
@@ -751,7 +751,7 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
 	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
 		if (ctx->state >= MFCINST_HEAD_PARSED &&
 		    ctx->state < MFCINST_ABORT) {
-			ctrl->val = ctx->dpb_count;
+			ctrl->val = ctx->pb_count;
 			break;
 		} else if (ctx->state != MFCINST_INIT) {
 			v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
@@ -763,7 +763,7 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
 				S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0);
 		if (ctx->state >= MFCINST_HEAD_PARSED &&
 		    ctx->state < MFCINST_ABORT) {
-			ctrl->val = ctx->dpb_count;
+			ctrl->val = ctx->pb_count;
 		} else {
 			v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
 			return -EINVAL;
@@ -924,10 +924,10 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
 		/* Output plane count is 2 - one for Y and one for CbCr */
 		*plane_count = 2;
 		/* Setup buffer count */
-		if (*buf_count < ctx->dpb_count)
-			*buf_count = ctx->dpb_count;
-		if (*buf_count > ctx->dpb_count + MFC_MAX_EXTRA_DPB)
-			*buf_count = ctx->dpb_count + MFC_MAX_EXTRA_DPB;
+		if (*buf_count < ctx->pb_count)
+			*buf_count = ctx->pb_count;
+		if (*buf_count > ctx->pb_count + MFC_MAX_EXTRA_DPB)
+			*buf_count = ctx->pb_count + MFC_MAX_EXTRA_DPB;
 		if (*buf_count > MFC_MAX_BUFFERS)
 			*buf_count = MFC_MAX_BUFFERS;
 	} else {
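Note (illustration only): the queue_setup hunk merely renames dpb_count to pb_count, but the surrounding logic is a clamp on the requested capture buffer count. A self-contained sketch of that clamping, with placeholder values for the two limits:

/* Placeholder limits for illustration; the driver defines its own
 * MFC_MAX_EXTRA_DPB and MFC_MAX_BUFFERS values. */
#define MFC_MAX_EXTRA_DPB	5
#define MFC_MAX_BUFFERS		32

/* Hypothetical helper mirroring the clamping done in
 * s5p_mfc_queue_setup(): never fewer buffers than the codec needs
 * (pb_count), never more than pb_count + MFC_MAX_EXTRA_DPB, and never
 * more than the driver-wide maximum. */
static unsigned int clamp_capture_buf_count(unsigned int requested,
					    unsigned int pb_count)
{
	unsigned int count = requested;

	if (count < pb_count)
		count = pb_count;
	if (count > pb_count + MFC_MAX_EXTRA_DPB)
		count = pb_count + MFC_MAX_EXTRA_DPB;
	if (count > MFC_MAX_BUFFERS)
		count = MFC_MAX_BUFFERS;
	return count;
}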
Some files were not shown because too many files have changed in this diff.