Merge tag 'omap-for-v3.13/display-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into fixes

I accidentally removed some mux code for omap4 that I thought was
dead code as omap4 has been booting with device tree only since v3.10.
Turns out I also removed some display related mux code, so let's revert
that except for the dead code parts.

* tag 'omap-for-v3.13/display-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap: (439 commits)
  Revert "ARM: OMAP2+: Remove legacy mux code for display.c"
  Linux 3.13-rc4
commit cd15c51d6c

 CREDITS | 5
@@ -655,6 +655,11 @@ S: Stanford University
S: Stanford, California 94305
S: USA

N: Carlos Chinea
E: carlos.chinea@nokia.com
E: cch.devel@gmail.com
D: Author of HSI Subsystem

N: Randolph Chung
E: tausq@debian.org
D: Linux/PA-RISC hacker
@@ -73,7 +73,8 @@ range from zero to the maximal number of valid planes for the currently active
format. For the single-planar API, applications must set <structfield> plane
</structfield> to zero. Additional flags may be posted in the <structfield>
flags </structfield> field. Refer to a manual for open() for details.
Currently only O_CLOEXEC is supported. All other fields must be set to zero.
Currently only O_CLOEXEC, O_RDONLY, O_WRONLY, and O_RDWR are supported. All
other fields must be set to zero.
In the case of multi-planar API, every plane is exported separately using
multiple <constant> VIDIOC_EXPBUF </constant> calls. </para>

@@ -170,8 +171,9 @@ multi-planar API. Otherwise this value must be set to zero. </entry>
<entry>__u32</entry>
<entry><structfield>flags</structfield></entry>
<entry>Flags for the newly created file, currently only <constant>
O_CLOEXEC </constant> is supported, refer to the manual of open() for more
details.</entry>
O_CLOEXEC </constant>, <constant>O_RDONLY</constant>, <constant>O_WRONLY
</constant>, and <constant>O_RDWR</constant> are supported, refer to the manual
of open() for more details.</entry>
</row>
<row>
<entry>__s32</entry>
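For context, a minimal userspace sketch (not part of this patch) of exporting a buffer with the newly permitted access flags; the buffer type and index below are assumptions:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Export MMAP buffer 0 of a capture queue as a DMABUF fd (illustrative only). */
static int export_buffer(int video_fd)
{
	struct v4l2_exportbuffer expbuf;

	memset(&expbuf, 0, sizeof(expbuf));	/* all other fields must be zero */
	expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	expbuf.index = 0;			/* assumed buffer index */
	expbuf.flags = O_CLOEXEC | O_RDWR;	/* O_RDWR is allowed by this change */

	if (ioctl(video_fd, VIDIOC_EXPBUF, &expbuf) < 0)
		return -1;

	return expbuf.fd;			/* DMABUF file descriptor */
}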
@@ -164,10 +164,10 @@ This points to a number of methods, all of which need to be provided:

(4) Diff the index keys of two objects.

int (*diff_objects)(const void *a, const void *b);
int (*diff_objects)(const void *object, const void *index_key);

Return the bit position at which the index keys of two objects differ or
-1 if they are the same.
Return the bit position at which the index key of the specified object
differs from the given index key or -1 if they are the same.


(5) Free an object.
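To illustrate the new calling convention, a hedged sketch of a diff_objects implementation for a hypothetical object keyed by a 64-bit id; the structure and names are illustrative and not from this commit:

#include <linux/assoc_array.h>
#include <linux/bitops.h>
#include <linux/types.h>

/* Hypothetical object type; only the key field matters for this sketch. */
struct my_object {
	u64 id;
};

/*
 * Return the bit position at which the object's index key differs from the
 * key being looked up, or -1 if they are identical.
 */
static int my_diff_objects(const void *object, const void *index_key)
{
	const struct my_object *obj = object;
	u64 diff = obj->id ^ *(const u64 *)index_key;

	return diff ? __ffs64(diff) : -1;
}

static const struct assoc_array_ops my_assoc_ops = {
	.diff_objects	= my_diff_objects,
	/* the other required methods are omitted from this sketch */
};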
@@ -266,10 +266,12 @@ E.g.
Invalidation is removing an entry from the cache without writing it
back. Cache blocks can be invalidated via the invalidate_cblocks
message, which takes an arbitrary number of cblock ranges. Each cblock
must be expressed as a decimal value, in the future a variant message
that takes cblock ranges expressed in hexidecimal may be needed to
better support efficient invalidation of larger caches. The cache must
be in passthrough mode when invalidate_cblocks is used.
range's end value is "one past the end", meaning 5-10 expresses a range
of values from 5 to 9. Each cblock must be expressed as a decimal
value, in the future a variant message that takes cblock ranges
expressed in hexidecimal may be needed to better support efficient
invalidation of larger caches. The cache must be in passthrough mode
when invalidate_cblocks is used.

invalidate_cblocks [<cblock>|<cblock begin>-<cblock end>]*
@@ -4,7 +4,7 @@ This file provides information, what the device node
for the davinci_emac interface contains.

Required properties:
- compatible: "ti,davinci-dm6467-emac";
- compatible: "ti,davinci-dm6467-emac" or "ti,am3517-emac"
- reg: Offset and length of the register set for the device
- ti,davinci-ctrl-reg-offset: offset to control register
- ti,davinci-ctrl-mod-reg-offset: offset to control module register
@@ -8,3 +8,7 @@ Required properties:
Optional properties:
- phy-device : phandle to Ethernet phy
- local-mac-address : Ethernet mac address to use
- reg-io-width : Mask of sizes (in bytes) of the IO accesses that
are supported on the device. Valid value for SMSC LAN91c111 are
1, 2 or 4. If it's omitted or invalid, the size would be 2 meaning
16-bit access only.
@@ -313,7 +313,7 @@ static struct mic_device_desc *get_device_desc(struct mic_info *mic, int type)
int i;
void *dp = get_dp(mic, type);

for (i = mic_aligned_size(struct mic_bootparam); i < PAGE_SIZE;
for (i = sizeof(struct mic_bootparam); i < PAGE_SIZE;
i += mic_total_desc_size(d)) {
d = dp + i;

@@ -445,8 +445,8 @@ init_vr(struct mic_info *mic, int fd, int type,
__func__, mic->name, vr0->va, vr0->info, vr_size,
vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN));
mpsslog("magic 0x%x expected 0x%x\n",
vr0->info->magic, MIC_MAGIC + type);
assert(vr0->info->magic == MIC_MAGIC + type);
le32toh(vr0->info->magic), MIC_MAGIC + type);
assert(le32toh(vr0->info->magic) == MIC_MAGIC + type);
if (vr1) {
vr1->va = (struct mic_vring *)
&va[MIC_DEVICE_PAGE_END + vr_size];
@@ -458,8 +458,8 @@ init_vr(struct mic_info *mic, int fd, int type,
__func__, mic->name, vr1->va, vr1->info, vr_size,
vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN));
mpsslog("magic 0x%x expected 0x%x\n",
vr1->info->magic, MIC_MAGIC + type + 1);
assert(vr1->info->magic == MIC_MAGIC + type + 1);
le32toh(vr1->info->magic), MIC_MAGIC + type + 1);
assert(le32toh(vr1->info->magic) == MIC_MAGIC + type + 1);
}
done:
return va;
@@ -520,7 +520,7 @@ static void *
virtio_net(void *arg)
{
static __u8 vnet_hdr[2][sizeof(struct virtio_net_hdr)];
static __u8 vnet_buf[2][MAX_NET_PKT_SIZE] __aligned(64);
static __u8 vnet_buf[2][MAX_NET_PKT_SIZE] __attribute__ ((aligned(64)));
struct iovec vnet_iov[2][2] = {
{ { .iov_base = vnet_hdr[0], .iov_len = sizeof(vnet_hdr[0]) },
{ .iov_base = vnet_buf[0], .iov_len = sizeof(vnet_buf[0]) } },
@@ -1412,6 +1412,12 @@ mic_config(void *arg)
}

do {
ret = lseek(fd, 0, SEEK_SET);
if (ret < 0) {
mpsslog("%s: Failed to seek to file start '%s': %s\n",
mic->name, pathname, strerror(errno));
goto close_error1;
}
ret = read(fd, value, sizeof(value));
if (ret < 0) {
mpsslog("%s: Failed to read sysfs entry '%s': %s\n",
@@ -123,6 +123,16 @@ Transmission process is similar to capture as shown below.
[shutdown] close() --------> destruction of the transmission socket and
deallocation of all associated resources.

Socket creation and destruction is also straight forward, and is done
the same way as in capturing described in the previous paragraph:

int fd = socket(PF_PACKET, mode, 0);

The protocol can optionally be 0 in case we only want to transmit
via this socket, which avoids an expensive call to packet_rcv().
In this case, you also need to bind(2) the TX_RING with sll_protocol = 0
set. Otherwise, htons(ETH_P_ALL) or any other protocol, for example.

Binding the socket to your network interface is mandatory (with zero copy) to
know the header size of frames used in the circular buffer.
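As a rough illustration of the transmit-only case described above (not part of this commit), a PF_PACKET socket created with protocol 0 and bound with sll_protocol = 0 might be set up as follows; the use of SOCK_RAW and the interface lookup are assumptions:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <net/if.h>

/* Transmit-only packet socket: protocol 0 avoids the packet_rcv() cost. */
static int open_tx_socket(const char *ifname)
{
	struct sockaddr_ll ll;
	int fd = socket(PF_PACKET, SOCK_RAW, 0);

	if (fd < 0)
		return -1;

	memset(&ll, 0, sizeof(ll));
	ll.sll_family = AF_PACKET;
	ll.sll_protocol = 0;			/* TX only, as described above */
	ll.sll_ifindex = if_nametoindex(ifname);

	if (bind(fd, (struct sockaddr *)&ll, sizeof(ll)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}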
 MAINTAINERS | 46

@@ -2135,7 +2135,8 @@ S: Maintained
F: Documentation/zh_CN/

CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
M: Alexander Shishkin <alexander.shishkin@linux.intel.com>
M: Peter Chen <Peter.Chen@freescale.com>
T: git://github.com/hzpeterchen/linux-usb.git
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/chipidea/
@@ -4041,6 +4042,14 @@ W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
S: Maintained
F: fs/hpfs/

HSI SUBSYSTEM
M: Sebastian Reichel <sre@debian.org>
S: Maintained
F: Documentation/ABI/testing/sysfs-bus-hsi
F: drivers/hsi/
F: include/linux/hsi/
F: include/uapi/linux/hsi/

HSO 3G MODEM DRIVER
M: Jan Dumon <j.dumon@option.com>
W: http://www.pharscape.org
@@ -4459,10 +4468,8 @@ M: Bruce Allan <bruce.w.allan@intel.com>
M: Carolyn Wyborny <carolyn.wyborny@intel.com>
M: Don Skidmore <donald.c.skidmore@intel.com>
M: Greg Rose <gregory.v.rose@intel.com>
M: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
M: Alex Duyck <alexander.h.duyck@intel.com>
M: John Ronciak <john.ronciak@intel.com>
M: Tushar Dave <tushar.n.dave@intel.com>
L: e1000-devel@lists.sourceforge.net
W: http://www.intel.com/support/feedback.htm
W: http://e1000.sourceforge.net/
@@ -6458,19 +6465,52 @@ F: drivers/pci/
F: include/linux/pci*
F: arch/x86/pci/

PCI DRIVER FOR IMX6
M: Richard Zhu <r65037@freescale.com>
M: Shawn Guo <shawn.guo@linaro.org>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/pci/host/*imx6*

PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
M: Jason Cooper <jason@lakedaemon.net>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/pci/host/*mvebu*

PCI DRIVER FOR NVIDIA TEGRA
M: Thierry Reding <thierry.reding@gmail.com>
L: linux-tegra@vger.kernel.org
L: linux-pci@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
F: drivers/pci/host/pci-tegra.c

PCI DRIVER FOR RENESAS R-CAR
M: Simon Horman <horms@verge.net.au>
L: linux-pci@vger.kernel.org
L: linux-sh@vger.kernel.org
S: Maintained
F: drivers/pci/host/*rcar*

PCI DRIVER FOR SAMSUNG EXYNOS
M: Jingoo Han <jg1.han@samsung.com>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
S: Maintained
F: drivers/pci/host/pci-exynos.c

PCI DRIVER FOR SYNOPSIS DESIGNWARE
M: Mohit Kumar <mohit.kumar@st.com>
M: Jingoo Han <jg1.han@samsung.com>
L: linux-pci@vger.kernel.org
S: Maintained
F: drivers/pci/host/*designware*

PCMCIA SUBSYSTEM
P: Linux PCMCIA Team
L: linux-pcmcia@lists.infradead.org
 Makefile | 2

@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 13
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = One Giant Leap for Frogkind

# *DOCUMENTATION*
@ -8,6 +8,7 @@
|
||||
|
||||
config ARC
|
||||
def_bool y
|
||||
select BUILDTIME_EXTABLE_SORT
|
||||
select CLONE_BACKWARDS
|
||||
# ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
|
||||
select DEVTMPFS if !INITRAMFS_SOURCE=""
|
||||
|
@ -8,6 +8,9 @@
|
||||
|
||||
/******** no-legacy-syscalls-ABI *******/
|
||||
|
||||
#ifndef _UAPI_ASM_ARC_UNISTD_H
|
||||
#define _UAPI_ASM_ARC_UNISTD_H
|
||||
|
||||
#define __ARCH_WANT_SYS_EXECVE
|
||||
#define __ARCH_WANT_SYS_CLONE
|
||||
#define __ARCH_WANT_SYS_VFORK
|
||||
@ -32,3 +35,5 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
|
||||
/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
|
||||
#define __NR_sysfs (__NR_arch_specific_syscall + 3)
|
||||
__SYSCALL(__NR_sysfs, sys_sysfs)
|
||||
|
||||
#endif
|
||||
|
@ -79,9 +79,9 @@ static int arc_pmu_cache_event(u64 config)
|
||||
cache_result = (config >> 16) & 0xff;
|
||||
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
|
||||
return -EINVAL;
|
||||
if (cache_type >= PERF_COUNT_HW_CACHE_OP_MAX)
|
||||
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
|
||||
return -EINVAL;
|
||||
if (cache_type >= PERF_COUNT_HW_CACHE_RESULT_MAX)
|
||||
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
ret = arc_pmu_cache_map[cache_type][cache_op][cache_result];
|
||||
|
@ -100,23 +100,19 @@
|
||||
#define TASK_UNMAPPED_BASE UL(0x00000000)
|
||||
#endif
|
||||
|
||||
#ifndef PHYS_OFFSET
|
||||
#define PHYS_OFFSET UL(CONFIG_DRAM_BASE)
|
||||
#endif
|
||||
|
||||
#ifndef END_MEM
|
||||
#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
|
||||
#endif
|
||||
|
||||
#ifndef PAGE_OFFSET
|
||||
#define PAGE_OFFSET (PHYS_OFFSET)
|
||||
#define PAGE_OFFSET PLAT_PHYS_OFFSET
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The module can be at any place in ram in nommu mode.
|
||||
*/
|
||||
#define MODULES_END (END_MEM)
|
||||
#define MODULES_VADDR (PHYS_OFFSET)
|
||||
#define MODULES_VADDR PAGE_OFFSET
|
||||
|
||||
#define XIP_VIRT_ADDR(physaddr) (physaddr)
|
||||
|
||||
@ -157,6 +153,16 @@
|
||||
#endif
|
||||
#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1)
|
||||
|
||||
/*
|
||||
* PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
|
||||
* memory. This is used for XIP and NoMMU kernels, or by kernels which
|
||||
* have their own mach/memory.h. Assembly code must always use
|
||||
* PLAT_PHYS_OFFSET and not PHYS_OFFSET.
|
||||
*/
|
||||
#ifndef PLAT_PHYS_OFFSET
|
||||
#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
|
||||
#endif
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
/*
|
||||
@ -239,6 +245,8 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
|
||||
|
||||
#else
|
||||
|
||||
#define PHYS_OFFSET PLAT_PHYS_OFFSET
|
||||
|
||||
static inline phys_addr_t __virt_to_phys(unsigned long x)
|
||||
{
|
||||
return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
|
||||
@ -251,17 +259,6 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
|
||||
|
||||
#endif
|
||||
#endif
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#ifndef PHYS_OFFSET
|
||||
#ifdef PLAT_PHYS_OFFSET
|
||||
#define PHYS_OFFSET PLAT_PHYS_OFFSET
|
||||
#else
|
||||
#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
/*
|
||||
* PFNs are used to describe any physical page; this means
|
||||
|
@ -68,7 +68,7 @@ ENTRY(stext)
|
||||
|
||||
#ifdef CONFIG_ARM_MPU
|
||||
/* Calculate the size of a region covering just the kernel */
|
||||
ldr r5, =PHYS_OFFSET @ Region start: PHYS_OFFSET
|
||||
ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
|
||||
ldr r6, =(_end) @ Cover whole kernel
|
||||
sub r6, r6, r5 @ Minimum size of region to map
|
||||
clz r6, r6 @ Region size must be 2^N...
|
||||
@ -213,7 +213,7 @@ ENTRY(__setup_mpu)
|
||||
set_region_nr r0, #MPU_RAM_REGION
|
||||
isb
|
||||
/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
|
||||
ldr r0, =PHYS_OFFSET @ RAM starts at PHYS_OFFSET
|
||||
ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET
|
||||
ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
|
||||
|
||||
setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled
|
||||
|
@ -110,7 +110,7 @@ ENTRY(stext)
|
||||
sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
|
||||
add r8, r8, r4 @ PHYS_OFFSET
|
||||
#else
|
||||
ldr r8, =PHYS_OFFSET @ always constant in this case
|
||||
ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -404,6 +404,7 @@ EXPORT_SYMBOL(dump_fpu);
|
||||
unsigned long get_wchan(struct task_struct *p)
|
||||
{
|
||||
struct stackframe frame;
|
||||
unsigned long stack_page;
|
||||
int count = 0;
|
||||
if (!p || p == current || p->state == TASK_RUNNING)
|
||||
return 0;
|
||||
@ -412,9 +413,11 @@ unsigned long get_wchan(struct task_struct *p)
|
||||
frame.sp = thread_saved_sp(p);
|
||||
frame.lr = 0; /* recovered from the stack */
|
||||
frame.pc = thread_saved_pc(p);
|
||||
stack_page = (unsigned long)task_stack_page(p);
|
||||
do {
|
||||
int ret = unwind_frame(&frame);
|
||||
if (ret < 0)
|
||||
if (frame.sp < stack_page ||
|
||||
frame.sp >= stack_page + THREAD_SIZE ||
|
||||
unwind_frame(&frame) < 0)
|
||||
return 0;
|
||||
if (!in_sched_functions(frame.pc))
|
||||
return frame.pc;
|
||||
|
@ -873,8 +873,6 @@ void __init setup_arch(char **cmdline_p)
|
||||
machine_desc = mdesc;
|
||||
machine_name = mdesc->name;
|
||||
|
||||
setup_dma_zone(mdesc);
|
||||
|
||||
if (mdesc->reboot_mode != REBOOT_HARD)
|
||||
reboot_mode = mdesc->reboot_mode;
|
||||
|
||||
@ -892,6 +890,7 @@ void __init setup_arch(char **cmdline_p)
|
||||
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
|
||||
|
||||
early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
|
||||
setup_dma_zone(mdesc);
|
||||
sanity_check_meminfo();
|
||||
arm_memblock_init(&meminfo, mdesc);
|
||||
|
||||
|
@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame)
|
||||
high = ALIGN(low, THREAD_SIZE);
|
||||
|
||||
/* check current frame pointer is within bounds */
|
||||
if (fp < (low + 12) || fp + 4 >= high)
|
||||
if (fp < low + 12 || fp > high - 4)
|
||||
return -EINVAL;
|
||||
|
||||
/* restore the registers from the stack frame */
|
||||
|
@ -509,9 +509,10 @@ static inline int
|
||||
__do_cache_op(unsigned long start, unsigned long end)
|
||||
{
|
||||
int ret;
|
||||
unsigned long chunk = PAGE_SIZE;
|
||||
|
||||
do {
|
||||
unsigned long chunk = min(PAGE_SIZE, end - start);
|
||||
|
||||
if (signal_pending(current)) {
|
||||
struct thread_info *ti = current_thread_info();
|
||||
|
||||
|
@ -101,13 +101,51 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = {
|
||||
{ "dss_hdmi", "omapdss_hdmi", -1 },
|
||||
};
|
||||
|
||||
static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
|
||||
{
|
||||
u32 enable_mask, enable_shift;
|
||||
u32 pipd_mask, pipd_shift;
|
||||
u32 reg;
|
||||
|
||||
if (dsi_id == 0) {
|
||||
enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
|
||||
enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
|
||||
pipd_mask = OMAP4_DSI1_PIPD_MASK;
|
||||
pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
|
||||
} else if (dsi_id == 1) {
|
||||
enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
|
||||
enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
|
||||
pipd_mask = OMAP4_DSI2_PIPD_MASK;
|
||||
pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
|
||||
} else {
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
|
||||
|
||||
reg &= ~enable_mask;
|
||||
reg &= ~pipd_mask;
|
||||
|
||||
reg |= (lanes << enable_shift) & enable_mask;
|
||||
reg |= (lanes << pipd_shift) & pipd_mask;
|
||||
|
||||
omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
|
||||
{
|
||||
if (cpu_is_omap44xx())
|
||||
return omap4_dsi_mux_pads(dsi_id, lane_mask);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
|
||||
{
|
||||
if (cpu_is_omap44xx())
|
||||
omap4_dsi_mux_pads(dsi_id, 0);
|
||||
}
|
||||
|
||||
static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput)
|
||||
|
@ -158,13 +158,49 @@ struct dma_map_ops arm_coherent_dma_ops = {
|
||||
};
|
||||
EXPORT_SYMBOL(arm_coherent_dma_ops);
|
||||
|
||||
static int __dma_supported(struct device *dev, u64 mask, bool warn)
|
||||
{
|
||||
unsigned long max_dma_pfn;
|
||||
|
||||
/*
|
||||
* If the mask allows for more memory than we can address,
|
||||
* and we actually have that much memory, then we must
|
||||
* indicate that DMA to this device is not supported.
|
||||
*/
|
||||
if (sizeof(mask) != sizeof(dma_addr_t) &&
|
||||
mask > (dma_addr_t)~0 &&
|
||||
dma_to_pfn(dev, ~0) < max_pfn) {
|
||||
if (warn) {
|
||||
dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
|
||||
mask);
|
||||
dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
|
||||
|
||||
/*
|
||||
* Translate the device's DMA mask to a PFN limit. This
|
||||
* PFN number includes the page which we can DMA to.
|
||||
*/
|
||||
if (dma_to_pfn(dev, mask) < max_dma_pfn) {
|
||||
if (warn)
|
||||
dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
|
||||
mask,
|
||||
dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
|
||||
max_dma_pfn + 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static u64 get_coherent_dma_mask(struct device *dev)
|
||||
{
|
||||
u64 mask = (u64)DMA_BIT_MASK(32);
|
||||
|
||||
if (dev) {
|
||||
unsigned long max_dma_pfn;
|
||||
|
||||
mask = dev->coherent_dma_mask;
|
||||
|
||||
/*
|
||||
@ -176,34 +212,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
|
||||
|
||||
/*
|
||||
* If the mask allows for more memory than we can address,
|
||||
* and we actually have that much memory, then fail the
|
||||
* allocation.
|
||||
*/
|
||||
if (sizeof(mask) != sizeof(dma_addr_t) &&
|
||||
mask > (dma_addr_t)~0 &&
|
||||
dma_to_pfn(dev, ~0) > max_dma_pfn) {
|
||||
dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
|
||||
mask);
|
||||
dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
|
||||
if (!__dma_supported(dev, mask, true))
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Now check that the mask, when translated to a PFN,
|
||||
* fits within the allowable addresses which we can
|
||||
* allocate.
|
||||
*/
|
||||
if (dma_to_pfn(dev, mask) < max_dma_pfn) {
|
||||
dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
|
||||
mask,
|
||||
dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
|
||||
arm_dma_pfn_limit + 1);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
return mask;
|
||||
@ -1032,28 +1042,7 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
|
||||
*/
|
||||
int dma_supported(struct device *dev, u64 mask)
|
||||
{
|
||||
unsigned long limit;
|
||||
|
||||
/*
|
||||
* If the mask allows for more memory than we can address,
|
||||
* and we actually have that much memory, then we must
|
||||
* indicate that DMA to this device is not supported.
|
||||
*/
|
||||
if (sizeof(mask) != sizeof(dma_addr_t) &&
|
||||
mask > (dma_addr_t)~0 &&
|
||||
dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Translate the device's DMA mask to a PFN limit. This
|
||||
* PFN number includes the page which we can DMA to.
|
||||
*/
|
||||
limit = dma_to_pfn(dev, mask);
|
||||
|
||||
if (limit < arm_dma_pfn_limit)
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
return __dma_supported(dev, mask, false);
|
||||
}
|
||||
EXPORT_SYMBOL(dma_supported);
|
||||
|
||||
|
@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
|
||||
#ifdef CONFIG_ZONE_DMA
|
||||
if (mdesc->dma_zone_size) {
|
||||
arm_dma_zone_size = mdesc->dma_zone_size;
|
||||
arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
|
||||
arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1;
|
||||
} else
|
||||
arm_dma_limit = 0xffffffff;
|
||||
arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
|
||||
|
@ -159,8 +159,7 @@ config NR_CPUS
|
||||
range 2 32
|
||||
depends on SMP
|
||||
# These have to remain sorted largest to smallest
|
||||
default "8" if ARCH_XGENE
|
||||
default "4"
|
||||
default "8"
|
||||
|
||||
config HOTPLUG_CPU
|
||||
bool "Support for hot-pluggable CPUs"
|
||||
|
@ -229,7 +229,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
|
||||
extern void __iounmap(volatile void __iomem *addr);
|
||||
extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
|
||||
|
||||
#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
|
||||
#define PROT_DEFAULT (pgprot_default | PTE_DIRTY)
|
||||
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
|
||||
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
|
||||
#define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
|
||||
|
@ -43,7 +43,7 @@
|
||||
* Section
|
||||
*/
|
||||
#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
|
||||
#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 2)
|
||||
#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58)
|
||||
#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
|
||||
#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
|
||||
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
|
||||
|
@ -282,8 +282,9 @@ ENDPROC(secondary_holding_pen)
|
||||
* be used where CPUs are brought online dynamically by the kernel.
|
||||
*/
|
||||
ENTRY(secondary_entry)
|
||||
bl __calc_phys_offset // x2=phys offset
|
||||
bl el2_setup // Drop to EL1
|
||||
bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
|
||||
bl set_cpu_boot_mode_flag
|
||||
b secondary_startup
|
||||
ENDPROC(secondary_entry)
|
||||
|
||||
|
@ -111,12 +111,12 @@ ENTRY(__cpu_setup)
|
||||
bl __flush_dcache_all
|
||||
mov lr, x28
|
||||
ic iallu // I+BTB cache invalidate
|
||||
tlbi vmalle1is // invalidate I + D TLBs
|
||||
dsb sy
|
||||
|
||||
mov x0, #3 << 20
|
||||
msr cpacr_el1, x0 // Enable FP/ASIMD
|
||||
msr mdscr_el1, xzr // Reset mdscr_el1
|
||||
tlbi vmalle1is // invalidate I + D TLBs
|
||||
/*
|
||||
* Memory region attributes for LPAE:
|
||||
*
|
||||
|
@ -298,8 +298,10 @@ static int __init set_abdac_rate(struct platform_device *pdev)
|
||||
*/
|
||||
retval = clk_round_rate(pll1,
|
||||
CONFIG_BOARD_FAVR32_ABDAC_RATE * 256 * 16);
|
||||
if (retval < 0)
|
||||
if (retval <= 0) {
|
||||
retval = -EINVAL;
|
||||
goto out_abdac;
|
||||
}
|
||||
|
||||
retval = clk_set_rate(pll1, retval);
|
||||
if (retval != 0)
|
||||
|
@ -59,7 +59,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
# CONFIG_FW_LOADER is not set
|
||||
CONFIG_MTD=y
|
||||
CONFIG_MTD_PARTITIONS=y
|
||||
CONFIG_MTD_CMDLINE_PARTS=y
|
||||
CONFIG_MTD_CHAR=y
|
||||
CONFIG_MTD_BLOCK=y
|
||||
|
@ -61,7 +61,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
# CONFIG_FW_LOADER is not set
|
||||
CONFIG_MTD=y
|
||||
CONFIG_MTD_PARTITIONS=y
|
||||
CONFIG_MTD_CMDLINE_PARTS=y
|
||||
CONFIG_MTD_CHAR=y
|
||||
CONFIG_MTD_BLOCK=y
|
||||
|
@ -60,7 +60,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
# CONFIG_FW_LOADER is not set
|
||||
CONFIG_MTD=y
|
||||
CONFIG_MTD_PARTITIONS=y
|
||||
CONFIG_MTD_CMDLINE_PARTS=y
|
||||
CONFIG_MTD_CHAR=y
|
||||
CONFIG_MTD_BLOCK=y
|
||||
|
@ -48,7 +48,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
# CONFIG_FW_LOADER is not set
|
||||
CONFIG_MTD=y
|
||||
CONFIG_MTD_PARTITIONS=y
|
||||
CONFIG_MTD_CMDLINE_PARTS=y
|
||||
CONFIG_MTD_CHAR=y
|
||||
CONFIG_MTD_BLOCK=y
|
||||
|
@ -59,7 +59,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
# CONFIG_FW_LOADER is not set
|
||||
CONFIG_MTD=y
|
||||
CONFIG_MTD_PARTITIONS=y
|
||||
CONFIG_MTD_CMDLINE_PARTS=y
|
||||
CONFIG_MTD_CHAR=y
|
||||
CONFIG_MTD_BLOCK=y
|
||||
|
@ -62,7 +62,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
# CONFIG_FW_LOADER is not set
|
||||
CONFIG_MTD=y
|
||||
CONFIG_MTD_PARTITIONS=y
|
||||
CONFIG_MTD_CMDLINE_PARTS=y
|
||||
CONFIG_MTD_CHAR=y
|
||||
CONFIG_MTD_BLOCK=y
|
||||
|
@ -61,7 +61,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
# CONFIG_FW_LOADER is not set
|
||||
CONFIG_MTD=y
|
||||
CONFIG_MTD_PARTITIONS=y
|
||||
CONFIG_MTD_CMDLINE_PARTS=y
|
||||
CONFIG_MTD_CHAR=y
|
||||
CONFIG_MTD_BLOCK=y
|
||||
|
@ -53,7 +53,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
# CONFIG_FW_LOADER is not set
|
||||
CONFIG_MTD=y
|
||||
CONFIG_MTD_PARTITIONS=y
|
||||
CONFIG_MTD_CMDLINE_PARTS=y
|
||||
CONFIG_MTD_CHAR=y
|
||||
CONFIG_MTD_BLOCK=y
|
||||
|
@ -42,7 +42,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
# CONFIG_FW_LOADER is not set
|
||||
CONFIG_MTD=y
|
||||
CONFIG_MTD_PARTITIONS=y
|
||||
CONFIG_MTD_CMDLINE_PARTS=y
|
||||
CONFIG_MTD_CHAR=y
|
||||
CONFIG_MTD_BLOCK=y
|
||||
|
@ -42,7 +42,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
# CONFIG_FW_LOADER is not set
|
||||
CONFIG_MTD=y
|
||||
CONFIG_MTD_PARTITIONS=y
|
||||
CONFIG_MTD_CMDLINE_PARTS=y
|
||||
CONFIG_MTD_CHAR=y
|
||||
CONFIG_MTD_BLOCK=y
|
||||
|
@ -54,7 +54,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
# CONFIG_FW_LOADER is not set
|
||||
CONFIG_MTD=y
|
||||
CONFIG_MTD_PARTITIONS=y
|
||||
CONFIG_MTD_CMDLINE_PARTS=y
|
||||
CONFIG_MTD_CHAR=y
|
||||
CONFIG_MTD_BLOCK=y
|
||||
|
@ -58,7 +58,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
# CONFIG_FW_LOADER is not set
|
||||
CONFIG_MTD=y
|
||||
CONFIG_MTD_PARTITIONS=y
|
||||
CONFIG_MTD_CMDLINE_PARTS=y
|
||||
CONFIG_MTD_CHAR=y
|
||||
CONFIG_MTD_BLOCK=y
|
||||
|
@ -58,7 +58,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
# CONFIG_FW_LOADER is not set
|
||||
CONFIG_MTD=y
|
||||
CONFIG_MTD_PARTITIONS=y
|
||||
CONFIG_MTD_CMDLINE_PARTS=y
|
||||
CONFIG_MTD_CHAR=y
|
||||
CONFIG_MTD_BLOCK=y
|
||||
|
@ -46,7 +46,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
# CONFIG_FW_LOADER is not set
|
||||
CONFIG_MTD=y
|
||||
CONFIG_MTD_CONCAT=y
|
||||
CONFIG_MTD_PARTITIONS=y
|
||||
CONFIG_MTD_CHAR=y
|
||||
CONFIG_MTD_BLOCK=y
|
||||
CONFIG_MTD_CFI=y
|
||||
|
@ -49,7 +49,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
# CONFIG_FW_LOADER is not set
|
||||
CONFIG_MTD=y
|
||||
CONFIG_MTD_PARTITIONS=y
|
||||
CONFIG_MTD_CMDLINE_PARTS=y
|
||||
CONFIG_MTD_CHAR=y
|
||||
CONFIG_MTD_BLOCK=y
|
||||
|
@ -59,7 +59,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
|
||||
static struct irqaction timer_irqaction = {
|
||||
.handler = timer_interrupt,
|
||||
/* Oprofile uses the same irq as the timer, so allow it to be shared */
|
||||
.flags = IRQF_TIMER | IRQF_DISABLED | IRQF_SHARED,
|
||||
.flags = IRQF_TIMER | IRQF_SHARED,
|
||||
.name = "avr32_comparator",
|
||||
};
|
||||
|
||||
|
@ -181,7 +181,7 @@ static const struct platform_suspend_ops avr32_pm_ops = {
|
||||
.enter = avr32_pm_enter,
|
||||
};
|
||||
|
||||
static unsigned long avr32_pm_offset(void *symbol)
|
||||
static unsigned long __init avr32_pm_offset(void *symbol)
|
||||
{
|
||||
extern u8 pm_exception[];
|
||||
|
||||
|
@ -77,7 +77,6 @@
|
||||
compatible = "fsl,mpc5121-immr";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
#interrupt-cells = <2>;
|
||||
ranges = <0x0 0x80000000 0x400000>;
|
||||
reg = <0x80000000 0x400000>;
|
||||
bus-frequency = <66000000>; /* 66 MHz ips bus */
|
||||
|
@ -12,7 +12,6 @@ CONFIG_EXPERT=y
|
||||
CONFIG_PPC_MPC52xx=y
|
||||
CONFIG_PPC_MPC5200_SIMPLE=y
|
||||
# CONFIG_PPC_PMAC is not set
|
||||
CONFIG_PPC_BESTCOMM=y
|
||||
CONFIG_SPARSE_IRQ=y
|
||||
CONFIG_PM=y
|
||||
# CONFIG_PCI is not set
|
||||
@ -71,6 +70,8 @@ CONFIG_USB_DEVICEFS=y
|
||||
CONFIG_USB_OHCI_HCD=y
|
||||
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
|
||||
CONFIG_USB_STORAGE=y
|
||||
CONFIG_DMADEVICES=y
|
||||
CONFIG_PPC_BESTCOMM=y
|
||||
CONFIG_EXT2_FS=y
|
||||
CONFIG_EXT3_FS=y
|
||||
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
|
||||
|
@ -15,7 +15,6 @@ CONFIG_PPC_MPC52xx=y
|
||||
CONFIG_PPC_MPC5200_SIMPLE=y
|
||||
CONFIG_PPC_LITE5200=y
|
||||
# CONFIG_PPC_PMAC is not set
|
||||
CONFIG_PPC_BESTCOMM=y
|
||||
CONFIG_NO_HZ=y
|
||||
CONFIG_HIGH_RES_TIMERS=y
|
||||
CONFIG_SPARSE_IRQ=y
|
||||
@ -59,6 +58,8 @@ CONFIG_I2C_CHARDEV=y
|
||||
CONFIG_I2C_MPC=y
|
||||
# CONFIG_HWMON is not set
|
||||
CONFIG_VIDEO_OUTPUT_CONTROL=m
|
||||
CONFIG_DMADEVICES=y
|
||||
CONFIG_PPC_BESTCOMM=y
|
||||
CONFIG_EXT2_FS=y
|
||||
CONFIG_EXT3_FS=y
|
||||
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
|
||||
|
@ -12,7 +12,6 @@ CONFIG_EXPERT=y
|
||||
CONFIG_PPC_MPC52xx=y
|
||||
CONFIG_PPC_MPC5200_SIMPLE=y
|
||||
# CONFIG_PPC_PMAC is not set
|
||||
CONFIG_PPC_BESTCOMM=y
|
||||
CONFIG_SPARSE_IRQ=y
|
||||
CONFIG_PM=y
|
||||
# CONFIG_PCI is not set
|
||||
@ -84,6 +83,8 @@ CONFIG_LEDS_TRIGGERS=y
|
||||
CONFIG_LEDS_TRIGGER_TIMER=y
|
||||
CONFIG_RTC_CLASS=y
|
||||
CONFIG_RTC_DRV_DS1307=y
|
||||
CONFIG_DMADEVICES=y
|
||||
CONFIG_PPC_BESTCOMM=y
|
||||
CONFIG_EXT2_FS=y
|
||||
CONFIG_EXT3_FS=y
|
||||
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
|
||||
|
@ -21,7 +21,6 @@ CONFIG_MODULE_UNLOAD=y
|
||||
CONFIG_PPC_MPC52xx=y
|
||||
CONFIG_PPC_MPC5200_SIMPLE=y
|
||||
# CONFIG_PPC_PMAC is not set
|
||||
CONFIG_PPC_BESTCOMM=y
|
||||
CONFIG_NO_HZ=y
|
||||
CONFIG_HIGH_RES_TIMERS=y
|
||||
CONFIG_HZ_100=y
|
||||
@ -87,6 +86,8 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
|
||||
CONFIG_USB_STORAGE=m
|
||||
CONFIG_RTC_CLASS=y
|
||||
CONFIG_RTC_DRV_PCF8563=m
|
||||
CONFIG_DMADEVICES=y
|
||||
CONFIG_PPC_BESTCOMM=y
|
||||
CONFIG_EXT2_FS=m
|
||||
CONFIG_EXT3_FS=m
|
||||
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
|
||||
|
@ -17,7 +17,6 @@ CONFIG_PPC_MPC52xx=y
|
||||
CONFIG_PPC_MPC5200_SIMPLE=y
|
||||
CONFIG_PPC_MPC5200_BUGFIX=y
|
||||
# CONFIG_PPC_PMAC is not set
|
||||
CONFIG_PPC_BESTCOMM=y
|
||||
CONFIG_PM=y
|
||||
# CONFIG_PCI is not set
|
||||
CONFIG_NET=y
|
||||
@ -86,6 +85,8 @@ CONFIG_USB_STORAGE=y
|
||||
CONFIG_RTC_CLASS=y
|
||||
CONFIG_RTC_DRV_DS1307=y
|
||||
CONFIG_RTC_DRV_DS1374=y
|
||||
CONFIG_DMADEVICES=y
|
||||
CONFIG_PPC_BESTCOMM=y
|
||||
CONFIG_EXT2_FS=y
|
||||
CONFIG_EXT3_FS=y
|
||||
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
|
||||
|
@ -15,7 +15,6 @@ CONFIG_PPC_MEDIA5200=y
|
||||
CONFIG_PPC_MPC5200_BUGFIX=y
|
||||
CONFIG_PPC_MPC5200_LPBFIFO=m
|
||||
# CONFIG_PPC_PMAC is not set
|
||||
CONFIG_PPC_BESTCOMM=y
|
||||
CONFIG_SIMPLE_GPIO=y
|
||||
CONFIG_NO_HZ=y
|
||||
CONFIG_HIGH_RES_TIMERS=y
|
||||
@ -125,6 +124,8 @@ CONFIG_RTC_CLASS=y
|
||||
CONFIG_RTC_DRV_DS1307=y
|
||||
CONFIG_RTC_DRV_DS1374=y
|
||||
CONFIG_RTC_DRV_PCF8563=m
|
||||
CONFIG_DMADEVICES=y
|
||||
CONFIG_PPC_BESTCOMM=y
|
||||
CONFIG_EXT2_FS=y
|
||||
CONFIG_EXT3_FS=y
|
||||
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
|
||||
|
@ -2,7 +2,6 @@ CONFIG_PPC64=y
|
||||
CONFIG_ALTIVEC=y
|
||||
CONFIG_SMP=y
|
||||
CONFIG_NR_CPUS=2
|
||||
CONFIG_EXPERIMENTAL=y
|
||||
CONFIG_SYSVIPC=y
|
||||
CONFIG_NO_HZ=y
|
||||
CONFIG_HIGH_RES_TIMERS=y
|
||||
@ -45,8 +44,9 @@ CONFIG_INET_AH=y
|
||||
CONFIG_INET_ESP=y
|
||||
# CONFIG_IPV6 is not set
|
||||
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
CONFIG_DEVTMPFS=y
|
||||
CONFIG_DEVTMPFS_MOUNT=y
|
||||
CONFIG_MTD=y
|
||||
CONFIG_MTD_CHAR=y
|
||||
CONFIG_MTD_BLOCK=y
|
||||
CONFIG_MTD_SLRAM=y
|
||||
CONFIG_MTD_PHRAM=y
|
||||
@ -88,7 +88,6 @@ CONFIG_BLK_DEV_DM=y
|
||||
CONFIG_DM_CRYPT=y
|
||||
CONFIG_NETDEVICES=y
|
||||
CONFIG_DUMMY=y
|
||||
CONFIG_MII=y
|
||||
CONFIG_TIGON3=y
|
||||
CONFIG_E1000=y
|
||||
CONFIG_PASEMI_MAC=y
|
||||
@ -174,8 +173,8 @@ CONFIG_NLS_CODEPAGE_437=y
|
||||
CONFIG_NLS_ISO8859_1=y
|
||||
CONFIG_CRC_CCITT=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_DEBUG_FS=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
CONFIG_DETECT_HUNG_TASK=y
|
||||
# CONFIG_SCHED_DEBUG is not set
|
||||
|
@ -84,10 +84,8 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
|
||||
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
|
||||
unsigned long address)
|
||||
{
|
||||
struct page *page = page_address(table);
|
||||
|
||||
tlb_flush_pgtable(tlb, address);
|
||||
pgtable_page_dtor(page);
|
||||
pgtable_free_tlb(tlb, page, 0);
|
||||
pgtable_page_dtor(table);
|
||||
pgtable_free_tlb(tlb, page_address(table), 0);
|
||||
}
|
||||
#endif /* _ASM_POWERPC_PGALLOC_32_H */
|
||||
|
@ -148,11 +148,9 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
|
||||
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
|
||||
unsigned long address)
|
||||
{
|
||||
struct page *page = page_address(table);
|
||||
|
||||
tlb_flush_pgtable(tlb, address);
|
||||
pgtable_page_dtor(page);
|
||||
pgtable_free_tlb(tlb, page, 0);
|
||||
pgtable_page_dtor(table);
|
||||
pgtable_free_tlb(tlb, page_address(table), 0);
|
||||
}
|
||||
|
||||
#else /* if CONFIG_PPC_64K_PAGES */
|
||||
|
@ -148,7 +148,7 @@ void __init reserve_crashkernel(void)
|
||||
* a small SLB (128MB) since the crash kernel needs to place
|
||||
* itself and some stacks to be in the first segment.
|
||||
*/
|
||||
crashk_res.start = min(0x80000000ULL, (ppc64_rma_size / 2));
|
||||
crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2));
|
||||
#else
|
||||
crashk_res.start = KDUMP_KERNELBASE;
|
||||
#endif
|
||||
|
@ -246,8 +246,8 @@ _GLOBAL(__bswapdi2)
|
||||
or r3,r7,r9
|
||||
blr
|
||||
|
||||
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
|
||||
|
||||
#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
|
||||
_GLOBAL(rmci_on)
|
||||
sync
|
||||
isync
|
||||
@ -277,6 +277,9 @@ _GLOBAL(rmci_off)
|
||||
isync
|
||||
sync
|
||||
blr
|
||||
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
|
||||
|
||||
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
|
||||
|
||||
/*
|
||||
* Do an IO access in real mode
|
||||
|
@ -720,6 +720,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
|
||||
tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
|
||||
}
|
||||
iommu_init_table(tbl, phb->hose->node);
|
||||
iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number);
|
||||
|
||||
if (pe->pdev)
|
||||
set_iommu_table_base(&pe->pdev->dev, tbl);
|
||||
|
@ -339,7 +339,7 @@ void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align,
|
||||
if (IS_ERR_VALUE(offset))
|
||||
continue;
|
||||
|
||||
ocm_blk = kzalloc(sizeof(struct ocm_block *), GFP_KERNEL);
|
||||
ocm_blk = kzalloc(sizeof(struct ocm_block), GFP_KERNEL);
|
||||
if (!ocm_blk) {
|
||||
printk(KERN_ERR "PPC4XX OCM: could not allocate ocm block");
|
||||
rh_free(ocm_reg->rh, offset);
|
||||
|
@ -347,14 +347,14 @@ config SMP
|
||||
Even if you don't know what to do here, say Y.
|
||||
|
||||
config NR_CPUS
|
||||
int "Maximum number of CPUs (2-64)"
|
||||
range 2 64
|
||||
int "Maximum number of CPUs (2-256)"
|
||||
range 2 256
|
||||
depends on SMP
|
||||
default "32" if !64BIT
|
||||
default "64" if 64BIT
|
||||
help
|
||||
This allows you to specify the maximum number of CPUs which this
|
||||
kernel will support. The maximum supported value is 64 and the
|
||||
kernel will support. The maximum supported value is 256 and the
|
||||
minimum value which makes sense is 2.
|
||||
|
||||
This is purely to save memory - each supported CPU adds
|
||||
|
@ -8,6 +8,7 @@
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <asm/chpid.h>
|
||||
#include <asm/cpu.h>
|
||||
|
||||
#define SCLP_CHP_INFO_MASK_SIZE 32
|
||||
|
||||
@ -37,7 +38,7 @@ struct sclp_cpu_info {
|
||||
unsigned int standby;
|
||||
unsigned int combined;
|
||||
int has_cpu_type;
|
||||
struct sclp_cpu_entry cpu[255];
|
||||
struct sclp_cpu_entry cpu[MAX_CPU_ADDRESS + 1];
|
||||
};
|
||||
|
||||
int sclp_get_cpu_info(struct sclp_cpu_info *info);
|
||||
|
@ -72,6 +72,7 @@ int main(void)
|
||||
/* constants used by the vdso */
|
||||
DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
|
||||
DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
|
||||
DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
|
||||
DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
|
||||
BLANK();
|
||||
/* idle data offsets */
|
||||
|
@ -125,7 +125,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
|
||||
psal[i] = 0x80000000;
|
||||
|
||||
lowcore->paste[4] = (u32)(addr_t) psal;
|
||||
psal[0] = 0x20000000;
|
||||
psal[0] = 0x02000000;
|
||||
psal[2] = (u32)(addr_t) aste;
|
||||
*(unsigned long *) (aste + 2) = segment_table +
|
||||
_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
|
||||
|
@ -46,18 +46,13 @@ __kernel_clock_gettime:
|
||||
jnm 3f
|
||||
a %r0,__VDSO_TK_MULT(%r5)
|
||||
3: alr %r0,%r2
|
||||
al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
|
||||
al %r1,__VDSO_XTIME_NSEC+4(%r5)
|
||||
brc 12,4f
|
||||
ahi %r0,1
|
||||
4: al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */
|
||||
al %r0,__VDSO_WTOM_NSEC(%r5)
|
||||
al %r1,__VDSO_WTOM_NSEC+4(%r5)
|
||||
brc 12,5f
|
||||
ahi %r0,1
|
||||
5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
|
||||
srdl %r0,0(%r2) /* >> tk->shift */
|
||||
l %r2,__VDSO_XTIME_SEC+4(%r5)
|
||||
al %r2,__VDSO_WTOM_SEC+4(%r5)
|
||||
l %r2,__VDSO_WTOM_SEC+4(%r5)
|
||||
cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
|
||||
jne 1b
|
||||
basr %r5,0
|
||||
|
@ -23,7 +23,9 @@ __kernel_clock_getres:
|
||||
je 0f
|
||||
cghi %r2,__CLOCK_MONOTONIC
|
||||
je 0f
|
||||
cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
|
||||
cghi %r2,__CLOCK_THREAD_CPUTIME_ID
|
||||
je 0f
|
||||
cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
|
||||
jne 2f
|
||||
larl %r5,_vdso_data
|
||||
icm %r0,15,__LC_ECTG_OK(%r5)
|
||||
|
@ -22,7 +22,9 @@ __kernel_clock_gettime:
|
||||
larl %r5,_vdso_data
|
||||
cghi %r2,__CLOCK_REALTIME
|
||||
je 4f
|
||||
cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
|
||||
cghi %r2,__CLOCK_THREAD_CPUTIME_ID
|
||||
je 9f
|
||||
cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
|
||||
je 9f
|
||||
cghi %r2,__CLOCK_MONOTONIC
|
||||
jne 12f
|
||||
@ -35,13 +37,11 @@ __kernel_clock_gettime:
|
||||
jnz 0b
|
||||
stck 48(%r15) /* Store TOD clock */
|
||||
lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
|
||||
lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
|
||||
alg %r0,__VDSO_WTOM_SEC(%r5) /* + wall_to_monotonic.sec */
|
||||
lg %r0,__VDSO_WTOM_SEC(%r5)
|
||||
lg %r1,48(%r15)
|
||||
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
|
||||
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
|
||||
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
|
||||
alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */
|
||||
alg %r1,__VDSO_WTOM_NSEC(%r5)
|
||||
srlg %r1,%r1,0(%r2) /* >> tk->shift */
|
||||
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
|
||||
jne 0b
|
||||
|
@ -31,8 +31,8 @@ ifeq ($(CONFIG_X86_32),y)
|
||||
|
||||
KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
|
||||
|
||||
# Don't autogenerate SSE instructions
|
||||
KBUILD_CFLAGS += -mno-sse
|
||||
# Don't autogenerate MMX or SSE instructions
|
||||
KBUILD_CFLAGS += -mno-mmx -mno-sse
|
||||
|
||||
# Never want PIC in a 32-bit kernel, prevent breakage with GCC built
|
||||
# with nonstandard options
|
||||
@ -60,8 +60,8 @@ else
|
||||
KBUILD_AFLAGS += -m64
|
||||
KBUILD_CFLAGS += -m64
|
||||
|
||||
# Don't autogenerate SSE instructions
|
||||
KBUILD_CFLAGS += -mno-sse
|
||||
# Don't autogenerate MMX or SSE instructions
|
||||
KBUILD_CFLAGS += -mno-mmx -mno-sse
|
||||
|
||||
# Use -mpreferred-stack-boundary=3 if supported.
|
||||
KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
|
||||
|
@ -53,18 +53,18 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
|
||||
|
||||
# How to compile the 16-bit code. Note we always compile for -march=i386,
|
||||
# that way we can complain to the user if the CPU is insufficient.
|
||||
KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
|
||||
KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
|
||||
-DDISABLE_BRANCH_PROFILING \
|
||||
-Wall -Wstrict-prototypes \
|
||||
-march=i386 -mregparm=3 \
|
||||
-include $(srctree)/$(src)/code16gcc.h \
|
||||
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
|
||||
-mno-mmx -mno-sse \
|
||||
$(call cc-option, -ffreestanding) \
|
||||
$(call cc-option, -fno-toplevel-reorder,\
|
||||
$(call cc-option, -fno-unit-at-a-time)) \
|
||||
$(call cc-option, -fno-unit-at-a-time)) \
|
||||
$(call cc-option, -fno-stack-protector) \
|
||||
$(call cc-option, -mpreferred-stack-boundary=2)
|
||||
KBUILD_CFLAGS += $(call cc-option, -m32)
|
||||
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
|
||||
GCOV_PROFILE := n
|
||||
|
||||
|
@ -13,6 +13,7 @@ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
|
||||
cflags-$(CONFIG_X86_32) := -march=i386
|
||||
cflags-$(CONFIG_X86_64) := -mcmodel=small
|
||||
KBUILD_CFLAGS += $(cflags-y)
|
||||
KBUILD_CFLAGS += -mno-mmx -mno-sse
|
||||
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
|
||||
KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
|
||||
|
||||
|
@ -143,6 +143,8 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
|
||||
return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
|
||||
}
|
||||
|
||||
#define KVM_X2APIC_CID_BITS 0
|
||||
|
||||
static void recalculate_apic_map(struct kvm *kvm)
|
||||
{
|
||||
struct kvm_apic_map *new, *old = NULL;
|
||||
@ -180,7 +182,8 @@ static void recalculate_apic_map(struct kvm *kvm)
|
||||
if (apic_x2apic_mode(apic)) {
|
||||
new->ldr_bits = 32;
|
||||
new->cid_shift = 16;
|
||||
new->cid_mask = new->lid_mask = 0xffff;
|
||||
new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1;
|
||||
new->lid_mask = 0xffff;
|
||||
} else if (kvm_apic_sw_enabled(apic) &&
|
||||
!new->cid_mask /* flat mode */ &&
|
||||
kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
|
||||
@ -841,7 +844,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
|
||||
ASSERT(apic != NULL);
|
||||
|
||||
/* if initial count is 0, current count should also be 0 */
|
||||
if (kvm_apic_get_reg(apic, APIC_TMICT) == 0)
|
||||
if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
|
||||
apic->lapic_timer.period == 0)
|
||||
return 0;
|
||||
|
||||
remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
|
||||
@ -1691,7 +1695,6 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
|
||||
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 data;
|
||||
void *vapic;
|
||||
|
||||
if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
|
||||
apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
|
||||
@ -1699,9 +1702,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
|
||||
if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
|
||||
return;
|
||||
|
||||
vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
|
||||
data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
|
||||
kunmap_atomic(vapic);
|
||||
kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
|
||||
sizeof(u32));
|
||||
|
||||
apic_set_tpr(vcpu->arch.apic, data & 0xff);
|
||||
}
|
||||
@ -1737,7 +1739,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
|
||||
u32 data, tpr;
|
||||
int max_irr, max_isr;
|
||||
struct kvm_lapic *apic = vcpu->arch.apic;
|
||||
void *vapic;
|
||||
|
||||
apic_sync_pv_eoi_to_guest(vcpu, apic);
|
||||
|
||||
@ -1753,18 +1754,24 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
|
||||
max_isr = 0;
|
||||
data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
|
||||
|
||||
vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
|
||||
*(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
|
||||
kunmap_atomic(vapic);
|
||||
kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
|
||||
sizeof(u32));
|
||||
}
|
||||
|
||||
void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
|
||||
int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
|
||||
{
|
||||
vcpu->arch.apic->vapic_addr = vapic_addr;
|
||||
if (vapic_addr)
|
||||
if (vapic_addr) {
|
||||
if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
|
||||
&vcpu->arch.apic->vapic_cache,
|
||||
vapic_addr, sizeof(u32)))
|
||||
return -EINVAL;
|
||||
__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
|
||||
else
|
||||
} else {
|
||||
__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
|
||||
}
|
||||
|
||||
vcpu->arch.apic->vapic_addr = vapic_addr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
|
||||
|
@ -34,7 +34,7 @@ struct kvm_lapic {
|
||||
*/
|
||||
void *regs;
|
||||
gpa_t vapic_addr;
|
||||
struct page *vapic_page;
|
||||
struct gfn_to_hva_cache vapic_cache;
|
||||
unsigned long pending_events;
|
||||
unsigned int sipi_vector;
|
||||
};
|
||||
@ -76,7 +76,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
|
||||
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
|
||||
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);
|
||||
|
||||
void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
|
||||
int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
|
||||
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
|
||||
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
|
||||
|
||||
|
@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
|
||||
r = -EFAULT;
|
||||
if (copy_from_user(&va, argp, sizeof va))
|
||||
goto out;
|
||||
r = 0;
|
||||
kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
|
||||
r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
|
||||
break;
|
||||
}
|
||||
case KVM_X86_SETUP_MCE: {
|
||||
@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
|
||||
!kvm_event_needs_reinjection(vcpu);
|
||||
}
|
||||
|
||||
static int vapic_enter(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_lapic *apic = vcpu->arch.apic;
|
||||
struct page *page;
|
||||
|
||||
if (!apic || !apic->vapic_addr)
|
||||
return 0;
|
||||
|
||||
page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
|
||||
if (is_error_page(page))
|
||||
return -EFAULT;
|
||||
|
||||
vcpu->arch.apic->vapic_page = page;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vapic_exit(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_lapic *apic = vcpu->arch.apic;
|
||||
int idx;
|
||||
|
||||
if (!apic || !apic->vapic_addr)
|
||||
return;
|
||||
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
kvm_release_page_dirty(apic->vapic_page);
|
||||
mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
}
|
||||
|
||||
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int max_irr, tpr;
|
||||
@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
|
||||
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
|
||||
r = vapic_enter(vcpu);
|
||||
if (r) {
|
||||
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = 1;
|
||||
while (r > 0) {
|
||||
@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
|
||||
|
||||
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
|
||||
|
||||
vapic_exit(vcpu);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -690,13 +690,6 @@ void __init efi_init(void)
|
||||
|
||||
set_bit(EFI_MEMMAP, &x86_efi_facility);
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
if (efi_is_native()) {
|
||||
x86_platform.get_wallclock = efi_get_time;
|
||||
x86_platform.set_wallclock = efi_set_rtc_mmss;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if EFI_DEBUG
|
||||
print_efi_memmap();
|
||||
#endif
|
||||
|
@ -1070,12 +1070,13 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
|
||||
unsigned long status;
|
||||
|
||||
bcp = &per_cpu(bau_control, cpu);
|
||||
stat = bcp->statp;
|
||||
stat->s_enters++;
|
||||
|
||||
if (bcp->nobau)
|
||||
return cpumask;
|
||||
|
||||
stat = bcp->statp;
|
||||
stat->s_enters++;
|
||||
|
||||
if (bcp->busy) {
|
||||
descriptor_status =
|
||||
read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
|
||||
|
@ -73,9 +73,10 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
|
||||
-march=i386 -mregparm=3 \
|
||||
-include $(srctree)/$(src)/../../boot/code16gcc.h \
|
||||
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
|
||||
-mno-mmx -mno-sse \
|
||||
$(call cc-option, -ffreestanding) \
|
||||
$(call cc-option, -fno-toplevel-reorder,\
|
||||
$(call cc-option, -fno-unit-at-a-time)) \
|
||||
$(call cc-option, -fno-unit-at-a-time)) \
|
||||
$(call cc-option, -fno-stack-protector) \
|
||||
$(call cc-option, -mpreferred-stack-boundary=2)
|
||||
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
|
||||
|
@ -29,7 +29,6 @@
|
||||
#include <linux/async.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <trace/events/power.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/cpuidle.h>
|
||||
#include <linux/timer.h>
|
||||
|
||||
@ -541,7 +540,6 @@ static void dpm_resume_noirq(pm_message_t state)
|
||||
dpm_show_time(starttime, state, "noirq");
|
||||
resume_device_irqs();
|
||||
cpuidle_resume();
|
||||
cpufreq_resume();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -957,7 +955,6 @@ static int dpm_suspend_noirq(pm_message_t state)
|
||||
ktime_t starttime = ktime_get();
|
||||
int error = 0;
|
||||
|
||||
cpufreq_suspend();
|
||||
cpuidle_pause();
|
||||
suspend_device_irqs();
|
||||
mutex_lock(&dpm_list_mtx);
|
||||
|
@ -40,7 +40,7 @@ static int regmap_mmio_gather_write(void *context,
|
||||
|
||||
BUG_ON(reg_size != 4);
|
||||
|
||||
if (ctx->clk) {
|
||||
if (!IS_ERR(ctx->clk)) {
|
||||
ret = clk_enable(ctx->clk);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
@ -73,7 +73,7 @@ static int regmap_mmio_gather_write(void *context,
|
||||
offset += ctx->val_bytes;
|
||||
}
|
||||
|
||||
if (ctx->clk)
|
||||
if (!IS_ERR(ctx->clk))
|
||||
clk_disable(ctx->clk);
|
||||
|
||||
return 0;
|
||||
@ -96,7 +96,7 @@ static int regmap_mmio_read(void *context,
|
||||
|
||||
BUG_ON(reg_size != 4);
|
||||
|
||||
if (ctx->clk) {
|
||||
if (!IS_ERR(ctx->clk)) {
|
||||
ret = clk_enable(ctx->clk);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
@ -129,7 +129,7 @@ static int regmap_mmio_read(void *context,
|
||||
offset += ctx->val_bytes;
|
||||
}
|
||||
|
||||
if (ctx->clk)
|
||||
if (!IS_ERR(ctx->clk))
|
||||
clk_disable(ctx->clk);
|
||||
|
||||
return 0;
|
||||
@ -139,7 +139,7 @@ static void regmap_mmio_free_context(void *context)
|
||||
{
|
||||
struct regmap_mmio_context *ctx = context;
|
||||
|
||||
if (ctx->clk) {
|
||||
if (!IS_ERR(ctx->clk)) {
|
||||
clk_unprepare(ctx->clk);
|
||||
clk_put(ctx->clk);
|
||||
}
|
||||
@ -209,6 +209,7 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
|
||||
|
||||
ctx->regs = regs;
|
||||
ctx->val_bytes = config->val_bits / 8;
|
||||
ctx->clk = ERR_PTR(-ENODEV);
|
||||
|
||||
if (clk_id == NULL)
|
||||
return ctx;
|
||||
|
@@ -1549,7 +1549,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
val + (i * val_bytes),
val_bytes);
if (ret != 0)
return ret;
goto out;
}
} else {
ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);

@@ -1743,7 +1743,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
/**
* regmap_read(): Read a value from a single register
*
* @map: Register map to write to
* @map: Register map to read from
* @reg: Register to be read from
* @val: Pointer to store read value
*

@@ -1770,7 +1770,7 @@ EXPORT_SYMBOL_GPL(regmap_read);
/**
* regmap_raw_read(): Read raw data from the device
*
* @map: Register map to write to
* @map: Register map to read from
* @reg: First register to be read from
* @val: Pointer to store read value
* @val_len: Size of data to read

@@ -1882,7 +1882,7 @@ EXPORT_SYMBOL_GPL(regmap_fields_read);
/**
* regmap_bulk_read(): Read multiple registers from the device
*
* @map: Register map to write to
* @map: Register map to read from
* @reg: First register to be read from
* @val: Pointer to store read value, in native register size for device
* @val_count: Number of registers to read
@@ -495,23 +495,23 @@ static int null_add_dev(void)

spin_lock_init(&nullb->lock);

if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
submit_queues = nr_online_nodes;

if (setup_queues(nullb))
goto err;

if (queue_mode == NULL_Q_MQ) {
null_mq_reg.numa_node = home_node;
null_mq_reg.queue_depth = hw_queue_depth;
null_mq_reg.nr_hw_queues = submit_queues;

if (use_per_node_hctx) {
null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
null_mq_reg.ops->free_hctx = null_free_hctx;

null_mq_reg.nr_hw_queues = nr_online_nodes;
} else {
null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;

null_mq_reg.nr_hw_queues = submit_queues;
}

nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);

@@ -664,6 +664,13 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro"),
},
},
{
.ident = "Dell XPS421",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"),
},
},
{ }
};
@@ -58,7 +58,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
return 0;
}

static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
{
unsigned int frequency, rate, min_freq;
int retval, steps, i;
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

@@ -48,9 +47,6 @@ static LIST_HEAD(cpufreq_policy_list);
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
return cpufreq_driver->target_index || cpufreq_driver->target;

@@ -1466,41 +1462,6 @@ static struct subsys_interface cpufreq_interface = {
.remove_dev = cpufreq_remove_dev,
};

void cpufreq_suspend(void)
{
struct cpufreq_policy *policy;

if (!has_target())
return;

pr_debug("%s: Suspending Governors\n", __func__);

list_for_each_entry(policy, &cpufreq_policy_list, policy_list)
if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
pr_err("%s: Failed to stop governor for policy: %p\n",
__func__, policy);

cpufreq_suspended = true;
}

void cpufreq_resume(void)
{
struct cpufreq_policy *policy;

if (!has_target())
return;

pr_debug("%s: Resuming Governors\n", __func__);

cpufreq_suspended = false;

list_for_each_entry(policy, &cpufreq_policy_list, policy_list)
if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
|| __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
pr_err("%s: Failed to start governor for policy: %p\n",
__func__, policy);
}

/**
* cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
*

@@ -1803,10 +1764,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
struct cpufreq_governor *gov = NULL;
#endif

/* Don't start any governor operations if we are entering suspend */
if (cpufreq_suspended)
return 0;

if (policy->governor->max_transition_latency &&
policy->cpuinfo.transition_latency >
policy->governor->max_transition_latency) {

@@ -2119,6 +2076,9 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
dev = get_cpu_device(cpu);
if (dev) {

if (action & CPU_TASKS_FROZEN)
frozen = true;

switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
__cpufreq_add_dev(dev, NULL, frozen);
@@ -1169,7 +1169,7 @@ static void pl08x_desc_free(struct virt_dma_desc *vd)
struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);

dma_descriptor_unmap(txd);
dma_descriptor_unmap(&vd->tx);
if (!txd->done)
pl08x_release_mux(plchan);

@@ -1017,6 +1017,7 @@ static int mmp_pdma_probe(struct platform_device *op)
}
}

platform_set_drvdata(op, pdev);
dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
return 0;
}
@@ -628,42 +628,13 @@ retry:
s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
}

static void s3c24xx_dma_unmap_buffers(struct s3c24xx_txd *txd)
{
struct device *dev = txd->vd.tx.chan->device->dev;
struct s3c24xx_sg *dsg;

if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
list_for_each_entry(dsg, &txd->dsg_list, node)
dma_unmap_single(dev, dsg->src_addr, dsg->len,
DMA_TO_DEVICE);
else {
list_for_each_entry(dsg, &txd->dsg_list, node)
dma_unmap_page(dev, dsg->src_addr, dsg->len,
DMA_TO_DEVICE);
}
}

if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
list_for_each_entry(dsg, &txd->dsg_list, node)
dma_unmap_single(dev, dsg->dst_addr, dsg->len,
DMA_FROM_DEVICE);
else
list_for_each_entry(dsg, &txd->dsg_list, node)
dma_unmap_page(dev, dsg->dst_addr, dsg->len,
DMA_FROM_DEVICE);
}
}

static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
{
struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);

if (!s3cchan->slave)
s3c24xx_dma_unmap_buffers(txd);
dma_descriptor_unmap(&vd->tx);

s3c24xx_dma_free_txd(txd);
}

@@ -795,7 +766,7 @@ static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,

spin_lock_irqsave(&s3cchan->vc.lock, flags);
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_SUCCESS) {
if (ret == DMA_COMPLETE) {
spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
return ret;
}
@@ -60,6 +60,7 @@
#define HPB_DMAE_DSTPR_DMSTP BIT(0)

/* DMA status register (DSTSR) bits */
#define HPB_DMAE_DSTSR_DQSTS BIT(2)
#define HPB_DMAE_DSTSR_DMSTS BIT(0)

/* DMA common registers */

@@ -286,6 +287,9 @@ static void hpb_dmae_halt(struct shdma_chan *schan)

ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);

chan->plane_idx = 0;
chan->first_desc = true;
}

static const struct hpb_dmae_slave_config *

@@ -385,7 +389,10 @@ static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
struct hpb_dmae_chan *chan = to_chan(schan);
u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);

return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS;
if (chan->xfer_mode == XFER_DOUBLE)
return dstsr & HPB_DMAE_DSTSR_DQSTS;
else
return dstsr & HPB_DMAE_DSTSR_DMSTS;
}

static int

@@ -510,6 +517,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
}

schan = &new_hpb_chan->shdma_chan;
schan->max_xfer_len = HPB_DMA_TCR_MAX;

shdma_chan_probe(sdev, schan, id);

if (pdev->id >= 0)
@@ -945,7 +945,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
u32 tad_offset;
u32 rir_way;
u32 mb, kb;
u64 ch_addr, offset, limit, prv = 0;
u64 ch_addr, offset, limit = 0, prv = 0;

/*
@@ -1082,7 +1082,7 @@ static void arizona_micd_set_level(struct arizona *arizona, int index,
static int arizona_extcon_probe(struct platform_device *pdev)
{
struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
struct arizona_pdata *pdata;
struct arizona_pdata *pdata = &arizona->pdata;
struct arizona_extcon_info *info;
unsigned int val;
int jack_irq_fall, jack_irq_rise;

@@ -1091,8 +1091,6 @@ static int arizona_extcon_probe(struct platform_device *pdev)
if (!arizona->dapm || !arizona->dapm->card)
return -EPROBE_DEFER;

pdata = dev_get_platdata(arizona->dev);

info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "Failed to allocate memory\n");

@@ -792,6 +792,8 @@ void extcon_dev_unregister(struct extcon_dev *edev)
return;
}

device_unregister(&edev->dev);

if (edev->mutually_exclusive && edev->max_supported) {
for (index = 0; edev->mutually_exclusive[index];
index++)

@@ -812,7 +814,6 @@ void extcon_dev_unregister(struct extcon_dev *edev)
if (switch_class)
class_compat_remove_link(switch_class, &edev->dev, NULL);
#endif
device_unregister(&edev->dev);
put_device(&edev->dev);
}
EXPORT_SYMBOL_GPL(extcon_dev_unregister);
@@ -2674,7 +2674,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
int modes = 0;
u8 cea_mode;

if (video_db == NULL || video_index > video_len)
if (video_db == NULL || video_index >= video_len)
return 0;

/* CEA modes are numbered 1..127 */

@@ -2701,7 +2701,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
if (structure & (1 << 8)) {
newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
if (newmode) {
newmode->flags = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
drm_mode_probed_add(connector, newmode);
modes++;
}
@@ -173,29 +173,38 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
static void exynos_drm_preclose(struct drm_device *dev,
struct drm_file *file)
{
struct exynos_drm_private *private = dev->dev_private;
struct drm_pending_vblank_event *e, *t;
unsigned long flags;

/* release events of current file */
spin_lock_irqsave(&dev->event_lock, flags);
list_for_each_entry_safe(e, t, &private->pageflip_event_list,
base.link) {
if (e->base.file_priv == file) {
list_del(&e->base.link);
e->base.destroy(&e->base);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);

exynos_drm_subdrv_close(dev, file);
}

static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
struct exynos_drm_private *private = dev->dev_private;
struct drm_pending_vblank_event *v, *vt;
struct drm_pending_event *e, *et;
unsigned long flags;

if (!file->driver_priv)
return;

/* Release all events not unhandled by page flip handler. */
spin_lock_irqsave(&dev->event_lock, flags);
list_for_each_entry_safe(v, vt, &private->pageflip_event_list,
base.link) {
if (v->base.file_priv == file) {
list_del(&v->base.link);
drm_vblank_put(dev, v->pipe);
v->base.destroy(&v->base);
}
}

/* Release all events handled by page flip handler but not freed. */
list_for_each_entry_safe(e, et, &file->event_list, link) {
list_del(&e->link);
e->destroy(e);
}
spin_unlock_irqrestore(&dev->event_lock, flags);

kfree(file->driver_priv);
file->driver_priv = NULL;
}

@@ -31,7 +31,7 @@
#include "exynos_drm_iommu.h"

/*
* FIMD is stand for Fully Interactive Mobile Display and
* FIMD stands for Fully Interactive Mobile Display and
* as a display controller, it transfers contents drawn on memory
* to a LCD Panel through Display Interfaces such as RGB or
* CPU Interface.
@@ -534,8 +534,10 @@ static int i915_drm_freeze(struct drm_device *dev)
* Disable CRTCs directly since we want to preserve sw state
* for _thaw.
*/
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
dev_priv->display.crtc_disable(crtc);
mutex_unlock(&dev->mode_config.mutex);

intel_modeset_suspend_hw(dev);
}

@@ -4442,10 +4442,9 @@ i915_gem_init_hw(struct drm_device *dev)
if (dev_priv->ellc_size)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

if (IS_HSW_GT3(dev))
I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
else
I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
if (IS_HASWELL(dev))
I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

if (HAS_PCH_NOP(dev)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
@@ -125,13 +125,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)

ret = i915_gem_object_get_pages(obj);
if (ret)
goto error;
goto err;

i915_gem_object_pin_pages(obj);

ret = -ENOMEM;

pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
if (pages == NULL)
goto error;
goto err_unpin;

i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)

@@ -141,15 +143,16 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
drm_free_large(pages);

if (!obj->dma_buf_vmapping)
goto error;
goto err_unpin;

obj->vmapping_count = 1;
i915_gem_object_pin_pages(obj);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return obj->dma_buf_vmapping;

error:
err_unpin:
i915_gem_object_unpin_pages(obj);
err:
mutex_unlock(&dev->struct_mutex);
return ERR_PTR(ret);
}
@@ -33,6 +33,9 @@
#include "intel_drv.h"
#include <linux/dma_remapping.h>

#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)

struct eb_vmas {
struct list_head vmas;
int and;

@@ -187,7 +190,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
}
}

static void eb_destroy(struct eb_vmas *eb) {
static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry;
struct drm_i915_gem_object *obj = vma->obj;

if (!drm_mm_node_allocated(&vma->node))
return;

entry = vma->exec_entry;

if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
i915_gem_object_unpin_fence(obj);

if (entry->flags & __EXEC_OBJECT_HAS_PIN)
i915_gem_object_unpin(obj);

entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
while (!list_empty(&eb->vmas)) {
struct i915_vma *vma;

@@ -195,6 +219,7 @@ static void eb_destroy(struct eb_vmas *eb) {
struct i915_vma,
exec_list);
list_del_init(&vma->exec_list);
i915_gem_execbuffer_unreserve_vma(vma);
drm_gem_object_unreference(&vma->obj->base);
}
kfree(eb);

@@ -478,9 +503,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb,
return ret;
}

#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)

static int
need_reloc_mappable(struct i915_vma *vma)
{

@@ -552,26 +574,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
return 0;
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry;
struct drm_i915_gem_object *obj = vma->obj;

if (!drm_mm_node_allocated(&vma->node))
return;

entry = vma->exec_entry;

if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
i915_gem_object_unpin_fence(obj);

if (entry->flags & __EXEC_OBJECT_HAS_PIN)
i915_gem_object_unpin(obj);

entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *vmas,

@@ -670,13 +672,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
goto err;
}

err: /* Decrement pin count for bound objects */
list_for_each_entry(vma, vmas, exec_list)
i915_gem_execbuffer_unreserve_vma(vma);

err:
if (ret != -ENOSPC || retry++)
return ret;

/* Decrement pin count for bound objects */
list_for_each_entry(vma, vmas, exec_list)
i915_gem_execbuffer_unreserve_vma(vma);

ret = i915_gem_evict_vm(vm, true);
if (ret)
return ret;

@@ -708,6 +711,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
while (!list_empty(&eb->vmas)) {
vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
list_del_init(&vma->exec_list);
i915_gem_execbuffer_unreserve_vma(vma);
drm_gem_object_unreference(&vma->obj->base);
}
@@ -57,7 +57,9 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)

#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))

@@ -185,10 +187,10 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
case I915_CACHE_NONE:
break;
case I915_CACHE_WT:
pte |= HSW_WT_ELLC_LLC_AGE0;
pte |= HSW_WT_ELLC_LLC_AGE3;
break;
default:
pte |= HSW_WB_ELLC_LLC_AGE0;
pte |= HSW_WB_ELLC_LLC_AGE3;
break;
}
@@ -235,6 +235,7 @@
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
@@ -173,7 +173,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
ddi_translations = ddi_translations_dp;
break;
case PORT_D:
if (intel_dpd_is_edp(dev))
if (intel_dp_is_edp(dev, PORT_D))
ddi_translations = ddi_translations_edp;
else
ddi_translations = ddi_translations_dp;

@@ -1158,9 +1158,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);

if (type == INTEL_OUTPUT_EDP) {
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
}
Some files were not shown because too many files have changed in this diff.