Merge 3.11-rc6 into usb-next

We want these USB fixes in this branch as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2013-08-18 20:33:01 -07:00
commit bd479f2933
367 changed files with 3437 additions and 1928 deletions


@ -1,6 +1,6 @@
<?xml version="1.0"?>
<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [
<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
<!ENTITY % media-entities SYSTEM "./media-entities.tmpl"> %media-entities;
<!ENTITY media-indices SYSTEM "./media-indices.tmpl">


@ -4,7 +4,7 @@
Required properties :
- reg : Offset and length of the register set for the device
- compatible : Should be "marvell,mv64xxx-i2c"
- compatible : Should be "marvell,mv64xxx-i2c" or "allwinner,sun4i-i2c"
- interrupts : The interrupt number
Optional properties :


@ -31,9 +31,8 @@ Optional nodes:
Optional sub-node properties:
ti,warm-reset - maintain voltage during warm reset(boolean)
ti,roof-floor - control voltage selection by pin(boolean)
ti,sleep-mode - mode to adopt in pmic sleep 0 - off, 1 - auto,
ti,mode-sleep - mode to adopt in pmic sleep 0 - off, 1 - auto,
2 - eco, 3 - forced pwm
ti,tstep - slope control 0 - Jump, 1 10mV/us, 2 5mV/us, 3 2.5mV/us
ti,smps-range - OTP has the wrong range set for the hardware so override
0 - low range, 1 - high range.
@ -59,7 +58,6 @@ pmic {
ti,warm-reset;
ti,roof-floor;
ti,mode-sleep = <0>;
ti,tstep = <0>;
ti,smps-range = <1>;
};


@ -965,6 +965,12 @@ M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
M: Santosh Shilimkar <santosh.shilimkar@ti.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-keystone/
ARM/LOGICPD PXA270 MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@ -1259,7 +1265,6 @@ F: drivers/rtc/rtc-coh901331.c
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
ARM/Ux500 ARM ARCHITECTURE
M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@ -5576,9 +5581,9 @@ S: Maintained
F: drivers/media/tuners/mxl5007t.*
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
M: Andrew Gallatin <gallatin@myri.com>
M: Hyong-Youb Kim <hykim@myri.com>
L: netdev@vger.kernel.org
W: http://www.myri.com/scs/download-Myri10GE.html
W: https://www.myricom.com/support/downloads/myri10ge.html
S: Supported
F: drivers/net/ethernet/myricom/myri10ge/
@ -7361,7 +7366,6 @@ F: drivers/net/ethernet/sfc/
SGI GRU DRIVER
M: Dimitri Sivanich <sivanich@sgi.com>
M: Robin Holt <holt@sgi.com>
S: Maintained
F: drivers/misc/sgi-gru/
@ -7381,7 +7385,8 @@ S: Maintained for 2.6.
F: Documentation/sgi-visws.txt
SGI XP/XPC/XPNET DRIVER
M: Robin Holt <holt@sgi.com>
M: Cliff Whickman <cpw@sgi.com>
M: Robin Holt <robinmholt@gmail.com>
S: Maintained
F: drivers/misc/sgi-xp/
@ -8664,6 +8669,11 @@ T: git git://git.alsa-project.org/alsa-kernel.git
S: Maintained
F: sound/usb/midi.*
USB NETWORKING DRIVERS
L: linux-usb@vger.kernel.org
S: Odd Fixes
F: drivers/net/usb/
USB OHCI DRIVER
M: Alan Stern <stern@rowland.harvard.edu>
L: linux-usb@vger.kernel.org


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 11
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc6
NAME = Linux for Workgroups
# *DOCUMENTATION*


@ -407,6 +407,12 @@ config CLONE_BACKWARDS2
help
Architecture has the first two arguments of clone(2) swapped.
config CLONE_BACKWARDS3
bool
help
Architecture has tls passed as the 3rd argument of clone(2),
not the 5th one.
config ODD_RT_SIGACTION
bool
help


@ -26,7 +26,7 @@
cpu-offset = <0x80000>;
};
msmgpio: gpio@fd510000 {
msmgpio: gpio@800000 {
compatible = "qcom,msm-gpio";
gpio-controller;
#gpio-cells = <2>;
@ -34,7 +34,7 @@
interrupts = <0 32 0x4>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0xfd510000 0x4000>;
reg = <0x800000 0x4000>;
};
serial@16440000 {


@ -235,7 +235,7 @@
};
&mmc1 {
vmmc-supply = <&vmmcsd_fixed>;
vmmc-supply = <&ldo9_reg>;
bus-width = <4>;
};
@ -282,6 +282,7 @@
regulators {
smps123_reg: smps123 {
/* VDD_OPP_MPU */
regulator-name = "smps123";
regulator-min-microvolt = < 600000>;
regulator-max-microvolt = <1500000>;
@ -290,6 +291,7 @@
};
smps45_reg: smps45 {
/* VDD_OPP_MM */
regulator-name = "smps45";
regulator-min-microvolt = < 600000>;
regulator-max-microvolt = <1310000>;
@ -298,6 +300,7 @@
};
smps6_reg: smps6 {
/* VDD_DDR3 - over VDD_SMPS6 */
regulator-name = "smps6";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
@ -306,6 +309,7 @@
};
smps7_reg: smps7 {
/* VDDS_1v8_OMAP over VDDS_1v8_MAIN */
regulator-name = "smps7";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@ -314,6 +318,7 @@
};
smps8_reg: smps8 {
/* VDD_OPP_CORE */
regulator-name = "smps8";
regulator-min-microvolt = < 600000>;
regulator-max-microvolt = <1310000>;
@ -322,15 +327,15 @@
};
smps9_reg: smps9 {
/* VDDA_2v1_AUD over VDD_2v1 */
regulator-name = "smps9";
regulator-min-microvolt = <2100000>;
regulator-max-microvolt = <2100000>;
regulator-always-on;
regulator-boot-on;
ti,smps-range = <0x80>;
};
smps10_reg: smps10 {
/* VBUS_5V_OTG */
regulator-name = "smps10";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
@ -339,38 +344,40 @@
};
ldo1_reg: ldo1 {
/* VDDAPHY_CAM: vdda_csiport */
regulator-name = "ldo1";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <1800000>;
};
ldo2_reg: ldo2 {
/* VCC_2V8_DISP: Does not go anywhere */
regulator-name = "ldo2";
regulator-min-microvolt = <2900000>;
regulator-max-microvolt = <2900000>;
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
/* Unused */
status = "disabled";
};
ldo3_reg: ldo3 {
/* VDDAPHY_MDM: vdda_lli */
regulator-name = "ldo3";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
regulator-always-on;
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <1500000>;
regulator-boot-on;
/* Only if Modem is used */
status = "disabled";
};
ldo4_reg: ldo4 {
/* VDDAPHY_DISP: vdda_dsiport/hdmi */
regulator-name = "ldo4";
regulator-min-microvolt = <2200000>;
regulator-max-microvolt = <2200000>;
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <1800000>;
};
ldo5_reg: ldo5 {
/* VDDA_1V8_PHY: usb/sata/hdmi.. */
regulator-name = "ldo5";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@ -379,38 +386,43 @@
};
ldo6_reg: ldo6 {
/* VDDS_1V2_WKUP: hsic/ldo_emu_wkup */
regulator-name = "ldo6";
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <1500000>;
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-always-on;
regulator-boot-on;
};
ldo7_reg: ldo7 {
/* VDD_VPP: vpp1 */
regulator-name = "ldo7";
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <1500000>;
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <2000000>;
regulator-max-microvolt = <2000000>;
/* Only for efuse reprograming! */
status = "disabled";
};
ldo8_reg: ldo8 {
/* VDD_3v0: Does not go anywhere */
regulator-name = "ldo8";
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <1500000>;
regulator-always-on;
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
regulator-boot-on;
/* Unused */
status = "disabled";
};
ldo9_reg: ldo9 {
/* VCC_DV_SDIO: vdds_sdcard */
regulator-name = "ldo9";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
regulator-max-microvolt = <3000000>;
regulator-boot-on;
};
ldoln_reg: ldoln {
/* VDDA_1v8_REF: vdds_osc/mm_l4per.. */
regulator-name = "ldoln";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@ -419,12 +431,20 @@
};
ldousb_reg: ldousb {
/* VDDA_3V_USB: VDDA_USBHS33 */
regulator-name = "ldousb";
regulator-min-microvolt = <3250000>;
regulator-max-microvolt = <3250000>;
regulator-always-on;
regulator-boot-on;
};
regen3_reg: regen3 {
/* REGEN3 controls LDO9 supply to card */
regulator-name = "regen3";
regulator-always-on;
regulator-boot-on;
};
};
};
};


@ -6,10 +6,12 @@
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0>;
};
cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <1>;
};


@ -457,6 +457,7 @@
};
usb-phy@c5004000 {
status = "okay";
nvidia,phy-reset-gpio = <&gpio TEGRA_GPIO(V, 1)
GPIO_ACTIVE_LOW>;
};


@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void)
{
return 1 << mpidr_hash.bits;
}
extern int platform_can_cpu_hotplug(void);
#endif


@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
" subs %1, %0, %0, ror #16\n"
" addeq %0, %0, %4\n"
" strexeq %2, %0, [%3]"
: "=&r" (slock), "=&r" (contended), "=r" (res)
: "=&r" (slock), "=&r" (contended), "=&r" (res)
: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
: "cc");
} while (res);
@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp;
unsigned long contended, res;
__asm__ __volatile__(
" ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]"
: "=&r" (tmp)
: "r" (&rw->lock), "r" (0x80000000)
: "cc");
do {
__asm__ __volatile__(
" ldrex %0, [%2]\n"
" mov %1, #0\n"
" teq %0, #0\n"
" strexeq %1, %3, [%2]"
: "=&r" (contended), "=&r" (res)
: "r" (&rw->lock), "r" (0x80000000)
: "cc");
} while (res);
if (tmp == 0) {
if (!contended) {
smp_mb();
return 1;
} else {
@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2 = 1;
unsigned long contended, res;
__asm__ __volatile__(
" ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
" strexpl %1, %0, [%2]\n"
: "=&r" (tmp), "+r" (tmp2)
: "r" (&rw->lock)
: "cc");
do {
__asm__ __volatile__(
" ldrex %0, [%2]\n"
" mov %1, #0\n"
" adds %0, %0, #1\n"
" strexpl %1, %0, [%2]"
: "=&r" (contended), "=&r" (res)
: "r" (&rw->lock)
: "cc");
} while (res);
smp_mb();
return tmp2 == 0;
/* If the lock is negative, then it is already held for write. */
if (contended < 0x80000000) {
smp_mb();
return 1;
} else {
return 0;
}
}
/* read_can_lock - would read_trylock() succeed? */
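
The write- and read-trylock changes above mirror the earlier arch_spin_trylock fix: the ldrex/strex sequence is wrapped in a loop so that a spurious store-exclusive failure is retried instead of being reported as contention. A minimal user-space sketch of the same retry pattern, using GCC's weak compare-and-swap as a stand-in for ldrex/strex (illustrative only; the function name and the use of __atomic builtins are this note's assumptions, not kernel code):

```c
#include <stdio.h>

/* 0 = unlocked, non-zero = locked. */
static unsigned long lock;

static int try_lock(unsigned long *l)
{
	unsigned long expected;

	do {
		expected = 0;
		/* A weak CAS may fail spuriously, just like strex. */
		if (__atomic_compare_exchange_n(l, &expected, 1UL,
						1 /* weak */,
						__ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			return 1;	/* lock acquired */
		/*
		 * Retry only when the failure was spurious (the observed
		 * value was still 0); a non-zero value means the lock is
		 * genuinely contended.
		 */
	} while (expected == 0);

	return 0;
}

int main(void)
{
	printf("first try_lock:  %d\n", try_lock(&lock));	/* 1 */
	printf("second try_lock: %d\n", try_lock(&lock));	/* 0 */
	return 0;
}
```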


@ -43,6 +43,7 @@ struct mmu_gather {
struct mm_struct *mm;
unsigned int fullmm;
struct vm_area_struct *vma;
unsigned long start, end;
unsigned long range_start;
unsigned long range_end;
unsigned int nr;
@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
}
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->fullmm = fullmm;
tlb->fullmm = !(start | (end+1));
tlb->start = start;
tlb->end = end;
tlb->vma = NULL;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
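
The same tlb_gather_mmu() conversion recurs for ia64, s390, sh and um further down, and in every case the full-mm flag is now derived from the range: `!(start | (end + 1))` is non-zero only when start == 0 and end == ~0UL, i.e. when the caller is tearing down the whole address space. A quick standalone check of that predicate (illustrative only):

```c
#include <stdio.h>

/* Non-zero only for the (0, ~0UL) whole-address-space range. */
static unsigned int fullmm(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));
}

int main(void)
{
	printf("%u\n", fullmm(0, ~0UL));	/* 1: full mm teardown */
	printf("%u\n", fullmm(0, 0x7fffUL));	/* 0: partial range    */
	printf("%u\n", fullmm(0x1000, ~0UL));	/* 0: partial range    */
	return 0;
}
```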


@ -357,7 +357,8 @@ ENDPROC(__pabt_svc)
.endm
.macro kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
!defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else


@ -84,17 +84,13 @@ int show_fiq_list(struct seq_file *p, int prec)
void set_fiq_handler(void *start, unsigned int length)
{
#if defined(CONFIG_CPU_USE_DOMAINS)
void *base = (void *)0xffff0000;
#else
void *base = vectors_page;
#endif
unsigned offset = FIQ_OFFSET;
memcpy(base + offset, start, length);
if (!cache_is_vipt_nonaliasing())
flush_icache_range(base + offset, offset + length);
flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
if (!vectors_high())
flush_icache_range(offset, offset + length);
}
int claim_fiq(struct fiq_handler *f)


@ -15,6 +15,7 @@
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/mach-types.h>
#include <asm/smp_plat.h>
#include <asm/system_misc.h>
extern const unsigned char relocate_new_kernel[];
@ -38,6 +39,14 @@ int machine_kexec_prepare(struct kimage *image)
__be32 header;
int i, err;
/*
* Validate that if the current HW supports SMP, then the SW supports
* and implements CPU hotplug for the current HW. If not, we won't be
* able to kexec reliably, so fail the prepare operation.
*/
if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
return -EINVAL;
/*
* No segment at default ATAGs address. try to locate
* a dtb using magic.
@ -134,10 +143,13 @@ void machine_kexec(struct kimage *image)
unsigned long reboot_code_buffer_phys;
void *reboot_code_buffer;
if (num_online_cpus() > 1) {
pr_err("kexec: error: multiple CPUs still online\n");
return;
}
/*
* This can only happen if machine_shutdown() failed to disable some
* CPU, and that can only happen if the checks in
* machine_kexec_prepare() were not correct. If this fails, we can't
* reliably kexec anyway, so BUG_ON is appropriate.
*/
BUG_ON(num_online_cpus() > 1);
page_list = image->head & PAGE_MASK;


@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
int mapping = (*event_map)[config];
int mapping;
if (config >= PERF_COUNT_HW_MAX)
return -EINVAL;
mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu *leader_pmu = event->group_leader->pmu;
if (is_software_event(event))
return 1;
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
return 1;


@ -462,7 +462,7 @@ int in_gate_area_no_mm(unsigned long addr)
{
return in_gate_area(NULL, addr);
}
#define is_gate_vma(vma) ((vma) = &gate_vma)
#define is_gate_vma(vma) ((vma) == &gate_vma)
#else
#define is_gate_vma(vma) 0
#endif


@ -145,6 +145,16 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle)
return -ENOSYS;
}
int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
if (smp_ops.cpu_kill)
return 1;
#endif
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);


@ -121,8 +121,7 @@ config MSM_SMD
bool
config MSM_GPIOMUX
depends on !(ARCH_MSM8X60 || ARCH_MSM8960)
bool "MSM V1 TLMM GPIOMUX architecture"
bool
help
Support for MSM V1 TLMM GPIOMUX architecture.


@ -1,33 +0,0 @@
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/kernel.h>
#include "gpiomux.h"
#include "proc_comm.h"
void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val)
{
unsigned tlmm_config = (val & ~GPIOMUX_CTL_MASK) |
((gpio & 0x3ff) << 4);
unsigned tlmm_disable = 0;
int rc;
rc = msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
&tlmm_config, &tlmm_disable);
if (rc)
pr_err("%s: unexpected proc_comm failure %d: %08x %08x\n",
__func__, rc, tlmm_config, tlmm_disable);
}


@ -73,16 +73,6 @@ extern struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS];
int msm_gpiomux_write(unsigned gpio,
gpiomux_config_t active,
gpiomux_config_t suspended);
/* Architecture-internal function for use by the framework only.
* This function can assume the following:
* - the gpio value has passed a bounds-check
* - the gpiomux spinlock has been obtained
*
* This function is not for public consumption. External users
* should use msm_gpiomux_write.
*/
void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val);
#else
static inline int msm_gpiomux_write(unsigned gpio,
gpiomux_config_t active,


@ -42,7 +42,7 @@
/* Using generic display panel */
static struct tfp410_platform_data omap4_dvi_panel = {
.i2c_bus_num = 3,
.i2c_bus_num = 2,
.power_down_gpio = PANDA_DVI_TFP410_POWER_DOWN_GPIO,
};


@ -129,6 +129,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
const char *oh_name;
int oh_cnt, i, ret = 0;
bool device_active = false;
oh_cnt = of_property_count_strings(node, "ti,hwmods");
if (oh_cnt <= 0) {
@ -152,6 +153,8 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
goto odbfd_exit1;
}
hwmods[i] = oh;
if (oh->flags & HWMOD_INIT_NO_IDLE)
device_active = true;
}
od = omap_device_alloc(pdev, hwmods, oh_cnt);
@ -172,6 +175,11 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
pdev->dev.pm_domain = &omap_device_pm_domain;
if (device_active) {
omap_device_enable(pdev);
pm_runtime_set_active(&pdev->dev);
}
odbfd_exit1:
kfree(hwmods);
odbfd_exit:
@ -842,6 +850,7 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct omap_device *od = to_omap_device(pdev);
int i;
if (!od)
return 0;
@ -850,6 +859,15 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
* If omap_device state is enabled, but has no driver bound,
* idle it.
*/
/*
* Some devices (like memory controllers) are always kept
* enabled, and should not be idled even with no drivers.
*/
for (i = 0; i < od->hwmods_cnt; i++)
if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE)
return 0;
if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) {
if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
dev_warn(dev, "%s: enabled but no driver. Idling\n",


@ -2386,7 +2386,7 @@ static void __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh);
if (np)
va_start = of_iomap(np, 0);
va_start = of_iomap(np, oh->mpu_rt_idx);
} else {
va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start);
}


@ -95,6 +95,54 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3;
#define MODULEMODE_HWCTRL 1
#define MODULEMODE_SWCTRL 2
#define DEBUG_OMAP2UART1_FLAGS 0
#define DEBUG_OMAP2UART2_FLAGS 0
#define DEBUG_OMAP2UART3_FLAGS 0
#define DEBUG_OMAP3UART3_FLAGS 0
#define DEBUG_OMAP3UART4_FLAGS 0
#define DEBUG_OMAP4UART3_FLAGS 0
#define DEBUG_OMAP4UART4_FLAGS 0
#define DEBUG_TI81XXUART1_FLAGS 0
#define DEBUG_TI81XXUART2_FLAGS 0
#define DEBUG_TI81XXUART3_FLAGS 0
#define DEBUG_AM33XXUART1_FLAGS 0
#define DEBUG_OMAPUART_FLAGS (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET)
#if defined(CONFIG_DEBUG_OMAP2UART1)
#undef DEBUG_OMAP2UART1_FLAGS
#define DEBUG_OMAP2UART1_FLAGS DEBUG_OMAPUART_FLAGS
#elif defined(CONFIG_DEBUG_OMAP2UART2)
#undef DEBUG_OMAP2UART2_FLAGS
#define DEBUG_OMAP2UART2_FLAGS DEBUG_OMAPUART_FLAGS
#elif defined(CONFIG_DEBUG_OMAP2UART3)
#undef DEBUG_OMAP2UART3_FLAGS
#define DEBUG_OMAP2UART3_FLAGS DEBUG_OMAPUART_FLAGS
#elif defined(CONFIG_DEBUG_OMAP3UART3)
#undef DEBUG_OMAP3UART3_FLAGS
#define DEBUG_OMAP3UART3_FLAGS DEBUG_OMAPUART_FLAGS
#elif defined(CONFIG_DEBUG_OMAP3UART4)
#undef DEBUG_OMAP3UART4_FLAGS
#define DEBUG_OMAP3UART4_FLAGS DEBUG_OMAPUART_FLAGS
#elif defined(CONFIG_DEBUG_OMAP4UART3)
#undef DEBUG_OMAP4UART3_FLAGS
#define DEBUG_OMAP4UART3_FLAGS DEBUG_OMAPUART_FLAGS
#elif defined(CONFIG_DEBUG_OMAP4UART4)
#undef DEBUG_OMAP4UART4_FLAGS
#define DEBUG_OMAP4UART4_FLAGS DEBUG_OMAPUART_FLAGS
#elif defined(CONFIG_DEBUG_TI81XXUART1)
#undef DEBUG_TI81XXUART1_FLAGS
#define DEBUG_TI81XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
#elif defined(CONFIG_DEBUG_TI81XXUART2)
#undef DEBUG_TI81XXUART2_FLAGS
#define DEBUG_TI81XXUART2_FLAGS DEBUG_OMAPUART_FLAGS
#elif defined(CONFIG_DEBUG_TI81XXUART3)
#undef DEBUG_TI81XXUART3_FLAGS
#define DEBUG_TI81XXUART3_FLAGS DEBUG_OMAPUART_FLAGS
#elif defined(CONFIG_DEBUG_AM33XXUART1)
#undef DEBUG_AM33XXUART1_FLAGS
#define DEBUG_AM33XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
#endif
/**
* struct omap_hwmod_mux_info - hwmod specific mux configuration
@ -568,6 +616,7 @@ struct omap_hwmod_link {
* @voltdm: pointer to voltage domain (filled in at runtime)
* @dev_attr: arbitrary device attributes that can be passed to the driver
* @_sysc_cache: internal-use hwmod flags
* @mpu_rt_idx: index of device address space for register target (for DT boot)
* @_mpu_rt_va: cached register target start address (internal use)
* @_mpu_port: cached MPU register target slave (internal use)
* @opt_clks_cnt: number of @opt_clks
@ -617,6 +666,7 @@ struct omap_hwmod {
struct list_head node;
struct omap_hwmod_ocp_if *_mpu_port;
u16 flags;
u8 mpu_rt_idx;
u8 response_lat;
u8 rst_lines_cnt;
u8 opt_clks_cnt;


@ -512,7 +512,7 @@ struct omap_hwmod omap2xxx_uart1_hwmod = {
.mpu_irqs = omap2_uart1_mpu_irqs,
.sdma_reqs = omap2_uart1_sdma_reqs,
.main_clk = "uart1_fck",
.flags = HWMOD_SWSUP_SIDLE_ACT,
.flags = DEBUG_OMAP2UART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
@ -532,7 +532,7 @@ struct omap_hwmod omap2xxx_uart2_hwmod = {
.mpu_irqs = omap2_uart2_mpu_irqs,
.sdma_reqs = omap2_uart2_sdma_reqs,
.main_clk = "uart2_fck",
.flags = HWMOD_SWSUP_SIDLE_ACT,
.flags = DEBUG_OMAP2UART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
@ -552,7 +552,7 @@ struct omap_hwmod omap2xxx_uart3_hwmod = {
.mpu_irqs = omap2_uart3_mpu_irqs,
.sdma_reqs = omap2_uart3_sdma_reqs,
.main_clk = "uart3_fck",
.flags = HWMOD_SWSUP_SIDLE_ACT,
.flags = DEBUG_OMAP2UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,


@ -562,6 +562,7 @@ static struct omap_hwmod am33xx_cpgmac0_hwmod = {
.clkdm_name = "cpsw_125mhz_clkdm",
.flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
.main_clk = "cpsw_125mhz_gclk",
.mpu_rt_idx = 1,
.prcm = {
.omap4 = {
.clkctrl_offs = AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET,
@ -1512,7 +1513,7 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
.name = "uart1",
.class = &uart_class,
.clkdm_name = "l4_wkup_clkdm",
.flags = HWMOD_SWSUP_SIDLE_ACT,
.flags = DEBUG_AM33XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.main_clk = "dpll_per_m2_div4_wkupdm_ck",
.prcm = {
.omap4 = {


@ -490,7 +490,7 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = {
.mpu_irqs = omap2_uart1_mpu_irqs,
.sdma_reqs = omap2_uart1_sdma_reqs,
.main_clk = "uart1_fck",
.flags = HWMOD_SWSUP_SIDLE_ACT,
.flags = DEBUG_TI81XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
@ -509,7 +509,7 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = {
.mpu_irqs = omap2_uart2_mpu_irqs,
.sdma_reqs = omap2_uart2_sdma_reqs,
.main_clk = "uart2_fck",
.flags = HWMOD_SWSUP_SIDLE_ACT,
.flags = DEBUG_TI81XXUART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
@ -528,7 +528,8 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = {
.mpu_irqs = omap2_uart3_mpu_irqs,
.sdma_reqs = omap2_uart3_sdma_reqs,
.main_clk = "uart3_fck",
.flags = HWMOD_SWSUP_SIDLE_ACT,
.flags = DEBUG_OMAP3UART3_FLAGS | DEBUG_TI81XXUART3_FLAGS |
HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = OMAP3430_PER_MOD,
@ -558,7 +559,7 @@ static struct omap_hwmod omap36xx_uart4_hwmod = {
.mpu_irqs = uart4_mpu_irqs,
.sdma_reqs = uart4_sdma_reqs,
.main_clk = "uart4_fck",
.flags = HWMOD_SWSUP_SIDLE_ACT,
.flags = DEBUG_OMAP3UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = OMAP3430_PER_MOD,


@ -2858,8 +2858,7 @@ static struct omap_hwmod omap44xx_uart3_hwmod = {
.name = "uart3",
.class = &omap44xx_uart_hwmod_class,
.clkdm_name = "l4_per_clkdm",
.flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET |
HWMOD_SWSUP_SIDLE_ACT,
.flags = DEBUG_OMAP4UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.main_clk = "func_48m_fclk",
.prcm = {
.omap4 = {
@ -2875,7 +2874,7 @@ static struct omap_hwmod omap44xx_uart4_hwmod = {
.name = "uart4",
.class = &omap44xx_uart_hwmod_class,
.clkdm_name = "l4_per_clkdm",
.flags = HWMOD_SWSUP_SIDLE_ACT,
.flags = DEBUG_OMAP4UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.main_clk = "func_48m_fclk",
.prcm = {
.omap4 = {


@ -1375,7 +1375,7 @@ static struct omap_hwmod omap54xx_uart3_hwmod = {
.name = "uart3",
.class = &omap54xx_uart_hwmod_class,
.clkdm_name = "l4per_clkdm",
.flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
.flags = DEBUG_OMAP4UART3_FLAGS,
.main_clk = "func_48m_fclk",
.prcm = {
.omap4 = {
@ -1391,6 +1391,7 @@ static struct omap_hwmod omap54xx_uart4_hwmod = {
.name = "uart4",
.class = &omap54xx_uart_hwmod_class,
.clkdm_name = "l4per_clkdm",
.flags = DEBUG_OMAP4UART4_FLAGS,
.main_clk = "func_48m_fclk",
.prcm = {
.omap4 = {


@ -208,17 +208,6 @@ static int __init omap_serial_early_init(void)
pr_info("%s used as console in debug mode: uart%d clocks will not be gated",
uart_name, uart->num);
}
/*
* omap-uart can be used for earlyprintk logs
* So if omap-uart is used as console then prevent
* uart reset and idle to get logs from omap-uart
* until uart console driver is available to take
* care for console messages.
* Idling or resetting omap-uart while printing logs
* early boot logs can stall the boot-up.
*/
oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET;
}
} while (1);


@ -1162,9 +1162,6 @@ static void __init eva_init(void)
gpio_request_one(61, GPIOF_OUT_INIT_HIGH, NULL); /* LCDDON */
gpio_request_one(202, GPIOF_OUT_INIT_LOW, NULL); /* LCD0_LED_CONT */
/* Touchscreen */
gpio_request_one(166, GPIOF_OUT_INIT_HIGH, NULL); /* TP_RST_B */
/* GETHER */
gpio_request_one(18, GPIOF_OUT_INIT_HIGH, NULL); /* PHY_RST */


@ -167,7 +167,13 @@ static const struct pinctrl_map bockw_pinctrl_map[] = {
"usb1", "usb1"),
/* SDHI0 */
PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
"sdhi0", "sdhi0"),
"sdhi0_data4", "sdhi0"),
PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
"sdhi0_ctrl", "sdhi0"),
PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
"sdhi0_cd", "sdhi0"),
PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
"sdhi0_wp", "sdhi0"),
};
#define FPGA 0x18200000


@ -59,7 +59,7 @@ static __initdata struct gpio_led_platform_data lager_leds_pdata = {
#define GPIO_KEY(c, g, d, ...) \
{ .code = c, .gpio = g, .desc = d, .active_low = 1 }
static __initdata struct gpio_keys_button gpio_buttons[] = {
static struct gpio_keys_button gpio_buttons[] = {
GPIO_KEY(KEY_4, RCAR_GP_PIN(1, 28), "SW2-pin4"),
GPIO_KEY(KEY_3, RCAR_GP_PIN(1, 26), "SW2-pin3"),
GPIO_KEY(KEY_2, RCAR_GP_PIN(1, 24), "SW2-pin2"),


@ -16,8 +16,6 @@
#include <linux/linkage.h>
#include <linux/init.h>
__INIT
/*
* ST specific entry point for secondary CPUs. This provides
* a "holding pen" into which all secondary cores are held until we're


@ -35,6 +35,7 @@ struct mmu_gather {
struct mm_struct *mm;
unsigned int fullmm;
struct vm_area_struct *vma;
unsigned long start, end;
unsigned long range_start;
unsigned long range_end;
unsigned int nr;
@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
}
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->fullmm = fullmm;
tlb->fullmm = !(start | (end+1));
tlb->start = start;
tlb->end = end;
tlb->vma = NULL;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;


@ -150,7 +150,6 @@ static struct ac97c_platform_data __initdata ac97c0_data = {
static struct platform_device rmt_ts_device = {
.name = "ucb1400_ts",
.id = -1,
}
};
#endif


@ -158,6 +158,7 @@ source "kernel/Kconfig.hz"
endmenu
source "init/Kconfig"
source "kernel/Kconfig.freezer"
source "drivers/Kconfig"
source "fs/Kconfig"


@ -22,7 +22,7 @@
* unmapping a portion of the virtual address space, these hooks are called according to
* the following template:
*
* tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM
* tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM
* {
* for each vma that needs a shootdown do {
* tlb_start_vma(tlb, vma);
@ -58,6 +58,7 @@ struct mmu_gather {
unsigned int max;
unsigned char fullmm; /* non-zero means full mm flush */
unsigned char need_flush; /* really unmapped some PTEs? */
unsigned long start, end;
unsigned long start_addr;
unsigned long end_addr;
struct page **pages;
@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
tlb->nr = 0;
tlb->fullmm = full_mm_flush;
tlb->fullmm = !(start | (end+1));
tlb->start = start;
tlb->end = end;
tlb->start_addr = ~0UL;
}


@ -18,9 +18,11 @@
#include <asm/machdep.h>
#include <asm/natfeat.h>
extern long nf_get_id2(const char *feature_name);
asm("\n"
" .global nf_get_id,nf_call\n"
"nf_get_id:\n"
" .global nf_get_id2,nf_call\n"
"nf_get_id2:\n"
" .short 0x7300\n"
" rts\n"
"nf_call:\n"
@ -29,12 +31,25 @@ asm("\n"
"1: moveq.l #0,%d0\n"
" rts\n"
" .section __ex_table,\"a\"\n"
" .long nf_get_id,1b\n"
" .long nf_get_id2,1b\n"
" .long nf_call,1b\n"
" .previous");
EXPORT_SYMBOL_GPL(nf_get_id);
EXPORT_SYMBOL_GPL(nf_call);
long nf_get_id(const char *feature_name)
{
/* feature_name may be in vmalloc()ed memory, so make a copy */
char name_copy[32];
size_t n;
n = strlcpy(name_copy, feature_name, sizeof(name_copy));
if (n >= sizeof(name_copy))
return 0;
return nf_get_id2(name_copy);
}
EXPORT_SYMBOL_GPL(nf_get_id);
void nfprint(const char *fmt, ...)
{
static char buf[256];


@ -15,16 +15,17 @@
unsigned long long n64; \
} __n; \
unsigned long __rem, __upper; \
unsigned long __base = (base); \
\
__n.n64 = (n); \
if ((__upper = __n.n32[0])) { \
asm ("divul.l %2,%1:%0" \
: "=d" (__n.n32[0]), "=d" (__upper) \
: "d" (base), "0" (__n.n32[0])); \
: "=d" (__n.n32[0]), "=d" (__upper) \
: "d" (__base), "0" (__n.n32[0])); \
} \
asm ("divu.l %2,%1:%0" \
: "=d" (__n.n32[1]), "=d" (__rem) \
: "d" (base), "1" (__upper), "0" (__n.n32[1])); \
: "=d" (__n.n32[1]), "=d" (__rem) \
: "d" (__base), "1" (__upper), "0" (__n.n32[1])); \
(n) = __n.n64; \
__rem; \
})
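
The do_div() change above caches the `base` macro argument in a local `__base` so the expression supplied by the caller is evaluated only once even though the divisor is used in both asm statements. A small illustration of the hazard this avoids, using an ordinary macro with hypothetical names (not the kernel macro itself):

```c
#include <stdio.h>

static int calls;

static int next(void)
{
	calls++;
	return 10;
}

/* Expands its argument twice, so a side-effecting argument runs twice. */
#define TWICE_BAD(x)	((x) + (x))

/* Caches the argument first, as the patched do_div() now does with base. */
#define TWICE_GOOD(x)	({ int __v = (x); __v + __v; })

int main(void)
{
	int v;

	calls = 0;
	v = TWICE_BAD(next());
	printf("bad:  value=%d, next() called %d times\n", v, calls);  /* 20, 2 */

	calls = 0;
	v = TWICE_GOOD(next());
	printf("good: value=%d, next() called %d times\n", v, calls);  /* 20, 1 */
	return 0;
}
```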


@ -28,7 +28,7 @@ config MICROBLAZE
select GENERIC_CLOCKEVENTS
select GENERIC_IDLE_POLL_SETUP
select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS
select CLONE_BACKWARDS3
config SWAP
def_bool n


@ -17,6 +17,8 @@
#define current_cpu_type() current_cpu_data.cputype
#endif
#define boot_cpu_type() cpu_data[0].cputype
/*
* SMP assumption: Options of CPU 0 are a superset of all processors.
* This is true for all known MIPS systems.


@ -66,6 +66,8 @@ static void __init bmips_smp_setup(void)
int i, cpu = 1, boot_cpu = 0;
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
int cpu_hw_intr;
/* arbitration priority */
clear_c0_brcm_cmt_ctrl(0x30);
@ -80,8 +82,12 @@ static void __init bmips_smp_setup(void)
* MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
* MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
*/
change_c0_brcm_cmt_intr(0xf8018000,
(0x02 << 27) | (0x03 << 15));
if (boot_cpu == 0)
cpu_hw_intr = 0x02;
else
cpu_hw_intr = 0x1d;
change_c0_brcm_cmt_intr(0xf8018000, (cpu_hw_intr << 27) | (0x03 << 15));
/* single core, 2 threads (2 pipelines) */
max_cpus = 2;


@ -166,7 +166,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
reg.control[i] |= M_PERFCTL_USER;
if (ctr[i].exl)
reg.control[i] |= M_PERFCTL_EXL;
if (current_cpu_type() == CPU_XLR)
if (boot_cpu_type() == CPU_XLR)
reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS;
reg.counter[i] = 0x80000000 - ctr[i].count;
}


@ -206,11 +206,13 @@ static struct resource pnx833x_ethernet_resources[] = {
.end = PNX8335_IP3902_PORTS_END,
.flags = IORESOURCE_MEM,
},
#ifdef CONFIG_SOC_PNX8335
[1] = {
.start = PNX8335_PIC_ETHERNET_INT,
.end = PNX8335_PIC_ETHERNET_INT,
.flags = IORESOURCE_IRQ,
},
#endif
};
static struct platform_device pnx833x_ethernet_device = {


@ -55,6 +55,7 @@ config GENERIC_CSUM
source "init/Kconfig"
source "kernel/Kconfig.freezer"
menu "Processor type and features"


@ -566,7 +566,7 @@ config SCHED_SMT
config PPC_DENORMALISATION
bool "PowerPC denormalisation exception handling"
depends on PPC_BOOK3S_64
default "n"
default "y" if PPC_POWERNV
---help---
Add support for handling denormalisation of single precision
values. Useful for bare metal only. If unsure say Y here.


@ -247,6 +247,10 @@ struct thread_struct {
unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */
struct pt_regs ckpt_regs; /* Checkpointed registers */
unsigned long tm_tar;
unsigned long tm_ppr;
unsigned long tm_dscr;
/*
* Transactional FP and VSX 0-31 register set.
* NOTE: the sense of these is the opposite of the integer ckpt_regs!


@ -254,19 +254,28 @@
#define SPRN_HRMOR 0x139 /* Real mode offset register */
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
/* HFSCR and FSCR bit numbers are the same */
#define FSCR_TAR_LG 8 /* Enable Target Address Register */
#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
#define FSCR_TM_LG 5 /* Enable Transactional Memory */
#define FSCR_PM_LG 4 /* Enable prob/priv access to PMU SPRs */
#define FSCR_BHRB_LG 3 /* Enable Branch History Rolling Buffer*/
#define FSCR_DSCR_LG 2 /* Enable Data Stream Control Register */
#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
#define FSCR_FP_LG 0 /* Enable Floating Point */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
#define FSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
#define FSCR_TAR __MASK(FSCR_TAR_LG)
#define FSCR_EBB __MASK(FSCR_EBB_LG)
#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
#define HFSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
#define HFSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
#define HFSCR_TM (1 << (63-58)) /* Enable Transactional Memory */
#define HFSCR_PM (1 << (63-60)) /* Enable prob/priv access to PMU SPRs */
#define HFSCR_BHRB (1 << (63-59)) /* Enable Branch History Rolling Buffer*/
#define HFSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
#define HFSCR_VECVSX (1 << (63-62)) /* Enable VMX/VSX */
#define HFSCR_FP (1 << (63-63)) /* Enable Floating Point */
#define HFSCR_TAR __MASK(FSCR_TAR_LG)
#define HFSCR_EBB __MASK(FSCR_EBB_LG)
#define HFSCR_TM __MASK(FSCR_TM_LG)
#define HFSCR_PM __MASK(FSCR_PM_LG)
#define HFSCR_BHRB __MASK(FSCR_BHRB_LG)
#define HFSCR_DSCR __MASK(FSCR_DSCR_LG)
#define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
#define HFSCR_FP __MASK(FSCR_FP_LG)
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 (1ul << (63-0))
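
The new *_LG bit numbers and `__MASK()`-based definitions above are equivalent to the old open-coded shifts: FSCR_TAR was `1 << (63 - 55)`, i.e. bit 8, and FSCR_TAR_LG is 8, so the resulting masks are identical (assuming the kernel's usual `__MASK(n) == 1UL << (n)` expansion). A quick check of that arithmetic, with __MASK defined locally for illustration:

```c
#include <assert.h>
#include <stdio.h>

/* Local stand-in for the kernel's __MASK() helper. */
#define __MASK(n)	(1UL << (n))

#define FSCR_TAR_LG	8	/* 63 - 55 */
#define FSCR_EBB_LG	7	/* 63 - 56 */
#define FSCR_DSCR_LG	2	/* 63 - 61 */

int main(void)
{
	assert(__MASK(FSCR_TAR_LG)  == (1UL << (63 - 55)));
	assert(__MASK(FSCR_EBB_LG)  == (1UL << (63 - 56)));
	assert(__MASK(FSCR_DSCR_LG) == (1UL << (63 - 61)));
	printf("new __MASK(*_LG) masks match the old shift-style ones\n");
	return 0;
}
```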


@ -15,6 +15,15 @@ extern struct task_struct *__switch_to(struct task_struct *,
struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
#ifdef CONFIG_PPC_BOOK3S_64
static inline void save_tar(struct thread_struct *prev)
{
if (cpu_has_feature(CPU_FTR_ARCH_207S))
prev->tar = mfspr(SPRN_TAR);
}
#else
static inline void save_tar(struct thread_struct *prev) {}
#endif
extern void giveup_fpu(struct task_struct *);
extern void load_up_fpu(void);


@ -138,6 +138,9 @@ int main(void)
DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr));
DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar));
DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar));
DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
transact_vr[0]));


@ -1061,7 +1061,7 @@ static const struct file_operations proc_eeh_operations = {
static int __init eeh_init_proc(void)
{
if (machine_is(pseries))
if (machine_is(pseries) || machine_is(powernv))
proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
return 0;
}


@ -449,15 +449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
/*
* Back up the TAR across context switches. Note that the TAR is not
* available for use in the kernel. (To provide this, the TAR should
* be backed up/restored on exception entry/exit instead, and be in
* pt_regs. FIXME, this should be in pt_regs anyway (for debug).)
*/
mfspr r0,SPRN_TAR
std r0,THREAD_TAR(r3)
/* Event based branch registers */
mfspr r0, SPRN_BESCR
std r0, THREAD_BESCR(r3)
@ -584,9 +575,34 @@ BEGIN_FTR_SECTION
ld r7,DSCR_DEFAULT@toc(2)
ld r0,THREAD_DSCR(r4)
cmpwi r6,0
li r8, FSCR_DSCR
bne 1f
ld r0,0(r7)
1: cmpd r0,r25
b 3f
1:
BEGIN_FTR_SECTION_NESTED(70)
mfspr r6, SPRN_FSCR
or r6, r6, r8
mtspr SPRN_FSCR, r6
BEGIN_FTR_SECTION_NESTED(69)
mfspr r6, SPRN_HFSCR
or r6, r6, r8
mtspr SPRN_HFSCR, r6
END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
b 4f
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
3:
BEGIN_FTR_SECTION_NESTED(70)
mfspr r6, SPRN_FSCR
andc r6, r6, r8
mtspr SPRN_FSCR, r6
BEGIN_FTR_SECTION_NESTED(69)
mfspr r6, SPRN_HFSCR
andc r6, r6, r8
mtspr SPRN_HFSCR, r6
END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
4: cmpd r0,r25
beq 2f
mtspr SPRN_DSCR,r0
2:


@ -848,7 +848,7 @@ hv_facility_unavailable_relon_trampoline:
. = 0x4f80
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXGEN)
b facility_unavailable_relon_hv
b hv_facility_unavailable_relon_hv
STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_PPC_DENORMALISATION
@ -1175,6 +1175,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
b .ret_from_except
STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
.align 7
.globl __end_handlers
@ -1188,7 +1189,7 @@ __end_handlers:
STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable)
STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*


@ -600,6 +600,16 @@ struct task_struct *__switch_to(struct task_struct *prev,
struct ppc64_tlb_batch *batch;
#endif
/* Back up the TAR across context switches.
* Note that the TAR is not available for use in the kernel. (To
* provide this, the TAR should be backed up/restored on exception
* entry/exit instead, and be in pt_regs. FIXME, this should be in
* pt_regs anyway (for debug).)
* Save the TAR here before we do treclaim/trecheckpoint as these
* will change the TAR.
*/
save_tar(&prev->thread);
__switch_to_tm(prev);
#ifdef CONFIG_SMP


@ -233,6 +233,16 @@ dont_backup_fp:
std r5, _CCR(r7)
std r6, _XER(r7)
/* ******************** TAR, PPR, DSCR ********** */
mfspr r3, SPRN_TAR
mfspr r4, SPRN_PPR
mfspr r5, SPRN_DSCR
std r3, THREAD_TM_TAR(r12)
std r4, THREAD_TM_PPR(r12)
std r5, THREAD_TM_DSCR(r12)
/* MSR and flags: We don't change CRs, and we don't need to alter
* MSR.
*/
@ -347,6 +357,16 @@ dont_restore_fp:
mtmsr r6 /* FP/Vec off again! */
restore_gprs:
/* ******************** TAR, PPR, DSCR ********** */
ld r4, THREAD_TM_TAR(r3)
ld r5, THREAD_TM_PPR(r3)
ld r6, THREAD_TM_DSCR(r3)
mtspr SPRN_TAR, r4
mtspr SPRN_PPR, r5
mtspr SPRN_DSCR, r6
/* ******************** CR,LR,CCR,MSR ********** */
ld r3, _CTR(r7)
ld r4, _LINK(r7)


@ -44,9 +44,7 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#ifdef CONFIG_PPC32
#include <asm/reg.h>
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
@ -1296,43 +1294,54 @@ void vsx_unavailable_exception(struct pt_regs *regs)
die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
#ifdef CONFIG_PPC64
void facility_unavailable_exception(struct pt_regs *regs)
{
static char *facility_strings[] = {
"FPU",
"VMX/VSX",
"DSCR",
"PMU SPRs",
"BHRB",
"TM",
"AT",
"EBB",
"TAR",
[FSCR_FP_LG] = "FPU",
[FSCR_VECVSX_LG] = "VMX/VSX",
[FSCR_DSCR_LG] = "DSCR",
[FSCR_PM_LG] = "PMU SPRs",
[FSCR_BHRB_LG] = "BHRB",
[FSCR_TM_LG] = "TM",
[FSCR_EBB_LG] = "EBB",
[FSCR_TAR_LG] = "TAR",
};
char *facility, *prefix;
char *facility = "unknown";
u64 value;
u8 status;
bool hv;
if (regs->trap == 0xf60) {
value = mfspr(SPRN_FSCR);
prefix = "";
} else {
hv = (regs->trap == 0xf80);
if (hv)
value = mfspr(SPRN_HFSCR);
prefix = "Hypervisor ";
else
value = mfspr(SPRN_FSCR);
status = value >> 56;
if (status == FSCR_DSCR_LG) {
/* User is acessing the DSCR. Set the inherit bit and allow
* the user to set it directly in future by setting via the
* H/FSCR DSCR bit.
*/
current->thread.dscr_inherit = 1;
if (hv)
mtspr(SPRN_HFSCR, value | HFSCR_DSCR);
else
mtspr(SPRN_FSCR, value | FSCR_DSCR);
return;
}
value = value >> 56;
if ((status < ARRAY_SIZE(facility_strings)) &&
facility_strings[status])
facility = facility_strings[status];
/* We restore the interrupt state now */
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
if (value < ARRAY_SIZE(facility_strings))
facility = facility_strings[value];
else
facility = "unknown";
pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
prefix, facility, regs->nip, regs->msr);
hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
if (user_mode(regs)) {
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
@ -1341,6 +1350,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
die("Unexpected facility unavailable exception", regs, SIGABRT);
}
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM


@ -1809,7 +1809,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
rma_size <<= PAGE_SHIFT;
rmls = lpcr_rmls(rma_size);
err = -EINVAL;
if (rmls < 0) {
if ((long)rmls < 0) {
pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
goto out_srcu;
}
@ -1874,7 +1874,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
/* Allocate the guest's logical partition ID */
lpid = kvmppc_alloc_lpid();
if (lpid < 0)
if ((long)lpid < 0)
return -ENOMEM;
kvm->arch.lpid = lpid;
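
The casts added above matter because the values being tested are held in unsigned variables here, so a plain `< 0` comparison can never be true and the error paths were unreachable; casting to long before the test restores the intended check. A minimal illustration of the pitfall (hypothetical values, not the KVM code):

```c
#include <stdio.h>

int main(void)
{
	unsigned long rmls = (unsigned long)-22;	/* e.g. -EINVAL stored unsigned */

	if (rmls < 0)		/* always false for an unsigned type */
		printf("never printed\n");

	if ((long)rmls < 0)	/* the fixed test: cast back to signed first */
		printf("error detected: %ld\n", (long)rmls);

	return 0;
}
```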


@ -1047,11 +1047,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
if (err)
goto free_shadow_vcpu;
err = -ENOMEM;
p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
/* the real shared page fills the last 4k of our page */
vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
if (!p)
goto uninit_vcpu;
/* the real shared page fills the last 4k of our page */
vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
#ifdef CONFIG_PPC_BOOK3S_64
/* default to book3s_64 (970fx) */


@ -569,35 +569,6 @@ error:
return ret;
}
static int unzip_oops(char *oops_buf, char *big_buf)
{
struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
u64 timestamp = oops_hdr->timestamp;
char *big_oops_data = NULL;
char *oops_data_buf = NULL;
size_t big_oops_data_sz;
int unzipped_len;
big_oops_data = big_buf + sizeof(struct oops_log_info);
big_oops_data_sz = big_oops_buf_sz - sizeof(struct oops_log_info);
oops_data_buf = oops_buf + sizeof(struct oops_log_info);
unzipped_len = nvram_decompress(oops_data_buf, big_oops_data,
oops_hdr->report_length,
big_oops_data_sz);
if (unzipped_len < 0) {
pr_err("nvram: decompression failed; returned %d\n",
unzipped_len);
return -1;
}
oops_hdr = (struct oops_log_info *)big_buf;
oops_hdr->version = OOPS_HDR_VERSION;
oops_hdr->report_length = (u16) unzipped_len;
oops_hdr->timestamp = timestamp;
return 0;
}
static int nvram_pstore_open(struct pstore_info *psi)
{
/* Reset the iterator to start reading partitions again */
@ -685,10 +656,9 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
unsigned int err_type, id_no, size = 0;
struct nvram_os_partition *part = NULL;
char *buff = NULL, *big_buff = NULL;
int rc, sig = 0;
int sig = 0;
loff_t p;
read_partition:
read_type++;
switch (nvram_type_ids[read_type]) {
@ -749,30 +719,46 @@ read_partition:
*id = id_no;
if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
int length, unzipped_len;
size_t hdr_size;
oops_hdr = (struct oops_log_info *)buff;
*buf = buff + sizeof(*oops_hdr);
if (oops_hdr->version < OOPS_HDR_VERSION) {
/* Old format oops header had 2-byte record size */
hdr_size = sizeof(u16);
length = oops_hdr->version;
time->tv_sec = 0;
time->tv_nsec = 0;
} else {
hdr_size = sizeof(*oops_hdr);
length = oops_hdr->report_length;
time->tv_sec = oops_hdr->timestamp;
time->tv_nsec = 0;
}
*buf = kmalloc(length, GFP_KERNEL);
if (*buf == NULL)
return -ENOMEM;
memcpy(*buf, buff + hdr_size, length);
kfree(buff);
if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) {
big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL);
if (!big_buff)
return -ENOMEM;
rc = unzip_oops(buff, big_buff);
unzipped_len = nvram_decompress(*buf, big_buff,
length, big_oops_buf_sz);
if (rc != 0) {
kfree(buff);
if (unzipped_len < 0) {
pr_err("nvram: decompression failed, returned "
"rc %d\n", unzipped_len);
kfree(big_buff);
goto read_partition;
} else {
*buf = big_buff;
length = unzipped_len;
}
oops_hdr = (struct oops_log_info *)big_buff;
*buf = big_buff + sizeof(*oops_hdr);
kfree(buff);
}
time->tv_sec = oops_hdr->timestamp;
time->tv_nsec = 0;
return oops_hdr->report_length;
return length;
}
*buf = buff;
@ -816,6 +802,7 @@ static int nvram_pstore_init(void)
static void __init nvram_init_oops_partition(int rtas_partition_exists)
{
int rc;
size_t size;
rc = pseries_nvram_init_os_partition(&oops_log_partition);
if (rc != 0) {
@ -844,8 +831,9 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
big_oops_buf_sz = (oops_data_sz * 100) / 45;
big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
if (big_oops_buf) {
stream.workspace = kmalloc(zlib_deflate_workspacesize(
WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
zlib_inflate_workspacesize());
stream.workspace = kmalloc(size, GFP_KERNEL);
if (!stream.workspace) {
pr_err("nvram: No memory for compression workspace; "
"skipping compression of %s partition data\n",


@ -118,6 +118,7 @@ config S390
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
select HAVE_KERNEL_XZ
@ -227,11 +228,12 @@ config MARCH_Z196
not work on older machines.
config MARCH_ZEC12
bool "IBM zEC12"
bool "IBM zBC12 and zEC12"
select HAVE_MARCH_ZEC12_FEATURES if 64BIT
help
Select this to enable optimizations for IBM zEC12 (2827 series). The
kernel will be slightly faster but will not work on older machines.
Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and
2827 series). The kernel will be slightly faster but will not work on
older machines.
endchoice
@ -709,6 +711,7 @@ config S390_GUEST
def_bool y
prompt "s390 support for virtio devices"
depends on 64BIT
select TTY
select VIRTUALIZATION
select VIRTIO
select VIRTIO_CONSOLE


@ -6,9 +6,9 @@
BITS := $(if $(CONFIG_64BIT),64,31)
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o \
sizes.h head$(BITS).o
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
targets += misc.o piggy.o sizes.h head$(BITS).o
KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@ -48,6 +48,7 @@ vmlinux.bin.all-y := $(obj)/vmlinux.bin
suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_BZIP2) := bz2
suffix-$(CONFIG_KERNEL_LZ4) := lz4
suffix-$(CONFIG_KERNEL_LZMA) := lzma
suffix-$(CONFIG_KERNEL_LZO) := lzo
suffix-$(CONFIG_KERNEL_XZ) := xz
@ -56,6 +57,8 @@ $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
$(call if_changed,gzip)
$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
$(call if_changed,bzip2)
$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y)
$(call if_changed,lz4)
$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
$(call if_changed,lzma)
$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)


@ -47,6 +47,10 @@ static unsigned long free_mem_end_ptr;
#include "../../../../lib/decompress_bunzip2.c"
#endif
#ifdef CONFIG_KERNEL_LZ4
#include "../../../../lib/decompress_unlz4.c"
#endif
#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif


@ -693,7 +693,7 @@ static inline int find_next_bit_left(const unsigned long *addr,
size -= offset;
p = addr + offset / BITS_PER_LONG;
if (bit) {
set = __flo_word(0, *p & (~0UL << bit));
set = __flo_word(0, *p & (~0UL >> bit));
if (set >= size)
return size + offset;
if (set < BITS_PER_LONG)
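
The one-character fix above changes the mask from `~0UL << bit` to `~0UL >> bit`: find_next_bit_left() scans from the most-significant bit downwards, so the `bit` positions already passed are the top ones and must be cleared, which only the right shift does. A small demonstration of the two masks (assuming 64-bit unsigned long; illustrative only):

```c
#include <stdio.h>

int main(void)
{
	unsigned int bit = 4;	/* pretend the top 4 bit positions were already scanned */

	/* Old mask: clears the *low* bits and keeps the already-scanned top ones. */
	printf("~0UL << bit = %016lx\n", ~0UL << bit);

	/* New mask: clears the top 'bit' positions, as the fix intends. */
	printf("~0UL >> bit = %016lx\n", ~0UL >> bit);

	return 0;
}
```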


@ -32,6 +32,7 @@ struct mmu_gather {
struct mm_struct *mm;
struct mmu_table_batch *batch;
unsigned int fullmm;
unsigned long start, end;
};
struct mmu_table_batch {
@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
static inline void tlb_gather_mmu(struct mmu_gather *tlb,
struct mm_struct *mm,
unsigned int full_mm_flush)
unsigned long start,
unsigned long end)
{
tlb->mm = mm;
tlb->fullmm = full_mm_flush;
tlb->start = start;
tlb->end = end;
tlb->fullmm = !(start | (end+1));
tlb->batch = NULL;
if (tlb->fullmm)
__tlb_flush_mm(mm);


@ -52,12 +52,13 @@ static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
static bool is_in_guest(struct pt_regs *regs)
{
unsigned long ip = instruction_pointer(regs);
if (user_mode(regs))
return false;
return ip == (unsigned long) &sie_exit;
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
return instruction_pointer(regs) == (unsigned long) &sie_exit;
#else
return false;
#endif
}
static unsigned long guest_is_user_mode(struct pt_regs *regs)


@ -994,6 +994,7 @@ static void __init setup_hwcaps(void)
strcpy(elf_platform, "z196");
break;
case 0x2827:
case 0x2828:
strcpy(elf_platform, "zEC12");
break;
}


@ -702,14 +702,25 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return rc;
vcpu->arch.sie_block->icptcode = 0;
preempt_disable();
kvm_guest_enter();
preempt_enable();
VCPU_EVENT(vcpu, 6, "entering sie flags %x",
atomic_read(&vcpu->arch.sie_block->cpuflags));
trace_kvm_s390_sie_enter(vcpu,
atomic_read(&vcpu->arch.sie_block->cpuflags));
/*
* As PF_VCPU will be used in fault handler, between guest_enter
* and guest_exit should be no uaccess.
*/
preempt_disable();
kvm_guest_enter();
preempt_enable();
rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
kvm_guest_exit();
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
vcpu->arch.sie_block->icptcode);
trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
if (rc > 0)
rc = 0;
if (rc < 0) {
@ -721,10 +732,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
}
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
vcpu->arch.sie_block->icptcode);
trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
kvm_guest_exit();
memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
return rc;


@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
@ -532,8 +533,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* Only provide non-quiescing support if the host supports it */
if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
S390_lowcore.stfl_fac_list & 0x00020000)
if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* No support for conditional-SSKE */


@ -69,6 +69,7 @@ static void __init setup_zero_pages(void)
order = 2;
break;
case 0x2827: /* zEC12 */
case 0x2828: /* zEC12 */
default:
order = 5;
break;


@ -440,7 +440,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
switch (id.machine) {
case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
case 0x2827: ops->cpu_type = "s390/zEC12"; break;
case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break;
default: return -ENODEV;
}
}


@ -87,6 +87,8 @@ config STACKTRACE_SUPPORT
source "init/Kconfig"
source "kernel/Kconfig.freezer"
config MMU
def_bool y


@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
}
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->fullmm = full_mm_flush;
tlb->start = start;
tlb->end = end;
tlb->fullmm = !(start | (end+1));
init_tlb_gather(tlb);
}


@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
}
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->fullmm = full_mm_flush;
tlb->start = start;
tlb->end = end;
tlb->fullmm = !(start | (end+1));
init_tlb_gather(tlb);
}


@ -225,7 +225,7 @@ static void low_free(unsigned long size, unsigned long addr)
unsigned long nr_pages;
nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
efi_call_phys2(sys_table->boottime->free_pages, addr, size);
efi_call_phys2(sys_table->boottime->free_pages, addr, nr_pages);
}
static void find_bits(unsigned long mask, u8 *pos, u8 *size)


@ -55,9 +55,53 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif
#ifdef CONFIG_MEM_SOFT_DIRTY
/*
* Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and
* _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset
* into this range.
*/
#define PTE_FILE_MAX_BITS 28
#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1)
#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1)
#define PTE_FILE_SHIFT4 (_PAGE_BIT_SOFT_DIRTY + 1)
#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
#define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1)
#define pte_to_pgoff(pte) \
((((pte).pte_low >> (PTE_FILE_SHIFT1)) \
& ((1U << PTE_FILE_BITS1) - 1))) \
+ ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \
& ((1U << PTE_FILE_BITS2) - 1)) \
<< (PTE_FILE_BITS1)) \
+ ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \
& ((1U << PTE_FILE_BITS3) - 1)) \
<< (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
+ ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \
<< (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))
#define pgoff_to_pte(off) \
((pte_t) { .pte_low = \
((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \
+ ((((off) >> PTE_FILE_BITS1) \
& ((1U << PTE_FILE_BITS2) - 1)) \
<< PTE_FILE_SHIFT2) \
+ ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
& ((1U << PTE_FILE_BITS3) - 1)) \
<< PTE_FILE_SHIFT3) \
+ ((((off) >> \
(PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \
<< PTE_FILE_SHIFT4) \
+ _PAGE_FILE })
#else /* CONFIG_MEM_SOFT_DIRTY */
/*
* Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
* split up the 29 bits of offset into this range:
* split up the 29 bits of offset into this range.
*/
#define PTE_FILE_MAX_BITS 29
#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
@ -88,6 +132,8 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
<< PTE_FILE_SHIFT3) \
+ _PAGE_FILE })
#endif /* CONFIG_MEM_SOFT_DIRTY */
/* Encode and de-code a swap entry */
#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
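
A standalone round-trip check of the offset packing scheme the CONFIG_MEM_SOFT_DIRTY branch above implements: a 28-bit file offset is split into four chunks so it never lands on the pte bits reserved for PRESENT, FILE, PROTNONE and SOFT_DIRTY. The concrete bit positions (0, 6, 8, 11) and all names below are illustrative assumptions; only the splitting arithmetic mirrors the macros in the hunk.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_PRESENT	0	/* assumed positions, for illustration only */
#define BIT_FILE	6
#define BIT_PROTNONE	8
#define BIT_SOFT_DIRTY	11

#define SHIFT1	(BIT_PRESENT + 1)
#define SHIFT2	(BIT_FILE + 1)
#define SHIFT3	(BIT_PROTNONE + 1)
#define SHIFT4	(BIT_SOFT_DIRTY + 1)
#define BITS1	(SHIFT2 - SHIFT1 - 1)
#define BITS2	(SHIFT3 - SHIFT2 - 1)
#define BITS3	(SHIFT4 - SHIFT3 - 1)

/* pack a 28-bit offset into the non-reserved bits of a 32-bit pte_low */
static uint32_t pgoff_to_pte_low(uint32_t off)
{
	return ((off & ((1U << BITS1) - 1)) << SHIFT1)
	     + (((off >> BITS1) & ((1U << BITS2) - 1)) << SHIFT2)
	     + (((off >> (BITS1 + BITS2)) & ((1U << BITS3) - 1)) << SHIFT3)
	     + ((off >> (BITS1 + BITS2 + BITS3)) << SHIFT4);
}

/* inverse of the packing above */
static uint32_t pte_low_to_pgoff(uint32_t pte_low)
{
	return ((pte_low >> SHIFT1) & ((1U << BITS1) - 1))
	     + (((pte_low >> SHIFT2) & ((1U << BITS2) - 1)) << BITS1)
	     + (((pte_low >> SHIFT3) & ((1U << BITS3) - 1)) << (BITS1 + BITS2))
	     + ((pte_low >> SHIFT4) << (BITS1 + BITS2 + BITS3));
}

int main(void)
{
	uint32_t off;

	for (off = 0; off < (1U << 28); off += 12345)
		assert(pte_low_to_pgoff(pgoff_to_pte_low(off)) == off);

	printf("28-bit offsets round-trip cleanly around the reserved bits\n");
	return 0;
}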

View File

@ -179,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
/*
* Bits 0, 6 and 7 are taken in the low part of the pte,
* put the 32 bits of offset into the high part.
*
* For soft-dirty tracking, bit 11 is taken from
* the low part of the pte as well.
*/
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) \

View File

@ -314,6 +314,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
static inline int pte_swp_soft_dirty(pte_t pte)
{
return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
{
return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}
static inline pte_t pte_file_mksoft_dirty(pte_t pte)
{
return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}
static inline int pte_file_soft_dirty(pte_t pte)
{
return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}
/*
* Mask out unsupported bits in a present pgprot. Non-present pgprots
* can use those bits for other purposes, so leave them be.

View File

@ -61,12 +61,27 @@
* they do not conflict with each other.
*/
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_HIDDEN
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0))
#endif
/*
* Tracking the soft dirty bit when a page goes out to swap is tricky.
* We need a bit which can be stored in the pte _and_ does not conflict
* with the swap entry format. On x86 bits 6 and 7 are *not* involved
* in swap entry computation, but bit 6 is used for nonlinear
* file mapping, so we borrow bit 7 for soft dirty tracking.
*/
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE
#else
#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0))
#endif
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else

View File

@ -233,8 +233,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK
#endif /* _ASM_X86_SPINLOCK_H */

View File

@ -2270,6 +2270,7 @@ __init int intel_pmu_init(void)
case 70:
case 71:
case 63:
case 69:
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

View File

@ -314,8 +314,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = {
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"),
INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"),
INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
{ /* end: all zeroes */ },
};

View File

@ -196,15 +196,23 @@ static void __init ati_bugs_contd(int num, int slot, int func)
static void __init intel_remapping_check(int num, int slot, int func)
{
u8 revision;
u16 device;
device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
/*
* Revision 0x13 of this chipset supports irq remapping
* but has an erratum that breaks its behavior, flag it as such
* Revision 0x13 of all devices matched by this quirk has a
* problem draining interrupts when irq remapping is enabled,
* and should be flagged as broken. Additionally, revisions 0x12
* and 0x22 of device id 0x3405 have this problem.
*/
if (revision == 0x13)
set_irq_remapping_broken();
else if ((device == 0x3405) &&
((revision == 0x12) ||
(revision == 0x22)))
set_irq_remapping_broken();
}
@ -239,6 +247,8 @@ static struct chipset early_qrk[] __initdata = {
PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
{ PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{ PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{}
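
A standalone restatement of the quirk decision added above, so the new condition is easy to eyeball: IRQ remapping is treated as broken for revision 0x13 of any matched host bridge, and additionally for revisions 0x12 and 0x22 of device id 0x3405. The function name and the probe values in main() are made up for the demo.

#include <stdbool.h>
#include <stdio.h>

static bool remapping_broken(unsigned short device, unsigned char revision)
{
	if (revision == 0x13)
		return true;
	if (device == 0x3405 && (revision == 0x12 || revision == 0x22))
		return true;
	return false;
}

int main(void)
{
	printf("0x3403 rev 0x13 -> %d\n", remapping_broken(0x3403, 0x13)); /* 1 */
	printf("0x3405 rev 0x22 -> %d\n", remapping_broken(0x3405, 0x22)); /* 1 */
	printf("0x3406 rev 0x22 -> %d\n", remapping_broken(0x3406, 0x22)); /* 0 */
	return 0;
}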

View File

@ -116,7 +116,7 @@ static void mxcsr_feature_mask_init(void)
if (cpu_has_fxsr) {
memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
asm volatile("fxsave %0" : : "m" (fx_scratch));
asm volatile("fxsave %0" : "+m" (fx_scratch));
mask = fx_scratch.mxcsr_mask;
if (mask == 0)
mask = 0x0000ffbf;

View File

@ -220,12 +220,13 @@ int apply_microcode_amd(int cpu)
return 0;
}
if (__apply_microcode_amd(mc_amd))
if (__apply_microcode_amd(mc_amd)) {
pr_err("CPU%d: update failed for patch_level=0x%08x\n",
cpu, mc_amd->hdr.patch_id);
else
pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
mc_amd->hdr.patch_id);
return -1;
}
pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
mc_amd->hdr.patch_id);
uci->cpu_sig.rev = mc_amd->hdr.patch_id;
c->microcode = mc_amd->hdr.patch_id;

View File

@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
*begin = new_begin;
}
} else {
*begin = TASK_UNMAPPED_BASE;
*begin = mmap_legacy_base();
*end = TASK_SIZE;
}
}

View File

@ -98,7 +98,7 @@ static unsigned long mmap_base(void)
* Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
* does, but not when emulating X86_32
*/
static unsigned long mmap_legacy_base(void)
unsigned long mmap_legacy_base(void)
{
if (mmap_is_ia32())
return TASK_UNMAPPED_BASE;

View File

@ -451,7 +451,6 @@ static void acpi_processor_remove(struct acpi_device *device)
/* Clean up. */
per_cpu(processor_device_array, pr->id) = NULL;
per_cpu(processors, pr->id) = NULL;
try_offline_node(cpu_to_node(pr->id));
/* Remove the CPU. */
get_online_cpus();
@ -459,6 +458,8 @@ static void acpi_processor_remove(struct acpi_device *device)
acpi_unmap_lsapic(pr->id);
put_online_cpus();
try_offline_node(cpu_to_node(pr->id));
out:
free_cpumask_var(pr->throttling.shared_cpu_map);
kfree(pr);

View File

@ -31,6 +31,7 @@ static LIST_HEAD(bus_type_list);
static DECLARE_RWSEM(bus_type_sem);
#define PHYSICAL_NODE_STRING "physical_node"
#define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10)
int register_acpi_bus_type(struct acpi_bus_type *type)
{
@ -78,41 +79,108 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
return ret;
}
static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
void *addr_p, void **ret_p)
static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
void *not_used, void **ret_p)
{
unsigned long long addr, sta;
acpi_status status;
struct acpi_device *adev = NULL;
status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) {
acpi_bus_get_device(handle, &adev);
if (adev) {
*ret_p = handle;
status = acpi_bus_get_status_handle(handle, &sta);
if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_ENABLED))
return AE_CTRL_TERMINATE;
return AE_CTRL_TERMINATE;
}
return AE_OK;
}
acpi_handle acpi_get_child(acpi_handle parent, u64 address)
static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
{
void *ret = NULL;
unsigned long long sta;
acpi_status status;
if (!parent)
return NULL;
status = acpi_bus_get_status_handle(handle, &sta);
if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
return false;
acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL,
do_acpi_find_child, &address, &ret);
return (acpi_handle)ret;
if (is_bridge) {
void *test = NULL;
/* Check if this object has at least one child device. */
acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
acpi_dev_present, NULL, NULL, &test);
return !!test;
}
return true;
}
EXPORT_SYMBOL(acpi_get_child);
struct find_child_context {
u64 addr;
bool is_bridge;
acpi_handle ret;
bool ret_checked;
};
static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
void *data, void **not_used)
{
struct find_child_context *context = data;
unsigned long long addr;
acpi_status status;
status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
if (ACPI_FAILURE(status) || addr != context->addr)
return AE_OK;
if (!context->ret) {
/* This is the first matching object. Save its handle. */
context->ret = handle;
return AE_OK;
}
/*
* There is more than one matching object with the same _ADR value.
* That really is unexpected, so we are kind of beyond the scope of the
* spec here. We have to choose which one to return, though.
*
* First, check if the previously found object is good enough and return
* its handle if so. Second, check the same for the object that we've
* just found.
*/
if (!context->ret_checked) {
if (acpi_extra_checks_passed(context->ret, context->is_bridge))
return AE_CTRL_TERMINATE;
else
context->ret_checked = true;
}
if (acpi_extra_checks_passed(handle, context->is_bridge)) {
context->ret = handle;
return AE_CTRL_TERMINATE;
}
return AE_OK;
}
acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
{
if (parent) {
struct find_child_context context = {
.addr = addr,
.is_bridge = is_bridge,
};
acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child,
NULL, &context, NULL);
return context.ret;
}
return NULL;
}
EXPORT_SYMBOL_GPL(acpi_find_child);
int acpi_bind_one(struct device *dev, acpi_handle handle)
{
struct acpi_device *acpi_dev;
acpi_status status;
struct acpi_device_physical_node *physical_node, *pn;
char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
struct list_head *physnode_list;
unsigned int node_id;
int retval = -EINVAL;
if (ACPI_HANDLE(dev)) {
@ -139,25 +207,27 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
mutex_lock(&acpi_dev->physical_node_lock);
/* Sanity check. */
list_for_each_entry(pn, &acpi_dev->physical_node_list, node)
/*
* Keep the list sorted by node_id so that the IDs of removed nodes can
* be recycled easily.
*/
physnode_list = &acpi_dev->physical_node_list;
node_id = 0;
list_for_each_entry(pn, &acpi_dev->physical_node_list, node) {
/* Sanity check. */
if (pn->dev == dev) {
dev_warn(dev, "Already associated with ACPI node\n");
goto err_free;
}
/* allocate physical node id according to physical_node_id_bitmap */
physical_node->node_id =
find_first_zero_bit(acpi_dev->physical_node_id_bitmap,
ACPI_MAX_PHYSICAL_NODE);
if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
retval = -ENOSPC;
goto err_free;
if (pn->node_id == node_id) {
physnode_list = &pn->node;
node_id++;
}
}
set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap);
physical_node->node_id = node_id;
physical_node->dev = dev;
list_add_tail(&physical_node->node, &acpi_dev->physical_node_list);
list_add(&physical_node->node, physnode_list);
acpi_dev->physical_node_count++;
mutex_unlock(&acpi_dev->physical_node_lock);
@ -208,7 +278,7 @@ int acpi_unbind_one(struct device *dev)
mutex_lock(&acpi_dev->physical_node_lock);
list_for_each_safe(node, next, &acpi_dev->physical_node_list) {
char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
entry = list_entry(node, struct acpi_device_physical_node,
node);
@ -216,7 +286,6 @@ int acpi_unbind_one(struct device *dev)
continue;
list_del(node);
clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap);
acpi_dev->physical_node_count--;
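
A simplified userspace model of the node_id scheme the hunks above switch to: the bitmap and its fixed ACPI_MAX_PHYSICAL_NODE limit go away, the physical-node list is kept sorted by node_id, and the first gap in the sequence is handed to the next node, so IDs of removed nodes are recycled. This is a plain malloc'd singly-linked list for illustration, not the kernel's list_head code.

#include <stdio.h>
#include <stdlib.h>

struct pnode {
	unsigned int node_id;
	struct pnode *next;
};

static struct pnode *head;

/* Insert a new node at the first unused node_id, keeping the list sorted. */
static unsigned int add_node(void)
{
	struct pnode **link = &head;
	unsigned int node_id = 0;
	struct pnode *pn;

	for (pn = head; pn; pn = pn->next) {
		if (pn->node_id != node_id)
			break;		/* found a gap left by a removed node */
		link = &pn->next;
		node_id++;
	}

	pn = malloc(sizeof(*pn));
	if (!pn)
		exit(1);
	pn->node_id = node_id;
	pn->next = *link;
	*link = pn;
	return node_id;
}

static void remove_node(unsigned int node_id)
{
	struct pnode **link = &head;

	while (*link && (*link)->node_id != node_id)
		link = &(*link)->next;
	if (*link) {
		struct pnode *victim = *link;

		*link = victim->next;
		free(victim);
	}
}

int main(void)
{
	printf("%u %u %u\n", add_node(), add_node(), add_node());	/* 0 1 2 */
	remove_node(1);
	printf("%u\n", add_node());					/* 1 is recycled */
	return 0;
}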

View File

@ -311,6 +311,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
dev->pnp.bus_id,
(u32) dev->wakeup.sleep_state);
mutex_lock(&dev->physical_node_lock);
if (!dev->physical_node_count) {
seq_printf(seq, "%c%-8s\n",
dev->wakeup.flags.run_wake ? '*' : ' ',
@ -338,6 +340,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
put_device(ldev);
}
}
mutex_unlock(&dev->physical_node_lock);
}
mutex_unlock(&acpi_device_lock);
return 0;
@ -347,12 +351,16 @@ static void physical_device_enable_wakeup(struct acpi_device *adev)
{
struct acpi_device_physical_node *entry;
mutex_lock(&adev->physical_node_lock);
list_for_each_entry(entry,
&adev->physical_node_list, node)
if (entry->dev && device_can_wakeup(entry->dev)) {
bool enable = !device_may_wakeup(entry->dev);
device_set_wakeup_enable(entry->dev, enable);
}
mutex_unlock(&adev->physical_node_lock);
}
static ssize_t

View File

@ -689,7 +689,7 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
* Some systems always report current brightness level as maximum
* through _BQC, we need to test another value for them.
*/
test_level = current_level == max_level ? br->levels[2] : max_level;
test_level = current_level == max_level ? br->levels[3] : max_level;
result = acpi_video_device_lcd_set_level(device, test_level);
if (result)

View File

@ -237,6 +237,7 @@ static const struct of_device_id imx_pata_dt_ids[] = {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(of, imx_pata_dt_ids);
static struct platform_driver pata_imx_driver = {
.probe = pata_imx_probe,

View File

@ -719,7 +719,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
}
}
return regcache_sync_block_raw_flush(map, &data, base, regtmp);
return regcache_sync_block_raw_flush(map, &data, base, regtmp +
map->reg_stride);
}
int regcache_sync_block(struct regmap *map, void *block,
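
A tiny sketch of the off-by-one the regcache hunk fixes: the flush helper treats its last argument as an exclusive end, so syncing up to and including register regtmp requires passing regtmp + reg_stride. The register numbers below are arbitrary demo values and regs_in_range() is an illustrative stand-in, not a regmap API.

#include <stdio.h>

/* number of registers covered by the half-open range [base, end) */
static int regs_in_range(unsigned int base, unsigned int end, unsigned int stride)
{
	return (end - base) / stride;
}

int main(void)
{
	unsigned int base = 0x10, regtmp = 0x20, stride = 4;

	printf("end = regtmp          -> %d regs\n",
	       regs_in_range(base, regtmp, stride));		/* last register missed */
	printf("end = regtmp + stride -> %d regs\n",
	       regs_in_range(base, regtmp + stride, stride));	/* last register included */
	return 0;
}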

View File

@ -906,16 +906,10 @@ bio_pageinc(struct bio *bio)
int i;
bio_for_each_segment(bv, bio, i) {
page = bv->bv_page;
/* Non-zero page count for non-head members of
* compound pages is no longer allowed by the kernel,
* but this has never been seen here.
* compound pages is no longer allowed by the kernel.
*/
if (unlikely(PageCompound(page)))
if (compound_trans_head(page) != page) {
pr_crit("page tail used for block I/O\n");
BUG();
}
page = compound_trans_head(bv->bv_page);
atomic_inc(&page->_count);
}
}
@ -924,10 +918,13 @@ static void
bio_pagedec(struct bio *bio)
{
struct bio_vec *bv;
struct page *page;
int i;
bio_for_each_segment(bv, bio, i)
atomic_dec(&bv->bv_page->_count);
bio_for_each_segment(bv, bio, i) {
page = compound_trans_head(bv->bv_page);
atomic_dec(&page->_count);
}
}
static void

View File

@ -272,9 +272,12 @@ static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
unsigned long flags;
spin_lock_irqsave(&portdev->ports_lock, flags);
list_for_each_entry(port, &portdev->ports, list)
if (port->cdev->dev == dev)
list_for_each_entry(port, &portdev->ports, list) {
if (port->cdev->dev == dev) {
kref_get(&port->kref);
goto out;
}
}
port = NULL;
out:
spin_unlock_irqrestore(&portdev->ports_lock, flags);
@ -746,6 +749,10 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
port = filp->private_data;
/* Port is hot-unplugged. */
if (!port->guest_connected)
return -ENODEV;
if (!port_has_data(port)) {
/*
* If nothing's connected on the host just return 0 in
@ -762,7 +769,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
if (ret < 0)
return ret;
}
/* Port got hot-unplugged. */
/* Port got hot-unplugged while we were waiting above. */
if (!port->guest_connected)
return -ENODEV;
/*
@ -932,13 +939,25 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
if (is_rproc_serial(port->out_vq->vdev))
return -EINVAL;
/*
* pipe->nrbufs == 0 means there is no data to transfer,
* so just return 0 here.
*/
pipe_lock(pipe);
if (!pipe->nrbufs) {
ret = 0;
goto error_out;
}
ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
return ret;
goto error_out;
buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
if (!buf)
return -ENOMEM;
if (!buf) {
ret = -ENOMEM;
goto error_out;
}
sgl.n = 0;
sgl.len = 0;
@ -946,12 +965,17 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
sgl.sg = buf->sg;
sg_init_table(sgl.sg, sgl.size);
ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
pipe_unlock(pipe);
if (likely(ret > 0))
ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
if (unlikely(ret <= 0))
free_buf(buf, true);
return ret;
error_out:
pipe_unlock(pipe);
return ret;
}
static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
@ -1019,14 +1043,14 @@ static int port_fops_open(struct inode *inode, struct file *filp)
struct port *port;
int ret;
/* We get the port with a kref here */
port = find_port_by_devt(cdev->dev);
if (!port) {
/* Port was unplugged before we could proceed */
return -ENXIO;
}
filp->private_data = port;
/* Prevent against a port getting hot-unplugged at the same time */
spin_lock_irq(&port->portdev->ports_lock);
kref_get(&port->kref);
spin_unlock_irq(&port->portdev->ports_lock);
/*
* Don't allow opening of console port devices -- that's done
* via /dev/hvc
@ -1498,14 +1522,6 @@ static void remove_port(struct kref *kref)
port = container_of(kref, struct port, kref);
sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
device_destroy(pdrvdata.class, port->dev->devt);
cdev_del(port->cdev);
kfree(port->name);
debugfs_remove(port->debugfs_file);
kfree(port);
}
@ -1539,12 +1555,14 @@ static void unplug_port(struct port *port)
spin_unlock_irq(&port->portdev->ports_lock);
if (port->guest_connected) {
port->guest_connected = false;
port->host_connected = false;
wake_up_interruptible(&port->waitqueue);
/* Let the app know the port is going down. */
send_sigio_to_port(port);
/* Do this after sigio is actually sent */
port->guest_connected = false;
port->host_connected = false;
wake_up_interruptible(&port->waitqueue);
}
if (is_console_port(port)) {
@ -1563,6 +1581,14 @@ static void unplug_port(struct port *port)
*/
port->portdev = NULL;
sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
device_destroy(pdrvdata.class, port->dev->devt);
cdev_del(port->cdev);
kfree(port->name);
debugfs_remove(port->debugfs_file);
/*
* Locks around here are not necessary - a port can't be
* opened after we removed the port struct from ports_list

View File

@ -581,11 +581,15 @@ struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4),
DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8),
DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4),
DIV(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3),
DIV(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3),
DIV_F(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3,
CLK_GET_RATE_NOCACHE, 0),
DIV_F(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3,
CLK_GET_RATE_NOCACHE, 0),
DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3),
DIV(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, 4, 3),
DIV(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 8, 3),
DIV_F(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1,
4, 3, CLK_GET_RATE_NOCACHE, 0),
DIV_F(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
8, 3, CLK_GET_RATE_NOCACHE, 0),
DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
};
@ -863,57 +867,57 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100",
E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"),
GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
CLK_IGNORE_UNUSED, 0),
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
};

View File

@ -71,6 +71,7 @@ static DEFINE_SPINLOCK(armpll_lock);
static DEFINE_SPINLOCK(ddrpll_lock);
static DEFINE_SPINLOCK(iopll_lock);
static DEFINE_SPINLOCK(armclk_lock);
static DEFINE_SPINLOCK(swdtclk_lock);
static DEFINE_SPINLOCK(ddrclk_lock);
static DEFINE_SPINLOCK(dciclk_lock);
static DEFINE_SPINLOCK(gem0clk_lock);
@ -293,7 +294,7 @@ static void __init zynq_clk_setup(struct device_node *np)
}
clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt],
swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT,
SLCR_SWDT_CLK_SEL, 0, 1, 0, &gem0clk_lock);
SLCR_SWDT_CLK_SEL, 0, 1, 0, &swdtclk_lock);
/* DDR clocks */
clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0,
@ -364,8 +365,9 @@ static void __init zynq_clk_setup(struct device_node *np)
CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&gem0clk_lock);
clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, 0,
SLCR_GEM0_CLK_CTRL, 6, 1, 0, &gem0clk_lock);
clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2,
CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 6, 1, 0,
&gem0clk_lock);
clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0],
"gem0_emio_mux", CLK_SET_RATE_PARENT,
SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock);
@ -386,8 +388,9 @@ static void __init zynq_clk_setup(struct device_node *np)
CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&gem1clk_lock);
clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, 0,
SLCR_GEM1_CLK_CTRL, 6, 1, 0, &gem1clk_lock);
clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2,
CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 6, 1, 0,
&gem1clk_lock);
clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1],
"gem1_emio_mux", CLK_SET_RATE_PARENT,
SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock);

View File

@ -221,8 +221,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
return count;
}
static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
size_t count)
static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
const char *buf, size_t count)
{
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input, j;
@ -235,10 +235,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
if (input > 1)
input = 1;
if (input == cs_tuners->ignore_nice) /* nothing to do */
if (input == cs_tuners->ignore_nice_load) /* nothing to do */
return count;
cs_tuners->ignore_nice = input;
cs_tuners->ignore_nice_load = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
@ -246,7 +246,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
dbs_info = &per_cpu(cs_cpu_dbs_info, j);
dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->cdbs.prev_cpu_wall, 0);
if (cs_tuners->ignore_nice)
if (cs_tuners->ignore_nice_load)
dbs_info->cdbs.prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
@ -279,7 +279,7 @@ show_store_one(cs, sampling_rate);
show_store_one(cs, sampling_down_factor);
show_store_one(cs, up_threshold);
show_store_one(cs, down_threshold);
show_store_one(cs, ignore_nice);
show_store_one(cs, ignore_nice_load);
show_store_one(cs, freq_step);
declare_show_sampling_rate_min(cs);
@ -287,7 +287,7 @@ gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(down_threshold);
gov_sys_pol_attr_rw(ignore_nice);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(freq_step);
gov_sys_pol_attr_ro(sampling_rate_min);
@ -297,7 +297,7 @@ static struct attribute *dbs_attributes_gov_sys[] = {
&sampling_down_factor_gov_sys.attr,
&up_threshold_gov_sys.attr,
&down_threshold_gov_sys.attr,
&ignore_nice_gov_sys.attr,
&ignore_nice_load_gov_sys.attr,
&freq_step_gov_sys.attr,
NULL
};
@ -313,7 +313,7 @@ static struct attribute *dbs_attributes_gov_pol[] = {
&sampling_down_factor_gov_pol.attr,
&up_threshold_gov_pol.attr,
&down_threshold_gov_pol.attr,
&ignore_nice_gov_pol.attr,
&ignore_nice_load_gov_pol.attr,
&freq_step_gov_pol.attr,
NULL
};
@ -338,7 +338,7 @@ static int cs_init(struct dbs_data *dbs_data)
tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
tuners->ignore_nice = 0;
tuners->ignore_nice_load = 0;
tuners->freq_step = DEF_FREQUENCY_STEP;
dbs_data->tuners = tuners;
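
A hedged sketch of why the ignore_nice -> ignore_nice_load rename has to touch every show_store_one()/gov_sys_pol_attr_rw() site: the governor's sysfs handlers are generated by token pasting, so the attribute name and the tuner field name must stay in sync. The macro below is a simplified stand-in for illustration, not the real helper from cpufreq_governor.h.

#include <stdio.h>

struct cs_dbs_tuners {
	unsigned int ignore_nice_load;
	unsigned int freq_step;
};

/* generates show_<field>() whose name and printed label track the field */
#define show_one(file_name)						\
static void show_##file_name(struct cs_dbs_tuners *t)			\
{									\
	printf(#file_name " = %u\n", t->file_name);			\
}

show_one(ignore_nice_load)
show_one(freq_step)

int main(void)
{
	struct cs_dbs_tuners t = { .ignore_nice_load = 1, .freq_step = 5 };

	show_ignore_nice_load(&t);
	show_freq_step(&t);
	return 0;
}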

Some files were not shown because too many files have changed in this diff.