Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Minor line offset auto-merges.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2012-11-17 22:00:43 -05:00
commit 67f4efdce7
384 changed files with 4492 additions and 2372 deletions

View File

@ -27,17 +27,17 @@ Start End Size Use
-----------------------------------------------------------------------
0000000000000000 0000007fffffffff 512GB user
ffffff8000000000 ffffffbbfffcffff ~240GB vmalloc
ffffff8000000000 ffffffbbfffeffff ~240GB vmalloc
ffffffbbfffd0000 ffffffbbfffdffff 64KB [guard page]
ffffffbbfffe0000 ffffffbbfffeffff 64KB PCI I/O space
ffffffbbffff0000 ffffffbbffffffff 64KB [guard page]
ffffffbbffff0000 ffffffbbffffffff 64KB [guard page]
ffffffbc00000000 ffffffbdffffffff 8GB vmemmap
ffffffbe00000000 ffffffbffbffffff ~8GB [guard, future vmmemap]
ffffffbe00000000 ffffffbffbbfffff ~8GB [guard, future vmmemap]
ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O space
ffffffbffbe10000 ffffffbffbffffff ~2MB [guard]
ffffffbffc000000 ffffffbfffffffff 64MB modules

View File

@ -466,6 +466,10 @@ Note:
5.3 swappiness
Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
Please note that unlike the global swappiness, memcg knob set to 0
really prevents from any swapping even if there is a swap storage
available. This might lead to memcg OOM killer if there are no file
pages to reclaim.
Following cgroups' swappiness can't be changed.
- root cgroup (uses /proc/sys/vm/swappiness).
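A concrete way to exercise the knob described above, as a minimal
user-space sketch that sets one group's swappiness to 0. The cgroup-v1
mount point /sys/fs/cgroup/memory and the group name "mygroup" are
assumptions for illustration, not part of the patch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *knob = "/sys/fs/cgroup/memory/mygroup/memory.swappiness";
	int fd = open(knob, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/*
	 * 0 means: never swap this group's anonymous pages, even if swap
	 * space is free -- the memcg OOM killer may fire instead.
	 */
	if (write(fd, "0", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}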

View File

@ -33,7 +33,7 @@ Table of Contents
2 Modifying System Parameters
3 Per-Process Parameters
3.1 /proc/<pid>/oom_score_adj - Adjust the oom-killer
3.1 /proc/<pid>/oom_adj & /proc/<pid>/oom_score_adj - Adjust the oom-killer
score
3.2 /proc/<pid>/oom_score - Display current oom-killer score
3.3 /proc/<pid>/io - Display the IO accounting fields
@ -1320,10 +1320,10 @@ of the kernel.
CHAPTER 3: PER-PROCESS PARAMETERS
------------------------------------------------------------------------------
3.1 /proc/<pid>/oom_score_adj - Adjust the oom-killer score
3.1 /proc/<pid>/oom_adj & /proc/<pid>/oom_score_adj - Adjust the oom-killer score
--------------------------------------------------------------------------------
This file can be used to adjust the badness heuristic used to select which
These file can be used to adjust the badness heuristic used to select which
process gets killed in out of memory conditions.
The badness heuristic assigns a value to each candidate task ranging from 0
@ -1361,6 +1361,12 @@ same system, cpuset, mempolicy, or memory controller resources to use at least
equivalent to discounting 50% of the task's allowed memory from being considered
as scoring against the task.
For backwards compatibility with previous kernels, /proc/<pid>/oom_adj may also
be used to tune the badness score. Its acceptable values range from -16
(OOM_ADJUST_MIN) to +15 (OOM_ADJUST_MAX) and a special value of -17
(OOM_DISABLE) to disable oom killing entirely for that task. Its value is
scaled linearly with /proc/<pid>/oom_score_adj.
The value of /proc/<pid>/oom_score_adj may be reduced no lower than the last
value set by a CAP_SYS_RESOURCE process. To reduce the value any lower
requires CAP_SYS_RESOURCE.
@ -1375,7 +1381,9 @@ minimal amount of work.
-------------------------------------------------------------
This file can be used to check the current score used by the oom-killer is for
any given <pid>.
any given <pid>. Use it together with /proc/<pid>/oom_score_adj to tune which
process should be killed in an out-of-memory situation.
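A short illustration of the two files documented above, assuming the
usual /proc layout; this sketch first exempts the calling process from
the OOM killer, then reads back its badness score:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64];
	FILE *f;
	int score;

	/* -1000 (OOM_SCORE_ADJ_MIN) disables oom killing for the task;
	 * lowering the value again later may require CAP_SYS_RESOURCE. */
	snprintf(path, sizeof(path), "/proc/%d/oom_score_adj", (int)getpid());
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "-1000");
	fclose(f);

	/* oom_score is read-only: the current badness as seen by the kernel. */
	snprintf(path, sizeof(path), "/proc/%d/oom_score", (int)getpid());
	f = fopen(path, "r");
	if (f && fscanf(f, "%d", &score) == 1)
		printf("oom_score: %d\n", score);
	if (f)
		fclose(f);
	return 0;
}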
3.3 /proc/<pid>/io - Display the IO accounting fields
-------------------------------------------------------

View File

@ -164,4 +164,4 @@ read the CRC recorded by the NIC on receipt of the packet.
This requests that the NIC receive all possible frames, including errored
frames (such as bad FCS, etc). This can be helpful when sniffing a link with
bad packets on it. Some NICs may receive more packets if also put into normal
PROMISC mdoe.
PROMISC mode.
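Promiscuous mode itself is set with the long-standing
SIOCGIFFLAGS/SIOCSIFFLAGS ioctls; a minimal sketch, where the
interface name "eth0" is a placeholder:

#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0) {
		ifr.ifr_flags |= IFF_PROMISC;	/* receive all frames */
		ioctl(fd, SIOCSIFFLAGS, &ifr);
	}
	close(fd);
	return 0;
}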

View File

@ -2507,6 +2507,7 @@ M: Joonyoung Shim <jy0922.shim@samsung.com>
M: Seung-Woo Kim <sw0312.kim@samsung.com>
M: Kyungmin Park <kyungmin.park@samsung.com>
L: dri-devel@lists.freedesktop.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git
S: Supported
F: drivers/gpu/drm/exynos
F: include/drm/exynos*
@ -3597,6 +3598,49 @@ F: drivers/hid/hid-hyperv.c
F: drivers/net/hyperv/
F: drivers/staging/hv/
I2C OVER PARALLEL PORT
M: Jean Delvare <khali@linux-fr.org>
L: linux-i2c@vger.kernel.org
S: Maintained
F: Documentation/i2c/busses/i2c-parport
F: Documentation/i2c/busses/i2c-parport-light
F: drivers/i2c/busses/i2c-parport.c
F: drivers/i2c/busses/i2c-parport-light.c
I2C/SMBUS CONTROLLER DRIVERS FOR PC
M: Jean Delvare <khali@linux-fr.org>
L: linux-i2c@vger.kernel.org
S: Maintained
F: Documentation/i2c/busses/i2c-ali1535
F: Documentation/i2c/busses/i2c-ali1563
F: Documentation/i2c/busses/i2c-ali15x3
F: Documentation/i2c/busses/i2c-amd756
F: Documentation/i2c/busses/i2c-amd8111
F: Documentation/i2c/busses/i2c-i801
F: Documentation/i2c/busses/i2c-nforce2
F: Documentation/i2c/busses/i2c-piix4
F: Documentation/i2c/busses/i2c-sis5595
F: Documentation/i2c/busses/i2c-sis630
F: Documentation/i2c/busses/i2c-sis96x
F: Documentation/i2c/busses/i2c-via
F: Documentation/i2c/busses/i2c-viapro
F: drivers/i2c/busses/i2c-ali1535.c
F: drivers/i2c/busses/i2c-ali1563.c
F: drivers/i2c/busses/i2c-ali15x3.c
F: drivers/i2c/busses/i2c-amd756.c
F: drivers/i2c/busses/i2c-amd756-s4882.c
F: drivers/i2c/busses/i2c-amd8111.c
F: drivers/i2c/busses/i2c-i801.c
F: drivers/i2c/busses/i2c-isch.c
F: drivers/i2c/busses/i2c-nforce2.c
F: drivers/i2c/busses/i2c-nforce2-s4985.c
F: drivers/i2c/busses/i2c-piix4.c
F: drivers/i2c/busses/i2c-sis5595.c
F: drivers/i2c/busses/i2c-sis630.c
F: drivers/i2c/busses/i2c-sis96x.c
F: drivers/i2c/busses/i2c-via.c
F: drivers/i2c/busses/i2c-viapro.c
I2C/SMBUS STUB DRIVER
M: "Mark M. Hoffman" <mhoffman@lightlink.com>
L: linux-i2c@vger.kernel.org
@ -3604,9 +3648,8 @@ S: Maintained
F: drivers/i2c/busses/i2c-stub.c
I2C SUBSYSTEM
M: "Jean Delvare (PC drivers, core)" <khali@linux-fr.org>
M: Wolfram Sang <w.sang@pengutronix.de>
M: "Ben Dooks (embedded platforms)" <ben-linux@fluff.org>
M: "Wolfram Sang (embedded platforms)" <w.sang@pengutronix.de>
L: linux-i2c@vger.kernel.org
W: http://i2c.wiki.kernel.org/
T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/
@ -3617,6 +3660,13 @@ F: drivers/i2c/
F: include/linux/i2c.h
F: include/linux/i2c-*.h
I2C-TAOS-EVM DRIVER
M: Jean Delvare <khali@linux-fr.org>
L: linux-i2c@vger.kernel.org
S: Maintained
F: Documentation/i2c/busses/i2c-taos-evm
F: drivers/i2c/busses/i2c-taos-evm.c
I2C-TINY-USB DRIVER
M: Till Harbaum <till@harbaum.org>
L: linux-i2c@vger.kernel.org
@ -7212,6 +7262,14 @@ L: linux-xtensa@linux-xtensa.org
S: Maintained
F: arch/xtensa/
THERMAL
M: Zhang Rui <rui.zhang@intel.com>
L: linux-pm@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
S: Supported
F: drivers/thermal/
F: include/linux/thermal.h
THINKPAD ACPI EXTRAS DRIVER
M: Henrique de Moraes Holschuh <ibm-acpi@hmh.eng.br>
L: ibm-acpi-devel@lists.sourceforge.net
@ -7895,13 +7953,6 @@ M: Roger Luethi <rl@hellgate.ch>
S: Maintained
F: drivers/net/ethernet/via/via-rhine.c
VIAPRO SMBUS DRIVER
M: Jean Delvare <khali@linux-fr.org>
L: linux-i2c@vger.kernel.org
S: Maintained
F: Documentation/i2c/busses/i2c-viapro
F: drivers/i2c/busses/i2c-viapro.c
VIA SD/MMC CARD CONTROLLER DRIVER
M: Bruce Chang <brucechang@via.com.tw>
M: Harald Welte <HaraldWelte@viatech.com>

View File

@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 7
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc6
NAME = Terrified Chipmunk
# *DOCUMENTATION*

View File

@ -33,7 +33,7 @@ ifeq ($(CONFIG_XIP_KERNEL),y)
$(obj)/xipImage: vmlinux FORCE
$(call if_changed,objcopy)
$(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
@$(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
$(obj)/Image $(obj)/zImage: FORCE
@echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)'
@ -48,14 +48,14 @@ $(obj)/xipImage: FORCE
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
$(kecho) ' Kernel: $@ is ready'
@$(kecho) ' Kernel: $@ is ready'
$(obj)/compressed/vmlinux: $(obj)/Image FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
$(obj)/zImage: $(obj)/compressed/vmlinux FORCE
$(call if_changed,objcopy)
$(kecho) ' Kernel: $@ is ready'
@$(kecho) ' Kernel: $@ is ready'
endif
@ -90,7 +90,7 @@ fi
$(obj)/uImage: $(obj)/zImage FORCE
@$(check_for_multiple_loadaddr)
$(call if_changed,uimage)
$(kecho) ' Image $@ is ready'
@$(kecho) ' Image $@ is ready'
$(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
$(Q)$(MAKE) $(build)=$(obj)/bootp $@
@ -98,7 +98,7 @@ $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
$(obj)/bootpImage: $(obj)/bootp/bootp FORCE
$(call if_changed,objcopy)
$(kecho) ' Kernel: $@ is ready'
@$(kecho) ' Kernel: $@ is ready'
PHONY += initrd FORCE
initrd:

View File

@ -73,8 +73,8 @@
pinmux: pinmux {
compatible = "nvidia,tegra30-pinmux";
reg = <0x70000868 0xd0 /* Pad control registers */
0x70003000 0x3e0>; /* Mux registers */
reg = <0x70000868 0xd4 /* Pad control registers */
0x70003000 0x3e4>; /* Mux registers */
};
serial@70006000 {

View File

@ -64,7 +64,7 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
asm volatile("strh %1, %0"
: "+Qo" (*(volatile u16 __force *)addr)
: "+Q" (*(volatile u16 __force *)addr)
: "r" (val));
}
@ -72,7 +72,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
{
u16 val;
asm volatile("ldrh %1, %0"
: "+Qo" (*(volatile u16 __force *)addr),
: "+Q" (*(volatile u16 __force *)addr),
"=r" (val));
return val;
}

View File

@ -10,7 +10,5 @@
extern void sched_clock_postinit(void);
extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
unsigned long rate);
#endif

View File

@ -27,9 +27,9 @@
#if __LINUX_ARM_ARCH__ <= 6
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPv3D16
ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space
tst \tmp, #HWCAP_VFPD32
ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
@ -51,9 +51,9 @@
#if __LINUX_ARM_ARCH__ <= 6
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPv3D16
stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space
tst \tmp, #HWCAP_VFPD32
stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field

View File

@ -18,11 +18,12 @@
#define HWCAP_THUMBEE (1 << 11)
#define HWCAP_NEON (1 << 12)
#define HWCAP_VFPv3 (1 << 13)
#define HWCAP_VFPv3D16 (1 << 14)
#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
#define HWCAP_TLS (1 << 15)
#define HWCAP_VFPv4 (1 << 16)
#define HWCAP_IDIVA (1 << 17)
#define HWCAP_IDIVT (1 << 18)
#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
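User space can test the new bit through the ELF auxiliary vector. A
minimal sketch, assuming glibc's getauxval() (available since glibc
2.16); the constants are copied from the hunk above rather than taken
from <asm/hwcap.h>:

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_VFPv3D16	(1 << 14)	/* also set for VFPv4-D16 */
#define HWCAP_VFPD32	(1 << 19)	/* VFP has 32 regs (not 16) */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("D16-only VFP : %s\n", (hwcap & HWCAP_VFPv3D16) ? "yes" : "no");
	printf("32 VFP regs  : %s\n", (hwcap & HWCAP_VFPD32) ? "yes" : "no");
	return 0;
}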

View File

@ -107,13 +107,6 @@ static void sched_clock_poll(unsigned long wrap_ticks)
update_sched_clock();
}
void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
unsigned long rate)
{
setup_sched_clock(read, bits, rate);
cd.needs_suspend = true;
}
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
unsigned long r, w;
@ -189,18 +182,15 @@ void __init sched_clock_postinit(void)
static int sched_clock_suspend(void)
{
sched_clock_poll(sched_clock_timer.data);
if (cd.needs_suspend)
cd.suspended = true;
cd.suspended = true;
return 0;
}
static void sched_clock_resume(void)
{
if (cd.needs_suspend) {
cd.epoch_cyc = read_sched_clock();
cd.epoch_cyc_copy = cd.epoch_cyc;
cd.suspended = false;
}
cd.epoch_cyc = read_sched_clock();
cd.epoch_cyc_copy = cd.epoch_cyc;
cd.suspended = false;
}
static struct syscore_ops sched_clock_ops = {

View File

@ -68,7 +68,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
/* Enable overcurrent notification */
for (i = 0; i < data->ports; i++) {
if (data->overcurrent_pin[i])
if (gpio_is_valid(data->overcurrent_pin[i]))
at91_set_gpio_input(data->overcurrent_pin[i], 1);
}

View File

@ -72,7 +72,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
/* Enable overcurrent notification */
for (i = 0; i < data->ports; i++) {
if (data->overcurrent_pin[i])
if (gpio_is_valid(data->overcurrent_pin[i]))
at91_set_gpio_input(data->overcurrent_pin[i], 1);
}

View File

@ -72,7 +72,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
/* Enable overcurrent notification */
for (i = 0; i < data->ports; i++) {
if (data->overcurrent_pin[i])
if (gpio_is_valid(data->overcurrent_pin[i]))
at91_set_gpio_input(data->overcurrent_pin[i], 1);
}

View File

@ -78,7 +78,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
/* Enable overcurrent notification */
for (i = 0; i < data->ports; i++) {
if (data->overcurrent_pin[i])
if (gpio_is_valid(data->overcurrent_pin[i]))
at91_set_gpio_input(data->overcurrent_pin[i], 1);
}

View File

@ -1841,8 +1841,8 @@ static struct resource sha_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9G45_ID_AESTDESSHA,
.end = AT91SAM9G45_ID_AESTDESSHA,
.start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.flags = IORESOURCE_IRQ,
},
};
@ -1874,8 +1874,8 @@ static struct resource tdes_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9G45_ID_AESTDESSHA,
.end = AT91SAM9G45_ID_AESTDESSHA,
.start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.flags = IORESOURCE_IRQ,
},
};
@ -1910,8 +1910,8 @@ static struct resource aes_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9G45_ID_AESTDESSHA,
.end = AT91SAM9G45_ID_AESTDESSHA,
.start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.flags = IORESOURCE_IRQ,
},
};

View File

@ -28,6 +28,7 @@ void highbank_restart(char mode, const char *cmd)
hignbank_set_pwr_soft_reset();
scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
cpu_do_idle();
while (1)
cpu_do_idle();
}

View File

@ -112,7 +112,7 @@ struct clk *clk_register_gate2(struct device *dev, const char *name,
clk = clk_register(dev, &gate->hw);
if (IS_ERR(clk))
kfree(clk);
kfree(gate);
return clk;
}

View File

@ -30,7 +30,7 @@
#define MX25_H1_SIC_SHIFT 21
#define MX25_H1_SIC_MASK (0x3 << MX25_H1_SIC_SHIFT)
#define MX25_H1_PP_BIT (1 << 18)
#define MX25_H1_PM_BIT (1 << 8)
#define MX25_H1_PM_BIT (1 << 16)
#define MX25_H1_IPPUE_UP_BIT (1 << 7)
#define MX25_H1_IPPUE_DOWN_BIT (1 << 6)
#define MX25_H1_TLL_BIT (1 << 5)

View File

@ -30,7 +30,7 @@
#define MX35_H1_SIC_SHIFT 21
#define MX35_H1_SIC_MASK (0x3 << MX35_H1_SIC_SHIFT)
#define MX35_H1_PP_BIT (1 << 18)
#define MX35_H1_PM_BIT (1 << 8)
#define MX35_H1_PM_BIT (1 << 16)
#define MX35_H1_IPPUE_UP_BIT (1 << 7)
#define MX35_H1_IPPUE_DOWN_BIT (1 << 6)
#define MX35_H1_TLL_BIT (1 << 5)

View File

@ -359,7 +359,7 @@ static struct clockdomain iss_44xx_clkdm = {
.clkdm_offs = OMAP4430_CM2_CAM_CAM_CDOFFS,
.wkdep_srcs = iss_wkup_sleep_deps,
.sleepdep_srcs = iss_wkup_sleep_deps,
.flags = CLKDM_CAN_HWSUP_SWSUP,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain l3_dss_44xx_clkdm = {

View File

@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/pinctrl/machine.h>
#include <linux/platform_data/omap4-keypad.h>
#include <linux/platform_data/omap_ocp2scp.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
@ -613,6 +614,83 @@ static void omap_init_vout(void)
static inline void omap_init_vout(void) {}
#endif
#if defined(CONFIG_OMAP_OCP2SCP) || defined(CONFIG_OMAP_OCP2SCP_MODULE)
static int count_ocp2scp_devices(struct omap_ocp2scp_dev *ocp2scp_dev)
{
int cnt = 0;
while (ocp2scp_dev->drv_name != NULL) {
cnt++;
ocp2scp_dev++;
}
return cnt;
}
static void omap_init_ocp2scp(void)
{
struct omap_hwmod *oh;
struct platform_device *pdev;
int bus_id = -1, dev_cnt = 0, i;
struct omap_ocp2scp_dev *ocp2scp_dev;
const char *oh_name, *name;
struct omap_ocp2scp_platform_data *pdata;
if (!cpu_is_omap44xx())
return;
oh_name = "ocp2scp_usb_phy";
name = "omap-ocp2scp";
oh = omap_hwmod_lookup(oh_name);
if (!oh) {
pr_err("%s: could not find omap_hwmod for %s\n", __func__,
oh_name);
return;
}
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
pr_err("%s: No memory for ocp2scp pdata\n", __func__);
return;
}
ocp2scp_dev = oh->dev_attr;
dev_cnt = count_ocp2scp_devices(ocp2scp_dev);
if (!dev_cnt) {
pr_err("%s: No devices connected to ocp2scp\n", __func__);
kfree(pdata);
return;
}
pdata->devices = kzalloc(sizeof(struct omap_ocp2scp_dev *)
* dev_cnt, GFP_KERNEL);
if (!pdata->devices) {
pr_err("%s: No memory for ocp2scp pdata devices\n", __func__);
kfree(pdata);
return;
}
for (i = 0; i < dev_cnt; i++, ocp2scp_dev++)
pdata->devices[i] = ocp2scp_dev;
pdata->dev_cnt = dev_cnt;
pdev = omap_device_build(name, bus_id, oh, pdata, sizeof(*pdata), NULL,
0, false);
if (IS_ERR(pdev)) {
pr_err("Could not build omap_device for %s %s\n",
name, oh_name);
kfree(pdata->devices);
kfree(pdata);
return;
}
}
#else
static inline void omap_init_ocp2scp(void) { }
#endif
/*-------------------------------------------------------------------------*/
static int __init omap2_init_devices(void)
@ -640,6 +718,7 @@ static int __init omap2_init_devices(void)
omap_init_sham();
omap_init_aes();
omap_init_vout();
omap_init_ocp2scp();
return 0;
}

View File

@ -421,6 +421,38 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v)
return 0;
}
/**
* _wait_softreset_complete - wait for an OCP softreset to complete
* @oh: struct omap_hwmod * to wait on
*
* Wait until the IP block represented by @oh reports that its OCP
* softreset is complete. This can be triggered by software (see
* _ocp_softreset()) or by hardware upon returning from off-mode (one
* example is HSMMC). Waits for up to MAX_MODULE_SOFTRESET_WAIT
* microseconds. Returns the number of microseconds waited.
*/
static int _wait_softreset_complete(struct omap_hwmod *oh)
{
struct omap_hwmod_class_sysconfig *sysc;
u32 softrst_mask;
int c = 0;
sysc = oh->class->sysc;
if (sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
omap_test_timeout((omap_hwmod_read(oh, sysc->syss_offs)
& SYSS_RESETDONE_MASK),
MAX_MODULE_SOFTRESET_WAIT, c);
else if (sysc->sysc_flags & SYSC_HAS_RESET_STATUS) {
softrst_mask = (0x1 << sysc->sysc_fields->srst_shift);
omap_test_timeout(!(omap_hwmod_read(oh, sysc->sysc_offs)
& softrst_mask),
MAX_MODULE_SOFTRESET_WAIT, c);
}
return c;
}
/**
* _set_dmadisable: set OCP_SYSCONFIG.DMADISABLE bit in @v
* @oh: struct omap_hwmod *
@ -1282,6 +1314,18 @@ static void _enable_sysc(struct omap_hwmod *oh)
if (!oh->class->sysc)
return;
/*
* Wait until reset has completed, this is needed as the IP
* block is reset automatically by hardware in some cases
* (off-mode for example), and the drivers require the
* IP to be ready when they access it
*/
if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
_enable_optional_clocks(oh);
_wait_softreset_complete(oh);
if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
_disable_optional_clocks(oh);
v = oh->_sysc_cache;
sf = oh->class->sysc->sysc_flags;
@ -1804,7 +1848,7 @@ static int _am33xx_disable_module(struct omap_hwmod *oh)
*/
static int _ocp_softreset(struct omap_hwmod *oh)
{
u32 v, softrst_mask;
u32 v;
int c = 0;
int ret = 0;
@ -1834,19 +1878,7 @@ static int _ocp_softreset(struct omap_hwmod *oh)
if (oh->class->sysc->srst_udelay)
udelay(oh->class->sysc->srst_udelay);
if (oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
omap_test_timeout((omap_hwmod_read(oh,
oh->class->sysc->syss_offs)
& SYSS_RESETDONE_MASK),
MAX_MODULE_SOFTRESET_WAIT, c);
else if (oh->class->sysc->sysc_flags & SYSC_HAS_RESET_STATUS) {
softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);
omap_test_timeout(!(omap_hwmod_read(oh,
oh->class->sysc->sysc_offs)
& softrst_mask),
MAX_MODULE_SOFTRESET_WAIT, c);
}
c = _wait_softreset_complete(oh);
if (c == MAX_MODULE_SOFTRESET_WAIT)
pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
oh->name, MAX_MODULE_SOFTRESET_WAIT);
@ -2352,6 +2384,9 @@ static int __init _setup_reset(struct omap_hwmod *oh)
if (oh->_state != _HWMOD_STATE_INITIALIZED)
return -EINVAL;
if (oh->flags & HWMOD_EXT_OPT_MAIN_CLK)
return -EPERM;
if (oh->rst_lines_cnt == 0) {
r = _enable(oh);
if (r) {

View File

@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/platform_data/gpio-omap.h>
#include <linux/power/smartreflex.h>
#include <linux/platform_data/omap_ocp2scp.h>
#include <plat/omap_hwmod.h>
#include <plat/i2c.h>
@ -2125,6 +2126,14 @@ static struct omap_hwmod omap44xx_mcpdm_hwmod = {
.name = "mcpdm",
.class = &omap44xx_mcpdm_hwmod_class,
.clkdm_name = "abe_clkdm",
/*
* It's suspected that the McPDM requires an off-chip main
* functional clock, controlled via I2C. This IP block is
* currently reset very early during boot, before I2C is
* available, so it doesn't seem that we have any choice in
* the kernel other than to avoid resetting it.
*/
.flags = HWMOD_EXT_OPT_MAIN_CLK,
.mpu_irqs = omap44xx_mcpdm_irqs,
.sdma_reqs = omap44xx_mcpdm_sdma_reqs,
.main_clk = "mcpdm_fck",
@ -2681,6 +2690,32 @@ static struct omap_hwmod_class omap44xx_ocp2scp_hwmod_class = {
.sysc = &omap44xx_ocp2scp_sysc,
};
/* ocp2scp dev_attr */
static struct resource omap44xx_usb_phy_and_pll_addrs[] = {
{
.name = "usb_phy",
.start = 0x4a0ad080,
.end = 0x4a0ae000,
.flags = IORESOURCE_MEM,
},
{
/* XXX: Remove this once control module driver is in place */
.name = "ctrl_dev",
.start = 0x4a002300,
.end = 0x4a002303,
.flags = IORESOURCE_MEM,
},
{ }
};
static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = {
{
.drv_name = "omap-usb2",
.res = omap44xx_usb_phy_and_pll_addrs,
},
{ }
};
/* ocp2scp_usb_phy */
static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
.name = "ocp2scp_usb_phy",
@ -2694,6 +2729,7 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
.modulemode = MODULEMODE_HWCTRL,
},
},
.dev_attr = ocp2scp_dev_attr,
};
/*

View File

@ -366,7 +366,7 @@ static struct regulator_init_data omap4_clk32kg_idata = {
};
static struct regulator_consumer_supply omap4_vdd1_supply[] = {
REGULATOR_SUPPLY("vcc", "mpu.0"),
REGULATOR_SUPPLY("vcc", "cpu0"),
};
static struct regulator_consumer_supply omap4_vdd2_supply[] = {

View File

@ -264,7 +264,7 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
if (initialized) {
if (voltdm->pmic->i2c_high_speed != i2c_high_speed)
pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).",
pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).\n",
__func__, voltdm->name, i2c_high_speed);
return;
}

View File

@ -28,6 +28,7 @@
#include <linux/mfd/asic3.h>
#include <linux/mtd/physmap.h>
#include <linux/pda_power.h>
#include <linux/pwm.h>
#include <linux/pwm_backlight.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/gpio-regulator.h>
@ -556,7 +557,7 @@ static struct platform_device hx4700_lcd = {
*/
static struct platform_pwm_backlight_data backlight_data = {
.pwm_id = 1,
.pwm_id = -1, /* Superseded by pwm_lookup */
.max_brightness = 200,
.dft_brightness = 100,
.pwm_period_ns = 30923,
@ -571,6 +572,10 @@ static struct platform_device backlight = {
},
};
static struct pwm_lookup hx4700_pwm_lookup[] = {
PWM_LOOKUP("pxa27x-pwm.1", 0, "pwm-backlight", NULL),
};
/*
* USB "Transceiver"
*/
@ -872,6 +877,7 @@ static void __init hx4700_init(void)
pxa_set_stuart_info(NULL);
platform_add_devices(devices, ARRAY_SIZE(devices));
pwm_add_table(hx4700_pwm_lookup, ARRAY_SIZE(hx4700_pwm_lookup));
pxa_set_ficp_info(&ficp_info);
pxa27x_set_i2c_power_info(NULL);

View File

@ -86,10 +86,7 @@ static void spitz_discharge1(int on)
gpio_set_value(SPITZ_GPIO_LED_GREEN, on);
}
static unsigned long gpio18_config[] = {
GPIO18_RDY,
GPIO18_GPIO,
};
static unsigned long gpio18_config = GPIO18_GPIO;
static void spitz_presuspend(void)
{
@ -112,7 +109,7 @@ static void spitz_presuspend(void)
PGSR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
PGSR2 |= GPIO_bit(SPITZ_GPIO_KEY_STROBE0);
pxa2xx_mfp_config(&gpio18_config[0], 1);
pxa2xx_mfp_config(&gpio18_config, 1);
gpio_request_one(18, GPIOF_OUT_INIT_HIGH, "Unknown");
gpio_free(18);
@ -131,7 +128,6 @@ static void spitz_presuspend(void)
static void spitz_postsuspend(void)
{
pxa2xx_mfp_config(&gpio18_config[1], 1);
}
static int spitz_should_wakeup(unsigned int resume_on_alarm)

View File

@ -745,7 +745,7 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
union offset_union offset;
union offset_union uninitialized_var(offset);
unsigned long instr = 0, instrptr;
int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
unsigned int type;

View File

@ -443,6 +443,11 @@ struct omap_hwmod_omap4_prcm {
* in order to complete the reset. Optional clocks will be disabled
* again after the reset.
* HWMOD_16BIT_REG: Module has 16bit registers
* HWMOD_EXT_OPT_MAIN_CLK: The only main functional clock source for
* this IP block comes from an off-chip source and is not always
* enabled. This prevents the hwmod code from being able to
* enable and reset the IP block early. XXX Eventually it should
* be possible to query the clock framework for this information.
*/
#define HWMOD_SWSUP_SIDLE (1 << 0)
#define HWMOD_SWSUP_MSTANDBY (1 << 1)
@ -453,6 +458,7 @@ struct omap_hwmod_omap4_prcm {
#define HWMOD_NO_IDLEST (1 << 6)
#define HWMOD_CONTROL_OPT_CLKS_IN_RESET (1 << 7)
#define HWMOD_16BIT_REG (1 << 8)
#define HWMOD_EXT_OPT_MAIN_CLK (1 << 9)
/*
* omap_hwmod._int_flags definitions

View File

@ -5,6 +5,6 @@
#
include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types
$(kecho) ' Generating $@'
@$(kecho) ' Generating $@'
@mkdir -p $(dir $@)
$(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; }

View File

@ -701,11 +701,14 @@ static int __init vfp_init(void)
elf_hwcap |= HWCAP_VFPv3;
/*
* Check for VFPv3 D16. CPUs in this configuration
* only have 16 x 64bit registers.
* Check for VFPv3 D16 and VFPv4 D16. CPUs in
* this configuration only have 16 x 64bit
* registers.
*/
if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
elf_hwcap |= HWCAP_VFPv3D16;
elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
else
elf_hwcap |= HWCAP_VFPD32;
}
#endif
/*

View File

@ -166,3 +166,14 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
*pages = NULL;
}
EXPORT_SYMBOL_GPL(free_xenballooned_pages);
/* In the hypervisor.S file. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(privcmd_call);

View File

@ -1,6 +1,7 @@
config ARM64
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select GENERIC_CLOCKEVENTS
select GENERIC_HARDIRQS_NO_DEPRECATED
select GENERIC_IOMAP

View File

@ -25,12 +25,10 @@
#include <asm/user.h>
typedef unsigned long elf_greg_t;
typedef unsigned long elf_freg_t[3];
#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_fp elf_fpregset_t;
typedef struct user_fpsimd_state elf_fpregset_t;
#define EM_AARCH64 183
@ -87,7 +85,6 @@ typedef struct user_fp elf_fpregset_t;
#define R_AARCH64_MOVW_PREL_G2_NC 292
#define R_AARCH64_MOVW_PREL_G3 293
/*
* These are used to set parameters in the core dumps.
*/

View File

@ -25,9 +25,8 @@
* - FPSR and FPCR
* - 32 128-bit data registers
*
* Note that user_fp forms a prefix of this structure, which is relied
* upon in the ptrace FP/SIMD accessors. struct user_fpsimd_state must
* form a prefix of struct fpsimd_state.
* Note that user_fpsimd forms a prefix of this structure, which is
* relied upon in the ptrace FP/SIMD accessors.
*/
struct fpsimd_state {
union {

View File

@ -114,7 +114,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* I/O port access primitives.
*/
#define IO_SPACE_LIMIT 0xffff
#define PCI_IOBASE ((void __iomem *)0xffffffbbfffe0000UL)
#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_2M))
static inline u8 inb(unsigned long addr)
{
@ -222,12 +222,12 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
extern void __iounmap(volatile void __iomem *addr);
#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_XN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
#define ioremap(addr, size) __ioremap((addr), (size), PROT_DEVICE_nGnRE)
#define ioremap_nocache(addr, size) __ioremap((addr), (size), PROT_DEVICE_nGnRE)
#define ioremap_wc(addr, size) __ioremap((addr), (size), PROT_NORMAL_NC)
#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
#define iounmap __iounmap
#define ARCH_HAS_IOREMAP_WC

View File

@ -38,7 +38,8 @@
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_NG (_AT(pmdval_t, 1) << 11)
#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
#define PMD_SECT_UXN (_AT(pmdval_t, 1) << 54)
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
@ -57,7 +58,8 @@
#define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
#define PTE_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
#define PTE_NG (_AT(pteval_t, 1) << 11) /* nG */
#define PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */
#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).

View File

@ -62,23 +62,23 @@ extern pgprot_t pgprot_default;
#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_XN | PTE_RDONLY)
#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN)
#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG)
#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_XN | PTE_DIRTY)
#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_DIRTY)
#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_XN | PTE_RDONLY)
#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN)
#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG)
#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#endif /* __ASSEMBLY__ */
@ -130,10 +130,10 @@ extern struct page *empty_zero_page;
#define pte_young(pte) (pte_val(pte) & PTE_AF)
#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
#define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
#define pte_exec(pte) (!(pte_val(pte) & PTE_XN))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_present_exec_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_XN)) == \
((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \
(PTE_VALID | PTE_USER))
#define PTE_BIT_FUNC(fn,op) \
@ -262,7 +262,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const pteval_t mask = PTE_USER | PTE_XN | PTE_RDONLY;
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY;
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}

View File

@ -43,6 +43,8 @@
#else
#define STACK_TOP STACK_TOP_MAX
#endif /* CONFIG_COMPAT */
#define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK
#endif /* __KERNEL__ */
struct debug_info {

View File

@ -14,7 +14,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifdef CONFIG_COMPAT
#define __ARCH_WANT_COMPAT_IPC_PARSE_VERSION
#define __ARCH_WANT_COMPAT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE

View File

@ -613,17 +613,11 @@ enum armv8_pmuv3_perf_types {
ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19,
ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A,
ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D,
/*
* This isn't an architected event.
* We detect this event number and use the cycle counter instead.
*/
ARMV8_PMUV3_PERFCTR_CPU_CYCLES = 0xFF,
};
/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
@ -1106,7 +1100,7 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT;
/* Always place a cycle counter into the cycle counter. */
if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;

View File

@ -309,24 +309,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
return last;
}
/*
* Fill in the task's elfregs structure for a core dump.
*/
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
{
elf_core_copy_regs(elfregs, task_pt_regs(t));
return 1;
}
/*
* fill in the fpe structure for a core dump...
*/
int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
{
return 0;
}
EXPORT_SYMBOL(dump_fpu);
/*
* Shuffle the argument into the correct register before calling the
* thread function. x1 is the thread argument, x2 is the pointer to

View File

@ -211,8 +211,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
* before we continue.
*/
set_cpu_online(cpu, true);
while (!cpu_active(cpu))
cpu_relax();
complete(&cpu_running);
/*
* OK, it's off to the idle thread for us

View File

@ -80,7 +80,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
#ifdef CONFIG_ZONE_DMA32
/* 4GB maximum for 32-bit only capable devices */
max_dma32 = min(max, MAX_DMA32_PFN);
zone_size[ZONE_DMA32] = max_dma32 - min;
zone_size[ZONE_DMA32] = max(min, max_dma32) - min;
#endif
zone_size[ZONE_NORMAL] = max - max_dma32;

View File

@ -2,7 +2,8 @@
#define __ARCH_H8300_CACHE_H
/* bytes per L1 cache line */
#define L1_CACHE_BYTES 4
#define L1_CACHE_SHIFT 2
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
/* m68k-elf-gcc 2.95.2 doesn't like these */

View File

@ -637,7 +637,6 @@ mem_init (void)
high_memory = __va(max_low_pfn * PAGE_SIZE);
reset_zone_present_pages();
for_each_online_pgdat(pgdat)
if (pgdat->bdata->node_bootmem_map)
totalram_pages += free_all_bootmem_node(pgdat);

View File

@ -30,6 +30,7 @@
* measurement, and debugging facilities.
*/
#include <linux/irqflags.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>
#include <asm/octeon/cvmx-spinlock.h>

View File

@ -11,6 +11,7 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <asm/bcache.h>

View File

@ -14,7 +14,6 @@
#endif
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h> /* sigh ... */
@ -44,6 +43,24 @@
#define smp_mb__before_clear_bit() smp_mb__before_llsc()
#define smp_mb__after_clear_bit() smp_llsc_mb()
/*
* These are the "slower" versions of the functions and are in bitops.c.
* These functions call raw_local_irq_{save,restore}().
*/
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
volatile unsigned long *addr);
/*
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
@ -57,7 +74,7 @@
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long temp;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a |= mask;
raw_local_irq_restore(flags);
}
} else
__mips_set_bit(nr, addr);
}
/*
@ -118,7 +126,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long temp;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (~(1UL << bit)));
} while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a &= ~mask;
raw_local_irq_restore(flags);
}
} else
__mips_clear_bit(nr, addr);
}
/*
@ -191,7 +190,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
*/
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a ^= mask;
raw_local_irq_restore(flags);
}
} else
__mips_change_bit(nr, addr);
}
/*
@ -244,7 +234,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
static inline int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a |= mask;
raw_local_irq_restore(flags);
}
} else
res = __mips_test_and_set_bit(nr, addr);
smp_llsc_mb();
@ -310,7 +290,7 @@ static inline int test_and_set_bit(unsigned long nr,
static inline int test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long res;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a |= mask;
raw_local_irq_restore(flags);
}
} else
res = __mips_test_and_set_bit_lock(nr, addr);
smp_llsc_mb();
@ -373,7 +343,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
static inline int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a &= ~mask;
raw_local_irq_restore(flags);
}
} else
res = __mips_test_and_clear_bit(nr, addr);
smp_llsc_mb();
@ -457,7 +417,7 @@ static inline int test_and_clear_bit(unsigned long nr,
static inline int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a ^= mask;
raw_local_irq_restore(flags);
}
} else
res = __mips_test_and_change_bit(nr, addr);
smp_llsc_mb();

View File

@ -290,7 +290,7 @@ struct compat_shmid64_ds {
static inline int is_compat_task(void)
{
return test_thread_flag(TIF_32BIT);
return test_thread_flag(TIF_32BIT_ADDR);
}
#endif /* _ASM_COMPAT_H */

View File

@ -15,6 +15,7 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/addrspace.h>
#include <asm/bug.h>

View File

@ -16,6 +16,105 @@
#include <linux/compiler.h>
#include <asm/hazards.h>
#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
__asm__(
" .macro arch_local_irq_disable\n"
" .set push \n"
" .set noat \n"
" di \n"
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline void arch_local_irq_disable(void)
{
__asm__ __volatile__(
"arch_local_irq_disable"
: /* no outputs */
: /* no inputs */
: "memory");
}
__asm__(
" .macro arch_local_irq_save result \n"
" .set push \n"
" .set reorder \n"
" .set noat \n"
" di \\result \n"
" andi \\result, 1 \n"
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
asm volatile("arch_local_irq_save\t%0"
: "=r" (flags)
: /* no inputs */
: "memory");
return flags;
}
__asm__(
" .macro arch_local_irq_restore flags \n"
" .set push \n"
" .set noreorder \n"
" .set noat \n"
#if defined(CONFIG_IRQ_CPU)
/*
* Slow, but doesn't suffer from a relatively unlikely race
* condition we're having since days 1.
*/
" beqz \\flags, 1f \n"
" di \n"
" ei \n"
"1: \n"
#else
/*
* Fast, dangerous. Life is fun, life is good.
*/
" mfc0 $1, $12 \n"
" ins $1, \\flags, 0, 1 \n"
" mtc0 $1, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
}
static inline void __arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
}
#else
/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
void arch_local_irq_disable(void);
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
void __arch_local_irq_restore(unsigned long flags);
#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
__asm__(
" .macro arch_local_irq_enable \n"
" .set push \n"
@ -57,55 +156,6 @@ static inline void arch_local_irq_enable(void)
}
/*
* For cli() we have to insert nops to make sure that the new value
* has actually arrived in the status register before the end of this
* macro.
* R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
* no nops at all.
*/
/*
* For TX49, operating only IE bit is not enough.
*
* If mfc0 $12 follows store and the mfc0 is last instruction of a
* page and fetching the next instruction causes TLB miss, the result
* of the mfc0 might wrongly contain EXL bit.
*
* ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
*
* Workaround: mask EXL bit of the result or place a nop before mfc0.
*/
__asm__(
" .macro arch_local_irq_disable\n"
" .set push \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 $1, $2, 1 \n"
" ori $1, 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2)
" di \n"
#else
" mfc0 $1,$12 \n"
" ori $1,0x1f \n"
" xori $1,0x1f \n"
" .set noreorder \n"
" mtc0 $1,$12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline void arch_local_irq_disable(void)
{
__asm__ __volatile__(
"arch_local_irq_disable"
: /* no outputs */
: /* no inputs */
: "memory");
}
__asm__(
" .macro arch_local_save_flags flags \n"
" .set push \n"
@ -125,113 +175,6 @@ static inline unsigned long arch_local_save_flags(void)
return flags;
}
__asm__(
" .macro arch_local_irq_save result \n"
" .set push \n"
" .set reorder \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 \\result, $2, 1 \n"
" ori $1, \\result, 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
" andi \\result, \\result, 0x400 \n"
#elif defined(CONFIG_CPU_MIPSR2)
" di \\result \n"
" andi \\result, 1 \n"
#else
" mfc0 \\result, $12 \n"
" ori $1, \\result, 0x1f \n"
" xori $1, 0x1f \n"
" .set noreorder \n"
" mtc0 $1, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
asm volatile("arch_local_irq_save\t%0"
: "=r" (flags)
: /* no inputs */
: "memory");
return flags;
}
__asm__(
" .macro arch_local_irq_restore flags \n"
" .set push \n"
" .set noreorder \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
"mfc0 $1, $2, 1 \n"
"andi \\flags, 0x400 \n"
"ori $1, 0x400 \n"
"xori $1, 0x400 \n"
"or \\flags, $1 \n"
"mtc0 \\flags, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
/*
* Slow, but doesn't suffer from a relatively unlikely race
* condition we're having since days 1.
*/
" beqz \\flags, 1f \n"
" di \n"
" ei \n"
"1: \n"
#elif defined(CONFIG_CPU_MIPSR2)
/*
* Fast, dangerous. Life is fun, life is good.
*/
" mfc0 $1, $12 \n"
" ins $1, \\flags, 0, 1 \n"
" mtc0 $1, $12 \n"
#else
" mfc0 $1, $12 \n"
" andi \\flags, 1 \n"
" ori $1, 0x1f \n"
" xori $1, 0x1f \n"
" or \\flags, $1 \n"
" mtc0 \\flags, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of branch and call overhead on each
* local_irq_restore()
*/
if (unlikely(!(flags & 0x0400)))
smtc_ipi_replay();
#endif
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
}
static inline void __arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
}
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
#endif
}
#endif
#endif /* #ifndef __ASSEMBLY__ */
/*
* Do the CPU's IRQ-state tracing from assembly code.

View File

@ -112,12 +112,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define TIF_LOAD_WATCH 25 /* If set, load watch registers */
#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
#ifdef CONFIG_MIPS32_O32
#define TIF_32BIT TIF_32BIT_REGS
#elif defined(CONFIG_MIPS32_N32)
#define TIF_32BIT _TIF_32BIT_ADDR
#endif /* CONFIG_MIPS32_O32 */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)

View File

@ -2,8 +2,9 @@
# Makefile for MIPS-specific library files..
#
lib-y += csum_partial.o delay.o memcpy.o memset.o \
strlen_user.o strncpy_user.o strnlen_user.o uncached.o
lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
mips-atomic.o strlen_user.o strncpy_user.o \
strnlen_user.o uncached.o
obj-y += iomap.o
obj-$(CONFIG_PCI) += iomap-pci.o

arch/mips/lib/bitops.c (new file, 179 lines)
View File

@ -0,0 +1,179 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
* Copyright (c) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/bitops.h>
#include <linux/irqflags.h>
#include <linux/export.h>
/**
* __mips_set_bit - Atomically set a bit in memory. This is called by
* set_bit() if it cannot find a faster solution.
* @nr: the bit to set
* @addr: the address to start counting from
*/
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
{
volatile unsigned long *a = addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a |= mask;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL(__mips_set_bit);
/**
* __mips_clear_bit - Clears a bit in memory. This is called by clear_bit() if
* it cannot find a faster solution.
* @nr: Bit to clear
* @addr: Address to start counting from
*/
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
volatile unsigned long *a = addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a &= ~mask;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL(__mips_clear_bit);
/**
* __mips_change_bit - Toggle a bit in memory. This is called by change_bit()
* if it cannot find a faster solution.
* @nr: Bit to change
* @addr: Address to start counting from
*/
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
{
volatile unsigned long *a = addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a ^= mask;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL(__mips_change_bit);
/**
* __mips_test_and_set_bit - Set a bit and return its old value. This is
* called by test_and_set_bit() if it cannot find a faster solution.
* @nr: Bit to set
* @addr: Address to count from
*/
int __mips_test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
volatile unsigned long *a = addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
unsigned long res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a |= mask;
raw_local_irq_restore(flags);
return res;
}
EXPORT_SYMBOL(__mips_test_and_set_bit);
/**
* __mips_test_and_set_bit_lock - Set a bit and return its old value. This is
* called by test_and_set_bit_lock() if it cannot find a faster solution.
* @nr: Bit to set
* @addr: Address to count from
*/
int __mips_test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
volatile unsigned long *a = addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
unsigned long res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a |= mask;
raw_local_irq_restore(flags);
return res;
}
EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
/**
* __mips_test_and_clear_bit - Clear a bit and return its old value. This is
* called by test_and_clear_bit() if it cannot find a faster solution.
* @nr: Bit to clear
* @addr: Address to count from
*/
int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
volatile unsigned long *a = addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
unsigned long res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a &= ~mask;
raw_local_irq_restore(flags);
return res;
}
EXPORT_SYMBOL(__mips_test_and_clear_bit);
/**
* __mips_test_and_change_bit - Change a bit and return its old value. This is
* called by test_and_change_bit() if it cannot find a faster solution.
* @nr: Bit to change
* @addr: Address to count from
*/
int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
volatile unsigned long *a = addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
unsigned long res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a ^= mask;
raw_local_irq_restore(flags);
return res;
}
EXPORT_SYMBOL(__mips_test_and_change_bit);

arch/mips/lib/mips-atomic.c (new file, 176 lines)
View File

@ -0,0 +1,176 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
* Copyright (C) 1996 by Paul M. Antoine
* Copyright (C) 1999 Silicon Graphics
* Copyright (C) 2000 MIPS Technologies, Inc.
*/
#include <asm/irqflags.h>
#include <asm/hazards.h>
#include <linux/compiler.h>
#include <linux/preempt.h>
#include <linux/export.h>
#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
/*
* For cli() we have to insert nops to make sure that the new value
* has actually arrived in the status register before the end of this
* macro.
* R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
* no nops at all.
*/
/*
* For TX49, operating only IE bit is not enough.
*
* If mfc0 $12 follows store and the mfc0 is last instruction of a
* page and fetching the next instruction causes TLB miss, the result
* of the mfc0 might wrongly contain EXL bit.
*
* ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
*
* Workaround: mask EXL bit of the result or place a nop before mfc0.
*/
__asm__(
" .macro arch_local_irq_disable\n"
" .set push \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 $1, $2, 1 \n"
" ori $1, 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2)
/* see irqflags.h for inline function */
#else
" mfc0 $1,$12 \n"
" ori $1,0x1f \n"
" xori $1,0x1f \n"
" .set noreorder \n"
" mtc0 $1,$12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
void arch_local_irq_disable(void)
{
preempt_disable();
__asm__ __volatile__(
"arch_local_irq_disable"
: /* no outputs */
: /* no inputs */
: "memory");
preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_disable);
__asm__(
" .macro arch_local_irq_save result \n"
" .set push \n"
" .set reorder \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 \\result, $2, 1 \n"
" ori $1, \\result, 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
" andi \\result, \\result, 0x400 \n"
#elif defined(CONFIG_CPU_MIPSR2)
/* see irqflags.h for inline function */
#else
" mfc0 \\result, $12 \n"
" ori $1, \\result, 0x1f \n"
" xori $1, 0x1f \n"
" .set noreorder \n"
" mtc0 $1, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
unsigned long arch_local_irq_save(void)
{
unsigned long flags;
preempt_disable();
asm volatile("arch_local_irq_save\t%0"
: "=r" (flags)
: /* no inputs */
: "memory");
preempt_enable();
return flags;
}
EXPORT_SYMBOL(arch_local_irq_save);
__asm__(
" .macro arch_local_irq_restore flags \n"
" .set push \n"
" .set noreorder \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
"mfc0 $1, $2, 1 \n"
"andi \\flags, 0x400 \n"
"ori $1, 0x400 \n"
"xori $1, 0x400 \n"
"or \\flags, $1 \n"
"mtc0 \\flags, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
/* see irqflags.h for inline function */
#elif defined(CONFIG_CPU_MIPSR2)
/* see irqflags.h for inline function */
#else
" mfc0 $1, $12 \n"
" andi \\flags, 1 \n"
" ori $1, 0x1f \n"
" xori $1, 0x1f \n"
" or \\flags, $1 \n"
" mtc0 \\flags, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of branch and call overhead on each
* local_irq_restore()
*/
if (unlikely(!(flags & 0x0400)))
smtc_ipi_replay();
#endif
preempt_disable();
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
void __arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
preempt_disable();
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
preempt_enable();
}
EXPORT_SYMBOL(__arch_local_irq_restore);
#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */
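These out-of-line helpers back the usual irqflags idiom on pre-MIPSR2 (and
SMTC) kernels. A hedged sketch of a typical caller, generic kernel idiom
rather than code from this diff:

/* Hedged sketch: guard per-cpu state that an irq handler also touches. */
static void bump_counter(unsigned long *ctr)
{
	unsigned long flags;

	local_irq_save(flags);		/* resolves to arch_local_irq_save() */
	(*ctr)++;			/* now interrupt-safe on this cpu */
	local_irq_restore(flags);
}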

View File

@ -29,6 +29,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
#include <asm/mips-boards/maltaint.h>
#include <mtd/mtd-abi.h>
#define SMC_PORT(base, int) \
@ -48,7 +49,7 @@ static struct plat_serial8250_port uart8250_data[] = {
SMC_PORT(0x2F8, 3),
{
.mapbase = 0x1f000900, /* The CBUS UART */
.irq = MIPS_CPU_IRQ_BASE + 2,
.irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
.uartclk = 3686400, /* Twice the usual clk! */
.iotype = UPIO_MEM32,
.flags = CBUS_UART_FLAGS,

View File

@ -96,6 +96,7 @@ config S390
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_CMPXCHG_LOCAL
select HAVE_CMPXCHG_DOUBLE
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_VIRT_CPU_ACCOUNTING
select VIRT_CPU_ACCOUNTING
select ARCH_DISCARD_MEMBLOCK

View File

@ -9,6 +9,8 @@
#define LPM_ANYPATH 0xff
#define __MAX_CSSID 0
#define __MAX_SUBCHANNEL 65535
#define __MAX_SSID 3
#include <asm/scsw.h>

View File

@ -20,7 +20,7 @@
#define PSW32_MASK_CC 0x00003000UL
#define PSW32_MASK_PM 0x00000f00UL
#define PSW32_MASK_USER 0x00003F00UL
#define PSW32_MASK_USER 0x0000FF00UL
#define PSW32_ADDR_AMODE 0x80000000UL
#define PSW32_ADDR_INSN 0x7FFFFFFFUL

View File

@ -506,12 +506,15 @@ static inline int pud_bad(pud_t pud)
static inline int pmd_present(pmd_t pmd)
{
return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
!(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
}
static inline int pmd_none(pmd_t pmd)
{
return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
!(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
}
static inline int pmd_large(pmd_t pmd)
@ -1223,6 +1226,11 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE)
#define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO)
#define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW)
#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
@ -1242,16 +1250,15 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
unsigned long pgprot_pmd = 0;
if (pgprot_val(pgprot) & _PAGE_INVALID) {
if (pgprot_val(pgprot) & _PAGE_SWT)
pgprot_pmd |= _HPAGE_TYPE_NONE;
pgprot_pmd |= _SEGMENT_ENTRY_INV;
}
if (pgprot_val(pgprot) & _PAGE_RO)
pgprot_pmd |= _SEGMENT_ENTRY_RO;
return pgprot_pmd;
/*
* pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
* Convert to segment table entry format.
*/
if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
return pgprot_val(SEGMENT_NONE);
if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
return pgprot_val(SEGMENT_RO);
return pgprot_val(SEGMENT_RW);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
@ -1269,7 +1276,9 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
/* Do not clobber _HPAGE_TYPE_NONE pages! */
if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
return pmd;
}

View File

@ -8,6 +8,9 @@ struct cpu;
#ifdef CONFIG_SCHED_BOOK
extern unsigned char cpu_socket_id[NR_CPUS];
#define topology_physical_package_id(cpu) (cpu_socket_id[cpu])
extern unsigned char cpu_core_id[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];

View File

@ -239,7 +239,7 @@ typedef struct
#define PSW_MASK_EA 0x00000000UL
#define PSW_MASK_BA 0x00000000UL
#define PSW_MASK_USER 0x00003F00UL
#define PSW_MASK_USER 0x0000FF00UL
#define PSW_ADDR_AMODE 0x80000000UL
#define PSW_ADDR_INSN 0x7FFFFFFFUL
@ -269,7 +269,7 @@ typedef struct
#define PSW_MASK_EA 0x0000000100000000UL
#define PSW_MASK_BA 0x0000000080000000UL
#define PSW_MASK_USER 0x00003F8180000000UL
#define PSW_MASK_USER 0x0000FF8180000000UL
#define PSW_ADDR_AMODE 0x0000000000000000UL
#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL

View File

@ -309,6 +309,10 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 |
(__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);
/* Check for invalid user address space control. */
if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
for (i = 0; i < NUM_GPRS; i++)
regs->gprs[i] = (__u64) regs32.gprs[i];
@ -481,7 +485,10 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
/* Set up registers for signal handler */
regs->gprs[15] = (__force __u64) frame;
regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */
/* Force 31 bit amode and default user address space control. */
regs->psw.mask = PSW_MASK_BA |
(psw_user_bits & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__force __u64) ka->sa.sa_handler;
regs->gprs[2] = map_signal(sig);
@ -549,7 +556,10 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Set up registers for signal handler */
regs->gprs[15] = (__force __u64) frame;
regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */
/* Force 31 bit amode and default user address space control. */
regs->psw.mask = PSW_MASK_BA |
(psw_user_bits & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__u64) ka->sa.sa_handler;
regs->gprs[2] = map_signal(sig);

View File

@ -44,6 +44,12 @@ _sclp_wait_int:
#endif
mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8)
mvc 0(16,%r8),0(%r9)
#ifdef CONFIG_64BIT
epsw %r6,%r7 # set current addressing mode
nill %r6,0x1 # in new psw (31 or 64 bit mode)
nilh %r7,0x8000
stm %r6,%r7,0(%r8)
#endif
lhi %r6,0x0200 # cr mask for ext int (cr0.54)
ltr %r2,%r2
jz .LsetctS1
@ -87,7 +93,7 @@ _sclp_wait_int:
.long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int
#ifdef CONFIG_64BIT
.LextpswS1_64:
.quad 0x0000000180000000, .LwaitS1 # PSW to handle ext int, 64 bit
.quad 0, .LwaitS1 # PSW to handle ext int, 64 bit
#endif
.LwaitpswS1:
.long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int

View File

@ -136,6 +136,10 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
/* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(user_sregs.regs.psw.mask & PSW_MASK_USER);
/* Check for invalid user address space control. */
if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
/* Check for invalid amode */
if (regs->psw.mask & PSW_MASK_EA)
regs->psw.mask |= PSW_MASK_BA;
@ -273,7 +277,10 @@ static int setup_frame(int sig, struct k_sigaction *ka,
/* Set up registers for signal handler */
regs->gprs[15] = (unsigned long) frame;
regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */
/* Force default amode and default user address space control. */
regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
(psw_user_bits & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
regs->gprs[2] = map_signal(sig);
@ -346,7 +353,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Set up registers for signal handler */
regs->gprs[15] = (unsigned long) frame;
regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */
/* Force default amode and default user address space control. */
regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
(psw_user_bits & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
regs->gprs[2] = map_signal(sig);

View File

@ -40,6 +40,7 @@ static DEFINE_SPINLOCK(topology_lock);
static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];
unsigned char cpu_socket_id[NR_CPUS];
static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
@ -83,11 +84,12 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
cpumask_set_cpu(lcpu, &book->mask);
cpu_book_id[lcpu] = book->id;
cpumask_set_cpu(lcpu, &core->mask);
cpu_core_id[lcpu] = rcpu;
if (one_core_per_cpu) {
cpu_core_id[lcpu] = rcpu;
cpu_socket_id[lcpu] = rcpu;
core = core->next;
} else {
cpu_core_id[lcpu] = core->id;
cpu_socket_id[lcpu] = core->id;
}
smp_cpu_set_polarization(lcpu, tl_cpu->pp);
}

View File

@ -39,7 +39,7 @@ static __always_inline unsigned long follow_table(struct mm_struct *mm,
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
return -0x10UL;
if (pmd_huge(*pmd)) {
if (pmd_large(*pmd)) {
if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
return -0x04UL;
return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);

View File

@ -126,7 +126,7 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
*/
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
return 0;
if (unlikely(pmd_huge(pmd))) {
if (unlikely(pmd_large(pmd))) {
if (!gup_huge_pmd(pmdp, pmd, addr, next,
write, pages, nr))
return 0;
@ -180,8 +180,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
(void __user *)start, len)))
if ((end < start) || (end > TASK_SIZE))
return 0;
local_irq_save(flags);
@ -229,7 +228,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
if (end < start)
if ((end < start) || (end > TASK_SIZE))
goto slow_irqon;
/*

View File

@ -20,6 +20,7 @@ config SPARC
select HAVE_ARCH_TRACEHOOK
select SYSCTL_EXCEPTION_TRACE
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select RTC_CLASS
select RTC_DRV_M48T59
select HAVE_IRQ_WORK

View File

@ -13,13 +13,13 @@ obj-$(CONFIG_CRYPTO_DES_SPARC64) += camellia-sparc64.o
obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o
sha1-sparc64-y := sha1_asm.o sha1_glue.o crop_devid.o
sha256-sparc64-y := sha256_asm.o sha256_glue.o crop_devid.o
sha512-sparc64-y := sha512_asm.o sha512_glue.o crop_devid.o
md5-sparc64-y := md5_asm.o md5_glue.o crop_devid.o
sha1-sparc64-y := sha1_asm.o sha1_glue.o
sha256-sparc64-y := sha256_asm.o sha256_glue.o
sha512-sparc64-y := sha512_asm.o sha512_glue.o
md5-sparc64-y := md5_asm.o md5_glue.o
aes-sparc64-y := aes_asm.o aes_glue.o crop_devid.o
des-sparc64-y := des_asm.o des_glue.o crop_devid.o
camellia-sparc64-y := camellia_asm.o camellia_glue.o crop_devid.o
aes-sparc64-y := aes_asm.o aes_glue.o
des-sparc64-y := des_asm.o des_glue.o
camellia-sparc64-y := camellia_asm.o camellia_glue.o
crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o crop_devid.o
crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o

View File

@ -475,3 +475,5 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated");
MODULE_ALIAS("aes");
#include "crop_devid.c"

View File

@ -320,3 +320,5 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
MODULE_ALIAS("aes");
#include "crop_devid.c"

View File

@ -177,3 +177,5 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");
MODULE_ALIAS("crc32c");
#include "crop_devid.c"

View File

@ -527,3 +527,5 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
MODULE_ALIAS("des");
#include "crop_devid.c"

View File

@ -186,3 +186,5 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
MODULE_ALIAS("md5");
#include "crop_devid.c"

View File

@ -181,3 +181,5 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated");
MODULE_ALIAS("sha1");
#include "crop_devid.c"

View File

@ -239,3 +239,5 @@ MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 op
MODULE_ALIAS("sha224");
MODULE_ALIAS("sha256");
#include "crop_devid.c"

View File

@ -224,3 +224,5 @@ MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 op
MODULE_ALIAS("sha384");
MODULE_ALIAS("sha512");
#include "crop_devid.c"

View File

@ -1,7 +1,7 @@
/* atomic.h: Thankfully the V9 is at least reasonable for this
* stuff.
*
* Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
* Copyright (C) 1996, 1997, 2000, 2012 David S. Miller (davem@redhat.com)
*/
#ifndef __ARCH_SPARC64_ATOMIC__
@ -106,6 +106,8 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
extern long atomic64_dec_if_positive(atomic64_t *v);
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()

View File

@ -1,6 +1,46 @@
#ifndef _SPARC64_BACKOFF_H
#define _SPARC64_BACKOFF_H
/* The macros in this file implement an exponential backoff facility
* for atomic operations.
*
* When multiple threads compete on an atomic operation, it is
* possible for one thread to be continually denied a successful
* completion of the compare-and-swap instruction. Heavily
* threaded cpu implementations like Niagara can compound this
* problem even further.
*
* When an atomic operation fails and needs to be retried, we spin a
* certain number of times. At each subsequent failure of the same
* operation we double the spin count, realizing an exponential
* backoff.
*
* When we spin, we try to use an operation that will cause the
* current cpu strand to block, and therefore make the core fully
* available to any other runnable strands. There are two
* options, based upon cpu capabilities.
*
* On all cpus prior to SPARC-T4 we do three dummy reads of the
* condition code register. Each read blocks the strand for something
* between 40 and 50 cpu cycles.
*
* For SPARC-T4 and later we have a special "pause" instruction
* available. This is implemented using writes to register %asr27.
* The cpu will block the number of cycles written into the register,
* unless a disrupting trap happens first. SPARC-T4 specifically
* implements pause with a granularity of 8 cycles. Each strand has
* an internal pause counter which decrements every 8 cycles. So the
* chip shifts the %asr27 value down by 3 bits, and writes the result
* into the pause counter. If a value smaller than 8 is written, the
* chip blocks for 1 cycle.
*
* To achieve the same amount of backoff as the three %ccr reads give
* on earlier chips, we shift the backoff value up by 7 bits. (Three
* %ccr reads block for about 128 cycles; 1 << 7 == 128.) We write the
* whole amount we want to block into the pause register, rather than
* loop writing 128 each time.
*/
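The policy the assembler macros below implement can be restated in C; a
hedged model only (the cmpxchg-based loop and function name are assumptions,
the real implementation is the assembler that follows):

/* Hedged C model: double the spin on each failed cas, cap at the limit. */
static inline void atomic_add_backoff(atomic_t *v, int i)
{
	unsigned long backoff = 1, n;
	int old;

	for (;;) {
		old = atomic_read(v);
		if (cmpxchg(&v->counter, old, old + i) == old)
			return;			/* cas won */
		for (n = 0; n < backoff; n++)
			cpu_relax();		/* yield the strand */
		if (backoff < BACKOFF_LIMIT)
			backoff <<= 1;		/* exponential backoff */
	}
}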
#define BACKOFF_LIMIT (4 * 1024)
#ifdef CONFIG_SMP
@ -11,16 +51,25 @@
#define BACKOFF_LABEL(spin_label, continue_label) \
spin_label
#define BACKOFF_SPIN(reg, tmp, label) \
mov reg, tmp; \
88: brnz,pt tmp, 88b; \
sub tmp, 1, tmp; \
set BACKOFF_LIMIT, tmp; \
cmp reg, tmp; \
bg,pn %xcc, label; \
nop; \
ba,pt %xcc, label; \
sllx reg, 1, reg;
#define BACKOFF_SPIN(reg, tmp, label) \
mov reg, tmp; \
88: rd %ccr, %g0; \
rd %ccr, %g0; \
rd %ccr, %g0; \
.section .pause_3insn_patch,"ax";\
.word 88b; \
sllx tmp, 7, tmp; \
wr tmp, 0, %asr27; \
clr tmp; \
.previous; \
brnz,pt tmp, 88b; \
sub tmp, 1, tmp; \
set BACKOFF_LIMIT, tmp; \
cmp reg, tmp; \
bg,pn %xcc, label; \
nop; \
ba,pt %xcc, label; \
sllx reg, 1, reg;
#else

View File

@ -232,9 +232,10 @@ static inline void __user *arch_compat_alloc_user_space(long len)
struct pt_regs *regs = current_thread_info()->kregs;
unsigned long usp = regs->u_regs[UREG_I6];
if (!(test_thread_flag(TIF_32BIT)))
if (test_thread_64bit_stack(usp))
usp += STACK_BIAS;
else
if (test_thread_flag(TIF_32BIT))
usp &= 0xffffffffUL;
usp -= len;

View File

@ -196,7 +196,22 @@ extern unsigned long get_wchan(struct task_struct *task);
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP])
#define cpu_relax() barrier()
/* Please see the commentary in asm/backoff.h for a description of
* what these instructions are doing and how they have been chosen.
* To make a long story short, we are trying to yield the current cpu
* strand during busy loops.
*/
#define cpu_relax() asm volatile("\n99:\n\t" \
"rd %%ccr, %%g0\n\t" \
"rd %%ccr, %%g0\n\t" \
"rd %%ccr, %%g0\n\t" \
".section .pause_3insn_patch,\"ax\"\n\t"\
".word 99b\n\t" \
"wr %%g0, 128, %%asr27\n\t" \
"nop\n\t" \
"nop\n\t" \
".previous" \
::: "memory")
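For illustration, the kind of busy-wait loop this is meant for; a hedged
sketch with a hypothetical flag set by another cpu:

/* Hedged sketch: poll a flag, yielding the strand on every iteration. */
static void wait_for_ready(volatile int *ready)
{
	while (!*ready)
		cpu_relax();	/* three %ccr reads, or pause on SPARC-T4+ */
}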
/* Prefetch support. This is tuned for UltraSPARC-III and later.
* UltraSPARC-I will treat these as nops, and UltraSPARC-II has

View File

@ -63,5 +63,10 @@ extern char *of_console_options;
extern void irq_trans_init(struct device_node *dp);
extern char *build_path_component(struct device_node *dp);
/* SPARC has a local implementation */
extern int of_address_to_resource(struct device_node *dev, int index,
struct resource *r);
#define of_address_to_resource of_address_to_resource
#endif /* __KERNEL__ */
#endif /* _SPARC_PROM_H */

View File

@ -259,6 +259,11 @@ static inline bool test_and_clear_restore_sigmask(void)
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0)
#define test_thread_64bit_stack(__SP) \
((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \
false : true)
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
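The convention these macros encode: a 64-bit task always runs on a 64-bit
stack, while a 32-bit task runs on a 64-bit stack frame only when bit 0 of
its stack pointer is set. A hedged C restatement with illustrative names:

/* Hedged restatement of thread32_stack_is_64bit()/test_thread_64bit_stack(). */
static bool frame_is_64bit(unsigned long sp, bool task_is_32bit)
{
	if (!task_is_32bit)
		return true;		/* 64-bit tasks: always */
	return (sp & 0x1) != 0;		/* low SP bit flags a 64-bit frame */
}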

View File

@ -372,7 +372,9 @@ etrap_spill_fixup_64bit: \
/* Normal 32bit spill */
#define SPILL_2_GENERIC(ASI) \
srl %sp, 0, %sp; \
and %sp, 1, %g3; \
brnz,pn %g3, (. - (128 + 4)); \
srl %sp, 0, %sp; \
stwa %l0, [%sp + %g0] ASI; \
mov 0x04, %g3; \
stwa %l1, [%sp + %g3] ASI; \
@ -398,14 +400,16 @@ etrap_spill_fixup_64bit: \
stwa %i6, [%g1 + %g0] ASI; \
stwa %i7, [%g1 + %g3] ASI; \
saved; \
retry; nop; nop; \
retry; \
b,a,pt %xcc, spill_fixup_dax; \
b,a,pt %xcc, spill_fixup_mna; \
b,a,pt %xcc, spill_fixup;
#define SPILL_2_GENERIC_ETRAP \
etrap_user_spill_32bit: \
srl %sp, 0, %sp; \
and %sp, 1, %g3; \
brnz,pn %g3, etrap_user_spill_64bit; \
srl %sp, 0, %sp; \
stwa %l0, [%sp + 0x00] %asi; \
stwa %l1, [%sp + 0x04] %asi; \
stwa %l2, [%sp + 0x08] %asi; \
@ -427,7 +431,7 @@ etrap_user_spill_32bit: \
ba,pt %xcc, etrap_save; \
wrpr %g1, %cwp; \
nop; nop; nop; nop; \
nop; nop; nop; nop; \
nop; nop; \
ba,a,pt %xcc, etrap_spill_fixup_32bit; \
ba,a,pt %xcc, etrap_spill_fixup_32bit; \
ba,a,pt %xcc, etrap_spill_fixup_32bit;
@ -592,7 +596,9 @@ user_rtt_fill_64bit: \
/* Normal 32bit fill */
#define FILL_2_GENERIC(ASI) \
srl %sp, 0, %sp; \
and %sp, 1, %g3; \
brnz,pn %g3, (. - (128 + 4)); \
srl %sp, 0, %sp; \
lduwa [%sp + %g0] ASI, %l0; \
mov 0x04, %g2; \
mov 0x08, %g3; \
@ -616,14 +622,16 @@ user_rtt_fill_64bit: \
lduwa [%g1 + %g3] ASI, %i6; \
lduwa [%g1 + %g5] ASI, %i7; \
restored; \
retry; nop; nop; nop; nop; \
retry; nop; nop; \
b,a,pt %xcc, fill_fixup_dax; \
b,a,pt %xcc, fill_fixup_mna; \
b,a,pt %xcc, fill_fixup;
#define FILL_2_GENERIC_RTRAP \
user_rtt_fill_32bit: \
srl %sp, 0, %sp; \
and %sp, 1, %g3; \
brnz,pn %g3, user_rtt_fill_64bit; \
srl %sp, 0, %sp; \
lduwa [%sp + 0x00] %asi, %l0; \
lduwa [%sp + 0x04] %asi, %l1; \
lduwa [%sp + 0x08] %asi, %l2; \
@ -643,7 +651,7 @@ user_rtt_fill_32bit: \
ba,pt %xcc, user_rtt_pre_restore; \
restored; \
nop; nop; nop; nop; nop; \
nop; nop; nop; nop; nop; \
nop; nop; nop; \
ba,a,pt %xcc, user_rtt_fill_fixup; \
ba,a,pt %xcc, user_rtt_fill_fixup; \
ba,a,pt %xcc, user_rtt_fill_fixup;

View File

@ -405,8 +405,13 @@
#define __NR_setns 337
#define __NR_process_vm_readv 338
#define __NR_process_vm_writev 339
#define __NR_kern_features 340
#define __NR_kcmp 341
#define NR_syscalls 340
#define NR_syscalls 342
/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,

View File

@ -59,6 +59,13 @@ struct popc_6insn_patch_entry {
extern struct popc_6insn_patch_entry __popc_6insn_patch,
__popc_6insn_patch_end;
struct pause_patch_entry {
unsigned int addr;
unsigned int insns[3];
};
extern struct pause_patch_entry __pause_3insn_patch,
__pause_3insn_patch_end;
extern void __init per_cpu_patch(void);
extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
struct sun4v_1insn_patch_entry *);

View File

@ -56,11 +56,13 @@ static inline unsigned int leon_eirq_get(int cpu)
static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
{
unsigned int eirq;
struct irq_bucket *p;
int cpu = sparc_leon3_cpuid();
eirq = leon_eirq_get(cpu);
if ((eirq & 0x10) && irq_map[eirq]->irq) /* bit4 tells if IRQ happened */
generic_handle_irq(irq_map[eirq]->irq);
p = irq_map[eirq];
if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */
generic_handle_irq(p->irq);
}
/* The extended IRQ controller has been found, this function registers it */

View File

@ -1762,15 +1762,25 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
do {
struct sparc_stackf32 *usf, sf;
unsigned long pc;
usf = (struct sparc_stackf32 *) ufp;
if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
break;
if (thread32_stack_is_64bit(ufp)) {
struct sparc_stackf *usf, sf;
pc = sf.callers_pc;
ufp = (unsigned long)sf.fp;
ufp += STACK_BIAS;
usf = (struct sparc_stackf *) ufp;
if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
break;
pc = sf.callers_pc & 0xffffffff;
ufp = ((unsigned long) sf.fp) & 0xffffffff;
} else {
struct sparc_stackf32 *usf, sf;
usf = (struct sparc_stackf32 *) ufp;
if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
break;
pc = sf.callers_pc;
ufp = (unsigned long)sf.fp;
}
perf_callchain_store(entry, pc);
} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

View File

@ -452,13 +452,16 @@ void flush_thread(void)
/* It's a bit more tricky when 64-bit tasks are involved... */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
bool stack_64bit = test_thread_64bit_stack(psp);
unsigned long fp, distance, rval;
if (!(test_thread_flag(TIF_32BIT))) {
if (stack_64bit) {
csp += STACK_BIAS;
psp += STACK_BIAS;
__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
fp += STACK_BIAS;
if (test_thread_flag(TIF_32BIT))
fp &= 0xffffffff;
} else
__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
@ -472,7 +475,7 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
rval = (csp - distance);
if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
rval = 0;
else if (test_thread_flag(TIF_32BIT)) {
else if (!stack_64bit) {
if (put_user(((u32)csp),
&(((struct reg_window32 __user *)rval)->ins[6])))
rval = 0;
@ -507,18 +510,18 @@ void synchronize_user_stack(void)
flush_user_windows();
if ((window = get_thread_wsaved()) != 0) {
int winsize = sizeof(struct reg_window);
int bias = 0;
if (test_thread_flag(TIF_32BIT))
winsize = sizeof(struct reg_window32);
else
bias = STACK_BIAS;
window -= 1;
do {
unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
struct reg_window *rwin = &t->reg_window[window];
int winsize = sizeof(struct reg_window);
unsigned long sp;
sp = t->rwbuf_stkptrs[window];
if (test_thread_64bit_stack(sp))
sp += STACK_BIAS;
else
winsize = sizeof(struct reg_window32);
if (!copy_to_user((char __user *)sp, rwin, winsize)) {
shift_window_buffer(window, get_thread_wsaved() - 1, t);
@ -544,13 +547,6 @@ void fault_in_user_windows(void)
{
struct thread_info *t = current_thread_info();
unsigned long window;
int winsize = sizeof(struct reg_window);
int bias = 0;
if (test_thread_flag(TIF_32BIT))
winsize = sizeof(struct reg_window32);
else
bias = STACK_BIAS;
flush_user_windows();
window = get_thread_wsaved();
@ -558,8 +554,16 @@ void fault_in_user_windows(void)
if (likely(window != 0)) {
window -= 1;
do {
unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
struct reg_window *rwin = &t->reg_window[window];
int winsize = sizeof(struct reg_window);
unsigned long sp;
sp = t->rwbuf_stkptrs[window];
if (test_thread_64bit_stack(sp))
sp += STACK_BIAS;
else
winsize = sizeof(struct reg_window32);
if (unlikely(sp & 0x7UL))
stack_unaligned(sp);

View File

@ -151,7 +151,7 @@ static int regwindow64_get(struct task_struct *target,
{
unsigned long rw_addr = regs->u_regs[UREG_I6];
if (test_tsk_thread_flag(current, TIF_32BIT)) {
if (!test_thread_64bit_stack(rw_addr)) {
struct reg_window32 win32;
int i;
@ -176,7 +176,7 @@ static int regwindow64_set(struct task_struct *target,
{
unsigned long rw_addr = regs->u_regs[UREG_I6];
if (test_tsk_thread_flag(current, TIF_32BIT)) {
if (!test_thread_64bit_stack(rw_addr)) {
struct reg_window32 win32;
int i;

View File

@ -316,6 +316,25 @@ static void __init popc_patch(void)
}
}
static void __init pause_patch(void)
{
struct pause_patch_entry *p;
p = &__pause_3insn_patch;
while (p < &__pause_3insn_patch_end) {
unsigned long i, addr = p->addr;
for (i = 0; i < 3; i++) {
*(unsigned int *) (addr + (i * 4)) = p->insns[i];
wmb();
__asm__ __volatile__("flush %0"
: : "r" (addr + (i * 4)));
}
p++;
}
}
#ifdef CONFIG_SMP
void __init boot_cpu_id_too_large(int cpu)
{
@ -528,6 +547,8 @@ static void __init init_sparc64_elf_hwcap(void)
if (sparc64_elf_hwcap & AV_SPARC_POPC)
popc_patch();
if (sparc64_elf_hwcap & AV_SPARC_PAUSE)
pause_patch();
}
void __init setup_arch(char **cmdline_p)

View File

@ -751,3 +751,8 @@ int kernel_execve(const char *filename,
: "cc");
return __res;
}
asmlinkage long sys_kern_features(void)
{
return KERN_FEATURE_MIXED_MODE_STACK;
}
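Userspace can probe the new capability with a raw syscall; a hedged sketch,
where the number 340 comes from the syscall tables below and everything else
is illustrative:

/* Hedged userspace sketch for sparc64 (__NR_kern_features == 340). */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define KERN_FEATURE_MIXED_MODE_STACK	0x00000001

int main(void)
{
	long features = syscall(340);	/* __NR_kern_features */

	if (features > 0 && (features & KERN_FEATURE_MIXED_MODE_STACK))
		printf("mixed 32/64-bit stacks supported\n");
	return 0;
}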

View File

@ -85,3 +85,4 @@ sys_call_table:
/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .long sys_ni_syscall, sys_kcmp

View File

@ -86,6 +86,7 @@ sys_call_table32:
.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp
#endif /* CONFIG_COMPAT */
@ -163,3 +164,4 @@ sys_call_table:
.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp

View File

@ -113,21 +113,24 @@ static inline long sign_extend_imm13(long imm)
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
unsigned long value;
unsigned long value, fp;
if (reg < 16)
return (!reg ? 0 : regs->u_regs[reg]);
fp = regs->u_regs[UREG_FP];
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
win = (struct reg_window *)(fp + STACK_BIAS);
value = win->locals[reg - 16];
} else if (test_thread_flag(TIF_32BIT)) {
} else if (!test_thread_64bit_stack(fp)) {
struct reg_window32 __user *win32;
win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
get_user(value, &win32->locals[reg - 16]);
} else {
struct reg_window __user *win;
win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
win = (struct reg_window __user *)(fp + STACK_BIAS);
get_user(value, &win->locals[reg - 16]);
}
return value;
@ -135,19 +138,24 @@ static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
unsigned long fp;
if (reg < 16)
return &regs->u_regs[reg];
fp = regs->u_regs[UREG_FP];
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
win = (struct reg_window *)(fp + STACK_BIAS);
return &win->locals[reg - 16];
} else if (test_thread_flag(TIF_32BIT)) {
} else if (!test_thread_64bit_stack(fp)) {
struct reg_window32 *win32;
win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
win32 = (struct reg_window32 *)((unsigned long)((u32)fp));
return (unsigned long *)&win32->locals[reg - 16];
} else {
struct reg_window *win;
win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
win = (struct reg_window *)(fp + STACK_BIAS);
return &win->locals[reg - 16];
}
}
@ -392,13 +400,15 @@ int handle_popc(u32 insn, struct pt_regs *regs)
if (rd)
regs->u_regs[rd] = ret;
} else {
if (test_thread_flag(TIF_32BIT)) {
unsigned long fp = regs->u_regs[UREG_FP];
if (!test_thread_64bit_stack(fp)) {
struct reg_window32 __user *win32;
win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
put_user(ret, &win32->locals[rd - 16]);
} else {
struct reg_window __user *win;
win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
win = (struct reg_window __user *)(fp + STACK_BIAS);
put_user(ret, &win->locals[rd - 16]);
}
}
@ -554,7 +564,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
reg[0] = 0;
if ((insn & 0x780000) == 0x180000)
reg[1] = 0;
} else if (test_thread_flag(TIF_32BIT)) {
} else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
put_user(0, (int __user *) reg);
if ((insn & 0x780000) == 0x180000)
put_user(0, ((int __user *) reg) + 1);

Some files were not shown because too many files have changed in this diff.