Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm

* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (91 commits)
  ARM: 6806/1: irq: introduce entry and exit functions for chained handlers
  ARM: 6781/1: Thumb-2: Work around buggy Thumb-2 short branch relocations in gas
  ARM: 6747/1: P2V: Thumb2 support
  ARM: 6798/1: aout-core: zero thread debug registers in a.out core dump
  ARM: 6796/1: Footbridge: Fix I/O mappings for NOMMU mode
  ARM: 6784/1: errata: no automatic Store Buffer drain on Cortex-A9
  ARM: 6772/1: errata: possible fault MMU translations following an ASID switch
  ARM: 6776/1: mach-ux500: activate fix for errata 753970
  ARM: 6794/1: SPEAr: Append UL to device address macros.
  ARM: 6793/1: SPEAr: Remove unused *_SIZE macros from spear*.h files
  ARM: 6792/1: SPEAr: Replace SIZE macro's with SZ_4K macros
  ARM: 6791/1: SPEAr3xx: Declare device structures after shirq code
  ARM: 6790/1: SPEAr: Clock Framework: Rename usbd clock and align apb_clk entry
  ARM: 6789/1: SPEAr3xx: Rename sdio to sdhci
  ARM: 6788/1: SPEAr: Include mach/hardware.h instead of mach/spear.h
  ARM: 6787/1: SPEAr: Reorder #includes in .h & .c files.
  ARM: 6681/1: SPEAr: add debugfs support to clk API
  ARM: 6703/1: SPEAr: update clk API support
  ARM: 6679/1: SPEAr: make clk API functions more generic
  ARM: 6737/1: SPEAr: formalized timer support
  ...
This commit is contained in:
Linus Torvalds 2011-03-16 19:03:06 -07:00
commit 16d8775700
295 changed files with 7692 additions and 3397 deletions

View File

@ -0,0 +1,8 @@
BIN := vrl4
.PHONY: all
all: $(BIN)
.PHONY: clean
clean:
rm -f *.o $(BIN)

View File

@ -0,0 +1,169 @@
/*
* vrl4 format generator
*
* Copyright (C) 2010 Simon Horman
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
/*
* usage: vrl4 < zImage > out
* dd if=out of=/dev/sdx bs=512 seek=1 # Write the image to sector 1
*
* Reads a zImage from stdin and writes a vrl4 image to stdout.
* In practice this means writing a padded vrl4 header to stdout followed
* by the zImage.
*
* The padding places the zImage at ALIGN bytes into the output.
* The vrl4 uses ALIGN + START_BASE as the start_address.
* This is where the mask ROM will jump to after verifying the header.
*
* The header sets copy_size to min(sizeof(zImage), MAX_BOOT_PROG_LEN) + ALIGN.
* That is, the mask ROM will load the padded header (ALIGN bytes)
* and then MAX_BOOT_PROG_LEN bytes of the image, or the entire image,
* whichever is smaller.
*
* The zImage is not modified in any way.
*/
#define _BSD_SOURCE
#include <endian.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <errno.h>
struct hdr {
uint32_t magic1;
uint32_t reserved1;
uint32_t magic2;
uint32_t reserved2;
uint16_t copy_size;
uint16_t boot_options;
uint32_t reserved3;
uint32_t start_address;
uint32_t reserved4;
uint32_t reserved5;
char reserved6[308];
};
#define DECLARE_HDR(h) \
struct hdr (h) = { \
.magic1 = htole32(0xea000000), \
.reserved1 = htole32(0x56), \
.magic2 = htole32(0xe59ff008), \
.reserved3 = htole32(0x1) }
/* Align to 512 bytes, the MMCIF sector size */
#define ALIGN_BITS 9
#define ALIGN (1 << ALIGN_BITS)
#define START_BASE 0xe55b0000
/*
* With an alignment of 512 the header uses the first sector.
* There is a 128-sector (64 kbyte) limit on the data loaded by the mask ROM,
* so there are 127 sectors left for the boot program. In practice only a
* small portion of the zImage is needed; 16 sectors should be more
* than enough.
*
* Note that this sets how much of the zImage is copied by the mask ROM.
* The entire zImage is present after the header and is loaded
* by the code in the boot program (which is the first portion of the zImage).
*/
#define MAX_BOOT_PROG_LEN (16 * 512)
#define ROUND_UP(x) (((x) + ALIGN - 1) & ~(ALIGN - 1))
ssize_t do_read(int fd, void *buf, size_t count)
{
size_t offset = 0;
ssize_t l;
while (offset < count) {
l = read(fd, buf + offset, count - offset);
if (!l)
break;
if (l < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK)
continue;
perror("read");
return -1;
}
offset += l;
}
return offset;
}
ssize_t do_write(int fd, const void *buf, size_t count)
{
size_t offset = 0;
ssize_t l;
while (offset < count) {
l = write(fd, buf + offset, count - offset);
if (l < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK)
continue;
perror("write");
return -1;
}
offset += l;
}
return offset;
}
ssize_t write_zero(int fd, size_t len)
{
size_t i = len;
while (i--) {
const char x = 0;
if (do_write(fd, &x, 1) < 0)
return -1;
}
return len;
}
int main(void)
{
DECLARE_HDR(hdr);
char boot_program[MAX_BOOT_PROG_LEN];
size_t aligned_hdr_len, aligned_prog_len;
ssize_t prog_len;
prog_len = do_read(0, boot_program, sizeof(boot_program));
if (prog_len <= 0)
return -1;
aligned_hdr_len = ROUND_UP(sizeof(hdr));
hdr.start_address = htole32(START_BASE + aligned_hdr_len);
aligned_prog_len = ROUND_UP(prog_len);
hdr.copy_size = htole16(aligned_hdr_len + aligned_prog_len);
if (do_write(1, &hdr, sizeof(hdr)) < 0)
return -1;
if (write_zero(1, aligned_hdr_len - sizeof(hdr)) < 0)
return -1;
if (do_write(1, boot_program, prog_len) < 0)
return 1;
/* Write out the rest of the kernel */
while (1) {
prog_len = do_read(0, boot_program, sizeof(boot_program));
if (prog_len < 0)
return 1;
if (prog_len == 0)
break;
if (do_write(1, boot_program, prog_len) < 0)
return 1;
}
return 0;
}

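For illustration, the header arithmetic above can be checked in isolation. A minimal standalone sketch (the constants mirror the tool above; the 3 MB zImage size is an invented example) prints the two derived header fields:

/* Hypothetical worked example of the vrl4 header fields. Only the
 * zImage size is made up; the constants mirror vrl4.c above. */
#include <stdint.h>
#include <stdio.h>

#define ALIGN 512
#define MAX_BOOT_PROG_LEN (16 * 512)
#define START_BASE 0xe55b0000
#define ROUND_UP(x) (((x) + ALIGN - 1) & ~(ALIGN - 1))

int main(void)
{
	size_t zimage_len = 3000000;		/* example zImage size */
	/* the tool reads at most MAX_BOOT_PROG_LEN bytes of the zImage */
	size_t prog_len = zimage_len < MAX_BOOT_PROG_LEN ?
			  zimage_len : MAX_BOOT_PROG_LEN;	/* 8192 */
	size_t aligned_hdr_len = 512;	/* ROUND_UP(sizeof(struct hdr)) */

	printf("start_address = 0x%08lx\n",
	       (unsigned long)START_BASE + aligned_hdr_len); /* 0xe55b0200 */
	printf("copy_size = %zu\n",
	       aligned_hdr_len + ROUND_UP(prog_len)); /* 8704 = 17 sectors */
	return 0;
}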
View File

@ -0,0 +1,29 @@
ROM-able zImage boot from MMC
-----------------------------
A ROM-able zImage compiled with ZBOOT_ROM_MMCIF may be written to MMC, and
SuperH Mobile ARM will boot directly from the MMCIF hardware block.
This is achieved by the mask ROM loading the first portion of the image into
MERAM and then jumping to it. This portion contains loader code which
copies the entire image to SDRAM and jumps to it. From there the zImage
boot code proceeds as normal, uncompressing the image into its final
location and then jumping to it.
This code has been tested on an AP4EB board using the developer 1A eMMC
boot mode, which is configured using the following jumper settings:
The board used for testing required a patched mask ROM in order for
this mode to function.
8 7 6 5 4 3 2 1
x|x|x|x|x| |x|
S4 -+-+-+-+-+-+-+-
| | | | |x| |x on
The zImage must be written to the MMC card at sector 1 (512 bytes) in
vrl4 format. A utility vrl4 is supplied to accomplish this.
e.g.
vrl4 < zImage | dd of=/dev/sdX bs=512 seek=1
A dual-voltage MMC 4.0 card was used for testing.

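After writing an image, the header can be sanity-checked by reading sector 1 back. A rough sketch of such a check (not a utility shipped with the kernel; the magic values and offsets follow struct hdr in the vrl4 tool above):

/* Hypothetical readback check. Feed sector 1 of the card to stdin:
 *	dd if=/dev/sdX bs=512 skip=1 count=1 | ./vrl4-check
 * Illustrative only. */
#define _BSD_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char sector[512];
	uint32_t magic1, magic2;

	if (fread(sector, 1, sizeof(sector), stdin) != sizeof(sector)) {
		fprintf(stderr, "short read\n");
		return 1;
	}
	memcpy(&magic1, sector + 0, 4);	/* offset of hdr.magic1 */
	memcpy(&magic2, sector + 8, 4);	/* offset of hdr.magic2 */
	if (le32toh(magic1) != 0xea000000 || le32toh(magic2) != 0xe59ff008) {
		fprintf(stderr, "no vrl4 header at sector 1\n");
		return 1;
	}
	puts("vrl4 header looks OK");
	return 0;
}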
View File

@ -7,7 +7,7 @@ config ARM
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_ARCH_KGDB
select HAVE_KPROBES if (!XIP_KERNEL && !THUMB2_KERNEL)
@ -24,7 +24,7 @@ config ARM
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V7))
select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
select HAVE_C_RECORDMCOUNT
select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ
@ -63,6 +63,10 @@ config GENERIC_CLOCKEVENTS_BROADCAST
depends on GENERIC_CLOCKEVENTS
default y if SMP
config KTIME_SCALAR
bool
default y
config HAVE_TCM
bool
select GENERIC_ALLOCATOR
@ -178,11 +182,6 @@ config FIQ
config ARCH_MTD_XIP
bool
config ARM_L1_CACHE_SHIFT_6
bool
help
Setting ARM L1 cache line size to 64 Bytes.
config VECTORS_BASE
hex
default 0xffff0000 if MMU || CPU_HIGH_VECTOR
@ -191,6 +190,22 @@ config VECTORS_BASE
help
The base address of exception vectors.
config ARM_PATCH_PHYS_VIRT
bool "Patch physical to virtual translations at runtime (EXPERIMENTAL)"
depends on EXPERIMENTAL
depends on !XIP_KERNEL && MMU
depends on !ARCH_REALVIEW || !SPARSEMEM
help
Patch phys-to-virt translation functions at runtime according to
the position of the kernel in system memory.
This can only be used with non-XIP, MMU-enabled kernels where
the base of physical memory is at a 16MB boundary.
config ARM_PATCH_PHYS_VIRT_16BIT
def_bool y
depends on ARM_PATCH_PHYS_VIRT && ARCH_MSM
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@ -346,7 +361,7 @@ config ARCH_FOOTBRIDGE
bool "FootBridge"
select CPU_SA110
select FOOTBRIDGE
select ARCH_USES_GETTIMEOFFSET
select GENERIC_CLOCKEVENTS
help
Support for systems based on the DC21285 companion chip
("FootBridge"), such as the Simtec CATS and the Rebel NetWinder.
@ -457,6 +472,7 @@ config ARCH_IXP4XX
config ARCH_DOVE
bool "Marvell Dove"
select CPU_V6K
select PCI
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
@ -875,6 +891,16 @@ config PLAT_SPEAR
help
Support for ST's SPEAr platform (SPEAr3xx, SPEAr6xx and SPEAr13xx).
config ARCH_VT8500
bool "VIA/WonderMedia 85xx"
select CPU_ARM926T
select GENERIC_GPIO
select ARCH_HAS_CPUFREQ
select GENERIC_CLOCKEVENTS
select ARCH_REQUIRE_GPIOLIB
select HAVE_PWM
help
Support for VIA/WonderMedia VT8500/WM85xx System-on-Chip.
endchoice
#
@ -1007,6 +1033,8 @@ source "arch/arm/mach-versatile/Kconfig"
source "arch/arm/mach-vexpress/Kconfig"
source "arch/arm/mach-vt8500/Kconfig"
source "arch/arm/mach-w90x900/Kconfig"
# Definitions to make life easier
@ -1048,7 +1076,7 @@ config XSCALE_PMU
default y
config CPU_HAS_PMU
depends on (CPU_V6 || CPU_V7 || XSCALE_PMU) && \
depends on (CPU_V6 || CPU_V6K || CPU_V7 || XSCALE_PMU) && \
(!ARCH_OMAP3 || OMAP3_EMU)
default y
bool
@ -1064,7 +1092,7 @@ endif
config ARM_ERRATA_411920
bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
depends on CPU_V6
depends on CPU_V6 || CPU_V6K
help
Invalidation of the Instruction Cache operation can
fail. This erratum is present in 1136 (before r1p4), 1156 and 1176.
@ -1140,7 +1168,7 @@ config ARM_ERRATA_742231
config PL310_ERRATA_588369
bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
depends on CACHE_L2X0 && ARCH_OMAP4
depends on CACHE_L2X0
help
The PL310 L2 cache controller implements three types of Clean &
Invalidate maintenance operations: by Physical Address
@ -1149,8 +1177,7 @@ config PL310_ERRATA_588369
clean operation followed immediately by an invalidate operation,
both performing to the same memory location. This functionality
is not correctly implemented in PL310 as clean lines are not
invalidated as a result of these operations. Note that this errata
uses Texas Instrument's secure monitor api.
invalidated as a result of these operations.
config ARM_ERRATA_720789
bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
@ -1164,6 +1191,17 @@ config ARM_ERRATA_720789
tables. The workaround changes the TLB flushing routines to invalidate
entries regardless of the ASID.
config PL310_ERRATA_727915
bool "Background Clean & Invalidate by Way operation can cause data corruption"
depends on CACHE_L2X0
help
PL310 implements the Clean & Invalidate by Way L2 cache maintenance
operation (offset 0x7FC). This operation runs in background so that
PL310 can handle normal accesses while it is in progress. Under very
rare circumstances, due to this erratum, write data can be lost when
PL310 treats a cacheable write transaction during a Clean &
Invalidate by Way operation.
config ARM_ERRATA_743622
bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
depends on CPU_V7
@ -1202,6 +1240,28 @@ config ARM_ERRATA_753970
This has the same effect as the cache sync operation: store buffer
drain and waiting for all buffers empty.
config ARM_ERRATA_754322
bool "ARM errata: possible faulty MMU translations following an ASID switch"
depends on CPU_V7
help
This option enables the workaround for the 754322 Cortex-A9 (r2p*,
r3p*) erratum. A speculative memory access may cause a page table walk
which starts prior to an ASID switch but completes afterwards. This
can populate the micro-TLB with a stale entry which may be hit with
the new ASID. This workaround places two dsb instructions in the mm
switching code so that no page table walks can cross the ASID switch.
config ARM_ERRATA_754327
bool "ARM errata: no automatic Store Buffer drain"
depends on CPU_V7 && SMP
help
This option enables the workaround for the 754327 Cortex-A9 (prior to
r2p0) erratum. The Store Buffer does not have any automatic draining
mechanism and therefore a livelock may occur if an external agent
continuously polls a memory location waiting to observe an update.
This workaround defines cpu_relax() as smp_mb(), preventing correctly
written polling loops from denying visibility of updates to memory.
endmenu
source "arch/arm/common/Kconfig"
@ -1275,6 +1335,7 @@ source "kernel/time/Kconfig"
config SMP
bool "Symmetric Multi-Processing (EXPERIMENTAL)"
depends on EXPERIMENTAL
depends on CPU_V6K || CPU_V7
depends on GENERIC_CLOCKEVENTS
depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \
MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
@ -1386,7 +1447,7 @@ config HZ
config THUMB2_KERNEL
bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)"
depends on CPU_V7 && !CPU_V6 && EXPERIMENTAL
depends on CPU_V7 && !CPU_V6 && !CPU_V6K && EXPERIMENTAL
select AEABI
select ARM_ASM_UNIFIED
help
@ -1396,6 +1457,37 @@ config THUMB2_KERNEL
If unsure, say N.
config THUMB2_AVOID_R_ARM_THM_JUMP11
bool "Work around buggy Thumb-2 short branch relocations in gas"
depends on THUMB2_KERNEL && MODULES
default y
help
Various binutils versions can resolve Thumb-2 branches to
locally-defined, preemptible global symbols as short-range "b.n"
branch instructions.
This is a problem, because there's no guarantee the final
destination of the symbol, or any candidate locations for a
trampoline, are within range of the branch. For this reason, the
kernel does not support fixing up the R_ARM_THM_JUMP11 (102)
relocation in modules at all, and it makes little sense to add
support.
The symptom is that the kernel fails with an "unsupported
relocation" error when loading some modules.
Until fixed tools are available, passing
-fno-optimize-sibling-calls to gcc should prevent gcc generating
code which hits this problem, at the cost of a bit of extra runtime
stack usage in some cases.
The problem is described in more detail at:
https://bugs.launchpad.net/binutils-linaro/+bug/725126
Only Thumb-2 kernels are affected.
Unless you are sure your tools don't have this problem, say Y.
config ARM_ASM_UNIFIED
bool
@ -1644,6 +1736,18 @@ config ZBOOT_ROM
Say Y here if you intend to execute your compressed kernel image
(zImage) directly from ROM or flash. If unsure, say N.
config ZBOOT_ROM_MMCIF
bool "Include MMCIF loader in zImage (EXPERIMENTAL)"
depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
help
Say Y here to include experimental MMCIF loading code in the
ROM-able zImage. With this enabled it is possible to write the
ROM-able zImage kernel image to an MMC card and boot the
kernel straight from the reset vector. At reset the processor
Mask ROM will load the first part of the ROM-able zImage,
which in turn loads the rest of the kernel image to RAM using the
MMCIF hardware block.
config CMDLINE
string "Default kernel command string"
default ""
@ -1877,7 +1981,7 @@ config FPE_FASTFPE
config VFP
bool "VFP-format floating point maths"
depends on CPU_V6 || CPU_ARM926T || CPU_V7 || CPU_FEROCEON
depends on CPU_V6 || CPU_V6K || CPU_ARM926T || CPU_V7 || CPU_FEROCEON
help
Say Y to include VFP support code in the kernel. This is needed
if your hardware includes a VFP unit.

View File

@ -89,6 +89,7 @@ tune-$(CONFIG_CPU_XSCALE) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110)
tune-$(CONFIG_CPU_XSC3) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
tune-$(CONFIG_CPU_FEROCEON) :=$(call cc-option,-mtune=marvell-f,-mtune=xscale)
tune-$(CONFIG_CPU_V6) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
tune-$(CONFIG_CPU_V6K) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
ifeq ($(CONFIG_AEABI),y)
CFLAGS_ABI :=-mabi=aapcs-linux -mno-thumb-interwork
@ -105,6 +106,10 @@ AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mau
AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
CFLAGS_THUMB2 :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
AFLAGS_THUMB2 :=$(CFLAGS_THUMB2) -Wa$(comma)-mthumb
# Work around buggy relocation from gas if requested:
ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y)
CFLAGS_MODULE +=-fno-optimize-sibling-calls
endif
endif
# Need -Uarm for gcc < 3.x
@ -190,6 +195,7 @@ machine-$(CONFIG_ARCH_U300) := u300
machine-$(CONFIG_ARCH_U8500) := ux500
machine-$(CONFIG_ARCH_VERSATILE) := versatile
machine-$(CONFIG_ARCH_VEXPRESS) := vexpress
machine-$(CONFIG_ARCH_VT8500) := vt8500
machine-$(CONFIG_ARCH_W90X900) := w90x900
machine-$(CONFIG_ARCH_NUC93X) := nuc93x
machine-$(CONFIG_FOOTBRIDGE) := footbridge
@ -280,7 +286,7 @@ bzImage: zImage
zImage Image xipImage bootpImage uImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
zinstall install: vmlinux
zinstall uinstall install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
# We use MRPROPER_FILES and CLEAN_FILES now
@ -301,6 +307,7 @@ define archhelp
echo ' (supply initrd image via make variable INITRD=<path>)'
echo ' install - Install uncompressed kernel'
echo ' zinstall - Install compressed kernel'
echo ' uinstall - Install U-Boot wrapped compressed kernel'
echo ' Install using (your) ~/bin/$(INSTALLKERNEL) or'
echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
echo ' install to $$(INSTALL_PATH) and run lilo'

View File

@ -99,6 +99,10 @@ zinstall: $(obj)/zImage
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/zImage System.map "$(INSTALL_PATH)"
uinstall: $(obj)/uImage
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/uImage System.map "$(INSTALL_PATH)"
zi:
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/zImage System.map "$(INSTALL_PATH)"

View File

@ -4,9 +4,20 @@
# create a compressed vmlinuz image from the original vmlinux
#
OBJS =
# Ensure that mmcif loader code appears early in the image
# to minimise the number of blocks that have to be read in
# order to load it.
ifeq ($(CONFIG_ZBOOT_ROM_MMCIF),y)
ifeq ($(CONFIG_ARCH_SH7372),y)
OBJS += mmcif-sh7372.o
endif
endif
AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET)
HEAD = head.o
OBJS = misc.o decompress.o
OBJS += misc.o decompress.o
FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c
#
@ -29,6 +40,10 @@ ifeq ($(CONFIG_ARCH_SA1100),y)
OBJS += head-sa1100.o
endif
ifeq ($(CONFIG_ARCH_VT8500),y)
OBJS += head-vt8500.o
endif
ifeq ($(CONFIG_CPU_XSCALE),y)
OBJS += head-xscale.o
endif
@ -83,9 +98,11 @@ endif
EXTRA_CFLAGS := -fpic -fno-builtin
EXTRA_AFLAGS := -Wa,-march=all
# Provide size of uncompressed kernel to the decompressor via a linker symbol.
LDFLAGS_vmlinux = --defsym _image_size=$(shell stat -c "%s" $(obj)/../Image)
# Supply ZRELADDR to the decompressor via a linker symbol.
ifneq ($(CONFIG_AUTO_ZRELADDR),y)
LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
LDFLAGS_vmlinux += --defsym zreladdr=$(ZRELADDR)
endif
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8

View File

@ -25,6 +25,36 @@
/* load board-specific initialization code */
#include <mach/zboot.h>
#ifdef CONFIG_ZBOOT_ROM_MMCIF
/* Load image from MMC */
adr sp, __tmp_stack + 128
ldr r0, __image_start
ldr r1, __image_end
subs r1, r1, r0
ldr r0, __load_base
bl mmcif_loader
/* Jump to loaded code */
ldr r0, __loaded
ldr r1, __image_start
sub r0, r0, r1
ldr r1, __load_base
add pc, r0, r1
__image_start:
.long _start
__image_end:
.long _got_end
__load_base:
.long CONFIG_MEMORY_START + 0x02000000 @ Load at 32 MiB into SDRAM
__loaded:
.long __continue
.align
__tmp_stack:
.space 128
__continue:
#endif /* CONFIG_ZBOOT_ROM_MMCIF */
b 1f
__atags:@ tag #1
.long 12 @ tag->hdr.size = tag_size(tag_core);

View File

@ -0,0 +1,46 @@
/*
* linux/arch/arm/boot/compressed/head-vt8500.S
*
* Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
*
* VIA VT8500 specific tweaks. This is merged into head.S by the linker.
*
*/
#include <linux/linkage.h>
#include <asm/mach-types.h>
.section ".start", "ax"
__VT8500_start:
@ Compare the SCC ID register against a list of known values
ldr r1, .SCCID
ldr r3, [r1]
@ VT8500 override
ldr r4, .VT8500SCC
cmp r3, r4
ldreq r7, .ID_BV07
beq .Lendvt8500
@ WM8505 override
ldr r4, .WM8505SCC
cmp r3, r4
ldreq r7, .ID_8505
beq .Lendvt8500
@ Otherwise, leave the bootloader's machine id untouched
.SCCID:
.word 0xd8120000
.VT8500SCC:
.word 0x34000102
.WM8505SCC:
.word 0x34260103
.ID_BV07:
.word MACH_TYPE_BV07
.ID_8505:
.word MACH_TYPE_WM8505_7IN_NETBOOK
.Lendvt8500:

View File

@ -21,7 +21,7 @@
#if defined(CONFIG_DEBUG_ICEDCC)
#ifdef CONFIG_CPU_V6
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
.macro loadsp, rb, tmp
.endm
.macro writeb, ch, rb
@ -128,14 +128,14 @@ wait: mrc p14, 0, pc, c0, c1, 0
.arm @ Always enter in ARM state
start:
.type start,#function
THUMB( adr r12, BSYM(1f) )
THUMB( bx r12 )
THUMB( .rept 6 )
ARM( .rept 8 )
.rept 7
mov r0, r0
.endr
ARM( mov r0, r0 )
ARM( b 1f )
THUMB( adr r12, BSYM(1f) )
THUMB( bx r12 )
b 1f
.word 0x016f2818 @ Magic numbers to help the loader
.word start @ absolute load/run zImage address
.word _edata @ zImage end address
@ -174,9 +174,7 @@ not_angel:
*/
.text
adr r0, LC0
ldmia r0, {r1, r2, r3, r5, r6, r11, ip}
ldr sp, [r0, #28]
#ifdef CONFIG_AUTO_ZRELADDR
@ determine final kernel image address
mov r4, pc
@ -185,35 +183,108 @@ not_angel:
#else
ldr r4, =zreladdr
#endif
subs r0, r0, r1 @ calculate the delta offset
@ if delta is zero, we are
beq not_relocated @ running at the address we
@ were linked at.
bl cache_on
restart: adr r0, LC0
ldmia r0, {r1, r2, r3, r5, r6, r9, r11, r12}
ldr sp, [r0, #32]
/*
* We're running at a different address. We need to fix
* up various pointers:
* r5 - zImage base address (_start)
* r6 - size of decompressed image
* r11 - GOT start
* ip - GOT end
* We might be running at a different address. We need
* to fix up various pointers.
*/
add r5, r5, r0
sub r0, r0, r1 @ calculate the delta offset
add r5, r5, r0 @ _start
add r6, r6, r0 @ _edata
#ifndef CONFIG_ZBOOT_ROM
/* malloc space is above the relocated stack (64k max) */
add sp, sp, r0
add r10, sp, #0x10000
#else
/*
* With ZBOOT_ROM the bss/stack is non relocatable,
* but someone could still run this code from RAM,
* in which case our reference is _edata.
*/
mov r10, r6
#endif
/*
* Check to see if we will overwrite ourselves.
* r4 = final kernel address
* r5 = start of this image
* r9 = size of decompressed image
* r10 = end of this image, including bss/stack/malloc space if non XIP
* We basically want:
* r4 >= r10 -> OK
* r4 + image length <= r5 -> OK
*/
cmp r4, r10
bhs wont_overwrite
add r10, r4, r9
cmp r10, r5
bls wont_overwrite
/*
* Relocate ourselves past the end of the decompressed kernel.
* r5 = start of this image
* r6 = _edata
* r10 = end of the decompressed kernel
* Because we always copy ahead, we need to do it from the end and go
* backward in case the source and destination overlap.
*/
/* Round up to next 256-byte boundary. */
add r10, r10, #256
bic r10, r10, #255
sub r9, r6, r5 @ size to copy
add r9, r9, #31 @ rounded up to a multiple
bic r9, r9, #31 @ ... of 32 bytes
add r6, r9, r5
add r9, r9, r10
1: ldmdb r6!, {r0 - r3, r10 - r12, lr}
cmp r6, r5
stmdb r9!, {r0 - r3, r10 - r12, lr}
bhi 1b
/* Preserve offset to relocated code. */
sub r6, r9, r6
bl cache_clean_flush
adr r0, BSYM(restart)
add r0, r0, r6
mov pc, r0
wont_overwrite:
/*
* If delta is zero, we are running at the address we were linked at.
* r0 = delta
* r2 = BSS start
* r3 = BSS end
* r4 = kernel execution address
* r7 = architecture ID
* r8 = atags pointer
* r11 = GOT start
* r12 = GOT end
* sp = stack pointer
*/
teq r0, #0
beq not_relocated
add r11, r11, r0
add ip, ip, r0
add r12, r12, r0
#ifndef CONFIG_ZBOOT_ROM
/*
* If we're running fully PIC (i.e. CONFIG_ZBOOT_ROM = n),
* we need to fix up pointers into the BSS region.
* r2 - BSS start
* r3 - BSS end
* sp - stack pointer
* Note that the stack pointer has already been fixed up.
*/
add r2, r2, r0
add r3, r3, r0
add sp, sp, r0
/*
* Relocate all entries in the GOT table.
@ -221,7 +292,7 @@ not_angel:
1: ldr r1, [r11, #0] @ relocate entries in the GOT
add r1, r1, r0 @ table. This fixes up the
str r1, [r11], #4 @ C references.
cmp r11, ip
cmp r11, r12
blo 1b
#else
@ -234,7 +305,7 @@ not_angel:
cmphs r3, r1 @ _end < entry
addlo r1, r1, r0 @ table. This fixes up the
str r1, [r11], #4 @ C references.
cmp r11, ip
cmp r11, r12
blo 1b
#endif
@ -246,76 +317,24 @@ not_relocated: mov r0, #0
cmp r2, r3
blo 1b
/*
* The C runtime environment should now be setup
* sufficiently. Turn the cache on, set up some
* pointers, and start decompressing.
*/
bl cache_on
/*
* The C runtime environment should now be setup sufficiently.
* Set up some pointers, and start decompressing.
* r4 = kernel execution address
* r7 = architecture ID
* r8 = atags pointer
*/
mov r0, r4
mov r1, sp @ malloc space above stack
add r2, sp, #0x10000 @ 64k max
/*
* Check to see if we will overwrite ourselves.
* r4 = final kernel address
* r5 = start of this image
* r6 = size of decompressed image
* r2 = end of malloc space (and therefore this image)
* We basically want:
* r4 >= r2 -> OK
* r4 + image length <= r5 -> OK
*/
cmp r4, r2
bhs wont_overwrite
add r0, r4, r6
cmp r0, r5
bls wont_overwrite
mov r5, r2 @ decompress after malloc space
mov r0, r5
mov r3, r7
bl decompress_kernel
add r0, r0, #127 + 128 @ alignment + stack
bic r0, r0, #127 @ align the kernel length
/*
* r0 = decompressed kernel length
* r1-r3 = unused
* r4 = kernel execution address
* r5 = decompressed kernel start
* r7 = architecture ID
* r8 = atags pointer
* r9-r12,r14 = corrupted
*/
add r1, r5, r0 @ end of decompressed kernel
adr r2, reloc_start
ldr r3, LC1
add r3, r2, r3
1: ldmia r2!, {r9 - r12, r14} @ copy relocation code
stmia r1!, {r9 - r12, r14}
ldmia r2!, {r9 - r12, r14}
stmia r1!, {r9 - r12, r14}
cmp r2, r3
blo 1b
mov sp, r1
add sp, sp, #128 @ relocate the stack
bl cache_clean_flush
ARM( add pc, r5, r0 ) @ call relocation code
THUMB( add r12, r5, r0 )
THUMB( mov pc, r12 ) @ call relocation code
/*
* We're not in danger of overwriting ourselves. Do this the simple way.
*
* r4 = kernel execution address
* r7 = architecture ID
*/
wont_overwrite: mov r0, r4
mov r3, r7
bl decompress_kernel
b call_kernel
bl cache_off
mov r0, #0 @ must be zero
mov r1, r7 @ restore architecture number
mov r2, r8 @ restore atags pointer
mov pc, r4 @ call kernel
.align 2
.type LC0, #object
@ -323,11 +342,11 @@ LC0: .word LC0 @ r1
.word __bss_start @ r2
.word _end @ r3
.word _start @ r5
.word _image_size @ r6
.word _edata @ r6
.word _image_size @ r9
.word _got_start @ r11
.word _got_end @ ip
.word user_stack_end @ sp
LC1: .word reloc_end - reloc_start
.size LC0, . - LC0
#ifdef CONFIG_ARCH_RPC
@ -353,7 +372,7 @@ params: ldr r0, =0x10000100 @ params_phys for RPC
* On exit,
* r0, r1, r2, r3, r9, r10, r12 corrupted
* This routine must preserve:
* r4, r5, r6, r7, r8
* r4, r7, r8
*/
.align 5
cache_on: mov r3, #8 @ cache_on function
@ -550,43 +569,6 @@ __common_mmu_cache_on:
sub pc, lr, r0, lsr #32 @ properly flush pipeline
#endif
/*
* All code following this line is relocatable. It is relocated by
* the above code to the end of the decompressed kernel image and
* executed there. During this time, we have no stacks.
*
* r0 = decompressed kernel length
* r1-r3 = unused
* r4 = kernel execution address
* r5 = decompressed kernel start
* r7 = architecture ID
* r8 = atags pointer
* r9-r12,r14 = corrupted
*/
.align 5
reloc_start: add r9, r5, r0
sub r9, r9, #128 @ do not copy the stack
debug_reloc_start
mov r1, r4
1:
.rept 4
ldmia r5!, {r0, r2, r3, r10 - r12, r14} @ relocate kernel
stmia r1!, {r0, r2, r3, r10 - r12, r14}
.endr
cmp r5, r9
blo 1b
mov sp, r1
add sp, sp, #128 @ relocate the stack
debug_reloc_end
call_kernel: bl cache_clean_flush
bl cache_off
mov r0, #0 @ must be zero
mov r1, r7 @ restore architecture number
mov r2, r8 @ restore atags pointer
mov pc, r4 @ call kernel
/*
* Here follow the relocatable cache support functions for the
* various processors. This is a generic hook for locating an
@ -791,7 +773,7 @@ proc_types:
* On exit,
* r0, r1, r2, r3, r9, r12 corrupted
* This routine must preserve:
* r4, r6, r7
* r4, r7, r8
*/
.align 5
cache_off: mov r3, #12 @ cache_off function
@ -866,7 +848,7 @@ __armv3_mmu_cache_off:
* On exit,
* r1, r2, r3, r9, r10, r11, r12 corrupted
* This routine must preserve:
* r0, r4, r5, r6, r7
* r4, r6, r7, r8
*/
.align 5
cache_clean_flush:
@ -1088,7 +1070,6 @@ memdump: mov r12, r0
#endif
.ltorg
reloc_end:
.align
.section ".stack", "aw", %nobits

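The relocation step in the hunks above moves the zImage past the end of the decompressed kernel by copying from the end backwards, because the source and destination ranges may overlap and the destination lies above the source. A C restatement of that reasoning, for illustration only:

/* Illustrative C equivalent of the backward block copy in the
 * assembler above: when dst > src and the ranges may overlap,
 * copying from the top down never clobbers unread source words. */
#include <stddef.h>
#include <stdint.h>

static void copy_up_backwards(uint32_t *dst, const uint32_t *src,
			      size_t nwords)
{
	while (nwords--)
		dst[nwords] = src[nwords];	/* highest word first */
}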
View File

@ -36,7 +36,7 @@ extern void error(char *x);
#ifdef CONFIG_DEBUG_ICEDCC
#ifdef CONFIG_CPU_V6
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
static void icedcc_putc(int ch)
{

View File

@ -0,0 +1,87 @@
/*
* sh7372 MMCIF loader
*
* Copyright (C) 2010 Magnus Damm
* Copyright (C) 2010 Simon Horman
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/mmc/sh_mmcif.h>
#include <mach/mmcif.h>
#define MMCIF_BASE (void __iomem *)0xe6bd0000
#define PORT84CR (void __iomem *)0xe6050054
#define PORT85CR (void __iomem *)0xe6050055
#define PORT86CR (void __iomem *)0xe6050056
#define PORT87CR (void __iomem *)0xe6050057
#define PORT88CR (void __iomem *)0xe6050058
#define PORT89CR (void __iomem *)0xe6050059
#define PORT90CR (void __iomem *)0xe605005a
#define PORT91CR (void __iomem *)0xe605005b
#define PORT92CR (void __iomem *)0xe605005c
#define PORT99CR (void __iomem *)0xe6050063
#define SMSTPCR3 (void __iomem *)0xe615013c
/* SH7372 specific MMCIF loader
*
* loads the zImage from an MMC card starting from block 1.
*
* The image must start with a vrl4 header and
* the zImage must start at offset 512 of the image. That is,
* at block 2 (= byte 1024) on the media.
*
* Use the following line to write the vrl4-formatted zImage
* to an MMC card:
* # dd if=vrl4.out of=/dev/sdx bs=512 seek=1
*/
asmlinkage void mmcif_loader(unsigned char *buf, unsigned long len)
{
mmcif_init_progress();
mmcif_update_progress(MMCIF_PROGRESS_ENTER);
/* Initialise MMC
* registers: PORT84CR-PORT92CR
* (MMCD0_0-MMCD0_7,MMCCMD0 Control)
* value: 0x04 - select function 4
*/
__raw_writeb(0x04, PORT84CR);
__raw_writeb(0x04, PORT85CR);
__raw_writeb(0x04, PORT86CR);
__raw_writeb(0x04, PORT87CR);
__raw_writeb(0x04, PORT88CR);
__raw_writeb(0x04, PORT89CR);
__raw_writeb(0x04, PORT90CR);
__raw_writeb(0x04, PORT91CR);
__raw_writeb(0x04, PORT92CR);
/* Initialise MMC
* registers: PORT99CR (MMCCLK0 Control)
* value: 0x10 | 0x04 - enable output | select function 4
*/
__raw_writeb(0x14, PORT99CR);
/* Enable clock to MMC hardware block */
__raw_writel(__raw_readl(SMSTPCR3) & ~(1 << 12), SMSTPCR3);
mmcif_update_progress(MMCIF_PROGRESS_INIT);
/* setup MMCIF hardware */
sh_mmcif_boot_init(MMCIF_BASE);
mmcif_update_progress(MMCIF_PROGRESS_LOAD);
/* load kernel via MMCIF interface */
sh_mmcif_boot_do_read(MMCIF_BASE, 2, /* Kernel is at block 2 */
(len + SH_MMCIF_BBS - 1) / SH_MMCIF_BBS, buf);
/* Disable clock to MMC hardware block */
__raw_writel(__raw_readl(SMSTPCR3) | (1 << 12), SMSTPCR3);
mmcif_update_progress(MMCIF_PROGRESS_DONE);
}

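For reference, the MMCIF boot stub in head-shmobile.S (shown earlier) calls this loader with the SDRAM load base and the whole image length; restated in C for illustration, with LOAD_BASE mirroring the __load_base literal in that fragment:

/* Illustrative restatement of the assembler call sequence; not real
 * kernel code. _start/_got_end are the zImage link-time bounds. */
extern char _start[], _got_end[];
extern void mmcif_loader(unsigned char *buf, unsigned long len);

#define LOAD_BASE (CONFIG_MEMORY_START + 0x02000000) /* 32 MiB into SDRAM */

static void boot_from_mmc(void)
{
	unsigned long len = (unsigned long)(_got_end - _start);

	mmcif_loader((unsigned char *)LOAD_BASE, len);
	/* ...then jump to __continue, relocated to LOAD_BASE */
}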
View File

@ -43,9 +43,6 @@ SECTIONS
_etext = .;
/* Assume size of decompressed image is 4x the compressed image */
_image_size = (_etext - _text) * 4;
_got_start = .;
.got : { *(.got) }
_got_end = .;

View File

@ -44,6 +44,19 @@ struct gic_chip_data {
void __iomem *cpu_base;
};
/*
* Supported arch-specific GIC IRQ extension hooks.
* They all default to NULL.
*/
struct irq_chip gic_arch_extn = {
.irq_ack = NULL,
.irq_mask = NULL,
.irq_unmask = NULL,
.irq_retrigger = NULL,
.irq_set_type = NULL,
.irq_set_wake = NULL,
};
#ifndef MAX_GIC_NR
#define MAX_GIC_NR 1
#endif
@ -74,6 +87,8 @@ static inline unsigned int gic_irq(struct irq_data *d)
static void gic_ack_irq(struct irq_data *d)
{
spin_lock(&irq_controller_lock);
if (gic_arch_extn.irq_ack)
gic_arch_extn.irq_ack(d);
writel(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
spin_unlock(&irq_controller_lock);
}
@ -84,6 +99,8 @@ static void gic_mask_irq(struct irq_data *d)
spin_lock(&irq_controller_lock);
writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
if (gic_arch_extn.irq_mask)
gic_arch_extn.irq_mask(d);
spin_unlock(&irq_controller_lock);
}
@ -92,6 +109,8 @@ static void gic_unmask_irq(struct irq_data *d)
u32 mask = 1 << (d->irq % 32);
spin_lock(&irq_controller_lock);
if (gic_arch_extn.irq_unmask)
gic_arch_extn.irq_unmask(d);
writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
spin_unlock(&irq_controller_lock);
}
@ -116,6 +135,9 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
spin_lock(&irq_controller_lock);
if (gic_arch_extn.irq_set_type)
gic_arch_extn.irq_set_type(d, type);
val = readl(base + GIC_DIST_CONFIG + confoff);
if (type == IRQ_TYPE_LEVEL_HIGH)
val &= ~confmask;
@ -141,32 +163,54 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
return 0;
}
static int gic_retrigger(struct irq_data *d)
{
if (gic_arch_extn.irq_retrigger)
return gic_arch_extn.irq_retrigger(d);
return -ENXIO;
}
#ifdef CONFIG_SMP
static int
gic_set_cpu(struct irq_data *d, const struct cpumask *mask_val, bool force)
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
unsigned int shift = (d->irq % 4) * 8;
unsigned int cpu = cpumask_first(mask_val);
u32 val;
struct irq_desc *desc;
u32 val, mask, bit;
if (cpu >= 8)
return -EINVAL;
mask = 0xff << shift;
bit = 1 << (cpu + shift);
spin_lock(&irq_controller_lock);
desc = irq_to_desc(d->irq);
if (desc == NULL) {
spin_unlock(&irq_controller_lock);
return -EINVAL;
}
d->node = cpu;
val = readl(reg) & ~(0xff << shift);
val |= 1 << (cpu + shift);
writel(val, reg);
val = readl(reg) & ~mask;
writel(val | bit, reg);
spin_unlock(&irq_controller_lock);
return 0;
}
#endif
#ifdef CONFIG_PM
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
int ret = -ENXIO;
if (gic_arch_extn.irq_set_wake)
ret = gic_arch_extn.irq_set_wake(d, on);
return ret;
}
#else
#define gic_set_wake NULL
#endif
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
struct gic_chip_data *chip_data = get_irq_data(irq);
@ -202,9 +246,11 @@ static struct irq_chip gic_chip = {
.irq_mask = gic_mask_irq,
.irq_unmask = gic_unmask_irq,
.irq_set_type = gic_set_type,
.irq_retrigger = gic_retrigger,
#ifdef CONFIG_SMP
.irq_set_affinity = gic_set_cpu,
.irq_set_affinity = gic_set_affinity,
#endif
.irq_set_wake = gic_set_wake,
};
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)

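A hypothetical example of how a platform might use the gic_arch_extn hooks added above. All my_soc_* names are invented; the hooks must be installed before the GIC is initialised so the flow handlers see them, and gic_arch_extn itself is exported through the GIC header:

/* Invented platform code illustrating the hook points only. */
#include <linux/irq.h>
#include <asm/hardware/gic.h>

extern void __iomem *my_dist_base, *my_cpu_base;	/* mapped elsewhere */
extern int my_soc_wake_enable(unsigned int irq, unsigned int on);

static int my_soc_gic_set_wake(struct irq_data *d, unsigned int on)
{
	/* forward wake configuration to a (hypothetical) SoC PM block */
	return my_soc_wake_enable(d->irq, on);
}

static void __init my_soc_init_irq(void)
{
	gic_arch_extn.irq_set_wake = my_soc_gic_set_wake;
	gic_init(0, 29, my_dist_base, my_cpu_base); /* then probe the GIC */
}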
View File

@ -32,11 +32,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
dump->u_ssize = 0;
dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm;
dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm;
dump->u_debugreg[4] = tsk->thread.debug.nsaved;
memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
if (dump->start_stack < 0x04000000)
dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;

View File

@ -148,15 +148,19 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
* Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0.
*/
/*
* Native endian assembly bitops. nr = 0 -> word 0 bit 0.
*/
extern void _set_bit(int nr, volatile unsigned long * p);
extern void _clear_bit(int nr, volatile unsigned long * p);
extern void _change_bit(int nr, volatile unsigned long * p);
extern int _test_and_set_bit(int nr, volatile unsigned long * p);
extern int _test_and_clear_bit(int nr, volatile unsigned long * p);
extern int _test_and_change_bit(int nr, volatile unsigned long * p);
/*
* Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
*/
extern void _set_bit_le(int nr, volatile unsigned long * p);
extern void _clear_bit_le(int nr, volatile unsigned long * p);
extern void _change_bit_le(int nr, volatile unsigned long * p);
extern int _test_and_set_bit_le(int nr, volatile unsigned long * p);
extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p);
extern int _test_and_change_bit_le(int nr, volatile unsigned long * p);
extern int _find_first_zero_bit_le(const void * p, unsigned size);
extern int _find_next_zero_bit_le(const void * p, int size, int offset);
extern int _find_first_bit_le(const unsigned long *p, unsigned size);
@ -165,12 +169,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
/*
* Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
*/
extern void _set_bit_be(int nr, volatile unsigned long * p);
extern void _clear_bit_be(int nr, volatile unsigned long * p);
extern void _change_bit_be(int nr, volatile unsigned long * p);
extern int _test_and_set_bit_be(int nr, volatile unsigned long * p);
extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p);
extern int _test_and_change_bit_be(int nr, volatile unsigned long * p);
extern int _find_first_zero_bit_be(const void * p, unsigned size);
extern int _find_next_zero_bit_be(const void * p, int size, int offset);
extern int _find_first_bit_be(const unsigned long *p, unsigned size);
@ -180,33 +178,26 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
/*
* The __* form of bitops are non-atomic and may be reordered.
*/
#define ATOMIC_BITOP_LE(name,nr,p) \
(__builtin_constant_p(nr) ? \
____atomic_##name(nr, p) : \
_##name##_le(nr,p))
#define ATOMIC_BITOP_BE(name,nr,p) \
(__builtin_constant_p(nr) ? \
____atomic_##name(nr, p) : \
_##name##_be(nr,p))
#define ATOMIC_BITOP(name,nr,p) \
(__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p))
#else
#define ATOMIC_BITOP_LE(name,nr,p) _##name##_le(nr,p)
#define ATOMIC_BITOP_BE(name,nr,p) _##name##_be(nr,p)
#define ATOMIC_BITOP(name,nr,p) _##name(nr,p)
#endif
#define NONATOMIC_BITOP(name,nr,p) \
(____nonatomic_##name(nr, p))
/*
* Native endian atomic definitions.
*/
#define set_bit(nr,p) ATOMIC_BITOP(set_bit,nr,p)
#define clear_bit(nr,p) ATOMIC_BITOP(clear_bit,nr,p)
#define change_bit(nr,p) ATOMIC_BITOP(change_bit,nr,p)
#define test_and_set_bit(nr,p) ATOMIC_BITOP(test_and_set_bit,nr,p)
#define test_and_clear_bit(nr,p) ATOMIC_BITOP(test_and_clear_bit,nr,p)
#define test_and_change_bit(nr,p) ATOMIC_BITOP(test_and_change_bit,nr,p)
#ifndef __ARMEB__
/*
* These are the little endian, atomic definitions.
*/
#define set_bit(nr,p) ATOMIC_BITOP_LE(set_bit,nr,p)
#define clear_bit(nr,p) ATOMIC_BITOP_LE(clear_bit,nr,p)
#define change_bit(nr,p) ATOMIC_BITOP_LE(change_bit,nr,p)
#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz)
#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off)
#define find_first_bit(p,sz) _find_first_bit_le(p,sz)
@ -215,16 +206,9 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#define WORD_BITOFF_TO_LE(x) ((x))
#else
/*
* These are the big endian, atomic definitions.
*/
#define set_bit(nr,p) ATOMIC_BITOP_BE(set_bit,nr,p)
#define clear_bit(nr,p) ATOMIC_BITOP_BE(clear_bit,nr,p)
#define change_bit(nr,p) ATOMIC_BITOP_BE(change_bit,nr,p)
#define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p)
#define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p)
#define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p)
#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz)
#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off)
#define find_first_bit(p,sz) _find_first_bit_be(p,sz)

View File

@ -12,130 +12,13 @@
#include <linux/mm.h>
#include <asm/glue.h>
#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>
#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
/*
* Cache Model
* ===========
*/
#undef _CACHE
#undef MULTI_CACHE
#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v3
# endif
#endif
#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4
# endif
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE fa
# endif
#endif
#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm926
# endif
#endif
#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm940
# endif
#endif
#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm946
# endif
#endif
#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4wb
# endif
#endif
#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xscale
# endif
#endif
#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xsc3
# endif
#endif
#if defined(CONFIG_CPU_MOHAWK)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE mohawk
# endif
#endif
#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v6
//# endif
#endif
#if defined(CONFIG_CPU_V7)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v7
//# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
/*
* This flag is used to indicate that the page pointed to by a pte is clean
* and does not require cleaning before returning it to the user.
@ -249,19 +132,11 @@ extern struct cpu_cache_fns cpu_cache;
* visible to the CPU.
*/
#define dmac_map_area cpu_cache.dma_map_area
#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_flush_range cpu_cache.dma_flush_range
#else
#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
@ -276,10 +151,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
#define dmac_map_area __glue(_CACHE,_dma_map_area)
#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);
@ -316,7 +187,8 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
* Optimized __flush_icache_all for the common cases. Note that UP ARMv7
* will fall through to use __flush_icache_all_generic.
*/
#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) || \
#if (defined(CONFIG_CPU_V7) && \
(defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred __cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)

View File

@ -1,69 +0,0 @@
/*
* arch/arm/include/asm/cpu-multi32.h
*
* Copyright (C) 2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/page.h>
struct mm_struct;
/*
* Don't change this structure - ASM code
* relies on it.
*/
extern struct processor {
/* MISC
* get data abort address/flags
*/
void (*_data_abort)(unsigned long pc);
/*
* Retrieve prefetch fault address
*/
unsigned long (*_prefetch_abort)(unsigned long lr);
/*
* Set up any processor specifics
*/
void (*_proc_init)(void);
/*
* Disable any processor specifics
*/
void (*_proc_fin)(void);
/*
* Special stuff for a reset
*/
void (*reset)(unsigned long addr) __attribute__((noreturn));
/*
* Idle the processor
*/
int (*_do_idle)(void);
/*
* Processor architecture specific
*/
/*
* clean a virtual address range from the
* D-cache without flushing the cache.
*/
void (*dcache_clean_area)(void *addr, int size);
/*
* Set the page table
*/
void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
/*
* Set a possibly extended PTE. Non-extended PTEs should
* ignore 'ext'.
*/
void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
} processor;
#define cpu_proc_init() processor._proc_init()
#define cpu_proc_fin() processor._proc_fin()
#define cpu_reset(addr) processor.reset(addr)
#define cpu_do_idle() processor._do_idle()
#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz)
#define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext)
#define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm)

View File

@ -1,44 +0,0 @@
/*
* arch/arm/include/asm/cpu-single.h
*
* Copyright (C) 2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* Single CPU
*/
#ifdef __STDC__
#define __catify_fn(name,x) name##x
#else
#define __catify_fn(name,x) name/**/x
#endif
#define __cpu_fn(name,x) __catify_fn(name,x)
/*
* If we are supporting multiple CPUs, then we must use a table of
* function pointers for this lot. Otherwise, we can optimise the
* table away.
*/
#define cpu_proc_init __cpu_fn(CPU_NAME,_proc_init)
#define cpu_proc_fin __cpu_fn(CPU_NAME,_proc_fin)
#define cpu_reset __cpu_fn(CPU_NAME,_reset)
#define cpu_do_idle __cpu_fn(CPU_NAME,_do_idle)
#define cpu_dcache_clean_area __cpu_fn(CPU_NAME,_dcache_clean_area)
#define cpu_do_switch_mm __cpu_fn(CPU_NAME,_switch_mm)
#define cpu_set_pte_ext __cpu_fn(CPU_NAME,_set_pte_ext)
#include <asm/page.h>
struct mm_struct;
/* declare all the functions as extern */
extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
extern void cpu_dcache_clean_area(void *, int);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));

View File

@ -23,6 +23,8 @@
#define CPUID_EXT_ISAR4 "c2, 4"
#define CPUID_EXT_ISAR5 "c2, 5"
extern unsigned int processor_id;
#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg) \
({ \
@ -43,7 +45,6 @@
__val; \
})
#else
extern unsigned int processor_id;
#define read_cpuid(reg) (processor_id)
#define read_cpuid_ext(reg) 0
#endif

View File

@ -0,0 +1,94 @@
/*
* arch/arm/include/asm/fncpy.h - helper macros for function body copying
*
* Copyright (C) 2011 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* These macros are intended for use when there is a need to copy a low-level
* function body into special memory.
*
* For example, when reconfiguring the SDRAM controller, the code doing the
* reconfiguration may need to run from SRAM.
*
* NOTE: the copied function body must be entirely self-contained and
* position-independent in order for this to work properly.
*
* NOTE: in order for embedded literals and data to get referenced correctly,
* the alignment of functions must be preserved when copying. To ensure this,
* the source and destination addresses for fncpy() must be aligned to a
* multiple of 8 bytes: you will get a BUG() if this condition is not met.
* You will typically need a ".align 3" directive in the assembler where the
* function to be copied is defined, and ensure that your allocator for the
* destination buffer returns 8-byte-aligned pointers.
*
* Typical usage example:
*
* extern int f(args);
* extern uint32_t size_of_f;
* int (*copied_f)(args);
* void *sram_buffer;
*
* copied_f = fncpy(sram_buffer, &f, size_of_f);
*
* ... later, call the function: ...
*
* copied_f(args);
*
* The size of the function to be copied can't be determined from C:
* this must be determined by other means, such as adding assembler directives
* in the file where f is defined.
*/
#ifndef __ASM_FNCPY_H
#define __ASM_FNCPY_H
#include <linux/types.h>
#include <linux/string.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>
/*
* Minimum alignment requirement for the source and destination addresses
* for function copying.
*/
#define FNCPY_ALIGN 8
#define fncpy(dest_buf, funcp, size) ({ \
uintptr_t __funcp_address; \
typeof(funcp) __result; \
\
asm("" : "=r" (__funcp_address) : "0" (funcp)); \
\
/* \
* Ensure alignment of source and destination addresses, \
* disregarding the function's Thumb bit: \
*/ \
BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
(__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
\
memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
flush_icache_range((unsigned long)(dest_buf), \
(unsigned long)(dest_buf) + (size)); \
\
asm("" : "=r" (__result) \
: "0" ((uintptr_t)(dest_buf) | (__funcp_address & 1))); \
\
__result; \
})
#endif /* !__ASM_FNCPY_H */

View File

@ -0,0 +1,146 @@
/*
* arch/arm/include/asm/glue-cache.h
*
* Copyright (C) 1999-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_CACHE_H
#define ASM_GLUE_CACHE_H
#include <asm/glue.h>
/*
* Cache Model
* ===========
*/
#undef _CACHE
#undef MULTI_CACHE
#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v3
# endif
#endif
#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4
# endif
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE fa
# endif
#endif
#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm926
# endif
#endif
#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm940
# endif
#endif
#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm946
# endif
#endif
#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4wb
# endif
#endif
#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xscale
# endif
#endif
#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xsc3
# endif
#endif
#if defined(CONFIG_CPU_MOHAWK)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE mohawk
# endif
#endif
#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v6
//# endif
#endif
#if defined(CONFIG_CPU_V7)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v7
//# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
#ifndef MULTI_CACHE
#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
#define dmac_map_area __glue(_CACHE,_dma_map_area)
#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
#endif
#endif

View File

@ -0,0 +1,110 @@
/*
* arch/arm/include/asm/glue-df.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_DF_H
#define ASM_GLUE_DF_H
#include <asm/glue.h>
/*
* Data Abort Model
* ================
*
* We have the following to choose from:
* arm6 - ARM6 style
* arm7 - ARM7 style
* v4_early - ARMv4 without Thumb early abort handler
* v4t_late - ARMv4 with Thumb late abort handler
* v4t_early - ARMv4 with Thumb early abort handler
* v5tej_early - ARMv5 with Thumb and Java early abort handler
* xscale - ARMv5 with Thumb with Xscale extensions
* v6_early - ARMv6 generic early abort handler
* v7_early - ARMv7 generic early abort handler
*/
#undef CPU_DABORT_HANDLER
#undef MULTI_DABORT
#if defined(CONFIG_CPU_ARM610)
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER cpu_arm6_data_abort
# endif
#endif
#if defined(CONFIG_CPU_ARM710)
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER cpu_arm7_data_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_LV4T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4t_late_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV4
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV4T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4t_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV5TJ
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v5tj_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV5T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v5t_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV6
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v6_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV7
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v7_early_abort
# endif
#endif
#ifndef CPU_DABORT_HANDLER
#error Unknown data abort handler type
#endif
#endif

View File

@ -0,0 +1,57 @@
/*
* arch/arm/include/asm/glue-pf.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_PF_H
#define ASM_GLUE_PF_H
#include <asm/glue.h>
/*
* Prefetch Abort Model
* ====================
*
* We have the following to choose from:
* legacy - no IFSR, no IFAR
* v6 - ARMv6: IFSR, no IFAR
* v7 - ARMv7: IFSR and IFAR
*/
#undef CPU_PABORT_HANDLER
#undef MULTI_PABORT
#ifdef CONFIG_CPU_PABRT_LEGACY
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER legacy_pabort
# endif
#endif
#ifdef CONFIG_CPU_PABRT_V6
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER v6_pabort
# endif
#endif
#ifdef CONFIG_CPU_PABRT_V7
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER v7_pabort
# endif
#endif
#ifndef CPU_PABORT_HANDLER
#error Unknown prefetch abort handler type
#endif
#endif

View File

@ -0,0 +1,264 @@
/*
* arch/arm/include/asm/glue-proc.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_PROC_H
#define ASM_GLUE_PROC_H
#include <asm/glue.h>
/*
* Work out if we need multiple CPU support
*/
#undef MULTI_CPU
#undef CPU_NAME
/*
* CPU_NAME - the prefix for CPU related functions
*/
#ifdef CONFIG_CPU_ARM610
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm6
# endif
#endif
#ifdef CONFIG_CPU_ARM7TDMI
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm7tdmi
# endif
#endif
#ifdef CONFIG_CPU_ARM710
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm7
# endif
#endif
#ifdef CONFIG_CPU_ARM720T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm720
# endif
#endif
#ifdef CONFIG_CPU_ARM740T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm740
# endif
#endif
#ifdef CONFIG_CPU_ARM9TDMI
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm9tdmi
# endif
#endif
#ifdef CONFIG_CPU_ARM920T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm920
# endif
#endif
#ifdef CONFIG_CPU_ARM922T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm922
# endif
#endif
#ifdef CONFIG_CPU_FA526
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_fa526
# endif
#endif
#ifdef CONFIG_CPU_ARM925T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm925
# endif
#endif
#ifdef CONFIG_CPU_ARM926T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm926
# endif
#endif
#ifdef CONFIG_CPU_ARM940T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm940
# endif
#endif
#ifdef CONFIG_CPU_ARM946E
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm946
# endif
#endif
#ifdef CONFIG_CPU_SA110
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_sa110
# endif
#endif
#ifdef CONFIG_CPU_SA1100
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_sa1100
# endif
#endif
#ifdef CONFIG_CPU_ARM1020
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1020
# endif
#endif
#ifdef CONFIG_CPU_ARM1020E
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1020e
# endif
#endif
#ifdef CONFIG_CPU_ARM1022
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1022
# endif
#endif
#ifdef CONFIG_CPU_ARM1026
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1026
# endif
#endif
#ifdef CONFIG_CPU_XSCALE
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_xscale
# endif
#endif
#ifdef CONFIG_CPU_XSC3
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_xsc3
# endif
#endif
#ifdef CONFIG_CPU_MOHAWK
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_mohawk
# endif
#endif
#ifdef CONFIG_CPU_FEROCEON
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_feroceon
# endif
#endif
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v6
# endif
#endif
#ifdef CONFIG_CPU_V7
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v7
# endif
#endif
#ifndef MULTI_CPU
#define cpu_proc_init __glue(CPU_NAME,_proc_init)
#define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
#define cpu_reset __glue(CPU_NAME,_reset)
#define cpu_do_idle __glue(CPU_NAME,_do_idle)
#define cpu_dcache_clean_area __glue(CPU_NAME,_dcache_clean_area)
#define cpu_do_switch_mm __glue(CPU_NAME,_switch_mm)
#define cpu_set_pte_ext __glue(CPU_NAME,_set_pte_ext)
#define cpu_suspend_size __glue(CPU_NAME,_suspend_size)
#define cpu_do_suspend __glue(CPU_NAME,_do_suspend)
#define cpu_do_resume __glue(CPU_NAME,_do_resume)
#endif
#endif

View File

@ -15,7 +15,6 @@
*/
#ifdef __KERNEL__
#ifdef __STDC__
#define ____glue(name,fn) name##fn
#else
@ -23,141 +22,4 @@
#endif
#define __glue(name,fn) ____glue(name,fn)
/*
* Data Abort Model
* ================
*
* We have the following to choose from:
* arm6 - ARM6 style
* arm7 - ARM7 style
* v4_early - ARMv4 without Thumb early abort handler
* v4t_late - ARMv4 with Thumb late abort handler
* v4t_early - ARMv4 with Thumb early abort handler
* v5tej_early - ARMv5 with Thumb and Java early abort handler
* xscale - ARMv5 with Thumb with Xscale extensions
* v6_early - ARMv6 generic early abort handler
* v7_early - ARMv7 generic early abort handler
*/
#undef CPU_DABORT_HANDLER
#undef MULTI_DABORT
#if defined(CONFIG_CPU_ARM610)
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER cpu_arm6_data_abort
# endif
#endif
#if defined(CONFIG_CPU_ARM710)
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER cpu_arm7_data_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_LV4T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4t_late_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV4
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV4T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4t_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV5TJ
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v5tj_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV5T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v5t_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV6
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v6_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV7
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v7_early_abort
# endif
#endif
#ifndef CPU_DABORT_HANDLER
#error Unknown data abort handler type
#endif
/*
* Prefetch Abort Model
* ====================
*
* We have the following to choose from:
* legacy - no IFSR, no IFAR
* v6 - ARMv6: IFSR, no IFAR
* v7 - ARMv7: IFSR and IFAR
*/
#undef CPU_PABORT_HANDLER
#undef MULTI_PABORT
#ifdef CONFIG_CPU_PABRT_LEGACY
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER legacy_pabort
# endif
#endif
#ifdef CONFIG_CPU_PABRT_V6
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER v6_pabort
# endif
#endif
#ifdef CONFIG_CPU_PABRT_V7
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER v7_pabort
# endif
#endif
#ifndef CPU_PABORT_HANDLER
#error Unknown prefetch abort handler type
#endif
#endif

View File

@ -34,6 +34,7 @@
#ifndef __ASSEMBLY__
extern void __iomem *gic_cpu_base_addr;
extern struct irq_chip gic_arch_extn;
void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *);
void gic_secondary_init(unsigned int);

View File

@ -19,12 +19,37 @@
extern pte_t *pkmap_page_table;
#define ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high(struct page *page);
extern void *kmap_high_get(struct page *page);
extern void kunmap_high(struct page *page);
/*
* The reason for kmap_high_get() is to ensure that the currently kmap'd
* page usage count does not decrease to zero while we're using its
* existing virtual mapping in an atomic context. With a VIVT cache this
* is essential to do, but with a VIPT cache this is only an optimization
* so as not to pay the price of establishing a second mapping if an existing
* one can be used. However, on platforms without hardware TLB maintenance
* broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
* the locking involved must also disable IRQs which is incompatible with
* the IPI mechanism used by global TLB operations.
*/
#define ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
#undef ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
#error "The sum of features in your kernel config cannot be supported together"
#endif
#endif
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);
#else
static inline void *kmap_high_get(struct page *page)
{
return NULL;
}
#endif
/*
* The following functions are already defined by <linux/highmem.h>
* when CONFIG_HIGHMEM is not set.
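A hedged usage sketch for kmap_high_get(): flush through the page's existing kmap mapping when one exists, otherwise there is no alias to worry about. This mirrors the pattern used by the ARM cache flushing code, but the function below and its flush call are illustrative, not a quote of it:

#include <linux/highmem.h>
#include <asm/cacheflush.h>

static void example_flush_kmap_alias(struct page *page)
{
	void *addr = kmap_high_get(page);	/* NULL when no kmap exists */

	if (addr) {
		/* safe in atomic context: the get pinned the mapping */
		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		kunmap_high(page);		/* drop the extra pin */
	}
}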

View File

@ -34,4 +34,35 @@ do { \
raw_spin_unlock(&desc->lock); \
} while(0)
#ifndef __ASSEMBLY__
/*
* Entry/exit functions for chained handlers where the primary IRQ chip
* may implement either fasteoi or level-trigger flow control.
*/
static inline void chained_irq_enter(struct irq_chip *chip,
struct irq_desc *desc)
{
/* FastEOI controllers require no action on entry. */
if (chip->irq_eoi)
return;
if (chip->irq_mask_ack) {
chip->irq_mask_ack(&desc->irq_data);
} else {
chip->irq_mask(&desc->irq_data);
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
}
}
static inline void chained_irq_exit(struct irq_chip *chip,
struct irq_desc *desc)
{
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
else
chip->irq_unmask(&desc->irq_data);
}
#endif
#endif
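A usage sketch for the helpers above: a demultiplexing handler brackets its demux loop with chained_irq_enter()/chained_irq_exit() so the primary chip is masked/acked or eoi'd correctly whichever flow it implements. The pending-register read and the first_child_irq base below are hypothetical:

#include <linux/irq.h>
#include <asm/mach/irq.h>

static unsigned int first_child_irq;		/* hypothetical demux base */
static unsigned long example_read_pending(void);/* hypothetical device read */

static void example_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;

	chained_irq_enter(chip, desc);

	pending = example_read_pending();
	while (pending) {
		unsigned int bit = __ffs(pending);

		generic_handle_irq(first_child_irq + bit);
		pending &= ~(1UL << bit);
	}

	chained_irq_exit(chip, desc);
}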

View File

@ -15,6 +15,7 @@
#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <mach/memory.h>
#include <asm/sizes.h>
@ -132,21 +133,11 @@
#define DTCM_OFFSET UL(0xfffe8000)
#endif
/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
*/
#ifndef __virt_to_phys
#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
#endif
/*
* Convert a physical address to a Page Frame Number and back
*/
#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT)
/*
* Convert a page to/from a physical address
@ -156,6 +147,62 @@
#ifndef __ASSEMBLY__
/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
*/
#ifndef __virt_to_phys
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
/*
* Constants used to force the right instruction encodings and shifts
* so that all we need to do is modify the 8-bit constant field.
*/
#define __PV_BITS_31_24 0x81000000
#define __PV_BITS_23_16 0x00810000
extern unsigned long __pv_phys_offset;
#define PHYS_OFFSET __pv_phys_offset
#define __pv_stub(from,to,instr,type) \
__asm__("@ __pv_stub\n" \
"1: " instr " %0, %1, %2\n" \
" .pushsection .pv_table,\"a\"\n" \
" .long 1b\n" \
" .popsection\n" \
: "=r" (to) \
: "r" (from), "I" (type))
static inline unsigned long __virt_to_phys(unsigned long x)
{
unsigned long t;
__pv_stub(x, t, "add", __PV_BITS_31_24);
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
__pv_stub(t, t, "add", __PV_BITS_23_16);
#endif
return t;
}
static inline unsigned long __phys_to_virt(unsigned long x)
{
unsigned long t;
__pv_stub(x, t, "sub", __PV_BITS_31_24);
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
__pv_stub(t, t, "sub", __PV_BITS_23_16);
#endif
return t;
}
#else
#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
#endif
#endif
#ifndef PHYS_OFFSET
#define PHYS_OFFSET PLAT_PHYS_OFFSET
#endif
/*
* The DMA mask corresponding to the maximum bus address allocatable
* using GFP_DMA. The default here places no restriction on DMA
@ -188,12 +235,12 @@
* translation for translating DMA addresses. Use the driver
* DMA support - see dma-mapping.h.
*/
static inline unsigned long virt_to_phys(const volatile void *x)
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
return __virt_to_phys((unsigned long)(x));
}
static inline void *phys_to_virt(unsigned long x)
static inline void *phys_to_virt(phys_addr_t x)
{
return (void *)(__phys_to_virt((unsigned long)(x)));
}
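To make the patched arithmetic concrete, a minimal userspace C model follows. The PAGE_OFFSET/PHYS_OFFSET values are examples only; the point is that a single 16MiB-aligned delta, wrapped modulo 2^32, turns every translation into one add:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t page_offset = 0xc0000000;	/* example virtual RAM base */
	uint32_t phys_offset = 0x80000000;	/* example physical RAM base */
	/* the delta the fixup code writes into each stub's immediate */
	uint32_t delta = phys_offset - page_offset;	/* 0xc0000000 */
	uint32_t virt = 0xc0008000;

	/* what a patched "add rd, rn, #imm" computes at run time */
	printf("virt 0x%08x -> phys 0x%08x\n", virt, virt + delta);
	return 0;
}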

View File

@ -25,8 +25,31 @@ struct mod_arch_specific {
};
/*
* Include the ARM architecture version.
* Add the ARM architecture version to the version magic string
*/
#define MODULE_ARCH_VERMAGIC "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
#define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
/* Add __virt_to_phys patching state as well */
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
#define MODULE_ARCH_VERMAGIC_P2V "p2v16 "
#else
#define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
#endif
#else
#define MODULE_ARCH_VERMAGIC_P2V ""
#endif
/* Add instruction set architecture tag to distinguish ARM/Thumb kernels */
#ifdef CONFIG_THUMB2_KERNEL
#define MODULE_ARCH_VERMAGIC_ARMTHUMB "thumb2 "
#else
#define MODULE_ARCH_VERMAGIC_ARMTHUMB ""
#endif
#define MODULE_ARCH_VERMAGIC \
MODULE_ARCH_VERMAGIC_ARMVSN \
MODULE_ARCH_VERMAGIC_ARMTHUMB \
MODULE_ARCH_VERMAGIC_P2V
#endif /* _ASM_ARM_MODULE_H */
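As an illustration of the concatenation above (assumed configuration, shown only to make the string shape concrete): an ARMv7 Thumb-2 kernel with 8-bit phys/virt patching would produce

/*
 * CONFIG_THUMB2_KERNEL=y, CONFIG_ARM_PATCH_PHYS_VIRT=y (8-bit):
 *   MODULE_ARCH_VERMAGIC == "ARMv7 " "thumb2 " "p2v8 "
 *                        == "ARMv7 thumb2 p2v8 "
 */

so a module built without matching options fails the vermagic check at load time.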

View File

@ -31,6 +31,7 @@ struct outer_cache_fns {
#ifdef CONFIG_OUTER_CACHE_SYNC
void (*sync)(void);
#endif
void (*set_debug)(unsigned long);
};
#ifdef CONFIG_OUTER_CACHE

View File

@ -13,250 +13,86 @@
#ifdef __KERNEL__
/*
* Work out if we need multiple CPU support
*/
#undef MULTI_CPU
#undef CPU_NAME
/*
* CPU_NAME - the prefix for CPU related functions
*/
#ifdef CONFIG_CPU_ARM610
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm6
# endif
#endif
#ifdef CONFIG_CPU_ARM7TDMI
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm7tdmi
# endif
#endif
#ifdef CONFIG_CPU_ARM710
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm7
# endif
#endif
#ifdef CONFIG_CPU_ARM720T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm720
# endif
#endif
#ifdef CONFIG_CPU_ARM740T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm740
# endif
#endif
#ifdef CONFIG_CPU_ARM9TDMI
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm9tdmi
# endif
#endif
#ifdef CONFIG_CPU_ARM920T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm920
# endif
#endif
#ifdef CONFIG_CPU_ARM922T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm922
# endif
#endif
#ifdef CONFIG_CPU_FA526
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_fa526
# endif
#endif
#ifdef CONFIG_CPU_ARM925T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm925
# endif
#endif
#ifdef CONFIG_CPU_ARM926T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm926
# endif
#endif
#ifdef CONFIG_CPU_ARM940T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm940
# endif
#endif
#ifdef CONFIG_CPU_ARM946E
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm946
# endif
#endif
#ifdef CONFIG_CPU_SA110
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_sa110
# endif
#endif
#ifdef CONFIG_CPU_SA1100
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_sa1100
# endif
#endif
#ifdef CONFIG_CPU_ARM1020
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1020
# endif
#endif
#ifdef CONFIG_CPU_ARM1020E
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1020e
# endif
#endif
#ifdef CONFIG_CPU_ARM1022
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1022
# endif
#endif
#ifdef CONFIG_CPU_ARM1026
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1026
# endif
#endif
#ifdef CONFIG_CPU_XSCALE
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_xscale
# endif
#endif
#ifdef CONFIG_CPU_XSC3
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_xsc3
# endif
#endif
#ifdef CONFIG_CPU_MOHAWK
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_mohawk
# endif
#endif
#ifdef CONFIG_CPU_FEROCEON
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_feroceon
# endif
#endif
#ifdef CONFIG_CPU_V6
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v6
# endif
#endif
#ifdef CONFIG_CPU_V7
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v7
# endif
#endif
#include <asm/glue-proc.h>
#include <asm/page.h>
#ifndef __ASSEMBLY__
struct mm_struct;
/*
* Don't change this structure - ASM code relies on it.
*/
extern struct processor {
/* MISC
* get data abort address/flags
*/
void (*_data_abort)(unsigned long pc);
/*
* Retrieve prefetch fault address
*/
unsigned long (*_prefetch_abort)(unsigned long lr);
/*
* Set up any processor specifics
*/
void (*_proc_init)(void);
/*
* Disable any processor specifics
*/
void (*_proc_fin)(void);
/*
* Special stuff for a reset
*/
void (*reset)(unsigned long addr) __attribute__((noreturn));
/*
* Idle the processor
*/
int (*_do_idle)(void);
/*
* Processor architecture specific
*/
/*
* clean a virtual address range from the
* D-cache without flushing the cache.
*/
void (*dcache_clean_area)(void *addr, int size);
/*
* Set the page table
*/
void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
/*
* Set a possibly extended PTE. Non-extended PTEs should
* ignore 'ext'.
*/
void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
/* Suspend/resume */
unsigned int suspend_size;
void (*do_suspend)(void *);
void (*do_resume)(void *);
} processor;
#ifndef MULTI_CPU
#include <asm/cpu-single.h>
extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
extern void cpu_dcache_clean_area(void *, int);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
#else
#include <asm/cpu-multi32.h>
#define cpu_proc_init() processor._proc_init()
#define cpu_proc_fin() processor._proc_fin()
#define cpu_reset(addr) processor.reset(addr)
#define cpu_do_idle() processor._do_idle()
#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz)
#define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext)
#define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm)
#endif
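A dispatch sketch may help here: the same call site compiles to a direct call in a single-CPU build and an indirect one under MULTI_CPU, following the glue-proc.h rules shown earlier (illustrative):

/*
 * Single-CPU build (e.g. only CONFIG_CPU_V7 selected):
 *   cpu_do_idle()  ->  __glue(cpu_v7, _do_idle)  ->  cpu_v7_do_idle()
 * Multi-CPU build (MULTI_CPU defined):
 *   cpu_do_idle()  ->  processor._do_idle()
 */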
extern void cpu_resume(void);
#include <asm/memory.h>
#ifdef CONFIG_MMU

View File

@ -29,19 +29,7 @@
#define STACK_TOP_MAX TASK_SIZE
#endif
union debug_insn {
u32 arm;
u16 thumb;
};
struct debug_entry {
u32 address;
union debug_insn insn;
};
struct debug_info {
int nsaved;
struct debug_entry bp[2];
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct perf_event *hbp[ARM_MAX_HBP_SLOTS];
#endif
@ -95,7 +83,7 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
#if __LINUX_ARM_ARCH__ == 6
#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
#define cpu_relax() smp_mb()
#else
#define cpu_relax() barrier()

View File

@ -130,8 +130,6 @@ struct pt_regs {
#ifdef __KERNEL__
#define arch_has_single_step() (1)
#define user_mode(regs) \
(((regs)->ARM_cpsr & 0xf) == 0)

View File

@ -1,7 +1,14 @@
#ifndef __ASMARM_ARCH_SCU_H
#define __ASMARM_ARCH_SCU_H
#define SCU_PM_NORMAL 0
#define SCU_PM_DORMANT 2
#define SCU_PM_POWEROFF 3
#ifndef __ASSEMBLER__
unsigned int scu_get_core_count(void __iomem *);
void scu_enable(void __iomem *);
int scu_power_mode(void __iomem *, unsigned int);
#endif
#endif
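A hedged usage sketch for scu_power_mode(): a CPU-hotplug "die" path might park its own SCU interface before spinning in WFI. scu_base is assumed to have been ioremap'd during platform init, and the function name is illustrative:

#include <linux/kernel.h>
#include <asm/smp_scu.h>

static void __iomem *scu_base;	/* assumed mapped during platform init */

static void example_cpu_enter_lowpower(void)
{
	/* returns a negative errno if the requested mode is invalid */
	if (scu_power_mode(scu_base, SCU_PM_POWEROFF))
		pr_warning("SCU power-off request failed\n");
}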

View File

@ -5,17 +5,52 @@
#error SMP not supported on pre-ARMv6 CPUs
#endif
/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
* extensions, so when running on UP, we have to patch these instructions away.
*/
#define ALT_SMP(smp, up) \
"9998: " smp "\n" \
" .pushsection \".alt.smp.init\", \"a\"\n" \
" .long 9998b\n" \
" " up "\n" \
" .popsection\n"
#ifdef CONFIG_THUMB2_KERNEL
#define SEV ALT_SMP("sev.w", "nop.w")
/*
* For Thumb-2, special care is needed to ensure that the conditional WFE
* instruction really does assemble to exactly 4 bytes (as required by
* the SMP_ON_UP fixup code). By itself "wfene" might cause the
* assembler to insert an extra (16-bit) IT instruction, depending on the
* presence or absence of neighbouring conditional instructions.
*
* To avoid this unpredictability, an appropriate IT is inserted explicitly:
* the assembler won't change IT instructions which are explicitly present
* in the input.
*/
#define WFE(cond) ALT_SMP( \
"it " cond "\n\t" \
"wfe" cond ".n", \
\
"nop.w" \
)
#else
#define SEV ALT_SMP("sev", "nop")
#define WFE(cond) ALT_SMP("wfe" cond, "nop")
#endif
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
__asm__ __volatile__ (
"dsb\n"
"sev"
SEV
);
#elif defined(CONFIG_CPU_32v6K)
#else
__asm__ __volatile__ (
"mcr p15, 0, %0, c7, c10, 4\n"
"sev"
SEV
: : "r" (0)
);
#endif
@ -46,9 +81,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
#ifdef CONFIG_CPU_32v6K
" wfene\n"
#endif
WFE("ne")
" strexeq %0, %2, [%1]\n"
" teqeq %0, #0\n"
" bne 1b"
@ -107,9 +140,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
#ifdef CONFIG_CPU_32v6K
" wfene\n"
#endif
WFE("ne")
" strexeq %0, %2, [%1]\n"
" teq %0, #0\n"
" bne 1b"
@ -176,9 +207,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
" strexpl %1, %0, [%2]\n"
#ifdef CONFIG_CPU_32v6K
" wfemi\n"
#endif
WFE("mi")
" rsbpls %0, %1, #0\n"
" bmi 1b"
: "=&r" (tmp), "=&r" (tmp2)

View File

@ -347,6 +347,7 @@ void cpu_idle_wait(void);
#include <asm-generic/cmpxchg-local.h>
#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */
#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
@ -365,7 +366,7 @@ void cpu_idle_wait(void);
#include <asm-generic/cmpxchg.h>
#endif
#else /* __LINUX_ARM_ARCH__ >= 6 */
#else /* min ARCH >= ARMv6 */
extern void __bad_cmpxchg(volatile void *ptr, int size);
@ -379,7 +380,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long oldval, res;
switch (size) {
#ifdef CONFIG_CPU_32v6K
#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
case 1:
do {
asm volatile("@ __cmpxchg1\n"
@ -404,7 +405,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
: "memory", "cc");
} while (res);
break;
#endif /* CONFIG_CPU_32v6K */
#endif
case 4:
do {
asm volatile("@ __cmpxchg4\n"
@ -450,12 +451,12 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
unsigned long ret;
switch (size) {
#ifndef CONFIG_CPU_32v6K
#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */
case 1:
case 2:
ret = __cmpxchg_local_generic(ptr, old, new, size);
break;
#endif /* !CONFIG_CPU_32v6K */
#endif
default:
ret = __cmpxchg(ptr, old, new, size);
}
@ -469,7 +470,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
(unsigned long)(n), \
sizeof(*(ptr))))
#ifdef CONFIG_CPU_32v6K
#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
/*
* Note: ARMv7-M (currently unsupported by Linux) does not support
@ -524,11 +525,11 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
(unsigned long long)(o), \
(unsigned long long)(n)))
#else /* !CONFIG_CPU_32v6K */
#else /* min ARCH = ARMv6 */
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif /* CONFIG_CPU_32v6K */
#endif
#endif /* __LINUX_ARM_ARCH__ >= 6 */
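A short usage sketch for the primitives above: claiming a one-shot token with cmpxchg(), which on ARMv6K and later compiles down to the ldrex/strex retry loops shown earlier. The token variable and helper name are illustrative:

static unsigned long example_token;

static int example_claim_token(void)
{
	/* cmpxchg() returns the previous value; 0 means we won the race */
	return cmpxchg(&example_token, 0UL, 1UL) == 0UL;
}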

View File

@ -28,15 +28,14 @@
#define tls_emu 1
#define has_tls_reg 1
#define set_tls set_tls_none
#elif __LINUX_ARM_ARCH__ >= 7 || \
(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
#define tls_emu 0
#define has_tls_reg 1
#define set_tls set_tls_v6k
#elif __LINUX_ARM_ARCH__ == 6
#elif defined(CONFIG_CPU_V6)
#define tls_emu 0
#define has_tls_reg (elf_hwcap & HWCAP_TLS)
#define set_tls set_tls_v6
#elif defined(CONFIG_CPU_32v6K)
#define tls_emu 0
#define has_tls_reg 1
#define set_tls set_tls_v6k
#else
#define tls_emu 0
#define has_tls_reg 0

View File

@ -45,6 +45,7 @@ static inline int in_exception_text(unsigned long ptr)
extern void __init early_trap_init(void);
extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs);
extern void *vectors_page;

View File

@ -71,7 +71,7 @@ struct user{
/* the registers. */
unsigned long magic; /* To uniquely identify a core file */
char u_comm[32]; /* User command that was responsible */
int u_debugreg[8];
int u_debugreg[8]; /* No longer used */
struct user_fp u_fp; /* FP state */
struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */
/* the FP registers. */

View File

@ -29,6 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
obj-$(CONFIG_PM) += sleep.o
obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o

View File

@ -140,24 +140,18 @@ EXPORT_SYMBOL(__aeabi_ulcmp);
#endif
/* bitops */
EXPORT_SYMBOL(_set_bit_le);
EXPORT_SYMBOL(_test_and_set_bit_le);
EXPORT_SYMBOL(_clear_bit_le);
EXPORT_SYMBOL(_test_and_clear_bit_le);
EXPORT_SYMBOL(_change_bit_le);
EXPORT_SYMBOL(_test_and_change_bit_le);
EXPORT_SYMBOL(_set_bit);
EXPORT_SYMBOL(_test_and_set_bit);
EXPORT_SYMBOL(_clear_bit);
EXPORT_SYMBOL(_test_and_clear_bit);
EXPORT_SYMBOL(_change_bit);
EXPORT_SYMBOL(_test_and_change_bit);
EXPORT_SYMBOL(_find_first_zero_bit_le);
EXPORT_SYMBOL(_find_next_zero_bit_le);
EXPORT_SYMBOL(_find_first_bit_le);
EXPORT_SYMBOL(_find_next_bit_le);
#ifdef __ARMEB__
EXPORT_SYMBOL(_set_bit_be);
EXPORT_SYMBOL(_test_and_set_bit_be);
EXPORT_SYMBOL(_clear_bit_be);
EXPORT_SYMBOL(_test_and_clear_bit_be);
EXPORT_SYMBOL(_change_bit_be);
EXPORT_SYMBOL(_test_and_change_bit_be);
EXPORT_SYMBOL(_find_first_zero_bit_be);
EXPORT_SYMBOL(_find_next_zero_bit_be);
EXPORT_SYMBOL(_find_first_bit_be);
@ -170,3 +164,7 @@ EXPORT_SYMBOL(mcount);
#endif
EXPORT_SYMBOL(__gnu_mcount_nc);
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
EXPORT_SYMBOL(__pv_phys_offset);
#endif

View File

@ -13,6 +13,9 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/mach/arch.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
@ -113,6 +116,14 @@ int main(void)
#endif
#ifdef MULTI_PABORT
DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort));
#endif
#ifdef MULTI_CPU
DEFINE(CPU_SLEEP_SIZE, offsetof(struct processor, suspend_size));
DEFINE(CPU_DO_SUSPEND, offsetof(struct processor, do_suspend));
DEFINE(CPU_DO_RESUME, offsetof(struct processor, do_resume));
#endif
#ifdef MULTI_CACHE
DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all));
#endif
BLANK();
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);

View File

@ -583,6 +583,11 @@ void __init pci_common_init(struct hw_pci *hw)
* Assign resources.
*/
pci_bus_assign_resources(bus);
/*
* Enable bridges
*/
pci_enable_bridges(bus);
}
/*

View File

@ -25,7 +25,7 @@
.macro addruart, rp, rv
.endm
#if defined(CONFIG_CPU_V6)
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
.macro senduart, rd, rx
mcr p14, 0, \rd, c0, c5, 0

View File

@ -16,7 +16,8 @@
*/
#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>

View File

@ -76,13 +76,13 @@
#ifndef CONFIG_THUMB2_KERNEL
.macro svc_exit, rpsr
msr spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_32v6K)
clrex @ clear the exclusive monitor
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
#elif defined (CONFIG_CPU_V6)
#if defined(CONFIG_CPU_V6)
ldr r0, [sp]
strex r1, r2, [sp] @ clear the exclusive monitor
ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr
#elif defined(CONFIG_CPU_32v6K)
clrex @ clear the exclusive monitor
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
#else
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
#endif
@ -92,10 +92,10 @@
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
ldr lr, [sp, #\offset + S_PC]! @ get pc
msr spsr_cxsf, r1 @ save in spsr_svc
#if defined(CONFIG_CPU_32v6K)
clrex @ clear the exclusive monitor
#elif defined (CONFIG_CPU_V6)
#if defined(CONFIG_CPU_V6)
strex r1, r2, [sp] @ clear the exclusive monitor
#elif defined(CONFIG_CPU_32v6K)
clrex @ clear the exclusive monitor
#endif
.if \fast
ldmdb sp, {r1 - lr}^ @ get calling r1 - lr

View File

@ -25,83 +25,6 @@
* machine ID for example).
*/
__HEAD
__error_a:
#ifdef CONFIG_DEBUG_LL
mov r4, r1 @ preserve machine ID
adr r0, str_a1
bl printascii
mov r0, r4
bl printhex8
adr r0, str_a2
bl printascii
adr r3, __lookup_machine_type_data
ldmia r3, {r4, r5, r6} @ get machine desc list
sub r4, r3, r4 @ get offset between virt&phys
add r5, r5, r4 @ convert virt addresses to
add r6, r6, r4 @ physical address space
1: ldr r0, [r5, #MACHINFO_TYPE] @ get machine type
bl printhex8
mov r0, #'\t'
bl printch
ldr r0, [r5, #MACHINFO_NAME] @ get machine name
add r0, r0, r4
bl printascii
mov r0, #'\n'
bl printch
add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
cmp r5, r6
blo 1b
adr r0, str_a3
bl printascii
b __error
ENDPROC(__error_a)
str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
.align
#else
b __error
#endif
/*
* Lookup machine architecture in the linker-build list of architectures.
* Note that we can't use the absolute addresses for the __arch_info
* lists since we aren't running with the MMU on (and therefore, we are
* not in the correct address space). We have to calculate the offset.
*
* r1 = machine architecture number
* Returns:
* r3, r4, r6 corrupted
* r5 = mach_info pointer in physical address space
*/
__lookup_machine_type:
adr r3, __lookup_machine_type_data
ldmia r3, {r4, r5, r6}
sub r3, r3, r4 @ get offset between virt&phys
add r5, r5, r3 @ convert virt addresses to
add r6, r6, r3 @ physical address space
1: ldr r3, [r5, #MACHINFO_TYPE] @ get machine type
teq r3, r1 @ matches loader number?
beq 2f @ found
add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
cmp r5, r6
blo 1b
mov r5, #0 @ unknown machine
2: mov pc, lr
ENDPROC(__lookup_machine_type)
/*
* Look in arch/arm/kernel/arch.[ch] for information about the
* __arch_info structures.
*/
.align 2
.type __lookup_machine_type_data, %object
__lookup_machine_type_data:
.long .
.long __arch_info_begin
.long __arch_info_end
.size __lookup_machine_type_data, . - __lookup_machine_type_data
/* Determine validity of the r2 atags pointer. The heuristic requires
* that the pointer be aligned, in the first 16k of physical RAM and
@ -109,8 +32,6 @@ __lookup_machine_type_data:
* of this function may be more lenient with the physical address and
* may also be able to move the ATAGS block if necessary.
*
* r8 = machinfo
*
* Returns:
* r2 either valid atags pointer, or zero
* r5, r6 corrupted
@ -184,17 +105,6 @@ __mmap_switched_data:
.long init_thread_union + THREAD_START_SP @ sp
.size __mmap_switched_data, . - __mmap_switched_data
/*
* This provides a C-API version of __lookup_machine_type
*/
ENTRY(lookup_machine_type)
stmfd sp!, {r4 - r6, lr}
mov r1, r0
bl __lookup_machine_type
mov r0, r5
ldmfd sp!, {r4 - r6, pc}
ENDPROC(lookup_machine_type)
/*
* This provides a C-API version of __lookup_processor_type
*/

View File

@ -44,9 +44,6 @@ ENTRY(stext)
bl __lookup_processor_type @ r5=procinfo r9=cpuid
movs r10, r5 @ invalid processor (r5=0)?
beq __error_p @ yes, error 'p'
bl __lookup_machine_type @ r5=machinfo
movs r8, r5 @ invalid machine (r5=0)?
beq __error_a @ yes, error 'a'
adr lr, BSYM(__after_proc_init) @ return (PIC) address
ARM( add pc, r10, #PROCINFO_INITFUNC )

View File

@ -26,14 +26,6 @@
#include <mach/debug-macro.S>
#endif
#if (PHYS_OFFSET & 0x001fffff)
#error "PHYS_OFFSET must be at an even 2MiB boundary!"
#endif
#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
#define KERNEL_RAM_PADDR (PHYS_OFFSET + TEXT_OFFSET)
/*
* swapper_pg_dir is the virtual address of the initial page table.
* We place the page tables 16K below KERNEL_RAM_VADDR. Therefore, we must
@ -41,6 +33,7 @@
* the least significant 16 bits to be 0x8000, but we could probably
* relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
*/
#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif
@ -48,8 +41,8 @@
.globl swapper_pg_dir
.equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000
.macro pgtbl, rd
ldr \rd, =(KERNEL_RAM_PADDR - 0x4000)
.macro pgtbl, rd, phys
add \rd, \phys, #TEXT_OFFSET - 0x4000
.endm
#ifdef CONFIG_XIP_KERNEL
@ -87,25 +80,33 @@ ENTRY(stext)
movs r10, r5 @ invalid processor (r5=0)?
THUMB( it eq ) @ force fixup-able long branch encoding
beq __error_p @ yes, error 'p'
bl __lookup_machine_type @ r5=machinfo
movs r8, r5 @ invalid machine (r5=0)?
THUMB( it eq ) @ force fixup-able long branch encoding
beq __error_a @ yes, error 'a'
#ifndef CONFIG_XIP_KERNEL
adr r3, 2f
ldmia r3, {r4, r8}
sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
add r8, r8, r4 @ PHYS_OFFSET
#else
ldr r8, =PLAT_PHYS_OFFSET
#endif
/*
* r1 = machine no, r2 = atags,
* r8 = machinfo, r9 = cpuid, r10 = procinfo
* r8 = phys_offset, r9 = cpuid, r10 = procinfo
*/
bl __vet_atags
#ifdef CONFIG_SMP_ON_UP
bl __fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
bl __fixup_pv_table
#endif
bl __create_page_tables
/*
* The following calls CPU specific code in a position independent
* manner. See arch/arm/mm/proc-*.S for details. r10 = base of
* xxx_proc_info structure selected by __lookup_machine_type
* xxx_proc_info structure selected by __lookup_processor_type
* above. On return, the CPU will be ready for the MMU to be
* turned on, and r0 will hold the CPU control register value.
*/
@ -118,22 +119,24 @@ ENTRY(stext)
1: b __enable_mmu
ENDPROC(stext)
.ltorg
#ifndef CONFIG_XIP_KERNEL
2: .long .
.long PAGE_OFFSET
#endif
/*
* Setup the initial page tables. We only setup the barest
* amount which are required to get the kernel running, which
* generally means mapping in the kernel code.
*
* r8 = machinfo
* r9 = cpuid
* r10 = procinfo
* r8 = phys_offset, r9 = cpuid, r10 = procinfo
*
* Returns:
* r0, r3, r5-r7 corrupted
* r4 = physical page table address
*/
__create_page_tables:
pgtbl r4 @ page table address
pgtbl r4, r8 @ page table address
/*
* Clear the 16K level 1 swapper page table
@ -189,10 +192,8 @@ __create_page_tables:
/*
* Map some ram to cover our .data and .bss areas.
*/
orr r3, r7, #(KERNEL_RAM_PADDR & 0xff000000)
.if (KERNEL_RAM_PADDR & 0x00f00000)
orr r3, r3, #(KERNEL_RAM_PADDR & 0x00f00000)
.endif
add r3, r8, #TEXT_OFFSET
orr r3, r3, r7
add r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> 18
str r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> 18]!
ldr r6, =(_end - 1)
@ -205,14 +206,17 @@ __create_page_tables:
#endif
/*
* Then map first 1MB of ram in case it contains our boot params.
* Then map boot params address in r2 or
* the first 1MB of ram if boot params address is not specified.
*/
add r0, r4, #PAGE_OFFSET >> 18
orr r6, r7, #(PHYS_OFFSET & 0xff000000)
.if (PHYS_OFFSET & 0x00f00000)
orr r6, r6, #(PHYS_OFFSET & 0x00f00000)
.endif
str r6, [r0]
mov r0, r2, lsr #20
movs r0, r0, lsl #20
moveq r0, r8
sub r3, r0, r8
add r3, r3, #PAGE_OFFSET
add r3, r4, r3, lsr #18
orr r6, r7, r0
str r6, [r3]
#ifdef CONFIG_DEBUG_LL
#ifndef CONFIG_DEBUG_ICEDCC
@ -457,4 +461,129 @@ ENTRY(fixup_smp)
ldmfd sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
/* __fixup_pv_table - patch the stub instructions with the delta between
* PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and
* can be expressed by an immediate shifter operand. The stub instruction
* has a form of '(add|sub) rd, rn, #imm'.
*/
__HEAD
__fixup_pv_table:
adr r0, 1f
ldmia r0, {r3-r5, r7}
sub r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
add r4, r4, r3 @ adjust table start address
add r5, r5, r3 @ adjust table end address
add r7, r7, r3 @ adjust __pv_phys_offset address
str r8, [r7] @ save computed PHYS_OFFSET to __pv_phys_offset
#ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
mov r6, r3, lsr #24 @ constant for add/sub instructions
teq r3, r6, lsl #24 @ must be 16MiB aligned
#else
mov r6, r3, lsr #16 @ constant for add/sub instructions
teq r3, r6, lsl #16 @ must be 64kiB aligned
#endif
THUMB( it ne @ cross section branch )
bne __error
str r6, [r7, #4] @ save to __pv_offset
b __fixup_a_pv_table
ENDPROC(__fixup_pv_table)
.align
1: .long .
.long __pv_table_begin
.long __pv_table_end
2: .long __pv_phys_offset
.text
__fixup_a_pv_table:
#ifdef CONFIG_THUMB2_KERNEL
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
lsls r0, r6, #24
lsr r6, #8
beq 1f
clz r7, r0
lsr r0, #24
lsl r0, r7
bic r0, 0x0080
lsrs r7, #1
orrcs r0, #0x0080
orr r0, r0, r7, lsl #12
#endif
1: lsls r6, #24
beq 4f
clz r7, r6
lsr r6, #24
lsl r6, r7
bic r6, #0x0080
lsrs r7, #1
orrcs r6, #0x0080
orr r6, r6, r7, lsl #12
orr r6, #0x4000
b 4f
2: @ at this point the C flag is always clear
add r7, r3
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
ldrh ip, [r7]
tst ip, 0x0400 @ the i bit tells us LS or MS byte
beq 3f
cmp r0, #0 @ set C flag, and ...
biceq ip, 0x0400 @ immediate zero value has a special encoding
streqh ip, [r7] @ that requires the i bit cleared
#endif
3: ldrh ip, [r7, #2]
and ip, 0x8f00
orrcc ip, r6 @ mask in offset bits 31-24
orrcs ip, r0 @ mask in offset bits 23-16
strh ip, [r7, #2]
4: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
bcc 2b
bx lr
#else
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
and r0, r6, #255 @ offset bits 23-16
mov r6, r6, lsr #8 @ offset bits 31-24
#else
mov r0, #0 @ just in case...
#endif
b 3f
2: ldr ip, [r7, r3]
bic ip, ip, #0x000000ff
tst ip, #0x400 @ rotate shift tells us LS or MS byte
orrne ip, ip, r6 @ mask in offset bits 31-24
orreq ip, ip, r0 @ mask in offset bits 23-16
str ip, [r7, r3]
3: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
bcc 2b
mov pc, lr
#endif
ENDPROC(__fixup_a_pv_table)
ENTRY(fixup_pv_table)
stmfd sp!, {r4 - r7, lr}
ldr r2, 2f @ get address of __pv_phys_offset
mov r3, #0 @ no offset
mov r4, r0 @ r0 = table start
add r5, r0, r1 @ r1 = table size
ldr r6, [r2, #4] @ get __pv_offset
bl __fixup_a_pv_table
ldmfd sp!, {r4 - r7, pc}
ENDPROC(fixup_pv_table)
.align
2: .long __pv_phys_offset
.data
.globl __pv_phys_offset
.type __pv_phys_offset, %object
__pv_phys_offset:
.long 0
.size __pv_phys_offset, . - __pv_phys_offset
__pv_offset:
.long 0
#endif
#include "head-common.S"

View File

@ -179,14 +179,21 @@ int __init arch_probe_nr_irqs(void)
#ifdef CONFIG_HOTPLUG_CPU
static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
static bool migrate_one_irq(struct irq_data *d)
{
pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->irq_data.node, cpu);
unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
bool ret = false;
raw_spin_lock_irq(&desc->lock);
desc->irq_data.chip->irq_set_affinity(&desc->irq_data,
cpumask_of(cpu), false);
raw_spin_unlock_irq(&desc->lock);
if (cpu >= nr_cpu_ids) {
cpu = cpumask_any(cpu_online_mask);
ret = true;
}
pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);
d->chip->irq_set_affinity(d, cpumask_of(cpu), true);
return ret;
}
/*
@ -198,25 +205,30 @@ void migrate_irqs(void)
{
unsigned int i, cpu = smp_processor_id();
struct irq_desc *desc;
unsigned long flags;
local_irq_save(flags);
for_each_irq_desc(i, desc) {
struct irq_data *d = &desc->irq_data;
bool affinity_broken = false;
if (d->node == cpu) {
unsigned int newcpu = cpumask_any_and(d->affinity,
cpu_online_mask);
if (newcpu >= nr_cpu_ids) {
if (printk_ratelimit())
printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
i, cpu);
raw_spin_lock(&desc->lock);
do {
if (desc->action == NULL)
break;
cpumask_setall(d->affinity);
newcpu = cpumask_any_and(d->affinity,
cpu_online_mask);
}
if (d->node != cpu)
break;
route_irq(desc, i, newcpu);
}
affinity_broken = migrate_one_irq(d);
} while (0);
raw_spin_unlock(&desc->lock);
if (affinity_broken && printk_ratelimit())
pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu);
}
local_irq_restore(flags);
}
#endif /* CONFIG_HOTPLUG_CPU */

View File

@ -76,6 +76,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
unsigned long loc;
Elf32_Sym *sym;
const char *symname;
s32 offset;
#ifdef CONFIG_THUMB2_KERNEL
u32 upper, lower, sign, j1, j2;
@ -83,18 +84,18 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
offset = ELF32_R_SYM(rel->r_info);
if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
printk(KERN_ERR "%s: bad relocation, section %d reloc %d\n",
pr_err("%s: section %u reloc %u: bad relocation sym offset\n",
module->name, relindex, i);
return -ENOEXEC;
}
sym = ((Elf32_Sym *)symsec->sh_addr) + offset;
symname = strtab + sym->st_name;
if (rel->r_offset < 0 || rel->r_offset > dstsec->sh_size - sizeof(u32)) {
printk(KERN_ERR "%s: out of bounds relocation, "
"section %d reloc %d offset %d size %d\n",
module->name, relindex, i, rel->r_offset,
dstsec->sh_size);
pr_err("%s: section %u reloc %u sym '%s': out of bounds relocation, offset %d size %u\n",
module->name, relindex, i, symname,
rel->r_offset, dstsec->sh_size);
return -ENOEXEC;
}
@ -120,10 +121,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (offset & 3 ||
offset <= (s32)0xfe000000 ||
offset >= (s32)0x02000000) {
printk(KERN_ERR
"%s: relocation out of range, section "
"%d reloc %d sym '%s'\n", module->name,
relindex, i, strtab + sym->st_name);
pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
module->name, relindex, i, symname,
ELF32_R_TYPE(rel->r_info), loc,
sym->st_value);
return -ENOEXEC;
}
@ -196,10 +197,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (!(offset & 1) ||
offset <= (s32)0xff000000 ||
offset >= (s32)0x01000000) {
printk(KERN_ERR
"%s: relocation out of range, section "
"%d reloc %d sym '%s'\n", module->name,
relindex, i, strtab + sym->st_name);
pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
module->name, relindex, i, symname,
ELF32_R_TYPE(rel->r_info), loc,
sym->st_value);
return -ENOEXEC;
}
@ -282,12 +283,13 @@ static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
return NULL;
}
extern void fixup_pv_table(const void *, unsigned long);
extern void fixup_smp(const void *, unsigned long);
int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *mod)
{
const Elf_Shdr * __maybe_unused s = NULL;
const Elf_Shdr *s = NULL;
#ifdef CONFIG_ARM_UNWIND
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
@ -331,6 +333,11 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
maps[i].unw_sec->sh_size,
maps[i].txt_sec->sh_addr,
maps[i].txt_sec->sh_size);
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
s = find_mod_section(hdr, sechdrs, ".pv_table");
if (s)
fixup_pv_table((void *)s->sh_addr, s->sh_size);
#endif
s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
if (s && !is_smp())

View File

@ -30,7 +30,7 @@
* enable the interrupt.
*/
#ifdef CONFIG_CPU_V6
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
enum armv6_perf_types {
ARMV6_PERFCTR_ICACHE_MISS = 0x0,
ARMV6_PERFCTR_IBUF_STALL = 0x1,
@ -669,4 +669,4 @@ static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
return NULL;
}
#endif /* CONFIG_CPU_V6 */
#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */

View File

@ -26,8 +26,6 @@
#include <asm/system.h>
#include <asm/traps.h>
#include "ptrace.h"
#define REG_PC 15
#define REG_PSR 16
/*
@ -184,389 +182,12 @@ put_user_reg(struct task_struct *task, int offset, long data)
return ret;
}
static inline int
read_u32(struct task_struct *task, unsigned long addr, u32 *res)
{
int ret;
ret = access_process_vm(task, addr, res, sizeof(*res), 0);
return ret == sizeof(*res) ? 0 : -EIO;
}
static inline int
read_instr(struct task_struct *task, unsigned long addr, u32 *res)
{
int ret;
if (addr & 1) {
u16 val;
ret = access_process_vm(task, addr & ~1, &val, sizeof(val), 0);
ret = ret == sizeof(val) ? 0 : -EIO;
*res = val;
} else {
u32 val;
ret = access_process_vm(task, addr & ~3, &val, sizeof(val), 0);
ret = ret == sizeof(val) ? 0 : -EIO;
*res = val;
}
return ret;
}
/*
* Get value of register `rn' (in the instruction)
*/
static unsigned long
ptrace_getrn(struct task_struct *child, unsigned long insn)
{
unsigned int reg = (insn >> 16) & 15;
unsigned long val;
val = get_user_reg(child, reg);
if (reg == 15)
val += 8;
return val;
}
/*
* Get value of operand 2 (in an ALU instruction)
*/
static unsigned long
ptrace_getaluop2(struct task_struct *child, unsigned long insn)
{
unsigned long val;
int shift;
int type;
if (insn & 1 << 25) {
val = insn & 255;
shift = (insn >> 8) & 15;
type = 3;
} else {
val = get_user_reg (child, insn & 15);
if (insn & (1 << 4))
shift = (int)get_user_reg (child, (insn >> 8) & 15);
else
shift = (insn >> 7) & 31;
type = (insn >> 5) & 3;
}
switch (type) {
case 0: val <<= shift; break;
case 1: val >>= shift; break;
case 2:
val = (((signed long)val) >> shift);
break;
case 3:
val = (val >> shift) | (val << (32 - shift));
break;
}
return val;
}
/*
* Get value of operand 2 (in a LDR instruction)
*/
static unsigned long
ptrace_getldrop2(struct task_struct *child, unsigned long insn)
{
unsigned long val;
int shift;
int type;
val = get_user_reg(child, insn & 15);
shift = (insn >> 7) & 31;
type = (insn >> 5) & 3;
switch (type) {
case 0: val <<= shift; break;
case 1: val >>= shift; break;
case 2:
val = (((signed long)val) >> shift);
break;
case 3:
val = (val >> shift) | (val << (32 - shift));
break;
}
return val;
}
#define OP_MASK 0x01e00000
#define OP_AND 0x00000000
#define OP_EOR 0x00200000
#define OP_SUB 0x00400000
#define OP_RSB 0x00600000
#define OP_ADD 0x00800000
#define OP_ADC 0x00a00000
#define OP_SBC 0x00c00000
#define OP_RSC 0x00e00000
#define OP_ORR 0x01800000
#define OP_MOV 0x01a00000
#define OP_BIC 0x01c00000
#define OP_MVN 0x01e00000
static unsigned long
get_branch_address(struct task_struct *child, unsigned long pc, unsigned long insn)
{
u32 alt = 0;
switch (insn & 0x0e000000) {
case 0x00000000:
case 0x02000000: {
/*
* data processing
*/
long aluop1, aluop2, ccbit;
if ((insn & 0x0fffffd0) == 0x012fff10) {
/*
* bx or blx
*/
alt = get_user_reg(child, insn & 15);
break;
}
if ((insn & 0xf000) != 0xf000)
break;
aluop1 = ptrace_getrn(child, insn);
aluop2 = ptrace_getaluop2(child, insn);
ccbit = get_user_reg(child, REG_PSR) & PSR_C_BIT ? 1 : 0;
switch (insn & OP_MASK) {
case OP_AND: alt = aluop1 & aluop2; break;
case OP_EOR: alt = aluop1 ^ aluop2; break;
case OP_SUB: alt = aluop1 - aluop2; break;
case OP_RSB: alt = aluop2 - aluop1; break;
case OP_ADD: alt = aluop1 + aluop2; break;
case OP_ADC: alt = aluop1 + aluop2 + ccbit; break;
case OP_SBC: alt = aluop1 - aluop2 + ccbit; break;
case OP_RSC: alt = aluop2 - aluop1 + ccbit; break;
case OP_ORR: alt = aluop1 | aluop2; break;
case OP_MOV: alt = aluop2; break;
case OP_BIC: alt = aluop1 & ~aluop2; break;
case OP_MVN: alt = ~aluop2; break;
}
break;
}
case 0x04000000:
case 0x06000000:
/*
* ldr
*/
if ((insn & 0x0010f000) == 0x0010f000) {
unsigned long base;
base = ptrace_getrn(child, insn);
if (insn & 1 << 24) {
long aluop2;
if (insn & 0x02000000)
aluop2 = ptrace_getldrop2(child, insn);
else
aluop2 = insn & 0xfff;
if (insn & 1 << 23)
base += aluop2;
else
base -= aluop2;
}
read_u32(child, base, &alt);
}
break;
case 0x08000000:
/*
* ldm
*/
if ((insn & 0x00108000) == 0x00108000) {
unsigned long base;
unsigned int nr_regs;
if (insn & (1 << 23)) {
nr_regs = hweight16(insn & 65535) << 2;
if (!(insn & (1 << 24)))
nr_regs -= 4;
} else {
if (insn & (1 << 24))
nr_regs = -4;
else
nr_regs = 0;
}
base = ptrace_getrn(child, insn);
read_u32(child, base + nr_regs, &alt);
break;
}
break;
case 0x0a000000: {
/*
* bl or b
*/
signed long displ;
/* It's a branch/branch link: instead of trying to
* figure out whether the branch will be taken or not,
* we'll put a breakpoint at both locations. This is
* simpler, more reliable, and probably not a whole lot
* slower than the alternative approach of emulating the
* branch.
*/
displ = (insn & 0x00ffffff) << 8;
displ = (displ >> 6) + 8;
if (displ != 0 && displ != 4)
alt = pc + displ;
}
break;
}
return alt;
}
static int
swap_insn(struct task_struct *task, unsigned long addr,
void *old_insn, void *new_insn, int size)
{
int ret;
ret = access_process_vm(task, addr, old_insn, size, 0);
if (ret == size)
ret = access_process_vm(task, addr, new_insn, size, 1);
return ret;
}
static void
add_breakpoint(struct task_struct *task, struct debug_info *dbg, unsigned long addr)
{
int nr = dbg->nsaved;
if (nr < 2) {
u32 new_insn = BREAKINST_ARM;
int res;
res = swap_insn(task, addr, &dbg->bp[nr].insn, &new_insn, 4);
if (res == 4) {
dbg->bp[nr].address = addr;
dbg->nsaved += 1;
}
} else
printk(KERN_ERR "ptrace: too many breakpoints\n");
}
/*
* Clear one breakpoint in the user program. We copy what the hardware
* does and use bit 0 of the address to indicate whether this is a Thumb
* breakpoint or an ARM breakpoint.
*/
static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp)
{
unsigned long addr = bp->address;
union debug_insn old_insn;
int ret;
if (addr & 1) {
ret = swap_insn(task, addr & ~1, &old_insn.thumb,
&bp->insn.thumb, 2);
if (ret != 2 || old_insn.thumb != BREAKINST_THUMB)
printk(KERN_ERR "%s:%d: corrupted Thumb breakpoint at "
"0x%08lx (0x%04x)\n", task->comm,
task_pid_nr(task), addr, old_insn.thumb);
} else {
ret = swap_insn(task, addr & ~3, &old_insn.arm,
&bp->insn.arm, 4);
if (ret != 4 || old_insn.arm != BREAKINST_ARM)
printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at "
"0x%08lx (0x%08x)\n", task->comm,
task_pid_nr(task), addr, old_insn.arm);
}
}
void ptrace_set_bpt(struct task_struct *child)
{
struct pt_regs *regs;
unsigned long pc;
u32 insn;
int res;
regs = task_pt_regs(child);
pc = instruction_pointer(regs);
if (thumb_mode(regs)) {
printk(KERN_WARNING "ptrace: can't handle thumb mode\n");
return;
}
res = read_instr(child, pc, &insn);
if (!res) {
struct debug_info *dbg = &child->thread.debug;
unsigned long alt;
dbg->nsaved = 0;
alt = get_branch_address(child, pc, insn);
if (alt)
add_breakpoint(child, dbg, alt);
/*
* Note that we ignore the result of setting the above
* breakpoint since it may fail. When it does, this is
* not so much an error, but a forewarning that we may
* be receiving a prefetch abort shortly.
*
* If we don't set this breakpoint here, then we can
* lose control of the thread during single stepping.
*/
if (!alt || predicate(insn) != PREDICATE_ALWAYS)
add_breakpoint(child, dbg, pc + 4);
}
}
/*
* Ensure no single-step breakpoint is pending. Returns non-zero
* value if child was being single-stepped.
*/
void ptrace_cancel_bpt(struct task_struct *child)
{
int i, nsaved = child->thread.debug.nsaved;
child->thread.debug.nsaved = 0;
if (nsaved > 2) {
printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
nsaved = 2;
}
for (i = 0; i < nsaved; i++)
clear_breakpoint(child, &child->thread.debug.bp[i]);
}
void user_disable_single_step(struct task_struct *task)
{
task->ptrace &= ~PT_SINGLESTEP;
ptrace_cancel_bpt(task);
}
void user_enable_single_step(struct task_struct *task)
{
task->ptrace |= PT_SINGLESTEP;
}
/*
* Called by kernel/ptrace.c when detaching..
*/
void ptrace_disable(struct task_struct *child)
{
user_disable_single_step(child);
/* Nothing to do. */
}
/*
@ -576,8 +197,6 @@ void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
siginfo_t info;
ptrace_cancel_bpt(tsk);
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;

View File

@ -1,37 +0,0 @@
/*
* linux/arch/arm/kernel/ptrace.h
*
* Copyright (C) 2000-2003 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/ptrace.h>
extern void ptrace_cancel_bpt(struct task_struct *);
extern void ptrace_set_bpt(struct task_struct *);
extern void ptrace_break(struct task_struct *, struct pt_regs *);
/*
* Send SIGTRAP if we're single-stepping
*/
static inline void single_step_trap(struct task_struct *task)
{
if (task->ptrace & PT_SINGLESTEP) {
ptrace_cancel_bpt(task);
send_sig(SIGTRAP, task, 1);
}
}
static inline void single_step_clear(struct task_struct *task)
{
if (task->ptrace & PT_SINGLESTEP)
ptrace_cancel_bpt(task);
}
static inline void single_step_set(struct task_struct *task)
{
if (task->ptrace & PT_SINGLESTEP)
ptrace_set_bpt(task);
}

View File

@ -9,6 +9,7 @@
* the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/ftrace.h>
#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
#include <linux/sched.h>

View File

@ -308,7 +308,22 @@ static void __init cacheid_init(void)
* already provide the required functionality.
*/
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);
static void __init early_print(const char *str, ...)
{
extern void printascii(const char *);
char buf[256];
va_list ap;
va_start(ap, str);
vsnprintf(buf, sizeof(buf), str, ap);
va_end(ap);
#ifdef CONFIG_DEBUG_LL
printascii(buf);
#endif
printk("%s", buf);
}
static void __init feat_v6_fixup(void)
{
@ -426,21 +441,29 @@ void cpu_init(void)
static struct machine_desc * __init setup_machine(unsigned int nr)
{
struct machine_desc *list;
extern struct machine_desc __arch_info_begin[], __arch_info_end[];
struct machine_desc *p;
/*
* locate machine in the list of supported machines.
*/
list = lookup_machine_type(nr);
if (!list) {
printk("Machine configuration botched (nr %d), unable "
"to continue.\n", nr);
while (1);
}
for (p = __arch_info_begin; p < __arch_info_end; p++)
if (nr == p->nr) {
printk("Machine: %s\n", p->name);
return p;
}
printk("Machine: %s\n", list->name);
early_print("\n"
"Error: unrecognized/unsupported machine ID (r1 = 0x%08x).\n\n"
"Available machine support:\n\nID (hex)\tNAME\n", nr);
return list;
for (p = __arch_info_begin; p < __arch_info_end; p++)
early_print("%08x\t%s\n", p->nr, p->name);
early_print("\nPlease check your kernel config and/or bootloader.\n");
while (true)
/* can't use cpu_relax() here as it may require MMU setup */;
}
static int __init arm_add_memory(unsigned long start, unsigned long size)
@ -703,7 +726,7 @@ static struct init_tags {
{ tag_size(tag_core), ATAG_CORE },
{ 1, PAGE_SIZE, 0xff },
{ tag_size(tag_mem32), ATAG_MEM },
{ MEM_SIZE, PHYS_OFFSET },
{ MEM_SIZE },
{ 0, ATAG_NONE }
};
@ -802,6 +825,8 @@ void __init setup_arch(char **cmdline_p)
struct machine_desc *mdesc;
char *from = default_command_line;
init_tags.mem.start = PHYS_OFFSET;
unwind_init();
setup_processor();
@ -814,8 +839,25 @@ void __init setup_arch(char **cmdline_p)
if (__atags_pointer)
tags = phys_to_virt(__atags_pointer);
else if (mdesc->boot_params)
tags = phys_to_virt(mdesc->boot_params);
else if (mdesc->boot_params) {
#ifdef CONFIG_MMU
/*
* We still are executing with a minimal MMU mapping created
* with the presumption that the machine default for this
* is located in the first MB of RAM. Anything else will
* fault and silently hang the kernel at this point.
*/
if (mdesc->boot_params < PHYS_OFFSET ||
mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
printk(KERN_WARNING
"Default boot params at physical 0x%08lx out of reach\n",
mdesc->boot_params);
} else
#endif
{
tags = phys_to_virt(mdesc->boot_params);
}
}
#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
/*

View File

@ -20,7 +20,6 @@
#include <asm/unistd.h>
#include <asm/vfp.h>
#include "ptrace.h"
#include "signal.h"
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@ -348,8 +347,6 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
if (restore_sigframe(regs, frame))
goto badframe;
single_step_trap(current);
return regs->ARM_r0;
badframe:
@ -383,8 +380,6 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
goto badframe;
single_step_trap(current);
return regs->ARM_r0;
badframe:
@ -706,8 +701,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
if (try_to_freeze())
goto no_signal;
single_step_clear(current);
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
sigset_t *oldset;
@ -726,7 +719,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
}
single_step_set(current);
return;
}
@ -772,7 +764,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
}
single_step_set(current);
}
asmlinkage void

arch/arm/kernel/sleep.S (new file, 134 lines)

@ -0,0 +1,134 @@
#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
#include <asm/system.h>
.text
/*
* Save CPU state for a suspend
* r1 = v:p offset
* r3 = virtual return function
* Note: sp is decremented to allocate space for CPU state on stack
* r0-r3,r9,r10,lr corrupted
*/
ENTRY(cpu_suspend)
mov r9, lr
#ifdef MULTI_CPU
ldr r10, =processor
mov r2, sp @ current virtual SP
ldr r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function
sub sp, sp, r0 @ allocate CPU state on stack
mov r0, sp @ save pointer
add ip, ip, r1 @ convert resume fn to phys
stmfd sp!, {r1, r2, r3, ip} @ save v:p, virt SP, retfn, phys resume fn
ldr r3, =sleep_save_sp
add r2, sp, r1 @ convert SP to phys
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
ALT_UP(mov lr, #0)
and lr, lr, #15
str r2, [r3, lr, lsl #2] @ save phys SP
#else
str r2, [r3] @ save phys SP
#endif
mov lr, pc
ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
#else
mov r2, sp @ current virtual SP
ldr r0, =cpu_suspend_size
sub sp, sp, r0 @ allocate CPU state on stack
mov r0, sp @ save pointer
stmfd sp!, {r1, r2, r3} @ save v:p, virt SP, return fn
ldr r3, =sleep_save_sp
add r2, sp, r1 @ convert SP to phys
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
ALT_UP(mov lr, #0)
and lr, lr, #15
str r2, [r3, lr, lsl #2] @ save phys SP
#else
str r2, [r3] @ save phys SP
#endif
bl cpu_do_suspend
#endif
@ flush data cache
#ifdef MULTI_CACHE
ldr r10, =cpu_cache
mov lr, r9
ldr pc, [r10, #CACHE_FLUSH_KERN_ALL]
#else
mov lr, r9
b __cpuc_flush_kern_all
#endif
ENDPROC(cpu_suspend)
.ltorg
/*
* r0 = control register value
* r1 = v:p offset (preserved by cpu_do_resume)
* r2 = phys page table base
* r3 = L1 section flags
*/
ENTRY(cpu_resume_mmu)
adr r4, cpu_resume_turn_mmu_on
mov r4, r4, lsr #20
orr r3, r3, r4, lsl #20
ldr r5, [r2, r4, lsl #2] @ save old mapping
str r3, [r2, r4, lsl #2] @ setup 1:1 mapping for mmu code
sub r2, r2, r1
ldr r3, =cpu_resume_after_mmu
bic r1, r0, #CR_C @ ensure D-cache is disabled
b cpu_resume_turn_mmu_on
ENDPROC(cpu_resume_mmu)
.ltorg
.align 5
cpu_resume_turn_mmu_on:
mcr p15, 0, r1, c1, c0, 0 @ turn on MMU, I-cache, etc
mrc p15, 0, r1, c0, c0, 0 @ read id reg
mov r1, r1 @ two no-ops: give the pipeline time to
mov r1, r1 @ settle after the MMU is switched on
mov pc, r3 @ jump to virtual address
ENDPROC(cpu_resume_turn_mmu_on)
cpu_resume_after_mmu:
str r5, [r2, r4, lsl #2] @ restore old mapping
mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache
mov pc, lr
ENDPROC(cpu_resume_after_mmu)
/*
* Note: Yes, part of the following code is located in the .data section.
* This is to allow sleep_save_sp to be accessed with a relative load
* while we can't rely on any MMU translation. We could have put
* sleep_save_sp in the .text section as well, but some setups might
* insist on it being truly read-only.
*/
.data
.align
ENTRY(cpu_resume)
#ifdef CONFIG_SMP
adr r0, sleep_save_sp
ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
ALT_UP(mov r1, #0)
and r1, r1, #15
ldr r0, [r0, r1, lsl #2] @ stack phys addr
#else
ldr r0, sleep_save_sp @ stack phys addr
#endif
msr cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
#ifdef MULTI_CPU
ldmia r0!, {r1, sp, lr, pc} @ load v:p, stack, return fn, resume fn
#else
ldmia r0!, {r1, sp, lr} @ load v:p, stack, return fn
b cpu_do_resume
#endif
ENDPROC(cpu_resume)
sleep_save_sp:
.rept CONFIG_NR_CPUS
.long 0 @ preserve stack phys ptr here
.endr
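For orientation, the save area that cpu_suspend leaves on the stack, and whose physical address it records in sleep_save_sp, can be pictured as the struct below. This is an illustrative C view only, not a kernel type; the last field exists only in MULTI_CPU builds:

    struct suspend_frame {            /* ascending memory order          */
        unsigned long v2p_offset;     /* r1: virt-to-phys offset         */
        unsigned long virt_sp;        /* sp: virtual stack pointer       */
        unsigned long ret_fn;         /* lr: virtual return function     */
        unsigned long phys_resume;    /* pc: physical cpu_do_resume      */
                                      /*     (MULTI_CPU builds only)     */
        /* followed by the CPU-specific state saved by cpu_do_suspend */
    };

cpu_resume reloads exactly this layout with a single ldmia into {r1, sp, lr, pc}, so the stmfd in cpu_suspend and the ldmia register lists must stay in step.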


@ -50,3 +50,26 @@ void __init scu_enable(void __iomem *scu_base)
*/
flush_cache_all();
}
/*
* Set the executing CPUs power mode as defined. This will be in
* preparation for it executing a WFI instruction.
*
* This function must be called with preemption disabled, and as it
* has the side effect of disabling coherency, caches must have been
* flushed. Interrupts must also have been disabled.
*/
int scu_power_mode(void __iomem *scu_base, unsigned int mode)
{
unsigned int val;
int cpu = smp_processor_id();
if (mode > 3 || mode == 1 || cpu > 3)
return -EINVAL;
val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
val |= mode;
__raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
return 0;
}
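A hedged sketch of a typical caller, for instance a CPU hotplug death path. The SCU_PM_POWEROFF value (3) and the exact call site are assumptions here; the preconditions from the comment above (preemption off, interrupts off, caches flushed) must already hold:

    #include <asm/cacheflush.h>
    #include <asm/smp_scu.h>

    static void example_cpu_die(void __iomem *scu_base)
    {
        flush_cache_all();              /* coherency is about to be dropped */
        scu_power_mode(scu_base, 3);    /* 3 == assumed SCU_PM_POWEROFF     */
        for (;;)
            asm volatile("wfi");        /* wait for the power controller    */
    }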


@ -15,7 +15,7 @@
#include <linux/string.h> /* memcpy */
#include <asm/cputype.h>
#include <asm/mach/map.h>
#include <mach/memory.h>
#include <asm/memory.h>
#include "tcm.h"
static struct gen_pool *tcm_pool;


@ -23,6 +23,7 @@
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
@ -32,7 +33,6 @@
#include <asm/unwind.h>
#include <asm/tls.h>
#include "ptrace.h"
#include "signal.h"
static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
@ -256,7 +256,7 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
return ret;
}
DEFINE_SPINLOCK(die_lock);
static DEFINE_SPINLOCK(die_lock);
/*
* This function is protected against re-entrancy.


@ -64,6 +64,10 @@ SECTIONS
__smpalt_end = .;
#endif
__pv_table_begin = .;
*(.pv_table)
__pv_table_end = .;
INIT_SETUP(16)
INIT_CALLS


@ -1,44 +1,52 @@
#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_32v6K)
#if __LINUX_ARM_ARCH__ >= 6
.macro bitop, instr
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
mov r2, #1
and r3, r0, #7 @ Get bit offset
add r1, r1, r0, lsr #3 @ Get byte offset
and r3, r0, #31 @ Get bit offset
mov r0, r0, lsr #5
add r1, r1, r0, lsl #2 @ Get word offset
mov r3, r2, lsl r3
1: ldrexb r2, [r1]
1: ldrex r2, [r1]
\instr r2, r2, r3
strexb r0, r2, [r1]
strex r0, r2, [r1]
cmp r0, #0
bne 1b
mov pc, lr
bx lr
.endm
.macro testop, instr, store
and r3, r0, #7 @ Get bit offset
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
mov r2, #1
add r1, r1, r0, lsr #3 @ Get byte offset
and r3, r0, #31 @ Get bit offset
mov r0, r0, lsr #5
add r1, r1, r0, lsl #2 @ Get word offset
mov r3, r2, lsl r3 @ create mask
smp_dmb
1: ldrexb r2, [r1]
1: ldrex r2, [r1]
ands r0, r2, r3 @ save old value of bit
\instr r2, r2, r3 @ toggle bit
strexb ip, r2, [r1]
\instr r2, r2, r3 @ toggle bit
strex ip, r2, [r1]
cmp ip, #0
bne 1b
smp_dmb
cmp r0, #0
movne r0, #1
2: mov pc, lr
2: bx lr
.endm
#else
.macro bitop, instr
and r2, r0, #7
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
and r2, r0, #31
mov r0, r0, lsr #5
mov r3, #1
mov r3, r3, lsl r2
save_and_disable_irqs ip
ldrb r2, [r1, r0, lsr #3]
ldr r2, [r1, r0, lsl #2]
\instr r2, r2, r3
strb r2, [r1, r0, lsr #3]
str r2, [r1, r0, lsl #2]
restore_irqs ip
mov pc, lr
.endm
@ -52,11 +60,13 @@
* to avoid dirtying the data cache.
*/
.macro testop, instr, store
add r1, r1, r0, lsr #3
and r3, r0, #7
mov r0, #1
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
and r3, r0, #31
mov r0, r0, lsr #5
save_and_disable_irqs ip
ldrb r2, [r1]
ldr r2, [r1, r0, lsl #2]!
mov r0, #1
tst r2, r0, lsl r3
\instr r2, r2, r0, lsl r3
\store r2, [r1]
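The rewrite above moves the exclusive-monitor loop from bytes (ldrexb/strexb) to whole words (ldrex/strex): a bit number now splits into a word offset (bit >> 5) and a bit-in-word mask (1 << (bit & 31)), and the pointer must be word-aligned, which the deliberately faulting strneb asserts. A C-level equivalent of the word-based set operation, with a C11 builtin standing in for the ldrex/strex retry loop:

    #include <stdatomic.h>

    static void set_bit_word(int bit, _Atomic unsigned int *addr)
    {
        _Atomic unsigned int *word = addr + (bit >> 5); /* word offset */
        unsigned int mask = 1u << (bit & 31);           /* bit in word */

        atomic_fetch_or(word, mask);  /* retried RMW, like ldrex..strex */
    }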


@ -12,12 +12,6 @@
#include "bitops.h"
.text
/* Purpose : Function to change a bit
* Prototype: int change_bit(int bit, void *addr)
*/
ENTRY(_change_bit_be)
eor r0, r0, #0x18 @ big endian byte ordering
ENTRY(_change_bit_le)
ENTRY(_change_bit)
bitop eor
ENDPROC(_change_bit_be)
ENDPROC(_change_bit_le)
ENDPROC(_change_bit)


@ -12,13 +12,6 @@
#include "bitops.h"
.text
/*
* Purpose : Function to clear a bit
* Prototype: int clear_bit(int bit, void *addr)
*/
ENTRY(_clear_bit_be)
eor r0, r0, #0x18 @ big endian byte ordering
ENTRY(_clear_bit_le)
ENTRY(_clear_bit)
bitop bic
ENDPROC(_clear_bit_be)
ENDPROC(_clear_bit_le)
ENDPROC(_clear_bit)


@ -12,13 +12,6 @@
#include "bitops.h"
.text
/*
* Purpose : Function to set a bit
* Prototype: int set_bit(int bit, void *addr)
*/
ENTRY(_set_bit_be)
eor r0, r0, #0x18 @ big endian byte ordering
ENTRY(_set_bit_le)
ENTRY(_set_bit)
bitop orr
ENDPROC(_set_bit_be)
ENDPROC(_set_bit_le)
ENDPROC(_set_bit)


@ -12,9 +12,6 @@
#include "bitops.h"
.text
ENTRY(_test_and_change_bit_be)
eor r0, r0, #0x18 @ big endian byte ordering
ENTRY(_test_and_change_bit_le)
testop eor, strb
ENDPROC(_test_and_change_bit_be)
ENDPROC(_test_and_change_bit_le)
ENTRY(_test_and_change_bit)
testop eor, str
ENDPROC(_test_and_change_bit)


@ -12,9 +12,6 @@
#include "bitops.h"
.text
ENTRY(_test_and_clear_bit_be)
eor r0, r0, #0x18 @ big endian byte ordering
ENTRY(_test_and_clear_bit_le)
testop bicne, strneb
ENDPROC(_test_and_clear_bit_be)
ENDPROC(_test_and_clear_bit_le)
ENTRY(_test_and_clear_bit)
testop bicne, strne
ENDPROC(_test_and_clear_bit)


@ -12,9 +12,6 @@
#include "bitops.h"
.text
ENTRY(_test_and_set_bit_be)
eor r0, r0, #0x18 @ big endian byte ordering
ENTRY(_test_and_set_bit_le)
testop orreq, streqb
ENDPROC(_test_and_set_bit_be)
ENDPROC(_test_and_set_bit_le)
ENTRY(_test_and_set_bit)
testop orreq, streq
ENDPROC(_test_and_set_bit)


@ -12,6 +12,6 @@
#define __ASM_ARCH_MEMORY_H
#define PHYS_OFFSET UL(0xf0000000)
#define PLAT_PHYS_OFFSET UL(0xf0000000)
#endif /* __ASM_ARCH_MEMORY_H */
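This and the matching memory.h hunks below are one series: each platform now defines PLAT_PHYS_OFFSET as its static DRAM base, and the generic ARM headers derive PHYS_OFFSET from it, which leaves room for PHYS_OFFSET to be computed at boot when runtime phys-to-virt patching is enabled. A hedged sketch of the static fallback (the real asm/memory.h wording may differ):

    /* generic header side, non-patched configuration */
    #ifndef PHYS_OFFSET
    #define PHYS_OFFSET PLAT_PHYS_OFFSET
    #endif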


@ -153,6 +153,7 @@ static struct i2c_board_info __initdata snapper9260_i2c_devices[] = {
{
/* RTC */
I2C_BOARD_INFO("isl1208", 0x6f),
.irq = gpio_to_irq(AT91_PIN_PA31),
},
};


@ -220,15 +220,8 @@ extern void at91_gpio_resume(void);
#define gpio_set_value __gpio_set_value
#define gpio_cansleep __gpio_cansleep
static inline int gpio_to_irq(unsigned gpio)
{
return gpio;
}
static inline int irq_to_gpio(unsigned irq)
{
return irq;
}
#define gpio_to_irq(gpio) (gpio)
#define irq_to_gpio(irq) (irq)
#endif /* __ASSEMBLY__ */


@ -23,6 +23,6 @@
#include <mach/hardware.h>
#define PHYS_OFFSET (AT91_SDRAM_BASE)
#define PLAT_PHYS_OFFSET (AT91_SDRAM_BASE)
#endif


@ -31,7 +31,7 @@
* *_SIZE is the size of the region
* *_BASE is the virtual address
*/
#define RAM_START PHYS_OFFSET
#define RAM_START PLAT_PHYS_OFFSET
#define RAM_SIZE (CFG_GLOBAL_RAM_SIZE-CFG_GLOBAL_RAM_SIZE_RESERVED)
#define RAM_BASE PAGE_OFFSET


@ -23,7 +23,7 @@
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
*/
#define PHYS_OFFSET CFG_GLOBAL_RAM_BASE
#define PLAT_PHYS_OFFSET CFG_GLOBAL_RAM_BASE
/*
* Maximum DMA memory allowed is 14M


@ -23,7 +23,7 @@
/*
* Physical DRAM offset.
*/
#define PHYS_OFFSET UL(0xc0000000)
#define PLAT_PHYS_OFFSET UL(0xc0000000)
#if !defined(CONFIG_ARCH_CDB89712) && !defined (CONFIG_ARCH_AUTCPU12)


@ -13,7 +13,7 @@
/*
* Physical DRAM offset.
*/
#define PHYS_OFFSET UL(0x00000000)
#define PLAT_PHYS_OFFSET UL(0x00000000)
#define __phys_to_bus(x) ((x) + PHYS_OFFSET)
#define __bus_to_phys(x) ((x) - PHYS_OFFSET)


@ -26,9 +26,9 @@
#if defined(CONFIG_ARCH_DAVINCI_DA8XX) && defined(CONFIG_ARCH_DAVINCI_DMx)
#error Cannot enable DaVinci and DA8XX platforms concurrently
#elif defined(CONFIG_ARCH_DAVINCI_DA8XX)
#define PHYS_OFFSET DA8XX_DDR_BASE
#define PLAT_PHYS_OFFSET DA8XX_DDR_BASE
#else
#define PHYS_OFFSET DAVINCI_DDR_BASE
#define PLAT_PHYS_OFFSET DAVINCI_DDR_BASE
#endif
#define DDR2_SDRCR_OFFSET 0xc


@ -9,7 +9,7 @@ config MACH_DOVE_DB
Say 'Y' here if you want your kernel to support the
Marvell DB-MV88AP510 Development Board.
config MACH_CM_A510
config MACH_CM_A510
bool "CompuLab CM-A510 Board"
help
Say 'Y' here if you want your kernel to support the


@ -5,6 +5,6 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
#define PHYS_OFFSET UL(0x00000000)
#define PLAT_PHYS_OFFSET UL(0x00000000)
#endif


@ -19,7 +19,7 @@
/*
* Physical DRAM offset.
*/
#define PHYS_OFFSET UL(0x00000000)
#define PLAT_PHYS_OFFSET UL(0x00000000)
/*
* Cache flushing area - SRAM


@ -30,8 +30,13 @@
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <linux/spi/spi.h>
#include <sound/cs4271.h>
#include <mach/hardware.h>
#include <mach/fb.h>
#include <mach/ep93xx_spi.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@ -92,6 +97,83 @@ static void __init edb93xx_register_i2c(void)
}
/*************************************************************************
* EDB93xx SPI peripheral handling
*************************************************************************/
static struct cs4271_platform_data edb93xx_cs4271_data = {
.gpio_nreset = -EINVAL, /* filled in later */
};
static int edb93xx_cs4271_hw_setup(struct spi_device *spi)
{
return gpio_request_one(EP93XX_GPIO_LINE_EGPIO6,
GPIOF_OUT_INIT_HIGH, spi->modalias);
}
static void edb93xx_cs4271_hw_cleanup(struct spi_device *spi)
{
gpio_free(EP93XX_GPIO_LINE_EGPIO6);
}
static void edb93xx_cs4271_hw_cs_control(struct spi_device *spi, int value)
{
gpio_set_value(EP93XX_GPIO_LINE_EGPIO6, value);
}
static struct ep93xx_spi_chip_ops edb93xx_cs4271_hw = {
.setup = edb93xx_cs4271_hw_setup,
.cleanup = edb93xx_cs4271_hw_cleanup,
.cs_control = edb93xx_cs4271_hw_cs_control,
};
static struct spi_board_info edb93xx_spi_board_info[] __initdata = {
{
.modalias = "cs4271",
.platform_data = &edb93xx_cs4271_data,
.controller_data = &edb93xx_cs4271_hw,
.max_speed_hz = 6000000,
.bus_num = 0,
.chip_select = 0,
.mode = SPI_MODE_3,
},
};
static struct ep93xx_spi_info edb93xx_spi_info __initdata = {
.num_chipselect = ARRAY_SIZE(edb93xx_spi_board_info),
};
static void __init edb93xx_register_spi(void)
{
if (machine_is_edb9301() || machine_is_edb9302())
edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_EGPIO1;
else if (machine_is_edb9302a() || machine_is_edb9307a())
edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_H(2);
else if (machine_is_edb9315a())
edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_EGPIO14;
ep93xx_register_spi(&edb93xx_spi_info, edb93xx_spi_board_info,
ARRAY_SIZE(edb93xx_spi_board_info));
}
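The cs4271 entry drives its chip select through the hw ops rather than a native CS line; EGPIO6 is set up to idle high, so a controller asserts it low for the duration of a transfer. A sketch of how the ops might be invoked (the surrounding function is illustrative, not the ep93xx SPI core):

    #include <linux/spi/spi.h>
    #include <mach/ep93xx_spi.h>

    static void example_transfer(struct spi_device *spi,
                                 struct ep93xx_spi_chip_ops *ops)
    {
        ops->cs_control(spi, 0);  /* assert: EGPIO6 low selects the codec */
        /* ... clock the SPI words out here ... */
        ops->cs_control(spi, 1);  /* deassert: back to the idle high level */
    }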
/*************************************************************************
* EDB93xx I2S
*************************************************************************/
static int __init edb93xx_has_audio(void)
{
return (machine_is_edb9301() || machine_is_edb9302() ||
machine_is_edb9302a() || machine_is_edb9307a() ||
machine_is_edb9315a());
}
static void __init edb93xx_register_i2s(void)
{
if (edb93xx_has_audio()) {
ep93xx_register_i2s();
}
}
/*************************************************************************
* EDB93xx pwm
*************************************************************************/
@ -111,13 +193,47 @@ static void __init edb93xx_register_pwm(void)
}
/*************************************************************************
* EDB93xx framebuffer
*************************************************************************/
static struct ep93xxfb_mach_info __initdata edb93xxfb_info = {
.num_modes = EP93XXFB_USE_MODEDB,
.bpp = 16,
.flags = 0,
};
static int __init edb93xx_has_fb(void)
{
/* These platforms have an ep93xx with video capability */
return machine_is_edb9307() || machine_is_edb9307a() ||
machine_is_edb9312() || machine_is_edb9315() ||
machine_is_edb9315a();
}
static void __init edb93xx_register_fb(void)
{
if (!edb93xx_has_fb())
return;
if (machine_is_edb9307a() || machine_is_edb9315a())
edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN0;
else
edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN3;
ep93xx_register_fb(&edb93xxfb_info);
}
static void __init edb93xx_init_machine(void)
{
ep93xx_init_devices();
edb93xx_register_flash();
ep93xx_register_eth(&edb93xx_eth_data, 1);
edb93xx_register_i2c();
edb93xx_register_spi();
edb93xx_register_i2s();
edb93xx_register_pwm();
edb93xx_register_fb();
}


@ -61,7 +61,7 @@ static inline void ep93xx_gpio_int_mask(unsigned line)
gpio_int_unmasked[line >> 3] &= ~(1 << (line & 7));
}
void ep93xx_gpio_int_debounce(unsigned int irq, int enable)
static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable)
{
int line = irq_to_gpio(irq);
int port = line >> 3;
@ -75,7 +75,6 @@ void ep93xx_gpio_int_debounce(unsigned int irq, int enable)
__raw_writeb(gpio_int_debounce[port],
EP93XX_GPIO_REG(int_debounce_register_offset[port]));
}
EXPORT_SYMBOL(ep93xx_gpio_int_debounce);
static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc)
{
@ -335,6 +334,20 @@ static void ep93xx_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
local_irq_restore(flags);
}
static int ep93xx_gpio_set_debounce(struct gpio_chip *chip,
unsigned offset, unsigned debounce)
{
int gpio = chip->base + offset;
int irq = gpio_to_irq(gpio);
if (irq < 0)
return -EINVAL;
ep93xx_gpio_int_debounce(irq, debounce ? true : false);
return 0;
}
static void ep93xx_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
@ -434,6 +447,18 @@ void __init ep93xx_gpio_init(void)
EP93XX_SYSCON_DEVCFG_GONIDE |
EP93XX_SYSCON_DEVCFG_HONIDE);
for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++)
gpiochip_add(&ep93xx_gpio_banks[i].chip);
for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
struct gpio_chip *chip = &ep93xx_gpio_banks[i].chip;
/*
* Ports A, B, and F support input debouncing when
* used as interrupts.
*/
if (!strcmp(chip->label, "A") ||
!strcmp(chip->label, "B") ||
!strcmp(chip->label, "F"))
chip->set_debounce = ep93xx_gpio_set_debounce;
gpiochip_add(chip);
}
}
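With set_debounce wired up on ports A, B, and F, ordinary gpiolib consumers can request debouncing through the generic API. A hypothetical button driver might do the following (gpio_request_one() and gpio_set_debounce() are assumed to be available in this tree):

    #include <linux/gpio.h>

    static int example_request_button(unsigned gpio)
    {
        int err = gpio_request_one(gpio, GPIOF_IN, "button");
        if (err)
            return err;

        /* any non-zero period enables the (fixed) hardware debounce */
        return gpio_set_debounce(gpio, 5000);   /* usecs, illustrative */
    }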


@ -99,8 +99,6 @@
/* maximum value for irq capable line identifiers */
#define EP93XX_GPIO_LINE_MAX_IRQ EP93XX_GPIO_LINE_F(7)
extern void ep93xx_gpio_int_debounce(unsigned int irq, int enable);
/* new generic GPIO API - see Documentation/gpio.txt */
#include <asm-generic/gpio.h>


@ -6,15 +6,15 @@
#define __ASM_ARCH_MEMORY_H
#if defined(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET)
#define PHYS_OFFSET UL(0x00000000)
#define PLAT_PHYS_OFFSET UL(0x00000000)
#elif defined(CONFIG_EP93XX_SDCE0_PHYS_OFFSET)
#define PHYS_OFFSET UL(0xc0000000)
#define PLAT_PHYS_OFFSET UL(0xc0000000)
#elif defined(CONFIG_EP93XX_SDCE1_PHYS_OFFSET)
#define PHYS_OFFSET UL(0xd0000000)
#define PLAT_PHYS_OFFSET UL(0xd0000000)
#elif defined(CONFIG_EP93XX_SDCE2_PHYS_OFFSET)
#define PHYS_OFFSET UL(0xe0000000)
#define PLAT_PHYS_OFFSET UL(0xe0000000)
#elif defined(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET)
#define PHYS_OFFSET UL(0xf0000000)
#define PLAT_PHYS_OFFSET UL(0xf0000000)
#else
#error "Kconfig bug: No EP93xx PHYS_OFFSET set"
#endif


@ -4,10 +4,11 @@
* Copyright (C) 1998 Russell King.
* Copyright (C) 1998 Phil Blundell
*/
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <asm/irq.h>
@ -16,32 +17,76 @@
#include "common.h"
/*
* Footbridge timer 1 support.
*/
static unsigned long timer1_latch;
static unsigned long timer1_gettimeoffset (void)
static cycle_t cksrc_dc21285_read(struct clocksource *cs)
{
unsigned long value = timer1_latch - *CSR_TIMER1_VALUE;
return ((tick_nsec / 1000) * value) / timer1_latch;
return cs->mask - *CSR_TIMER2_VALUE;
}
static irqreturn_t
timer1_interrupt(int irq, void *dev_id)
static int cksrc_dc21285_enable(struct clocksource *cs)
{
*CSR_TIMER2_LOAD = cs->mask;
*CSR_TIMER2_CLR = 0;
*CSR_TIMER2_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_DIV16;
return 0;
}
static void cksrc_dc21285_disable(struct clocksource *cs)
{
*CSR_TIMER2_CNTL = 0;
}
static struct clocksource cksrc_dc21285 = {
.name = "dc21285_timer2",
.rating = 200,
.read = cksrc_dc21285_read,
.enable = cksrc_dc21285_enable,
.disable = cksrc_dc21285_disable,
.mask = CLOCKSOURCE_MASK(24),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void ckevt_dc21285_set_mode(enum clock_event_mode mode,
struct clock_event_device *c)
{
switch (mode) {
case CLOCK_EVT_MODE_RESUME:
case CLOCK_EVT_MODE_PERIODIC:
*CSR_TIMER1_CLR = 0;
*CSR_TIMER1_LOAD = (mem_fclk_21285 + 8 * HZ) / (16 * HZ);
*CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD |
TIMER_CNTL_DIV16;
break;
default:
*CSR_TIMER1_CNTL = 0;
break;
}
}
static struct clock_event_device ckevt_dc21285 = {
.name = "dc21285_timer1",
.features = CLOCK_EVT_FEAT_PERIODIC,
.rating = 200,
.irq = IRQ_TIMER1,
.set_mode = ckevt_dc21285_set_mode,
};
static irqreturn_t timer1_interrupt(int irq, void *dev_id)
{
struct clock_event_device *ce = dev_id;
*CSR_TIMER1_CLR = 0;
timer_tick();
ce->event_handler(ce);
return IRQ_HANDLED;
}
static struct irqaction footbridge_timer_irq = {
.name = "Timer1 timer tick",
.name = "dc21285_timer1",
.handler = timer1_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
.dev_id = &ckevt_dc21285,
};
/*
@ -49,16 +94,19 @@ static struct irqaction footbridge_timer_irq = {
*/
static void __init footbridge_timer_init(void)
{
timer1_latch = (mem_fclk_21285 + 8 * HZ) / (16 * HZ);
struct clock_event_device *ce = &ckevt_dc21285;
*CSR_TIMER1_CLR = 0;
*CSR_TIMER1_LOAD = timer1_latch;
*CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD | TIMER_CNTL_DIV16;
clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16);
setup_irq(IRQ_TIMER1, &footbridge_timer_irq);
setup_irq(ce->irq, &footbridge_timer_irq);
clockevents_calc_mult_shift(ce, mem_fclk_21285, 5);
ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce);
ce->min_delta_ns = clockevent_delta2ns(0x000004, ce);
clockevents_register_device(ce);
}
struct sys_timer footbridge_timer = {
.init = footbridge_timer_init,
.offset = timer1_gettimeoffset,
};
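The periodic reload written to CSR_TIMER1_LOAD is the bus clock divided by 16 (the timer prescaler), rounded to the nearest count per tick by the +8*HZ term. A worked example, assuming a 50 MHz mem_fclk_21285 and HZ=100 (both values illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned long fclk = 50000000;  /* assumed mem_fclk_21285 */
        unsigned long hz = 100;         /* assumed HZ             */

        /* timer counts at fclk/16; +8*hz rounds to the nearest count */
        unsigned long reload = (fclk + 8 * hz) / (16 * hz);

        printf("%lu counts per tick\n", reload);    /* prints 31250 */
        return 0;
    }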


@ -23,26 +23,33 @@
* 0xf9000000 0x50000000 1MB Cache flush
* 0xf0000000 0x80000000 16MB ISA memory
*/
#ifdef CONFIG_MMU
#define MMU_IO(a, b) (a)
#else
#define MMU_IO(a, b) (b)
#endif
#define XBUS_SIZE 0x00100000
#define XBUS_BASE 0xff800000
#define XBUS_BASE MMU_IO(0xff800000, 0x40000000)
#define ARMCSR_SIZE 0x00100000
#define ARMCSR_BASE 0xfe000000
#define ARMCSR_BASE MMU_IO(0xfe000000, 0x42000000)
#define WFLUSH_SIZE 0x00100000
#define WFLUSH_BASE 0xfd000000
#define WFLUSH_BASE MMU_IO(0xfd000000, 0x78000000)
#define PCIIACK_SIZE 0x00100000
#define PCIIACK_BASE 0xfc000000
#define PCIIACK_BASE MMU_IO(0xfc000000, 0x79000000)
#define PCICFG1_SIZE 0x01000000
#define PCICFG1_BASE 0xfb000000
#define PCICFG1_BASE MMU_IO(0xfb000000, 0x7a000000)
#define PCICFG0_SIZE 0x01000000
#define PCICFG0_BASE 0xfa000000
#define PCICFG0_BASE MMU_IO(0xfa000000, 0x7b000000)
#define PCIMEM_SIZE 0x01000000
#define PCIMEM_BASE 0xf0000000
#define PCIMEM_BASE MMU_IO(0xf0000000, 0x80000000)
#define XBUS_LEDS ((volatile unsigned char *)(XBUS_BASE + 0x12000))
#define XBUS_LED_AMBER (1 << 0)


@ -14,8 +14,14 @@
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
#define PCIO_SIZE 0x00100000
#define PCIO_BASE 0xff000000
#ifdef CONFIG_MMU
#define MMU_IO(a, b) (a)
#else
#define MMU_IO(a, b) (b)
#endif
#define PCIO_SIZE 0x00100000
#define PCIO_BASE MMU_IO(0xff000000, 0x7c000000)
#define IO_SPACE_LIMIT 0xffff


@ -62,7 +62,7 @@ extern unsigned long __bus_to_pfn(unsigned long);
/*
* Physical DRAM offset.
*/
#define PHYS_OFFSET UL(0x00000000)
#define PLAT_PHYS_OFFSET UL(0x00000000)
#define FLUSH_BASE_PHYS 0x50000000


@ -4,10 +4,13 @@
* Copyright (C) 1998 Russell King.
* Copyright (C) 1998 Phil Blundell
*/
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/timex.h>
#include <asm/irq.h>
@ -15,77 +18,115 @@
#include "common.h"
/*
* ISA timer tick support
*/
#define mSEC_10_from_14 ((14318180 + 100) / 200)
#define PIT_MODE 0x43
#define PIT_CH0 0x40
static unsigned long isa_gettimeoffset(void)
#define PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ)
static cycle_t pit_read(struct clocksource *cs)
{
unsigned long flags;
static int old_count;
static u32 old_jifs;
int count;
u32 jifs;
static int count_p = (mSEC_10_from_14/6); /* for the first call after boot */
static unsigned long jiffies_p = 0;
raw_local_irq_save(flags);
/*
* cache volatile jiffies temporarily; we have IRQs turned off.
*/
unsigned long jiffies_t;
jifs = jiffies;
outb_p(0x00, PIT_MODE); /* latch the count */
count = inb_p(PIT_CH0); /* read the latched count */
count |= inb_p(PIT_CH0) << 8;
/* timer count may underflow right here */
outb_p(0x00, 0x43); /* latch the count ASAP */
if (count > old_count && jifs == old_jifs)
count = old_count;
count = inb_p(0x40); /* read the latched count */
old_count = count;
old_jifs = jifs;
/*
* We do this guaranteed double memory access instead of a _p
* postfix in the previous port access. Wheee, hackady hack
*/
jiffies_t = jiffies;
raw_local_irq_restore(flags);
count |= inb_p(0x40) << 8;
count = (PIT_LATCH - 1) - count;
/* Detect timer underflows. If we haven't had a timer tick since
the last time we were called, and time is apparently going
backwards, the counter must have wrapped during this routine. */
if ((jiffies_t == jiffies_p) && (count > count_p))
count -= (mSEC_10_from_14/6);
else
jiffies_p = jiffies_t;
count_p = count;
count = (((mSEC_10_from_14/6)-1) - count) * (tick_nsec / 1000);
count = (count + (mSEC_10_from_14/6)/2) / (mSEC_10_from_14/6);
return count;
return (cycle_t)(jifs * PIT_LATCH) + count;
}
static irqreturn_t
isa_timer_interrupt(int irq, void *dev_id)
static struct clocksource pit_cs = {
.name = "pit",
.rating = 110,
.read = pit_read,
.mask = CLOCKSOURCE_MASK(32),
};
static void pit_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
timer_tick();
unsigned long flags;
raw_local_irq_save(flags);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
outb_p(0x34, PIT_MODE);
outb_p(PIT_LATCH & 0xff, PIT_CH0);
outb_p(PIT_LATCH >> 8, PIT_CH0);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
outb_p(0x30, PIT_MODE);
outb_p(0, PIT_CH0);
outb_p(0, PIT_CH0);
break;
case CLOCK_EVT_MODE_ONESHOT:
case CLOCK_EVT_MODE_RESUME:
break;
}
raw_local_irq_restore(flags);
}
static int pit_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
return 0;
}
static struct clock_event_device pit_ce = {
.name = "pit",
.features = CLOCK_EVT_FEAT_PERIODIC,
.set_mode = pit_set_mode,
.set_next_event = pit_set_next_event,
.shift = 32,
};
static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *ce = dev_id;
ce->event_handler(ce);
return IRQ_HANDLED;
}
static struct irqaction isa_timer_irq = {
.name = "ISA timer tick",
.handler = isa_timer_interrupt,
static struct irqaction pit_timer_irq = {
.name = "pit",
.handler = pit_timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
.dev_id = &pit_ce,
};
static void __init isa_timer_init(void)
{
/* enable PIT timer */
/* set for periodic (4) and LSB/MSB write (0x30) */
outb(0x34, 0x43);
outb((mSEC_10_from_14/6) & 0xFF, 0x40);
outb((mSEC_10_from_14/6) >> 8, 0x40);
pit_ce.cpumask = cpumask_of(smp_processor_id());
pit_ce.mult = div_sc(PIT_TICK_RATE, NSEC_PER_SEC, pit_ce.shift);
pit_ce.max_delta_ns = clockevent_delta2ns(0x7fff, &pit_ce);
pit_ce.min_delta_ns = clockevent_delta2ns(0x000f, &pit_ce);
setup_irq(IRQ_ISA_TIMER, &isa_timer_irq);
clocksource_register_hz(&pit_cs, PIT_TICK_RATE);
setup_irq(pit_ce.irq, &pit_timer_irq);
clockevents_register_device(&pit_ce);
}
struct sys_timer isa_timer = {
.init = isa_timer_init,
.offset = isa_gettimeoffset,
};
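PIT_LATCH is the classic i8253 divisor: the 1.193182 MHz input clock rounded to counts per tick. pit_read() then builds a free-running cycle count from whole jiffies plus the position inside the current tick. Worked numbers, assuming HZ=100 (the jiffies and counter values are made up for the example):

    #include <stdio.h>

    #define PIT_TICK_RATE   1193182UL   /* i8253 input clock, Hz */
    #define HZ              100UL       /* assumed               */
    #define PIT_LATCH       ((PIT_TICK_RATE + HZ / 2) / HZ)

    int main(void)
    {
        unsigned long jifs  = 1000;     /* ticks since boot     */
        unsigned long count = 4000;     /* latched down-counter */

        unsigned long in_tick = (PIT_LATCH - 1) - count;
        unsigned long long cycles =
            (unsigned long long)jifs * PIT_LATCH + in_tick;

        /* prints: PIT_LATCH=11932 cycles=11939931 */
        printf("PIT_LATCH=%lu cycles=%llu\n", PIT_LATCH, cycles);
        return 0;
    }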


@ -98,6 +98,7 @@ static void __init ib4220b_init(void)
platform_register_pflash(SZ_16M, NULL, 0);
platform_device_register(&ib4220b_led_device);
platform_device_register(&ib4220b_key_device);
platform_register_rtc();
}
MACHINE_START(NAS4220B, "Raidsonic NAS IB-4220-B")


@ -82,6 +82,7 @@ static void __init rut1xx_init(void)
platform_register_pflash(SZ_8M, NULL, 0);
platform_device_register(&rut1xx_leds);
platform_device_register(&rut1xx_keys_device);
platform_register_rtc();
}
MACHINE_START(RUT100, "Teltonika RUT100")


@ -130,6 +130,7 @@ static void __init wbd111_init(void)
wbd111_num_partitions);
platform_device_register(&wbd111_leds_device);
platform_device_register(&wbd111_keys_device);
platform_register_rtc();
}
MACHINE_START(WBD111, "Wiliboard WBD-111")


@ -130,6 +130,7 @@ static void __init wbd222_init(void)
wbd222_num_partitions);
platform_device_register(&wbd222_leds_device);
platform_device_register(&wbd222_keys_device);
platform_register_rtc();
}
MACHINE_START(WBD222, "Wiliboard WBD-222")


@ -18,6 +18,7 @@ extern void gemini_map_io(void);
extern void gemini_init_irq(void);
extern void gemini_timer_init(void);
extern void gemini_gpio_init(void);
extern void platform_register_rtc(void);
/* Common platform devices registration functions */
extern int platform_register_uart(void);


@ -90,3 +90,29 @@ int platform_register_pflash(unsigned int size, struct mtd_partition *parts,
return platform_device_register(&pflash_device);
}
static struct resource gemini_rtc_resources[] = {
[0] = {
.start = GEMINI_RTC_BASE,
.end = GEMINI_RTC_BASE + 0x24,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_RTC,
.end = IRQ_RTC,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device gemini_rtc_device = {
.name = "rtc-gemini",
.id = 0,
.num_resources = ARRAY_SIZE(gemini_rtc_resources),
.resource = gemini_rtc_resources,
};
int __init platform_register_rtc(void)
{
return platform_device_register(&gemini_rtc_device);
}
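On the driver side, an "rtc-gemini" probe would claim these two resources through the standard platform helpers. A hypothetical sketch with error handling trimmed:

    #include <linux/platform_device.h>
    #include <linux/io.h>

    static int gemini_rtc_probe(struct platform_device *pdev)
    {
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        int irq = platform_get_irq(pdev, 0);
        void __iomem *base = ioremap(res->start, resource_size(res));

        if (!base)
            return -ENOMEM;
        /* ... request 'irq' and register an rtc_device here ... */
        return 0;
    }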


@ -11,9 +11,9 @@
#define __MACH_MEMORY_H
#ifdef CONFIG_GEMINI_MEM_SWAP
# define PHYS_OFFSET UL(0x00000000)
# define PLAT_PHYS_OFFSET UL(0x00000000)
#else
# define PHYS_OFFSET UL(0x10000000)
# define PLAT_PHYS_OFFSET UL(0x10000000)
#endif
#endif /* __MACH_MEMORY_H */


@ -7,7 +7,7 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
#define PHYS_OFFSET UL(0x40000000)
#define PLAT_PHYS_OFFSET UL(0x40000000)
/*
* This is the maximum DMA address that can be DMAd to.
* There should not be more than (0xd0000000 - 0xc0000000)

Some files were not shown because too many files have changed in this diff.