arm64 updates for 3.18:

- eBPF JIT compiler for arm64
 - CPU suspend backend for PSCI (firmware interface) with standard idle
   states defined in DT (generic idle driver to be merged via a different
   tree)
 - Support for CONFIG_DEBUG_SET_MODULE_RONX
 - Support for unmapped cpu-release-addr (outside kernel linear mapping)
 - set_arch_dma_coherent_ops() implemented and bus notifiers removed
 - EFI_STUB improvements when base of DRAM is occupied
 - Typos in KGDB macros
 - Clean-up to (partially) allow kernel building with LLVM
 - Other clean-ups (extern keyword, phys_addr_t usage)
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJUNB6NAAoJEGvWsS0AyF7x22sP/1qPQvFoY71fSqTZmSY+kfgW
 UMXhDFZOd+khD2TPHWptbgBRDElTQjRPHyISv/8ILKwDNoMlUDLlYkp1XPLM/nlB
 ea9ou2GX8iktqgM2JF5r4vk1hjH6JqEGOUHyWKZc7ibphTVm3dhg3nWL1A4peOUG
 0UyX79kl8BLAaggLSUhjtUz1GMpSNlb6Pc1ForUXaPMayBlOcVoOzh1ir7b5wb3e
 IvotUY1gv+opE9uK0QPr1AJSfpCogPEfQ2TSCP8MQZjxkrEz69n0HaFvdy60rwf4
 DaJiqBoQ5MSP3Bw+qvoYgyz+tfiPFAvEF+O3YQ5x3LBTteoooriFYH4mL7DsicAs
 2WLor/342mHykE0bOc44/gNl8B/xaZNzvO2ezLYrjVGsiY2QHTZ7fXB8arPUvQSS
 RUXVfHmcv4qthZjI17rgreBKvsfeFIMighSfvMJnVhGqDSvB8abjiPwZjzqB91Bq
 pu5MDitNgR3k3ctwzRaS6JtH2CluVFv97xIS4VaD/hm3JnS5NPeTXFou3Gb3lvon
 d/wXOIB3vY8FDMIt+BMCQPzWiU0liZ/sN7p1bsOmkgZ1wLOZ0nmsaHF09PDRGbtA
 vifopwaw9qtNlcVrTB/rDBCDaT0Ds/mTYD/a3+ch5CYUeLmQmfW/vBMfq/3gUt65
 JdI/nTVXawbl2CpBWw36
 =SAfQ
 -----END PGP SIGNATURE-----

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Catalin Marinas:
 - eBPF JIT compiler for arm64
 - CPU suspend backend for PSCI (firmware interface) with standard idle
   states defined in DT (generic idle driver to be merged via a
   different tree)
 - Support for CONFIG_DEBUG_SET_MODULE_RONX
 - Support for unmapped cpu-release-addr (outside kernel linear mapping)
 - set_arch_dma_coherent_ops() implemented and bus notifiers removed
 - EFI_STUB improvements when base of DRAM is occupied
 - Typos in KGDB macros
 - Clean-up to (partially) allow kernel building with LLVM
 - Other clean-ups (extern keyword, phys_addr_t usage)

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (51 commits)
  arm64: Remove unneeded extern keyword
  ARM64: make of_device_ids const
  arm64: Use phys_addr_t type for physical address
  aarch64: filter $x from kallsyms
  arm64: Use DMA_ERROR_CODE to denote failed allocation
  arm64: Fix typos in KGDB macros
  arm64: insn: Add return statements after BUG_ON()
  arm64: debug: don't re-enable debug exceptions on return from el1_dbg
  Revert "arm64: dmi: Add SMBIOS/DMI support"
  arm64: Implement set_arch_dma_coherent_ops() to replace bus notifiers
  of: amba: use of_dma_configure for AMBA devices
  arm64: dmi: Add SMBIOS/DMI support
  arm64: Correct ftrace calls to aarch64_insn_gen_branch_imm()
  arm64:mm: initialize max_mapnr using function set_max_mapnr
  setup: Move unmask of async interrupts after possible earlycon setup
  arm64: LLVMLinux: Fix inline arm64 assembly for use with clang
  arm64: pageattr: Correctly adjust unaligned start addresses
  net: bpf: arm64: fix module memory leak when JIT image build fails
  arm64: add PSCI CPU_SUSPEND based cpu_suspend support
  arm64: kernel: introduce cpu_init_idle CPU operation
  ...
Commit 6325e940e7 by Linus Torvalds, 2014-10-08 05:34:24 -04:00
54 changed files with 2979 additions and 189 deletions


@ -219,6 +219,12 @@ nodes to be present and contain the properties described below.
Value type: <phandle>
Definition: Specifies the ACC[2] node associated with this CPU.
- cpu-idle-states
Usage: Optional
Value type: <prop-encoded-array>
Definition:
# List of phandles to idle state nodes supported
by this cpu [3].
Example 1 (dual-cluster big.LITTLE system 32-bit):
@ -415,3 +421,5 @@ cpus {
--
[1] arm/msm/qcom,saw2.txt
[2] arm/msm/qcom,kpss-acc.txt
[3] ARM Linux kernel documentation - idle states bindings
Documentation/devicetree/bindings/arm/idle-states.txt


@ -0,0 +1,679 @@
==========================================
ARM idle states binding description
==========================================
==========================================
1 - Introduction
==========================================
ARM systems contain HW capable of managing power consumption dynamically,
where cores can be put in different low-power states (ranging from simple
wfi to power gating) according to OS PM policies. The CPU states representing
the range of dynamic idle states that a processor can enter at run-time can be
specified through device tree bindings representing the parameters required
to enter/exit specific idle states on a given processor.
According to the Server Base System Architecture document (SBSA, [3]), the
power states an ARM CPU can be put into are identified by the following list:
- Running
- Idle_standby
- Idle_retention
- Sleep
- Off
The power states described in the SBSA document define the basic CPU states on
top of which ARM platforms implement power management schemes that allow an OS
PM implementation to put the processor in different idle states (which include
states listed above; "off" state is not an idle state since it does not have
wake-up capabilities, hence it is not considered in this document).
Idle state parameters (eg entry latency) are platform specific and need to be
characterized with bindings that provide the required information to OS PM
code so that it can build the required tables and use them at runtime.
The device tree binding definition for ARM idle states is the subject of this
document.
===========================================
2 - idle-states definitions
===========================================
Idle states are characterized for a specific system through a set of
timing and energy related properties that underline the HW behaviour
triggered upon idle states entry and exit.
The following diagram depicts the CPU execution phases and related timing
properties required to enter and exit an idle state:
..__[EXEC]__|__[PREP]__|__[ENTRY]__|__[IDLE]__|__[EXIT]__|__[EXEC]__..
            |          |           |          |          |
            |<------ entry ------->|          |<- exit ->|
            |       latency        |          | latency  |
            |<-------- min-residency -------->|
                       |<------- wakeup-latency -------->|
Diagram 1: CPU idle state execution phases
EXEC: Normal CPU execution.
PREP: Preparation phase before committing the hardware to idle mode
like cache flushing. This is abortable on pending wake-up
event conditions. The abort latency is assumed to be negligible
(i.e. less than the ENTRY + EXIT duration). If aborted, CPU
goes back to EXEC. This phase is optional. If not abortable,
this should be included in the ENTRY phase instead.
ENTRY: The hardware is committed to idle mode. This period must run
to completion up to IDLE before anything else can happen.
IDLE: This is the actual energy-saving idle period. This may last
between 0 and infinite time, until a wake-up event occurs.
EXIT: Period during which the CPU is brought back to operational
mode (EXEC).
entry-latency: Worst case latency required to enter the idle state. The
exit-latency may be guaranteed only after entry-latency has passed.
min-residency: Minimum period, including preparation and entry, for a given
idle state to be worthwhile energywise.
wakeup-latency: Maximum delay between the signaling of a wake-up event and the
CPU being able to execute normal code again. If not specified, this is assumed
to be entry-latency + exit-latency.
These timing parameters can be used by an OS in different circumstances.
An idle CPU requires the expected min-residency time to select the most
appropriate idle state based on the expected expiry time of the next IRQ
(ie wake-up) that causes the CPU to return to the EXEC phase.
An operating system scheduler may need to compute the shortest wake-up delay
for CPUs in the system by detecting how long it will take to get a CPU out
of an idle state, eg:
wakeup-delay = exit-latency + max(entry-latency - (now - entry-timestamp), 0)
In other words, the scheduler can make its scheduling decision by selecting
(eg waking-up) the CPU with the shortest wake-up latency.
The wake-up latency must take into account the entry latency if that period
has not expired. The abortable nature of the PREP period can be ignored
if it cannot be relied upon (e.g. the PREP deadline may occur much sooner than
the worst case since it depends on the CPU operating conditions, ie caches
state).
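As an illustration of the computation above, the following C fragment is a
minimal sketch (not kernel code; the structure and the microsecond timestamps
are assumptions made purely for this example) of how an OS could derive the
wake-up delay of a CPU that has started entering an idle state from the
timing parameters defined by this binding:

	/*
	 * Sketch only: derive the wake-up delay from entry-latency,
	 * exit-latency and the time elapsed since idle entry started.
	 */
	struct idle_timing {
		unsigned int entry_latency_us;	/* entry-latency-us */
		unsigned int exit_latency_us;	/* exit-latency-us */
	};

	static unsigned long wakeup_delay_us(const struct idle_timing *t,
					     unsigned long now_us,
					     unsigned long entry_timestamp_us)
	{
		unsigned long elapsed_us = now_us - entry_timestamp_us;
		unsigned long remaining_us = 0;

		if (elapsed_us < t->entry_latency_us)
			remaining_us = t->entry_latency_us - elapsed_us;

		/* exit-latency + max(entry-latency - (now - entry-timestamp), 0) */
		return t->exit_latency_us + remaining_us;
	}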
An OS has to reliably probe the wakeup-latency since some devices require
latency constraints to be guaranteed in order to work properly; the OS
therefore has to detect the worst case wake-up latency it can incur if a CPU
is allowed to enter an idle state, and possibly prevent that entry to
guarantee reliable device functioning.
The min-residency time parameter deserves further explanation since it is
expressed in time units but must factor in energy consumption coefficients.
The energy consumption of a cpu when it enters a power state can be roughly
characterised by the following graph:
[ASCII plot: energy (y-axis) versus time in ms (x-axis). The curve rises
steeply between 0 and 1ms, reflecting the energy cost of entering and leaving
the idle state, then continues with a much shallower slope for times greater
than 1ms, reflecting the energy consumed in the idle state itself.]
Graph 1: Energy vs time example
The graph is split into two parts delimited by time 1ms on the X-axis.
The graph curve with X-axis values = { x | 0 < x < 1ms } has a steep slope
and denotes the energy costs incurred whilst entering and leaving the idle
state.
The graph curve in the area delimited by X-axis values = {x | x > 1ms } has
a shallower slope and essentially represents the energy consumption of the idle
state.
min-residency is defined for a given idle state as the minimum expected
residency time for a state (inclusive of preparation and entry) after
which choosing that state becomes the most energy efficient option. A good
way to visualise this is to take the graph above and compare the energy
consumption plots of several states.
For the sake of simplicity, let's consider a system with two idle states IDLE1
and IDLE2:
[ASCII plot: energy versus time curves for IDLE1 and IDLE2. IDLE1 has lower
entry/exit energy costs but a steeper slope; IDLE2 has higher entry/exit costs
but a shallower slope. The two curves cross at IDLE2-min-residency: to the
left of that point IDLE1-energy < IDLE2-energy, to the right of it
IDLE2-energy < IDLE1-energy.]
Graph 2: idle states min-residency example
In graph 2 above, which takes into account idle states entry/exit energy
costs, it is clear that if the idle state residency time (ie time till next
wake-up IRQ) is less than IDLE2-min-residency, IDLE1 is the better idle state
choice energywise.
This is mainly down to the fact that IDLE1 entry/exit energy costs are lower
than those of IDLE2.
However, the lower power consumption (ie shallower energy curve slope) of idle
state IDLE2 implies that after a suitable time, IDLE2 becomes more energy
efficient.
The time at which IDLE2 becomes more energy efficient than IDLE1 (and other
shallower states in a system with multiple idle states) is defined as
IDLE2-min-residency and corresponds to the time when the energy consumption of
IDLE1 and IDLE2 states breaks even.
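To make the use of min-residency concrete, the following C fragment is a
minimal governor-style sketch (illustrative only; it is not the Linux cpuidle
governor, and the state table layout is an assumption made for this example).
Given the expected time to the next wake-up event, it picks the deepest state
whose min-residency fits within that time and whose exit latency respects any
latency constraint imposed by devices:

	/* Sketch only: states[] is assumed ordered from shallowest to deepest. */
	struct example_state {
		unsigned int min_residency_us;	/* min-residency-us */
		unsigned int exit_latency_us;	/* exit-latency-us */
	};

	static int pick_idle_state(const struct example_state *states, int nr,
				   unsigned long expected_sleep_us,
				   unsigned int latency_limit_us)
	{
		int i, best = 0;	/* state 0 (e.g. standard wfi) is always valid */

		for (i = 1; i < nr; i++) {
			if (states[i].min_residency_us > expected_sleep_us)
				break;
			if (states[i].exit_latency_us > latency_limit_us)
				break;
			best = i;
		}
		return best;
	}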
The definitions provided in this section underpin the idle states
properties specification that is the subject of the following sections.
===========================================
3 - idle-states node
===========================================
ARM processor idle states are defined within the idle-states node, which is
a direct child of the cpus node [1] and provides a container where the
processor idle states, defined as device tree nodes, are listed.
- idle-states node
Usage: Optional - On ARM systems, it is a container of processor idle
states nodes. If the system does not provide CPU
power management capabilities, or the processor just
supports idle_standby, an idle-states node is not
required.
Description: idle-states node is a container node, where its
subnodes describe the CPU idle states.
Node name must be "idle-states".
The idle-states node's parent node must be the cpus node.
The idle-states node's child nodes can be:
- one or more state nodes
Any other configuration is considered invalid.
An idle-states node defines the following properties:
- entry-method
Value type: <stringlist>
Usage and definition depend on ARM architecture version.
# On ARM v8 64-bit this property is required and must
be one of:
- "psci" (see bindings in [2])
# On ARM 32-bit systems this property is optional
The nodes describing the idle states (state) can only be defined within the
idle-states node; any other configuration is considered invalid and therefore
must be ignored.
===========================================
4 - state node
===========================================
A state node represents an idle state description and must be defined as
follows:
- state node
Description: must be child of the idle-states node
The state node name shall follow standard device tree naming
rules ([5], 2.2.1 "Node names"), in particular state nodes which
are siblings within a single common parent must be given a unique name.
The idle state entered by executing the wfi instruction (idle_standby
SBSA,[3][4]) is considered standard on all ARM platforms and therefore
must not be listed.
With the definitions provided above, the following list represents
the valid properties for a state node:
- compatible
Usage: Required
Value type: <stringlist>
Definition: Must be "arm,idle-state".
- local-timer-stop
Usage: See definition
Value type: <none>
Definition: if present, the CPU local timer control logic is
lost on state entry; otherwise it is retained.
- entry-latency-us
Usage: Required
Value type: <prop-encoded-array>
Definition: u32 value representing worst case latency in
microseconds required to enter the idle state.
The exit-latency-us duration may be guaranteed
only after entry-latency-us has passed.
- exit-latency-us
Usage: Required
Value type: <prop-encoded-array>
Definition: u32 value representing worst case latency
in microseconds required to exit the idle state.
- min-residency-us
Usage: Required
Value type: <prop-encoded-array>
Definition: u32 value representing minimum residency duration
in microseconds, inclusive of preparation and
entry, for this idle state to be considered
worthwhile energy wise (refer to section 2 of
this document for a complete description).
- wakeup-latency-us:
Usage: Optional
Value type: <prop-encoded-array>
Definition: u32 value representing maximum delay between the
signaling of a wake-up event and the CPU being
able to execute normal code again. If omitted,
this is assumed to be equal to:
entry-latency-us + exit-latency-us
It is important to supply this value on systems
where the duration of the PREP phase (see diagram 1,
section 2) is non-negligible.
In such systems entry-latency-us + exit-latency-us
will exceed wakeup-latency-us by this duration (see
the worked example below).
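For illustration only (the numbers below are invented for this example and do
not describe any real platform): assume a state whose PREP phase lasts 50us,
whose committed ENTRY phase lasts 200us and whose EXIT phase lasts 500us. The
corresponding state node would carry entry-latency-us = <250> (PREP + ENTRY),
exit-latency-us = <500> and wakeup-latency-us = <700> (ENTRY + EXIT). The sum
entry-latency-us + exit-latency-us (750us) exceeds wakeup-latency-us by
exactly the 50us PREP duration, which is why supplying wakeup-latency-us is
important whenever PREP is not negligible.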
In addition to the properties listed above, a state node may require
additional properties specific to the entry-method defined in the
idle-states node; please refer to the entry-method bindings
documentation for property definitions.
===========================================
5 - Examples
===========================================
Example 1 (ARM 64-bit, 16-cpu system, PSCI enable-method):
cpus {
#size-cells = <0>;
#address-cells = <2>;
CPU0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <0x0 0x0>;
enable-method = "psci";
cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
&CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
};
CPU1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <0x0 0x1>;
enable-method = "psci";
cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
&CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
};
CPU2: cpu@100 {
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <0x0 0x100>;
enable-method = "psci";
cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
&CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
};
CPU3: cpu@101 {
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <0x0 0x101>;
enable-method = "psci";
cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
&CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
};
CPU4: cpu@10000 {
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <0x0 0x10000>;
enable-method = "psci";
cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
&CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
};
CPU5: cpu@10001 {
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <0x0 0x10001>;
enable-method = "psci";
cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
&CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
};
CPU6: cpu@10100 {
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <0x0 0x10100>;
enable-method = "psci";
cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
&CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
};
CPU7: cpu@10101 {
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <0x0 0x10101>;
enable-method = "psci";
cpu-idle-states = <&CPU_RETENTION_0_0 &CPU_SLEEP_0_0
&CLUSTER_RETENTION_0 &CLUSTER_SLEEP_0>;
};
CPU8: cpu@100000000 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x1 0x0>;
enable-method = "psci";
cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
&CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
};
CPU9: cpu@100000001 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x1 0x1>;
enable-method = "psci";
cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
&CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
};
CPU10: cpu@100000100 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x1 0x100>;
enable-method = "psci";
cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
&CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
};
CPU11: cpu@100000101 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x1 0x101>;
enable-method = "psci";
cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
&CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
};
CPU12: cpu@100010000 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x1 0x10000>;
enable-method = "psci";
cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
&CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
};
CPU13: cpu@100010001 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x1 0x10001>;
enable-method = "psci";
cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
&CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
};
CPU14: cpu@100010100 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x1 0x10100>;
enable-method = "psci";
cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
&CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
};
CPU15: cpu@100010101 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x1 0x10101>;
enable-method = "psci";
cpu-idle-states = <&CPU_RETENTION_1_0 &CPU_SLEEP_1_0
&CLUSTER_RETENTION_1 &CLUSTER_SLEEP_1>;
};
idle-states {
entry-method = "arm,psci";
CPU_RETENTION_0_0: cpu-retention-0-0 {
compatible = "arm,idle-state";
arm,psci-suspend-param = <0x0010000>;
entry-latency-us = <20>;
exit-latency-us = <40>;
min-residency-us = <80>;
};
CLUSTER_RETENTION_0: cluster-retention-0 {
compatible = "arm,idle-state";
local-timer-stop;
arm,psci-suspend-param = <0x1010000>;
entry-latency-us = <50>;
exit-latency-us = <100>;
min-residency-us = <250>;
wakeup-latency-us = <130>;
};
CPU_SLEEP_0_0: cpu-sleep-0-0 {
compatible = "arm,idle-state";
local-timer-stop;
arm,psci-suspend-param = <0x0010000>;
entry-latency-us = <250>;
exit-latency-us = <500>;
min-residency-us = <950>;
};
CLUSTER_SLEEP_0: cluster-sleep-0 {
compatible = "arm,idle-state";
local-timer-stop;
arm,psci-suspend-param = <0x1010000>;
entry-latency-us = <600>;
exit-latency-us = <1100>;
min-residency-us = <2700>;
wakeup-latency-us = <1500>;
};
CPU_RETENTION_1_0: cpu-retention-1-0 {
compatible = "arm,idle-state";
arm,psci-suspend-param = <0x0010000>;
entry-latency-us = <20>;
exit-latency-us = <40>;
min-residency-us = <90>;
};
CLUSTER_RETENTION_1: cluster-retention-1 {
compatible = "arm,idle-state";
local-timer-stop;
arm,psci-suspend-param = <0x1010000>;
entry-latency-us = <50>;
exit-latency-us = <100>;
min-residency-us = <270>;
wakeup-latency-us = <100>;
};
CPU_SLEEP_1_0: cpu-sleep-1-0 {
compatible = "arm,idle-state";
local-timer-stop;
arm,psci-suspend-param = <0x0010000>;
entry-latency-us = <70>;
exit-latency-us = <100>;
min-residency-us = <300>;
wakeup-latency-us = <150>;
};
CLUSTER_SLEEP_1: cluster-sleep-1 {
compatible = "arm,idle-state";
local-timer-stop;
arm,psci-suspend-param = <0x1010000>;
entry-latency-us = <500>;
exit-latency-us = <1200>;
min-residency-us = <3500>;
wakeup-latency-us = <1300>;
};
};
};
Example 2 (ARM 32-bit, 8-cpu system, two clusters):
cpus {
#size-cells = <0>;
#address-cells = <1>;
CPU0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <0x0>;
cpu-idle-states = <&CPU_SLEEP_0_0 &CLUSTER_SLEEP_0>;
};
CPU1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <0x1>;
cpu-idle-states = <&CPU_SLEEP_0_0 &CLUSTER_SLEEP_0>;
};
CPU2: cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <0x2>;
cpu-idle-states = <&CPU_SLEEP_0_0 &CLUSTER_SLEEP_0>;
};
CPU3: cpu@3 {
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <0x3>;
cpu-idle-states = <&CPU_SLEEP_0_0 &CLUSTER_SLEEP_0>;
};
CPU4: cpu@100 {
device_type = "cpu";
compatible = "arm,cortex-a7";
reg = <0x100>;
cpu-idle-states = <&CPU_SLEEP_1_0 &CLUSTER_SLEEP_1>;
};
CPU5: cpu@101 {
device_type = "cpu";
compatible = "arm,cortex-a7";
reg = <0x101>;
cpu-idle-states = <&CPU_SLEEP_1_0 &CLUSTER_SLEEP_1>;
};
CPU6: cpu@102 {
device_type = "cpu";
compatible = "arm,cortex-a7";
reg = <0x102>;
cpu-idle-states = <&CPU_SLEEP_1_0 &CLUSTER_SLEEP_1>;
};
CPU7: cpu@103 {
device_type = "cpu";
compatible = "arm,cortex-a7";
reg = <0x103>;
cpu-idle-states = <&CPU_SLEEP_1_0 &CLUSTER_SLEEP_1>;
};
idle-states {
CPU_SLEEP_0_0: cpu-sleep-0-0 {
compatible = "arm,idle-state";
local-timer-stop;
entry-latency-us = <200>;
exit-latency-us = <100>;
min-residency-us = <400>;
wakeup-latency-us = <250>;
};
CLUSTER_SLEEP_0: cluster-sleep-0 {
compatible = "arm,idle-state";
local-timer-stop;
entry-latency-us = <500>;
exit-latency-us = <1500>;
min-residency-us = <2500>;
wakeup-latency-us = <1700>;
};
CPU_SLEEP_1_0: cpu-sleep-1-0 {
compatible = "arm,idle-state";
local-timer-stop;
entry-latency-us = <300>;
exit-latency-us = <500>;
min-residency-us = <900>;
wakeup-latency-us = <600>;
};
CLUSTER_SLEEP_1: cluster-sleep-1 {
compatible = "arm,idle-state";
local-timer-stop;
entry-latency-us = <800>;
exit-latency-us = <2000>;
min-residency-us = <6500>;
wakeup-latency-us = <2300>;
};
};
};
===========================================
6 - References
===========================================
[1] ARM Linux Kernel documentation - CPUs bindings
Documentation/devicetree/bindings/arm/cpus.txt
[2] ARM Linux Kernel documentation - PSCI bindings
Documentation/devicetree/bindings/arm/psci.txt
[3] ARM Server Base System Architecture (SBSA)
http://infocenter.arm.com/help/index.jsp
[4] ARM Architecture Reference Manuals
http://infocenter.arm.com/help/index.jsp
[5] ePAPR standard
https://www.power.org/documentation/epapr-version-1-1/


@ -50,6 +50,16 @@ Main node optional properties:
- migrate : Function ID for MIGRATE operation
Device tree nodes that require usage of PSCI CPU_SUSPEND function (ie idle
state nodes, as per bindings in [1]) must specify the following properties:
- arm,psci-suspend-param
Usage: Required for state nodes[1] if the corresponding
idle-states node entry-method property is set
to "psci".
Value type: <u32>
Definition: power_state parameter to pass to the PSCI
suspend call.
Example:
@ -64,7 +74,6 @@ Case 1: PSCI v0.1 only.
migrate = <0x95c10003>;
};
Case 2: PSCI v0.2 only
psci {
@ -88,3 +97,6 @@ Case 3: PSCI v0.2 and PSCI v0.1.
...
};
[1] Kernel documentation - ARM idle states bindings
Documentation/devicetree/bindings/arm/idle-states.txt


@ -462,9 +462,9 @@ JIT compiler
------------
The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC, PowerPC,
ARM, MIPS and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler
is transparently invoked for each attached filter from user space or for
internal kernel users if it has been previously enabled by root:
ARM, ARM64, MIPS and s390 and can be enabled through CONFIG_BPF_JIT. The JIT
compiler is transparently invoked for each attached filter from user space
or for internal kernel users if it has been previously enabled by root:
echo 1 > /proc/sys/net/core/bpf_jit_enable


@ -35,6 +35,7 @@ config ARM64
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
select HAVE_BPF_JIT
select HAVE_C_RECORDMCOUNT
select HAVE_CC_STACKPROTECTOR
select HAVE_DEBUG_BUGVERBOSE
@ -252,11 +253,11 @@ config SCHED_SMT
places. If unsure say N here.
config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
int "Maximum number of CPUs (2-64)"
range 2 64
depends on SMP
# These have to remain sorted largest to smallest
default "8"
default "64"
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"


@ -43,4 +43,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
of TEXT_OFFSET and platforms must not require a specific
value.
config DEBUG_SET_MODULE_RONX
bool "Set loadable kernel module data as NX and text as RO"
depends on MODULES
help
This option helps catch unintended modifications to loadable
kernel module's text and read-only data. It also prevents execution
of module data. Such protection may interfere with run-time code
patching and dynamic kernel tracing - and they might also protect
against certain classes of kernel exploits.
If in doubt, say "N".
endmenu


@ -47,6 +47,7 @@ endif
export TEXT_OFFSET GZFLAGS
core-y += arch/arm64/kernel/ arch/arm64/mm/
core-$(CONFIG_NET) += arch/arm64/net/
core-$(CONFIG_KVM) += arch/arm64/kvm/
core-$(CONFIG_XEN) += arch/arm64/xen/
core-$(CONFIG_CRYPTO) += arch/arm64/crypto/


@ -148,4 +148,8 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
#endif


@ -39,6 +39,26 @@
extern unsigned long __icache_flags;
#define CCSIDR_EL1_LINESIZE_MASK 0x7
#define CCSIDR_EL1_LINESIZE(x) ((x) & CCSIDR_EL1_LINESIZE_MASK)
#define CCSIDR_EL1_NUMSETS_SHIFT 13
#define CCSIDR_EL1_NUMSETS_MASK (0x7fff << CCSIDR_EL1_NUMSETS_SHIFT)
#define CCSIDR_EL1_NUMSETS(x) \
(((x) & CCSIDR_EL1_NUMSETS_MASK) >> CCSIDR_EL1_NUMSETS_SHIFT)
extern u64 __attribute_const__ icache_get_ccsidr(void);
static inline int icache_get_linesize(void)
{
return 16 << CCSIDR_EL1_LINESIZE(icache_get_ccsidr());
}
static inline int icache_get_numsets(void)
{
return 1 + CCSIDR_EL1_NUMSETS(icache_get_ccsidr());
}
/*
* Whilst the D-side always behaves as PIPT on AArch64, aliasing is
* permitted in the I-cache.


@ -28,6 +28,8 @@ struct device_node;
* enable-method property.
* @cpu_init: Reads any data necessary for a specific enable-method from the
* devicetree, for a given cpu node and proposed logical id.
* @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
* devicetree, for a given cpu node and proposed logical id.
* @cpu_prepare: Early one-time preparation step for a cpu. If there is a
* mechanism for doing so, tests whether it is possible to boot
* the given CPU.
@ -47,6 +49,7 @@ struct device_node;
struct cpu_operations {
const char *name;
int (*cpu_init)(struct device_node *, unsigned int);
int (*cpu_init_idle)(struct device_node *, unsigned int);
int (*cpu_prepare)(unsigned int);
int (*cpu_boot)(unsigned int);
void (*cpu_postboot)(void);
@ -61,7 +64,7 @@ struct cpu_operations {
};
extern const struct cpu_operations *cpu_ops[NR_CPUS];
extern int __init cpu_read_ops(struct device_node *dn, int cpu);
extern void __init cpu_read_bootcpu_ops(void);
int __init cpu_read_ops(struct device_node *dn, int cpu);
void __init cpu_read_bootcpu_ops(void);
#endif /* ifndef __ASM_CPU_OPS_H */


@ -0,0 +1,13 @@
#ifndef __ASM_CPUIDLE_H
#define __ASM_CPUIDLE_H
#ifdef CONFIG_CPU_IDLE
extern int cpu_init_idle(unsigned int cpu);
#else
static inline int cpu_init_idle(unsigned int cpu)
{
return -EOPNOTSUPP;
}
#endif
#endif


@ -48,11 +48,13 @@
/*
* #imm16 values used for BRK instruction generation
* Allowed values for kgbd are 0x400 - 0x7ff
* 0x100: for triggering a fault on purpose (reserved)
* 0x400: for dynamic BRK instruction
* 0x401: for compile time BRK instruction
*/
#define KGDB_DYN_DGB_BRK_IMM 0x400
#define KDBG_COMPILED_DBG_BRK_IMM 0x401
#define FAULT_BRK_IMM 0x100
#define KGDB_DYN_DBG_BRK_IMM 0x400
#define KGDB_COMPILED_DBG_BRK_IMM 0x401
/*
* BRK instruction encoding
@ -60,25 +62,31 @@
*/
#define AARCH64_BREAK_MON 0xd4200000
/*
* BRK instruction for provoking a fault on purpose
* Unlike kgdb, #imm16 value with unallocated handler is used for faulting.
*/
#define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
/*
* Extract byte from BRK instruction
*/
#define KGDB_DYN_DGB_BRK_INS_BYTE(x) \
#define KGDB_DYN_DBG_BRK_INS_BYTE(x) \
((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
/*
* Extract byte from BRK #imm16
*/
#define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \
(((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
#define KGBD_DYN_DBG_BRK_IMM_BYTE(x) \
(((((KGDB_DYN_DBG_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
#define KGDB_DYN_DGB_BRK_BYTE(x) \
(KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x))
#define KGDB_DYN_DBG_BRK_BYTE(x) \
(KGDB_DYN_DBG_BRK_INS_BYTE(x) | KGBD_DYN_DBG_BRK_IMM_BYTE(x))
#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DGB_BRK_BYTE(0)
#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DGB_BRK_BYTE(1)
#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DGB_BRK_BYTE(2)
#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DGB_BRK_BYTE(3)
#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DBG_BRK_BYTE(0)
#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DBG_BRK_BYTE(1)
#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DBG_BRK_BYTE(2)
#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DBG_BRK_BYTE(3)
#define CACHE_FLUSH_IS_SAFE 1


@ -52,6 +52,13 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
dev->archdata.dma_ops = ops;
}
static inline int set_arch_dma_coherent_ops(struct device *dev)
{
set_dma_ops(dev, &coherent_swiotlb_dma_ops);
return 0;
}
#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops
#include <asm-generic/dma-mapping-common.h>
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)


@ -2,6 +2,8 @@
* Copyright (C) 2013 Huawei Ltd.
* Author: Jiang Liu <liuj97@gmail.com>
*
* Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@ -64,12 +66,155 @@ enum aarch64_insn_imm_type {
AARCH64_INSN_IMM_14,
AARCH64_INSN_IMM_12,
AARCH64_INSN_IMM_9,
AARCH64_INSN_IMM_7,
AARCH64_INSN_IMM_6,
AARCH64_INSN_IMM_S,
AARCH64_INSN_IMM_R,
AARCH64_INSN_IMM_MAX
};
enum aarch64_insn_register_type {
AARCH64_INSN_REGTYPE_RT,
AARCH64_INSN_REGTYPE_RN,
AARCH64_INSN_REGTYPE_RT2,
AARCH64_INSN_REGTYPE_RM,
AARCH64_INSN_REGTYPE_RD,
AARCH64_INSN_REGTYPE_RA,
};
enum aarch64_insn_register {
AARCH64_INSN_REG_0 = 0,
AARCH64_INSN_REG_1 = 1,
AARCH64_INSN_REG_2 = 2,
AARCH64_INSN_REG_3 = 3,
AARCH64_INSN_REG_4 = 4,
AARCH64_INSN_REG_5 = 5,
AARCH64_INSN_REG_6 = 6,
AARCH64_INSN_REG_7 = 7,
AARCH64_INSN_REG_8 = 8,
AARCH64_INSN_REG_9 = 9,
AARCH64_INSN_REG_10 = 10,
AARCH64_INSN_REG_11 = 11,
AARCH64_INSN_REG_12 = 12,
AARCH64_INSN_REG_13 = 13,
AARCH64_INSN_REG_14 = 14,
AARCH64_INSN_REG_15 = 15,
AARCH64_INSN_REG_16 = 16,
AARCH64_INSN_REG_17 = 17,
AARCH64_INSN_REG_18 = 18,
AARCH64_INSN_REG_19 = 19,
AARCH64_INSN_REG_20 = 20,
AARCH64_INSN_REG_21 = 21,
AARCH64_INSN_REG_22 = 22,
AARCH64_INSN_REG_23 = 23,
AARCH64_INSN_REG_24 = 24,
AARCH64_INSN_REG_25 = 25,
AARCH64_INSN_REG_26 = 26,
AARCH64_INSN_REG_27 = 27,
AARCH64_INSN_REG_28 = 28,
AARCH64_INSN_REG_29 = 29,
AARCH64_INSN_REG_FP = 29, /* Frame pointer */
AARCH64_INSN_REG_30 = 30,
AARCH64_INSN_REG_LR = 30, /* Link register */
AARCH64_INSN_REG_ZR = 31, /* Zero: as source register */
AARCH64_INSN_REG_SP = 31 /* Stack pointer: as load/store base reg */
};
enum aarch64_insn_variant {
AARCH64_INSN_VARIANT_32BIT,
AARCH64_INSN_VARIANT_64BIT
};
enum aarch64_insn_condition {
AARCH64_INSN_COND_EQ = 0x0, /* == */
AARCH64_INSN_COND_NE = 0x1, /* != */
AARCH64_INSN_COND_CS = 0x2, /* unsigned >= */
AARCH64_INSN_COND_CC = 0x3, /* unsigned < */
AARCH64_INSN_COND_MI = 0x4, /* < 0 */
AARCH64_INSN_COND_PL = 0x5, /* >= 0 */
AARCH64_INSN_COND_VS = 0x6, /* overflow */
AARCH64_INSN_COND_VC = 0x7, /* no overflow */
AARCH64_INSN_COND_HI = 0x8, /* unsigned > */
AARCH64_INSN_COND_LS = 0x9, /* unsigned <= */
AARCH64_INSN_COND_GE = 0xa, /* signed >= */
AARCH64_INSN_COND_LT = 0xb, /* signed < */
AARCH64_INSN_COND_GT = 0xc, /* signed > */
AARCH64_INSN_COND_LE = 0xd, /* signed <= */
AARCH64_INSN_COND_AL = 0xe, /* always */
};
enum aarch64_insn_branch_type {
AARCH64_INSN_BRANCH_NOLINK,
AARCH64_INSN_BRANCH_LINK,
AARCH64_INSN_BRANCH_RETURN,
AARCH64_INSN_BRANCH_COMP_ZERO,
AARCH64_INSN_BRANCH_COMP_NONZERO,
};
enum aarch64_insn_size_type {
AARCH64_INSN_SIZE_8,
AARCH64_INSN_SIZE_16,
AARCH64_INSN_SIZE_32,
AARCH64_INSN_SIZE_64,
};
enum aarch64_insn_ldst_type {
AARCH64_INSN_LDST_LOAD_REG_OFFSET,
AARCH64_INSN_LDST_STORE_REG_OFFSET,
AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX,
AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX,
AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX,
};
enum aarch64_insn_adsb_type {
AARCH64_INSN_ADSB_ADD,
AARCH64_INSN_ADSB_SUB,
AARCH64_INSN_ADSB_ADD_SETFLAGS,
AARCH64_INSN_ADSB_SUB_SETFLAGS
};
enum aarch64_insn_movewide_type {
AARCH64_INSN_MOVEWIDE_ZERO,
AARCH64_INSN_MOVEWIDE_KEEP,
AARCH64_INSN_MOVEWIDE_INVERSE
};
enum aarch64_insn_bitfield_type {
AARCH64_INSN_BITFIELD_MOVE,
AARCH64_INSN_BITFIELD_MOVE_UNSIGNED,
AARCH64_INSN_BITFIELD_MOVE_SIGNED
};
enum aarch64_insn_data1_type {
AARCH64_INSN_DATA1_REVERSE_16,
AARCH64_INSN_DATA1_REVERSE_32,
AARCH64_INSN_DATA1_REVERSE_64,
};
enum aarch64_insn_data2_type {
AARCH64_INSN_DATA2_UDIV,
AARCH64_INSN_DATA2_SDIV,
AARCH64_INSN_DATA2_LSLV,
AARCH64_INSN_DATA2_LSRV,
AARCH64_INSN_DATA2_ASRV,
AARCH64_INSN_DATA2_RORV,
};
enum aarch64_insn_data3_type {
AARCH64_INSN_DATA3_MADD,
AARCH64_INSN_DATA3_MSUB,
};
enum aarch64_insn_logic_type {
AARCH64_INSN_LOGIC_AND,
AARCH64_INSN_LOGIC_BIC,
AARCH64_INSN_LOGIC_ORR,
AARCH64_INSN_LOGIC_ORN,
AARCH64_INSN_LOGIC_EOR,
AARCH64_INSN_LOGIC_EON,
AARCH64_INSN_LOGIC_AND_SETFLAGS,
AARCH64_INSN_LOGIC_BIC_SETFLAGS
};
#define __AARCH64_INSN_FUNCS(abbr, mask, val) \
@ -78,13 +223,58 @@ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
{ return (val); }
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
__AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000)
__AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000)
__AARCH64_INSN_FUNCS(stp_pre, 0x7FC00000, 0x29800000)
__AARCH64_INSN_FUNCS(ldp_pre, 0x7FC00000, 0x29C00000)
__AARCH64_INSN_FUNCS(add_imm, 0x7F000000, 0x11000000)
__AARCH64_INSN_FUNCS(adds_imm, 0x7F000000, 0x31000000)
__AARCH64_INSN_FUNCS(sub_imm, 0x7F000000, 0x51000000)
__AARCH64_INSN_FUNCS(subs_imm, 0x7F000000, 0x71000000)
__AARCH64_INSN_FUNCS(movn, 0x7F800000, 0x12800000)
__AARCH64_INSN_FUNCS(sbfm, 0x7F800000, 0x13000000)
__AARCH64_INSN_FUNCS(bfm, 0x7F800000, 0x33000000)
__AARCH64_INSN_FUNCS(movz, 0x7F800000, 0x52800000)
__AARCH64_INSN_FUNCS(ubfm, 0x7F800000, 0x53000000)
__AARCH64_INSN_FUNCS(movk, 0x7F800000, 0x72800000)
__AARCH64_INSN_FUNCS(add, 0x7F200000, 0x0B000000)
__AARCH64_INSN_FUNCS(adds, 0x7F200000, 0x2B000000)
__AARCH64_INSN_FUNCS(sub, 0x7F200000, 0x4B000000)
__AARCH64_INSN_FUNCS(subs, 0x7F200000, 0x6B000000)
__AARCH64_INSN_FUNCS(madd, 0x7FE08000, 0x1B000000)
__AARCH64_INSN_FUNCS(msub, 0x7FE08000, 0x1B008000)
__AARCH64_INSN_FUNCS(udiv, 0x7FE0FC00, 0x1AC00800)
__AARCH64_INSN_FUNCS(sdiv, 0x7FE0FC00, 0x1AC00C00)
__AARCH64_INSN_FUNCS(lslv, 0x7FE0FC00, 0x1AC02000)
__AARCH64_INSN_FUNCS(lsrv, 0x7FE0FC00, 0x1AC02400)
__AARCH64_INSN_FUNCS(asrv, 0x7FE0FC00, 0x1AC02800)
__AARCH64_INSN_FUNCS(rorv, 0x7FE0FC00, 0x1AC02C00)
__AARCH64_INSN_FUNCS(rev16, 0x7FFFFC00, 0x5AC00400)
__AARCH64_INSN_FUNCS(rev32, 0x7FFFFC00, 0x5AC00800)
__AARCH64_INSN_FUNCS(rev64, 0x7FFFFC00, 0x5AC00C00)
__AARCH64_INSN_FUNCS(and, 0x7F200000, 0x0A000000)
__AARCH64_INSN_FUNCS(bic, 0x7F200000, 0x0A200000)
__AARCH64_INSN_FUNCS(orr, 0x7F200000, 0x2A000000)
__AARCH64_INSN_FUNCS(orn, 0x7F200000, 0x2A200000)
__AARCH64_INSN_FUNCS(eor, 0x7F200000, 0x4A000000)
__AARCH64_INSN_FUNCS(eon, 0x7F200000, 0x4A200000)
__AARCH64_INSN_FUNCS(ands, 0x7F200000, 0x6A000000)
__AARCH64_INSN_FUNCS(bics, 0x7F200000, 0x6A200000)
__AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000)
__AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000)
__AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000)
__AARCH64_INSN_FUNCS(cbnz, 0xFE000000, 0x35000000)
__AARCH64_INSN_FUNCS(bcond, 0xFF000010, 0x54000000)
__AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001)
__AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002)
__AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003)
__AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000)
__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)
__AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000)
__AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000)
__AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000)
#undef __AARCH64_INSN_FUNCS
@ -97,8 +287,67 @@ u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
u32 insn, u64 imm);
u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
enum aarch64_insn_branch_type type);
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
enum aarch64_insn_register reg,
enum aarch64_insn_variant variant,
enum aarch64_insn_branch_type type);
u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
enum aarch64_insn_condition cond);
u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
u32 aarch64_insn_gen_nop(void);
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
enum aarch64_insn_branch_type type);
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
enum aarch64_insn_register base,
enum aarch64_insn_register offset,
enum aarch64_insn_size_type size,
enum aarch64_insn_ldst_type type);
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
enum aarch64_insn_register reg2,
enum aarch64_insn_register base,
int offset,
enum aarch64_insn_variant variant,
enum aarch64_insn_ldst_type type);
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
int imm, enum aarch64_insn_variant variant,
enum aarch64_insn_adsb_type type);
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
int immr, int imms,
enum aarch64_insn_variant variant,
enum aarch64_insn_bitfield_type type);
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
int imm, int shift,
enum aarch64_insn_variant variant,
enum aarch64_insn_movewide_type type);
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
enum aarch64_insn_register reg,
int shift,
enum aarch64_insn_variant variant,
enum aarch64_insn_adsb_type type);
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
enum aarch64_insn_variant variant,
enum aarch64_insn_data1_type type);
u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
enum aarch64_insn_register reg,
enum aarch64_insn_variant variant,
enum aarch64_insn_data2_type type);
u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
enum aarch64_insn_register reg1,
enum aarch64_insn_register reg2,
enum aarch64_insn_variant variant,
enum aarch64_insn_data3_type type);
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
enum aarch64_insn_register reg,
int shift,
enum aarch64_insn_variant variant,
enum aarch64_insn_logic_type type);
bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);


@ -243,7 +243,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
* (PHYS_OFFSET and PHYS_MASK taken into account).
*/
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(unsigned long addr, size_t size);
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
extern int devmem_is_allowed(unsigned long pfn);


@ -29,7 +29,7 @@
static inline void arch_kgdb_breakpoint(void)
{
asm ("brk %0" : : "I" (KDBG_COMPILED_DBG_BRK_IMM));
asm ("brk %0" : : "I" (KGDB_COMPILED_DBG_BRK_IMM));
}
extern void kgdb_handle_bus_error(void);


@ -26,13 +26,13 @@ static inline void set_my_cpu_offset(unsigned long off)
static inline unsigned long __my_cpu_offset(void)
{
unsigned long off;
register unsigned long *sp asm ("sp");
/*
* We want to allow caching the value, so avoid using volatile and
* instead use a fake stack read to hazard against barrier().
*/
asm("mrs %0, tpidr_el1" : "=r" (off) : "Q" (*sp));
asm("mrs %0, tpidr_el1" : "=r" (off) :
"Q" (*(const unsigned long *)current_stack_pointer));
return off;
}


@ -149,46 +149,51 @@ extern struct page *empty_zero_page;
#define pte_valid_not_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
pte_val(pte) &= ~pgprot_val(prot);
return pte;
}
static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
pte_val(pte) |= pgprot_val(prot);
return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
pte_val(pte) &= ~PTE_WRITE;
return pte;
return clear_pte_bit(pte, __pgprot(PTE_WRITE));
}
static inline pte_t pte_mkwrite(pte_t pte)
{
pte_val(pte) |= PTE_WRITE;
return pte;
return set_pte_bit(pte, __pgprot(PTE_WRITE));
}
static inline pte_t pte_mkclean(pte_t pte)
{
pte_val(pte) &= ~PTE_DIRTY;
return pte;
return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
}
static inline pte_t pte_mkdirty(pte_t pte)
{
pte_val(pte) |= PTE_DIRTY;
return pte;
return set_pte_bit(pte, __pgprot(PTE_DIRTY));
}
static inline pte_t pte_mkold(pte_t pte)
{
pte_val(pte) &= ~PTE_AF;
return pte;
return clear_pte_bit(pte, __pgprot(PTE_AF));
}
static inline pte_t pte_mkyoung(pte_t pte)
{
pte_val(pte) |= PTE_AF;
return pte;
return set_pte_bit(pte, __pgprot(PTE_AF));
}
static inline pte_t pte_mkspecial(pte_t pte)
{
pte_val(pte) |= PTE_SPECIAL;
return pte;
return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}
static inline void set_pte(pte_t *ptep, pte_t pte)


@ -32,6 +32,8 @@ extern void cpu_cache_off(void);
extern void cpu_do_idle(void);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
void cpu_soft_restart(phys_addr_t cpu_reset,
unsigned long addr) __attribute__((noreturn));
extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);


@ -21,6 +21,7 @@ struct sleep_save_sp {
phys_addr_t save_ptr_stash_phys;
};
extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
extern void cpu_resume(void);
extern int cpu_suspend(unsigned long);


@ -68,6 +68,11 @@ struct thread_info {
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/*
* how to get the current stack pointer from C
*/
register unsigned long current_stack_pointer asm ("sp");
/*
* how to get the thread information struct from C
*/
@ -75,8 +80,8 @@ static inline struct thread_info *current_thread_info(void) __attribute_const__;
static inline struct thread_info *current_thread_info(void)
{
register unsigned long sp asm ("sp");
return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
return (struct thread_info *)
(current_stack_pointer & ~(THREAD_SIZE - 1));
}
#define thread_saved_pc(tsk) \


@ -26,6 +26,7 @@ arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o
arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
arm64-obj-$(CONFIG_KGDB) += kgdb.o
arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o


@ -0,0 +1,31 @@
/*
* ARM64 CPU idle arch support
*
* Copyright (C) 2014 ARM Ltd.
* Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/cpuidle.h>
#include <asm/cpu_ops.h>
int cpu_init_idle(unsigned int cpu)
{
int ret = -EOPNOTSUPP;
struct device_node *cpu_node = of_cpu_device_node_get(cpu);
if (!cpu_node)
return -ENODEV;
if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle)
ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu);
of_node_put(cpu_node);
return ret;
}


@ -20,8 +20,10 @@
#include <asm/cputype.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/smp.h>
@ -47,8 +49,18 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
unsigned int cpu = smp_processor_id();
u32 l1ip = CTR_L1IP(info->reg_ctr);
if (l1ip != ICACHE_POLICY_PIPT)
set_bit(ICACHEF_ALIASING, &__icache_flags);
if (l1ip != ICACHE_POLICY_PIPT) {
/*
* VIPT caches are non-aliasing if the VA always equals the PA
* in all bit positions that are covered by the index. This is
* the case if the size of a way (# of sets * line size) does
* not exceed PAGE_SIZE.
*/
u32 waysize = icache_get_numsets() * icache_get_linesize();
if (l1ip != ICACHE_POLICY_VIPT || waysize > PAGE_SIZE)
set_bit(ICACHEF_ALIASING, &__icache_flags);
}
if (l1ip == ICACHE_POLICY_AIVIVT)
set_bit(ICACHEF_AIVIVT, &__icache_flags);
@ -190,3 +202,15 @@ void __init cpuinfo_store_boot_cpu(void)
boot_cpu_data = *info;
}
u64 __attribute_const__ icache_get_ccsidr(void)
{
u64 ccsidr;
WARN_ON(preemptible());
/* Select L1 I-cache and read its size ID register */
asm("msr csselr_el1, %1; isb; mrs %0, ccsidr_el1"
: "=r"(ccsidr) : "r"(1L));
return ccsidr;
}


@ -28,20 +28,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
kernel_size = _edata - _text;
if (*image_addr != (dram_base + TEXT_OFFSET)) {
kernel_memsize = kernel_size + (_end - _edata);
status = efi_relocate_kernel(sys_table, image_addr,
kernel_size, kernel_memsize,
dram_base + TEXT_OFFSET,
PAGE_SIZE);
status = efi_low_alloc(sys_table, kernel_memsize + TEXT_OFFSET,
SZ_2M, reserve_addr);
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Failed to relocate kernel\n");
return status;
}
if (*image_addr != (dram_base + TEXT_OFFSET)) {
pr_efi_err(sys_table, "Failed to alloc kernel memory\n");
efi_free(sys_table, kernel_memsize, *image_addr);
return EFI_LOAD_ERROR;
}
*image_size = kernel_memsize;
memcpy((void *)*reserve_addr + TEXT_OFFSET, (void *)*image_addr,
kernel_size);
*image_addr = *reserve_addr + TEXT_OFFSET;
*reserve_size = kernel_memsize + TEXT_OFFSET;
}


@ -324,7 +324,6 @@ el1_dbg:
mrs x0, far_el1
mov x2, sp // struct pt_regs
bl do_debug_exception
enable_dbg
kernel_exit 1
el1_inv:
// TODO: add support for undefined instructions in kernel mode


@ -58,7 +58,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
u32 new;
pc = (unsigned long)&ftrace_call;
new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, true);
new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
AARCH64_INSN_BRANCH_LINK);
return ftrace_modify_code(pc, 0, new, false);
}
@ -72,7 +73,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
u32 old, new;
old = aarch64_insn_gen_nop();
new = aarch64_insn_gen_branch_imm(pc, addr, true);
new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
return ftrace_modify_code(pc, old, new, true);
}
@ -86,7 +87,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long pc = rec->ip;
u32 old, new;
old = aarch64_insn_gen_branch_imm(pc, addr, true);
old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
new = aarch64_insn_gen_nop();
return ftrace_modify_code(pc, old, new, true);
@ -154,7 +155,8 @@ static int ftrace_modify_graph_caller(bool enable)
u32 branch, nop;
branch = aarch64_insn_gen_branch_imm(pc,
(unsigned long)ftrace_graph_caller, false);
(unsigned long)ftrace_graph_caller,
AARCH64_INSN_BRANCH_LINK);
nop = aarch64_insn_gen_nop();
if (enable)


@ -151,7 +151,7 @@ optional_header:
.short 0x20b // PE32+ format
.byte 0x02 // MajorLinkerVersion
.byte 0x14 // MinorLinkerVersion
.long _edata - stext // SizeOfCode
.long _end - stext // SizeOfCode
.long 0 // SizeOfInitializedData
.long 0 // SizeOfUninitializedData
.long efi_stub_entry - efi_head // AddressOfEntryPoint
@ -169,7 +169,7 @@ extra_header_fields:
.short 0 // MinorSubsystemVersion
.long 0 // Win32VersionValue
.long _edata - efi_head // SizeOfImage
.long _end - efi_head // SizeOfImage
// Everything before the kernel image is considered part of the header
.long stext - efi_head // SizeOfHeaders
@ -216,7 +216,7 @@ section_table:
.byte 0
.byte 0
.byte 0 // end of 0 padding of section name
.long _edata - stext // VirtualSize
.long _end - stext // VirtualSize
.long stext - efi_head // VirtualAddress
.long _edata - stext // SizeOfRawData
.long stext - efi_head // PointerToRawData


@ -2,6 +2,8 @@
* Copyright (C) 2013 Huawei Ltd.
* Author: Jiang Liu <liuj97@gmail.com>
*
* Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@ -20,9 +22,14 @@
#include <linux/smp.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#define AARCH64_INSN_SF_BIT BIT(31)
#define AARCH64_INSN_N_BIT BIT(22)
static int aarch64_insn_encoding_class[] = {
AARCH64_INSN_CLS_UNKNOWN,
AARCH64_INSN_CLS_UNKNOWN,
@ -251,6 +258,19 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
mask = BIT(9) - 1;
shift = 12;
break;
case AARCH64_INSN_IMM_7:
mask = BIT(7) - 1;
shift = 15;
break;
case AARCH64_INSN_IMM_6:
case AARCH64_INSN_IMM_S:
mask = BIT(6) - 1;
shift = 10;
break;
case AARCH64_INSN_IMM_R:
mask = BIT(6) - 1;
shift = 16;
break;
default:
pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
type);
@ -264,10 +284,76 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
return insn;
}
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
enum aarch64_insn_branch_type type)
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
u32 insn,
enum aarch64_insn_register reg)
{
int shift;
if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
pr_err("%s: unknown register encoding %d\n", __func__, reg);
return 0;
}
switch (type) {
case AARCH64_INSN_REGTYPE_RT:
case AARCH64_INSN_REGTYPE_RD:
shift = 0;
break;
case AARCH64_INSN_REGTYPE_RN:
shift = 5;
break;
case AARCH64_INSN_REGTYPE_RT2:
case AARCH64_INSN_REGTYPE_RA:
shift = 10;
break;
case AARCH64_INSN_REGTYPE_RM:
shift = 16;
break;
default:
pr_err("%s: unknown register type encoding %d\n", __func__,
type);
return 0;
}
insn &= ~(GENMASK(4, 0) << shift);
insn |= reg << shift;
return insn;
}
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
u32 insn)
{
u32 size;
switch (type) {
case AARCH64_INSN_SIZE_8:
size = 0;
break;
case AARCH64_INSN_SIZE_16:
size = 1;
break;
case AARCH64_INSN_SIZE_32:
size = 2;
break;
case AARCH64_INSN_SIZE_64:
size = 3;
break;
default:
pr_err("%s: unknown size encoding %d\n", __func__, type);
return 0;
}
insn &= ~GENMASK(31, 30);
insn |= size << 30;
return insn;
}
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
long range)
{
u32 insn;
long offset;
/*
@ -276,23 +362,97 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
*/
BUG_ON((pc & 0x3) || (addr & 0x3));
offset = ((long)addr - (long)pc);
BUG_ON(offset < -range || offset >= range);
return offset;
}
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
enum aarch64_insn_branch_type type)
{
u32 insn;
long offset;
/*
* B/BL support [-128M, 128M) offset
* ARM64 virtual address arrangement guarantees all kernel and module
* texts are within +/-128M.
*/
offset = ((long)addr - (long)pc);
BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
offset = branch_imm_common(pc, addr, SZ_128M);
if (type == AARCH64_INSN_BRANCH_LINK)
switch (type) {
case AARCH64_INSN_BRANCH_LINK:
insn = aarch64_insn_get_bl_value();
else
break;
case AARCH64_INSN_BRANCH_NOLINK:
insn = aarch64_insn_get_b_value();
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
offset >> 2);
}
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
enum aarch64_insn_register reg,
enum aarch64_insn_variant variant,
enum aarch64_insn_branch_type type)
{
u32 insn;
long offset;
offset = branch_imm_common(pc, addr, SZ_1M);
switch (type) {
case AARCH64_INSN_BRANCH_COMP_ZERO:
insn = aarch64_insn_get_cbz_value();
break;
case AARCH64_INSN_BRANCH_COMP_NONZERO:
insn = aarch64_insn_get_cbnz_value();
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
break;
case AARCH64_INSN_VARIANT_64BIT:
insn |= AARCH64_INSN_SF_BIT;
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
offset >> 2);
}
u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
enum aarch64_insn_condition cond)
{
u32 insn;
long offset;
offset = branch_imm_common(pc, addr, SZ_1M);
insn = aarch64_insn_get_bcond_value();
BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
insn |= cond;
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
offset >> 2);
}
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
return aarch64_insn_get_hint_value() | op;
@ -302,3 +462,500 @@ u32 __kprobes aarch64_insn_gen_nop(void)
{
return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
enum aarch64_insn_branch_type type)
{
u32 insn;
switch (type) {
case AARCH64_INSN_BRANCH_NOLINK:
insn = aarch64_insn_get_br_value();
break;
case AARCH64_INSN_BRANCH_LINK:
insn = aarch64_insn_get_blr_value();
break;
case AARCH64_INSN_BRANCH_RETURN:
insn = aarch64_insn_get_ret_value();
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
enum aarch64_insn_register base,
enum aarch64_insn_register offset,
enum aarch64_insn_size_type size,
enum aarch64_insn_ldst_type type)
{
u32 insn;
switch (type) {
case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
insn = aarch64_insn_get_ldr_reg_value();
break;
case AARCH64_INSN_LDST_STORE_REG_OFFSET:
insn = aarch64_insn_get_str_reg_value();
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
insn = aarch64_insn_encode_ldst_size(size, insn);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
base);
return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
offset);
}
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
enum aarch64_insn_register reg2,
enum aarch64_insn_register base,
int offset,
enum aarch64_insn_variant variant,
enum aarch64_insn_ldst_type type)
{
u32 insn;
int shift;
switch (type) {
case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
insn = aarch64_insn_get_ldp_pre_value();
break;
case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
insn = aarch64_insn_get_stp_pre_value();
break;
case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
insn = aarch64_insn_get_ldp_post_value();
break;
case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
insn = aarch64_insn_get_stp_post_value();
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
/* offset must be multiples of 4 in the range [-256, 252] */
BUG_ON(offset & 0x3);
BUG_ON(offset < -256 || offset > 252);
shift = 2;
break;
case AARCH64_INSN_VARIANT_64BIT:
/* offset must be multiples of 8 in the range [-512, 504] */
BUG_ON(offset & 0x7);
BUG_ON(offset < -512 || offset > 504);
shift = 3;
insn |= AARCH64_INSN_SF_BIT;
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
reg1);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
reg2);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
base);
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
offset >> shift);
}
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
int imm, enum aarch64_insn_variant variant,
enum aarch64_insn_adsb_type type)
{
u32 insn;
switch (type) {
case AARCH64_INSN_ADSB_ADD:
insn = aarch64_insn_get_add_imm_value();
break;
case AARCH64_INSN_ADSB_SUB:
insn = aarch64_insn_get_sub_imm_value();
break;
case AARCH64_INSN_ADSB_ADD_SETFLAGS:
insn = aarch64_insn_get_adds_imm_value();
break;
case AARCH64_INSN_ADSB_SUB_SETFLAGS:
insn = aarch64_insn_get_subs_imm_value();
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
break;
case AARCH64_INSN_VARIANT_64BIT:
insn |= AARCH64_INSN_SF_BIT;
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
BUG_ON(imm & ~(SZ_4K - 1));
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
int immr, int imms,
enum aarch64_insn_variant variant,
enum aarch64_insn_bitfield_type type)
{
u32 insn;
u32 mask;
switch (type) {
case AARCH64_INSN_BITFIELD_MOVE:
insn = aarch64_insn_get_bfm_value();
break;
case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
insn = aarch64_insn_get_ubfm_value();
break;
case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
insn = aarch64_insn_get_sbfm_value();
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
mask = GENMASK(4, 0);
break;
case AARCH64_INSN_VARIANT_64BIT:
insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
mask = GENMASK(5, 0);
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
BUG_ON(immr & ~mask);
BUG_ON(imms & ~mask);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
int imm, int shift,
enum aarch64_insn_variant variant,
enum aarch64_insn_movewide_type type)
{
u32 insn;
switch (type) {
case AARCH64_INSN_MOVEWIDE_ZERO:
insn = aarch64_insn_get_movz_value();
break;
case AARCH64_INSN_MOVEWIDE_KEEP:
insn = aarch64_insn_get_movk_value();
break;
case AARCH64_INSN_MOVEWIDE_INVERSE:
insn = aarch64_insn_get_movn_value();
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
BUG_ON(imm & ~(SZ_64K - 1));
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
BUG_ON(shift != 0 && shift != 16);
break;
case AARCH64_INSN_VARIANT_64BIT:
insn |= AARCH64_INSN_SF_BIT;
BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
shift != 48);
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
insn |= (shift >> 4) << 21;
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
enum aarch64_insn_register reg,
int shift,
enum aarch64_insn_variant variant,
enum aarch64_insn_adsb_type type)
{
u32 insn;
switch (type) {
case AARCH64_INSN_ADSB_ADD:
insn = aarch64_insn_get_add_value();
break;
case AARCH64_INSN_ADSB_SUB:
insn = aarch64_insn_get_sub_value();
break;
case AARCH64_INSN_ADSB_ADD_SETFLAGS:
insn = aarch64_insn_get_adds_value();
break;
case AARCH64_INSN_ADSB_SUB_SETFLAGS:
insn = aarch64_insn_get_subs_value();
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
BUG_ON(shift & ~(SZ_32 - 1));
break;
case AARCH64_INSN_VARIANT_64BIT:
insn |= AARCH64_INSN_SF_BIT;
BUG_ON(shift & ~(SZ_64 - 1));
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
enum aarch64_insn_variant variant,
enum aarch64_insn_data1_type type)
{
u32 insn;
switch (type) {
case AARCH64_INSN_DATA1_REVERSE_16:
insn = aarch64_insn_get_rev16_value();
break;
case AARCH64_INSN_DATA1_REVERSE_32:
insn = aarch64_insn_get_rev32_value();
break;
case AARCH64_INSN_DATA1_REVERSE_64:
BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
insn = aarch64_insn_get_rev64_value();
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
break;
case AARCH64_INSN_VARIANT_64BIT:
insn |= AARCH64_INSN_SF_BIT;
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}
u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
enum aarch64_insn_register reg,
enum aarch64_insn_variant variant,
enum aarch64_insn_data2_type type)
{
u32 insn;
switch (type) {
case AARCH64_INSN_DATA2_UDIV:
insn = aarch64_insn_get_udiv_value();
break;
case AARCH64_INSN_DATA2_SDIV:
insn = aarch64_insn_get_sdiv_value();
break;
case AARCH64_INSN_DATA2_LSLV:
insn = aarch64_insn_get_lslv_value();
break;
case AARCH64_INSN_DATA2_LSRV:
insn = aarch64_insn_get_lsrv_value();
break;
case AARCH64_INSN_DATA2_ASRV:
insn = aarch64_insn_get_asrv_value();
break;
case AARCH64_INSN_DATA2_RORV:
insn = aarch64_insn_get_rorv_value();
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
break;
case AARCH64_INSN_VARIANT_64BIT:
insn |= AARCH64_INSN_SF_BIT;
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}
u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
enum aarch64_insn_register reg1,
enum aarch64_insn_register reg2,
enum aarch64_insn_variant variant,
enum aarch64_insn_data3_type type)
{
u32 insn;
switch (type) {
case AARCH64_INSN_DATA3_MADD:
insn = aarch64_insn_get_madd_value();
break;
case AARCH64_INSN_DATA3_MSUB:
insn = aarch64_insn_get_msub_value();
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
break;
case AARCH64_INSN_VARIANT_64BIT:
insn |= AARCH64_INSN_SF_BIT;
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
reg1);
return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
reg2);
}
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
enum aarch64_insn_register reg,
int shift,
enum aarch64_insn_variant variant,
enum aarch64_insn_logic_type type)
{
u32 insn;
switch (type) {
case AARCH64_INSN_LOGIC_AND:
insn = aarch64_insn_get_and_value();
break;
case AARCH64_INSN_LOGIC_BIC:
insn = aarch64_insn_get_bic_value();
break;
case AARCH64_INSN_LOGIC_ORR:
insn = aarch64_insn_get_orr_value();
break;
case AARCH64_INSN_LOGIC_ORN:
insn = aarch64_insn_get_orn_value();
break;
case AARCH64_INSN_LOGIC_EOR:
insn = aarch64_insn_get_eor_value();
break;
case AARCH64_INSN_LOGIC_EON:
insn = aarch64_insn_get_eon_value();
break;
case AARCH64_INSN_LOGIC_AND_SETFLAGS:
insn = aarch64_insn_get_ands_value();
break;
case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
insn = aarch64_insn_get_bics_value();
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
BUG_ON(shift & ~(SZ_32 - 1));
break;
case AARCH64_INSN_VARIANT_64BIT:
insn |= AARCH64_INSN_SF_BIT;
BUG_ON(shift & ~(SZ_64 - 1));
break;
default:
BUG_ON(1);
return AARCH64_BREAK_FAULT;
}
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

@ -235,13 +235,13 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
static struct break_hook kgdb_brkpt_hook = {
.esr_mask = 0xffffffff,
.esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DGB_BRK_IMM),
.esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DBG_BRK_IMM),
.fn = kgdb_brk_fn
};
static struct break_hook kgdb_compiled_brkpt_hook = {
.esr_mask = 0xffffffff,
.esr_val = DBG_ESR_VAL_BRK(KDBG_COMPILED_DBG_BRK_IMM),
.esr_val = DBG_ESR_VAL_BRK(KGDB_COMPILED_DBG_BRK_IMM),
.fn = kgdb_compiled_brk_fn
};

@ -1276,7 +1276,7 @@ arch_initcall(cpu_pmu_reset);
/*
* PMU platform driver and devicetree bindings.
*/
static struct of_device_id armpmu_of_device_ids[] = {
static const struct of_device_id armpmu_of_device_ids[] = {
{.compatible = "arm,armv8-pmuv3"},
{},
};

@ -57,36 +57,10 @@ unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
static void setup_restart(void)
{
/*
* Tell the mm system that we are going to reboot -
* we may need it to insert some 1:1 mappings so that
* soft boot works.
*/
setup_mm_for_reboot();
/* Clean and invalidate caches */
flush_cache_all();
/* Turn D-cache off */
cpu_cache_off();
/* Push out any further dirty data, and ensure cache is empty */
flush_cache_all();
}
void soft_restart(unsigned long addr)
{
typedef void (*phys_reset_t)(unsigned long);
phys_reset_t phys_reset;
setup_restart();
/* Switch to the identity mapping */
phys_reset = (phys_reset_t)virt_to_phys(cpu_reset);
phys_reset(addr);
setup_mm_for_reboot();
cpu_soft_restart(virt_to_phys(cpu_reset), addr);
/* Should never get here */
BUG();
}

@ -21,6 +21,7 @@
#include <linux/reboot.h>
#include <linux/pm.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <uapi/linux/psci.h>
#include <asm/compiler.h>
@ -28,6 +29,7 @@
#include <asm/errno.h>
#include <asm/psci.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/system_misc.h>
#define PSCI_POWER_STATE_TYPE_STANDBY 0
@ -65,6 +67,8 @@ enum psci_function {
PSCI_FN_MAX,
};
static DEFINE_PER_CPU_READ_MOSTLY(struct psci_power_state *, psci_power_state);
static u32 psci_function_id[PSCI_FN_MAX];
static int psci_to_linux_errno(int errno)
@ -93,6 +97,18 @@ static u32 psci_power_state_pack(struct psci_power_state state)
& PSCI_0_2_POWER_STATE_AFFL_MASK);
}
static void psci_power_state_unpack(u32 power_state,
struct psci_power_state *state)
{
state->id = (power_state & PSCI_0_2_POWER_STATE_ID_MASK) >>
PSCI_0_2_POWER_STATE_ID_SHIFT;
state->type = (power_state & PSCI_0_2_POWER_STATE_TYPE_MASK) >>
PSCI_0_2_POWER_STATE_TYPE_SHIFT;
state->affinity_level =
(power_state & PSCI_0_2_POWER_STATE_AFFL_MASK) >>
PSCI_0_2_POWER_STATE_AFFL_SHIFT;
}
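
For reference, the same unpacking written out as a stand-alone sketch, with the PSCI 0.2 field layout spelled out as plain masks (the parameter value here is only an example, not taken from any real DT):

#include <stdio.h>

int main(void)
{
        unsigned int power_state = 0x01010022;  /* example arm,psci-suspend-param */

        unsigned int id   =  power_state        & 0xffff; /* state id, bits [15:0]      */
        unsigned int type = (power_state >> 16) & 0x1;    /* standby/powerdown, bit 16  */
        unsigned int affl = (power_state >> 24) & 0x3;    /* affinity level, bits [25:24] */

        /* prints: id=0x22 type=1 affinity_level=1 */
        printf("id=%#x type=%u affinity_level=%u\n", id, type, affl);
        return 0;
}
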
/*
* The following two functions are invoked via the invoke_psci_fn pointer
* and will not be inlined, allowing us to piggyback on the AAPCS.
@ -199,6 +215,63 @@ static int psci_migrate_info_type(void)
return err;
}
static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node,
unsigned int cpu)
{
int i, ret, count = 0;
struct psci_power_state *psci_states;
struct device_node *state_node;
/*
* If the PSCI cpu_suspend function hook has not been initialized,
* idle states must not be enabled, so bail out
*/
if (!psci_ops.cpu_suspend)
return -EOPNOTSUPP;
/* Count idle states */
while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
count))) {
count++;
of_node_put(state_node);
}
if (!count)
return -ENODEV;
psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
if (!psci_states)
return -ENOMEM;
for (i = 0; i < count; i++) {
u32 psci_power_state;
state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
ret = of_property_read_u32(state_node,
"arm,psci-suspend-param",
&psci_power_state);
if (ret) {
pr_warn(" * %s missing arm,psci-suspend-param property\n",
state_node->full_name);
of_node_put(state_node);
goto free_mem;
}
of_node_put(state_node);
pr_debug("psci-power-state %#x index %d\n", psci_power_state,
i);
psci_power_state_unpack(psci_power_state, &psci_states[i]);
}
/* Idle states parsed correctly, initialize per-cpu pointer */
per_cpu(psci_power_state, cpu) = psci_states;
return 0;
free_mem:
kfree(psci_states);
return ret;
}
static int get_set_conduit_method(struct device_node *np)
{
const char *method;
@ -436,8 +509,39 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
#endif
#endif
static int psci_suspend_finisher(unsigned long index)
{
struct psci_power_state *state = __get_cpu_var(psci_power_state);
return psci_ops.cpu_suspend(state[index - 1],
virt_to_phys(cpu_resume));
}
static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
{
int ret;
struct psci_power_state *state = __get_cpu_var(psci_power_state);
/*
* idle state index 0 corresponds to wfi, should never be called
* from the cpu_suspend operations
*/
if (WARN_ON_ONCE(!index))
return -EINVAL;
if (state->type == PSCI_POWER_STATE_TYPE_STANDBY)
ret = psci_ops.cpu_suspend(state[index - 1], 0);
else
ret = __cpu_suspend(index, psci_suspend_finisher);
return ret;
}
const struct cpu_operations cpu_psci_ops = {
.name = "psci",
#ifdef CONFIG_CPU_IDLE
.cpu_init_idle = cpu_psci_cpu_init_idle,
.cpu_suspend = cpu_psci_cpu_suspend,
#endif
#ifdef CONFIG_SMP
.cpu_init = cpu_psci_cpu_init,
.cpu_prepare = cpu_psci_cpu_prepare,

@ -36,13 +36,12 @@ void *return_address(unsigned int level)
{
struct return_address_data data;
struct stackframe frame;
register unsigned long current_sp asm ("sp");
data.level = level + 2;
data.addr = NULL;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_sp;
frame.sp = current_stack_pointer;
frame.pc = (unsigned long)return_address; /* dummy */
walk_stackframe(&frame, save_return_addr, &data);

@ -365,11 +365,6 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
void __init setup_arch(char **cmdline_p)
{
/*
* Unmask asynchronous aborts early to catch possible system errors.
*/
local_async_enable();
setup_processor();
setup_machine_fdt(__fdt_pointer);
@ -385,6 +380,12 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
/*
* Unmask asynchronous aborts after bringing up possible earlycon.
* (Report possible System Errors once we can report this occurred)
*/
local_async_enable();
efi_init();
arm64_memblock_init();

@ -49,28 +49,39 @@
orr \dst, \dst, \mask // dst|=(aff3>>rs3)
.endm
/*
* Save CPU state for a suspend. This saves callee registers, and allocates
* space on the kernel stack to save the CPU specific registers + some
* other data for resume.
* Save CPU state for a suspend and execute the suspend finisher.
* On success it will return 0 through cpu_resume - ie through a CPU
* soft/hard reboot from the reset vector.
* On failure it returns the suspend finisher return value or forces
* -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
* is not allowed to return; if it does, this must be considered a failure).
* It saves callee registers, and allocates space on the kernel stack
* to save the CPU specific registers + some other data for resume.
*
* x0 = suspend finisher argument
* x1 = suspend finisher function pointer
*/
ENTRY(__cpu_suspend)
ENTRY(__cpu_suspend_enter)
stp x29, lr, [sp, #-96]!
stp x19, x20, [sp,#16]
stp x21, x22, [sp,#32]
stp x23, x24, [sp,#48]
stp x25, x26, [sp,#64]
stp x27, x28, [sp,#80]
/*
* Stash suspend finisher and its argument in x20 and x19
*/
mov x19, x0
mov x20, x1
mov x2, sp
sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx
mov x1, sp
mov x0, sp
/*
* x1 now points to struct cpu_suspend_ctx allocated on the stack
* x0 now points to struct cpu_suspend_ctx allocated on the stack
*/
str x2, [x1, #CPU_CTX_SP]
ldr x2, =sleep_save_sp
ldr x2, [x2, #SLEEP_SAVE_SP_VIRT]
str x2, [x0, #CPU_CTX_SP]
ldr x1, =sleep_save_sp
ldr x1, [x1, #SLEEP_SAVE_SP_VIRT]
#ifdef CONFIG_SMP
mrs x7, mpidr_el1
ldr x9, =mpidr_hash
@ -82,11 +93,21 @@ ENTRY(__cpu_suspend)
ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS]
ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
add x2, x2, x8, lsl #3
add x1, x1, x8, lsl #3
#endif
bl __cpu_suspend_finisher
bl __cpu_suspend_save
/*
* Grab suspend finisher in x20 and its argument in x19
*/
mov x0, x19
mov x1, x20
/*
* We are ready for power down, fire off the suspend finisher
* in x1, with argument in x0
*/
blr x1
/*
* Never gets here, unless suspend fails.
* Never gets here, unless suspend finisher fails.
* Successful cpu_suspend should return from cpu_resume, returning
* through this code path is considered an error
* If the return value is set to 0 force x0 = -EOPNOTSUPP
@ -103,7 +124,7 @@ ENTRY(__cpu_suspend)
ldp x27, x28, [sp, #80]
ldp x29, lr, [sp], #96
ret
ENDPROC(__cpu_suspend)
ENDPROC(__cpu_suspend_enter)
.ltorg
/*

@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
@ -65,12 +66,21 @@ static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
void **release_addr;
__le64 __iomem *release_addr;
if (!cpu_release_addr[cpu])
return -ENODEV;
release_addr = __va(cpu_release_addr[cpu]);
/*
* The cpu-release-addr may or may not be inside the linear mapping.
* As ioremap_cache will either give us a new mapping or reuse the
* existing linear mapping, we can use it to cover both cases. In
* either case the memory will be MT_NORMAL.
*/
release_addr = ioremap_cache(cpu_release_addr[cpu],
sizeof(*release_addr));
if (!release_addr)
return -ENOMEM;
/*
* We write the release address as LE regardless of the native
@ -79,15 +89,17 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
* boot-loader's endianness before jumping. This is mandated by
* the boot protocol.
*/
release_addr[0] = (void *) cpu_to_le64(__pa(secondary_holding_pen));
__flush_dcache_area(release_addr, sizeof(release_addr[0]));
writeq_relaxed(__pa(secondary_holding_pen), release_addr);
__flush_dcache_area((__force void *)release_addr,
sizeof(*release_addr));
/*
* Send an event to wake up the secondary CPU.
*/
sev();
iounmap(release_addr);
return 0;
}

@ -111,10 +111,9 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
frame.sp = thread_saved_sp(tsk);
frame.pc = thread_saved_pc(tsk);
} else {
register unsigned long current_sp asm("sp");
data.no_sched_functions = 0;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_sp;
frame.sp = current_stack_pointer;
frame.pc = (unsigned long)save_stack_trace_tsk;
}

@ -9,22 +9,19 @@
#include <asm/suspend.h>
#include <asm/tlbflush.h>
extern int __cpu_suspend(unsigned long);
extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
/*
* This is called by __cpu_suspend() to save the state, and do whatever
* This is called by __cpu_suspend_enter() to save the state, and do whatever
* flushing is required to ensure that when the CPU goes to sleep we have
* the necessary data available when the caches are not searched.
*
* @arg: Argument to pass to suspend operations
* @ptr: CPU context virtual address
* @save_ptr: address of the location where the context physical address
* must be saved
* ptr: CPU context virtual address
* save_ptr: address of the location where the context physical address
* must be saved
*/
int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
phys_addr_t *save_ptr)
void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
phys_addr_t *save_ptr)
{
int cpu = smp_processor_id();
*save_ptr = virt_to_phys(ptr);
cpu_do_suspend(ptr);
@ -35,8 +32,6 @@ int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
*/
__flush_dcache_area(ptr, sizeof(*ptr));
__flush_dcache_area(save_ptr, sizeof(*save_ptr));
return cpu_ops[cpu]->cpu_suspend(arg);
}
/*
@ -56,15 +51,15 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
}
/**
* cpu_suspend
* cpu_suspend() - function to enter a low-power state
* @arg: argument to pass to CPU suspend operations
*
* @arg: argument to pass to the finisher function
* Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
* operations back-end error code otherwise.
*/
int cpu_suspend(unsigned long arg)
{
struct mm_struct *mm = current->active_mm;
int ret, cpu = smp_processor_id();
unsigned long flags;
int cpu = smp_processor_id();
/*
* If cpu_ops have not been registered or suspend
@ -72,6 +67,21 @@ int cpu_suspend(unsigned long arg)
*/
if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
return -EOPNOTSUPP;
return cpu_ops[cpu]->cpu_suspend(arg);
}
/*
* __cpu_suspend
*
* arg: argument to pass to the finisher function
* fn: finisher function pointer
*
*/
int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
struct mm_struct *mm = current->active_mm;
int ret;
unsigned long flags;
/*
* From this point debug exceptions are disabled to prevent
@ -86,7 +96,7 @@ int cpu_suspend(unsigned long arg)
* page tables, so that the thread address space is properly
* set-up on function return.
*/
ret = __cpu_suspend(arg);
ret = __cpu_suspend_enter(arg, fn);
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
flush_tlb_all();
@ -95,7 +105,7 @@ int cpu_suspend(unsigned long arg)
* Restore per-cpu offset before any kernel
* subsystem relying on it has a chance to run.
*/
set_my_cpu_offset(per_cpu_offset(cpu));
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
/*
* Restore HW breakpoint registers to sane values

@ -132,7 +132,6 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
const register unsigned long current_sp asm ("sp");
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
@ -145,7 +144,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
frame.pc = regs->pc;
} else if (tsk == current) {
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_sp;
frame.sp = current_stack_pointer;
frame.pc = (unsigned long)dump_backtrace;
} else {
/*

@ -1,5 +1,5 @@
obj-y := dma-mapping.o extable.o fault.o init.o \
cache.o copypage.o flush.o \
ioremap.o mmap.o pgd.o mmu.o \
context.o proc.o
context.o proc.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o

@ -22,11 +22,8 @@
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/amba/bus.h>
#include <asm/cacheflush.h>
@ -125,7 +122,7 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
no_map:
__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
*dma_handle = ~0;
*dma_handle = DMA_ERROR_CODE;
return NULL;
}
@ -308,40 +305,12 @@ struct dma_map_ops coherent_swiotlb_dma_ops = {
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
static int dma_bus_notifier(struct notifier_block *nb,
unsigned long event, void *_dev)
{
struct device *dev = _dev;
if (event != BUS_NOTIFY_ADD_DEVICE)
return NOTIFY_DONE;
if (of_property_read_bool(dev->of_node, "dma-coherent"))
set_dma_ops(dev, &coherent_swiotlb_dma_ops);
return NOTIFY_OK;
}
static struct notifier_block platform_bus_nb = {
.notifier_call = dma_bus_notifier,
};
static struct notifier_block amba_bus_nb = {
.notifier_call = dma_bus_notifier,
};
extern int swiotlb_late_init_with_default_size(size_t default_size);
static int __init swiotlb_late_init(void)
{
size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
/*
* These must be registered before of_platform_populate().
*/
bus_register_notifier(&platform_bus_type, &platform_bus_nb);
bus_register_notifier(&amba_bustype, &amba_bus_nb);
dma_ops = &noncoherent_swiotlb_dma_ops;
return swiotlb_late_init_with_default_size(swiotlb_size);

@ -255,7 +255,7 @@ static void __init free_unused_memmap(void)
*/
void __init mem_init(void)
{
max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
#ifndef CONFIG_SPARSEMEM_VMEMMAP
free_unused_memmap();

@ -102,7 +102,7 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
* You really shouldn't be using read() or write() on /dev/mem. This might go
* away in the future.
*/
int valid_phys_addr_range(unsigned long addr, size_t size)
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
if (addr < PHYS_OFFSET)
return 0;

@ -94,7 +94,7 @@ static int __init early_cachepolicy(char *p)
*/
asm volatile(
" mrs %0, mair_el1\n"
" bfi %0, %1, #%2, #8\n"
" bfi %0, %1, %2, #8\n"
" msr mair_el1, %0\n"
" isb\n"
: "=&r" (tmp)

new file: arch/arm64/mm/pageattr.c (97 lines)

@ -0,0 +1,97 @@
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
struct page_change_data {
pgprot_t set_mask;
pgprot_t clear_mask;
};
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
void *data)
{
struct page_change_data *cdata = data;
pte_t pte = *ptep;
pte = clear_pte_bit(pte, cdata->clear_mask);
pte = set_pte_bit(pte, cdata->set_mask);
set_pte(ptep, pte);
return 0;
}
static int change_memory_common(unsigned long addr, int numpages,
pgprot_t set_mask, pgprot_t clear_mask)
{
unsigned long start = addr;
unsigned long size = PAGE_SIZE*numpages;
unsigned long end = start + size;
int ret;
struct page_change_data data;
if (!IS_ALIGNED(addr, PAGE_SIZE)) {
start &= PAGE_MASK;
end = start + size;
WARN_ON_ONCE(1);
}
if (!is_module_address(start) || !is_module_address(end - 1))
return -EINVAL;
data.set_mask = set_mask;
data.clear_mask = clear_mask;
ret = apply_to_page_range(&init_mm, start, size, change_page_range,
&data);
flush_tlb_kernel_range(start, end);
return ret;
}
int set_memory_ro(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(PTE_RDONLY),
__pgprot(PTE_WRITE));
}
EXPORT_SYMBOL_GPL(set_memory_ro);
int set_memory_rw(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(PTE_WRITE),
__pgprot(PTE_RDONLY));
}
EXPORT_SYMBOL_GPL(set_memory_rw);
int set_memory_nx(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(PTE_PXN),
__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);
int set_memory_x(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(0),
__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);
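
These helpers only flip PTE_RDONLY/PTE_WRITE and PTE_PXN on an existing mapping; a sketch of how a CONFIG_DEBUG_SET_MODULE_RONX-style caller might use them (the function and its arguments are hypothetical, not part of this patch):

/* 'base' must lie in the module area, and should be page aligned to avoid
 * the WARN_ON_ONCE in change_memory_common() above. */
static void example_protect_module_text(unsigned long base, int text_pages)
{
        set_memory_ro(base, text_pages);        /* clear PTE_WRITE, set PTE_RDONLY */
        set_memory_x(base, text_pages);         /* clear PTE_PXN so it stays executable */
}
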

@ -76,6 +76,21 @@ ENTRY(cpu_reset)
ret x0
ENDPROC(cpu_reset)
ENTRY(cpu_soft_restart)
/* Save address of cpu_reset() and reset address */
mov x19, x0
mov x20, x1
/* Turn D-cache off */
bl cpu_cache_off
/* Push out all dirty data, and ensure cache is empty */
bl flush_cache_all
mov x0, x20
ret x19
ENDPROC(cpu_soft_restart)
/*
* cpu_do_idle()
*

new file: arch/arm64/net/Makefile (4 lines)

@ -0,0 +1,4 @@
#
# ARM64 networking code
#
obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o

new file: arch/arm64/net/bpf_jit.h (169 lines)

@ -0,0 +1,169 @@
/*
* BPF JIT compiler for ARM64
*
* Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _BPF_JIT_H
#define _BPF_JIT_H
#include <asm/insn.h>
/* 5-bit Register Operand */
#define A64_R(x) AARCH64_INSN_REG_##x
#define A64_FP AARCH64_INSN_REG_FP
#define A64_LR AARCH64_INSN_REG_LR
#define A64_ZR AARCH64_INSN_REG_ZR
#define A64_SP AARCH64_INSN_REG_SP
#define A64_VARIANT(sf) \
((sf) ? AARCH64_INSN_VARIANT_64BIT : AARCH64_INSN_VARIANT_32BIT)
/* Compare & branch (immediate) */
#define A64_COMP_BRANCH(sf, Rt, offset, type) \
aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
AARCH64_INSN_BRANCH_COMP_##type)
#define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
/* Conditional branch (immediate) */
#define A64_COND_BRANCH(cond, offset) \
aarch64_insn_gen_cond_branch_imm(0, offset, cond)
#define A64_COND_EQ AARCH64_INSN_COND_EQ /* == */
#define A64_COND_NE AARCH64_INSN_COND_NE /* != */
#define A64_COND_CS AARCH64_INSN_COND_CS /* unsigned >= */
#define A64_COND_HI AARCH64_INSN_COND_HI /* unsigned > */
#define A64_COND_GE AARCH64_INSN_COND_GE /* signed >= */
#define A64_COND_GT AARCH64_INSN_COND_GT /* signed > */
#define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2)
/* Unconditional branch (immediate) */
#define A64_BRANCH(offset, type) aarch64_insn_gen_branch_imm(0, offset, \
AARCH64_INSN_BRANCH_##type)
#define A64_B(imm26) A64_BRANCH((imm26) << 2, NOLINK)
#define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK)
/* Unconditional branch (register) */
#define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK)
#define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN)
/* Load/store register (register offset) */
#define A64_LS_REG(Rt, Rn, Rm, size, type) \
aarch64_insn_gen_load_store_reg(Rt, Rn, Rm, \
AARCH64_INSN_SIZE_##size, \
AARCH64_INSN_LDST_##type##_REG_OFFSET)
#define A64_STRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, STORE)
#define A64_LDRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, LOAD)
#define A64_STRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, STORE)
#define A64_LDRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, LOAD)
#define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE)
#define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD)
#define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE)
#define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD)
/* Load/store register pair */
#define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \
aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \
AARCH64_INSN_VARIANT_64BIT, \
AARCH64_INSN_LDST_##ls##_PAIR_##type)
/* Rn -= 16; Rn[0] = Rt; Rn[8] = Rt2; */
#define A64_PUSH(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, -16, STORE, PRE_INDEX)
/* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */
#define A64_POP(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX)
/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP imm12 */
#define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD)
#define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB)
/* Rd = Rn */
#define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0)
/* Bitfield move */
#define A64_BITFIELD(sf, Rd, Rn, immr, imms, type) \
aarch64_insn_gen_bitfield(Rd, Rn, immr, imms, \
A64_VARIANT(sf), AARCH64_INSN_BITFIELD_MOVE_##type)
/* Signed, with sign replication to left and zeros to right */
#define A64_SBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, SIGNED)
/* Unsigned, with zeros to left and right */
#define A64_UBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, UNSIGNED)
/* Rd = Rn << shift */
#define A64_LSL(sf, Rd, Rn, shift) ({ \
int sz = (sf) ? 64 : 32; \
A64_UBFM(sf, Rd, Rn, (unsigned)-(shift) % sz, sz - 1 - (shift)); \
})
/* Rd = Rn >> shift */
#define A64_LSR(sf, Rd, Rn, shift) A64_UBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
/* Rd = Rn >> shift; signed */
#define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
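
A64_LSL leans on the architectural alias LSL Rd, Rn, #n == UBFM Rd, Rn, #(-n mod sz), #(sz - 1 - n). A stand-alone sketch of the operand arithmetic, using shift = 8 as a worked example:

#include <stdio.h>

int main(void)
{
        unsigned int sz = 64, shift = 8;
        unsigned int immr = (unsigned int)-shift % sz;  /* (-8) mod 64 = 56 */
        unsigned int imms = sz - 1 - shift;             /* 55 */

        /* so LSL Xd, Xn, #8 is encoded as UBFM Xd, Xn, #56, #55 */
        printf("immr=%u imms=%u\n", immr, imms);
        return 0;
}
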
/* Move wide (immediate) */
#define A64_MOVEW(sf, Rd, imm16, shift, type) \
aarch64_insn_gen_movewide(Rd, imm16, shift, \
A64_VARIANT(sf), AARCH64_INSN_MOVEWIDE_##type)
/* Rd = Zeros (for MOVZ);
* Rd |= imm16 << shift (where shift is {0, 16, 32, 48});
* Rd = ~Rd; (for MOVN); */
#define A64_MOVN(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, INVERSE)
#define A64_MOVZ(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, ZERO)
#define A64_MOVK(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, KEEP)
/* Add/subtract (shifted register) */
#define A64_ADDSUB_SREG(sf, Rd, Rn, Rm, type) \
aarch64_insn_gen_add_sub_shifted_reg(Rd, Rn, Rm, 0, \
A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP Rm */
#define A64_ADD(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, ADD)
#define A64_SUB(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB)
#define A64_SUBS(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB_SETFLAGS)
/* Rd = -Rm */
#define A64_NEG(sf, Rd, Rm) A64_SUB(sf, Rd, A64_ZR, Rm)
/* Rn - Rm; set condition flags */
#define A64_CMP(sf, Rn, Rm) A64_SUBS(sf, A64_ZR, Rn, Rm)
/* Data-processing (1 source) */
#define A64_DATA1(sf, Rd, Rn, type) aarch64_insn_gen_data1(Rd, Rn, \
A64_VARIANT(sf), AARCH64_INSN_DATA1_##type)
/* Rd = BSWAPx(Rn) */
#define A64_REV16(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_16)
#define A64_REV32(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_32)
#define A64_REV64(Rd, Rn) A64_DATA1(1, Rd, Rn, REVERSE_64)
/* Data-processing (2 source) */
/* Rd = Rn OP Rm */
#define A64_UDIV(sf, Rd, Rn, Rm) aarch64_insn_gen_data2(Rd, Rn, Rm, \
A64_VARIANT(sf), AARCH64_INSN_DATA2_UDIV)
/* Data-processing (3 source) */
/* Rd = Ra + Rn * Rm */
#define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD)
/* Rd = Rn * Rm */
#define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm)
/* Logical (shifted register) */
#define A64_LOGIC_SREG(sf, Rd, Rn, Rm, type) \
aarch64_insn_gen_logical_shifted_reg(Rd, Rn, Rm, 0, \
A64_VARIANT(sf), AARCH64_INSN_LOGIC_##type)
/* Rd = Rn OP Rm */
#define A64_AND(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND)
#define A64_ORR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, ORR)
#define A64_EOR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, EOR)
#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS)
/* Rn & Rm; set condition flags */
#define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
#endif /* _BPF_JIT_H */

new file: arch/arm64/net/bpf_jit_comp.c (679 lines)

@ -0,0 +1,679 @@
/*
* BPF JIT compiler for ARM64
*
* Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) "bpf_jit: " fmt
#include <linux/filter.h>
#include <linux/moduleloader.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include "bpf_jit.h"
int bpf_jit_enable __read_mostly;
#define TMP_REG_1 (MAX_BPF_REG + 0)
#define TMP_REG_2 (MAX_BPF_REG + 1)
/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
/* return value from in-kernel function, and exit value from eBPF */
[BPF_REG_0] = A64_R(7),
/* arguments from eBPF program to in-kernel function */
[BPF_REG_1] = A64_R(0),
[BPF_REG_2] = A64_R(1),
[BPF_REG_3] = A64_R(2),
[BPF_REG_4] = A64_R(3),
[BPF_REG_5] = A64_R(4),
/* callee saved registers that in-kernel function will preserve */
[BPF_REG_6] = A64_R(19),
[BPF_REG_7] = A64_R(20),
[BPF_REG_8] = A64_R(21),
[BPF_REG_9] = A64_R(22),
/* read-only frame pointer to access stack */
[BPF_REG_FP] = A64_FP,
/* temporary register for internal BPF JIT */
[TMP_REG_1] = A64_R(23),
[TMP_REG_2] = A64_R(24),
};
struct jit_ctx {
const struct bpf_prog *prog;
int idx;
int tmp_used;
int body_offset;
int *offset;
u32 *image;
};
static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
if (ctx->image != NULL)
ctx->image[ctx->idx] = cpu_to_le32(insn);
ctx->idx++;
}
static inline void emit_a64_mov_i64(const int reg, const u64 val,
struct jit_ctx *ctx)
{
u64 tmp = val;
int shift = 0;
emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
tmp >>= 16;
shift += 16;
while (tmp) {
if (tmp & 0xffff)
emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
tmp >>= 16;
shift += 16;
}
}
static inline void emit_a64_mov_i(const int is64, const int reg,
const s32 val, struct jit_ctx *ctx)
{
u16 hi = val >> 16;
u16 lo = val & 0xffff;
if (hi & 0x8000) {
if (hi == 0xffff) {
emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
} else {
emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
emit(A64_MOVK(is64, reg, lo, 0), ctx);
}
} else {
emit(A64_MOVZ(is64, reg, lo, 0), ctx);
if (hi)
emit(A64_MOVK(is64, reg, hi, 16), ctx);
}
}
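
The two helpers above assemble immediates from 16-bit chunks: emit_a64_mov_i64() starts with a MOVZ and adds one MOVK per non-zero chunk, while emit_a64_mov_i() switches to MOVN for negative 32-bit values so small negative constants still take a single instruction. Two hand-worked examples (illustrative values only):

  emit_a64_mov_i64(reg, 0x10000beef):  movz reg, #0xbeef; movk reg, #0x1, lsl #32
  emit_a64_mov_i(0, reg, -5):          hi == 0xffff, so a single movn reg, #0x4 (i.e. reg = ~0x4 = 0xfffffffb)
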
static inline int bpf2a64_offset(int bpf_to, int bpf_from,
const struct jit_ctx *ctx)
{
int to = ctx->offset[bpf_to + 1];
/* -1 to account for the Branch instruction */
int from = ctx->offset[bpf_from + 1] - 1;
return to - from;
}
static inline int epilogue_offset(const struct jit_ctx *ctx)
{
int to = ctx->offset[ctx->prog->len - 1];
int from = ctx->idx - ctx->body_offset;
return to - from;
}
/* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
static void build_prologue(struct jit_ctx *ctx)
{
const u8 r6 = bpf2a64[BPF_REG_6];
const u8 r7 = bpf2a64[BPF_REG_7];
const u8 r8 = bpf2a64[BPF_REG_8];
const u8 r9 = bpf2a64[BPF_REG_9];
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 ra = bpf2a64[BPF_REG_A];
const u8 rx = bpf2a64[BPF_REG_X];
const u8 tmp1 = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
int stack_size = MAX_BPF_STACK;
stack_size += 4; /* extra for skb_copy_bits buffer */
stack_size = STACK_ALIGN(stack_size);
/* Save callee-saved register */
emit(A64_PUSH(r6, r7, A64_SP), ctx);
emit(A64_PUSH(r8, r9, A64_SP), ctx);
if (ctx->tmp_used)
emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);
/* Set up BPF stack */
emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
/* Set up frame pointer */
emit(A64_MOV(1, fp, A64_SP), ctx);
/* Clear registers A and X */
emit_a64_mov_i64(ra, 0, ctx);
emit_a64_mov_i64(rx, 0, ctx);
}
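
With MAX_BPF_STACK at 512 bytes, the 4 extra bytes reserved as a bounce buffer for skb_copy_bits() bring the figure to 516, which STACK_ALIGN() rounds up to 528, so the stack pointer keeps the 16-byte alignment AAPCS64 requires; build_epilogue() below adds back exactly the same amount.
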
static void build_epilogue(struct jit_ctx *ctx)
{
const u8 r0 = bpf2a64[BPF_REG_0];
const u8 r6 = bpf2a64[BPF_REG_6];
const u8 r7 = bpf2a64[BPF_REG_7];
const u8 r8 = bpf2a64[BPF_REG_8];
const u8 r9 = bpf2a64[BPF_REG_9];
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 tmp1 = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
int stack_size = MAX_BPF_STACK;
stack_size += 4; /* extra for skb_copy_bits buffer */
stack_size = STACK_ALIGN(stack_size);
/* We're done with BPF stack */
emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);
/* Restore callee-saved register */
if (ctx->tmp_used)
emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
emit(A64_POP(r8, r9, A64_SP), ctx);
emit(A64_POP(r6, r7, A64_SP), ctx);
/* Restore frame pointer */
emit(A64_MOV(1, fp, A64_SP), ctx);
/* Set return value */
emit(A64_MOV(1, A64_R(0), r0), ctx);
emit(A64_RET(A64_LR), ctx);
}
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
const u8 code = insn->code;
const u8 dst = bpf2a64[insn->dst_reg];
const u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
const s16 off = insn->off;
const s32 imm = insn->imm;
const int i = insn - ctx->prog->insnsi;
const bool is64 = BPF_CLASS(code) == BPF_ALU64;
u8 jmp_cond;
s32 jmp_offset;
switch (code) {
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
case BPF_ALU64 | BPF_MOV | BPF_X:
emit(A64_MOV(is64, dst, src), ctx);
break;
/* dst = dst OP src */
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU64 | BPF_ADD | BPF_X:
emit(A64_ADD(is64, dst, dst, src), ctx);
break;
case BPF_ALU | BPF_SUB | BPF_X:
case BPF_ALU64 | BPF_SUB | BPF_X:
emit(A64_SUB(is64, dst, dst, src), ctx);
break;
case BPF_ALU | BPF_AND | BPF_X:
case BPF_ALU64 | BPF_AND | BPF_X:
emit(A64_AND(is64, dst, dst, src), ctx);
break;
case BPF_ALU | BPF_OR | BPF_X:
case BPF_ALU64 | BPF_OR | BPF_X:
emit(A64_ORR(is64, dst, dst, src), ctx);
break;
case BPF_ALU | BPF_XOR | BPF_X:
case BPF_ALU64 | BPF_XOR | BPF_X:
emit(A64_EOR(is64, dst, dst, src), ctx);
break;
case BPF_ALU | BPF_MUL | BPF_X:
case BPF_ALU64 | BPF_MUL | BPF_X:
emit(A64_MUL(is64, dst, dst, src), ctx);
break;
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_DIV | BPF_X:
emit(A64_UDIV(is64, dst, dst, src), ctx);
break;
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_X:
ctx->tmp_used = 1;
emit(A64_UDIV(is64, tmp, dst, src), ctx);
emit(A64_MUL(is64, tmp, tmp, src), ctx);
emit(A64_SUB(is64, dst, dst, tmp), ctx);
break;
/* dst = -dst */
case BPF_ALU | BPF_NEG:
case BPF_ALU64 | BPF_NEG:
emit(A64_NEG(is64, dst, dst), ctx);
break;
/* dst = BSWAP##imm(dst) */
case BPF_ALU | BPF_END | BPF_FROM_LE:
case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
if (BPF_SRC(code) == BPF_FROM_BE)
break;
#else /* !CONFIG_CPU_BIG_ENDIAN */
if (BPF_SRC(code) == BPF_FROM_LE)
break;
#endif
switch (imm) {
case 16:
emit(A64_REV16(is64, dst, dst), ctx);
break;
case 32:
emit(A64_REV32(is64, dst, dst), ctx);
break;
case 64:
emit(A64_REV64(dst, dst), ctx);
break;
}
break;
/* dst = imm */
case BPF_ALU | BPF_MOV | BPF_K:
case BPF_ALU64 | BPF_MOV | BPF_K:
emit_a64_mov_i(is64, dst, imm, ctx);
break;
/* dst = dst OP imm */
case BPF_ALU | BPF_ADD | BPF_K:
case BPF_ALU64 | BPF_ADD | BPF_K:
ctx->tmp_used = 1;
emit_a64_mov_i(is64, tmp, imm, ctx);
emit(A64_ADD(is64, dst, dst, tmp), ctx);
break;
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU64 | BPF_SUB | BPF_K:
ctx->tmp_used = 1;
emit_a64_mov_i(is64, tmp, imm, ctx);
emit(A64_SUB(is64, dst, dst, tmp), ctx);
break;
case BPF_ALU | BPF_AND | BPF_K:
case BPF_ALU64 | BPF_AND | BPF_K:
ctx->tmp_used = 1;
emit_a64_mov_i(is64, tmp, imm, ctx);
emit(A64_AND(is64, dst, dst, tmp), ctx);
break;
case BPF_ALU | BPF_OR | BPF_K:
case BPF_ALU64 | BPF_OR | BPF_K:
ctx->tmp_used = 1;
emit_a64_mov_i(is64, tmp, imm, ctx);
emit(A64_ORR(is64, dst, dst, tmp), ctx);
break;
case BPF_ALU | BPF_XOR | BPF_K:
case BPF_ALU64 | BPF_XOR | BPF_K:
ctx->tmp_used = 1;
emit_a64_mov_i(is64, tmp, imm, ctx);
emit(A64_EOR(is64, dst, dst, tmp), ctx);
break;
case BPF_ALU | BPF_MUL | BPF_K:
case BPF_ALU64 | BPF_MUL | BPF_K:
ctx->tmp_used = 1;
emit_a64_mov_i(is64, tmp, imm, ctx);
emit(A64_MUL(is64, dst, dst, tmp), ctx);
break;
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU64 | BPF_DIV | BPF_K:
ctx->tmp_used = 1;
emit_a64_mov_i(is64, tmp, imm, ctx);
emit(A64_UDIV(is64, dst, dst, tmp), ctx);
break;
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU64 | BPF_MOD | BPF_K:
ctx->tmp_used = 1;
emit_a64_mov_i(is64, tmp2, imm, ctx);
emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
emit(A64_SUB(is64, dst, dst, tmp), ctx);
break;
case BPF_ALU | BPF_LSH | BPF_K:
case BPF_ALU64 | BPF_LSH | BPF_K:
emit(A64_LSL(is64, dst, dst, imm), ctx);
break;
case BPF_ALU | BPF_RSH | BPF_K:
case BPF_ALU64 | BPF_RSH | BPF_K:
emit(A64_LSR(is64, dst, dst, imm), ctx);
break;
case BPF_ALU | BPF_ARSH | BPF_K:
case BPF_ALU64 | BPF_ARSH | BPF_K:
emit(A64_ASR(is64, dst, dst, imm), ctx);
break;
#define check_imm(bits, imm) do { \
if ((((imm) > 0) && ((imm) >> (bits))) || \
(((imm) < 0) && (~(imm) >> (bits)))) { \
pr_info("[%2d] imm=%d(0x%x) out of range\n", \
i, imm, imm); \
return -EINVAL; \
} \
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)
/* JUMP off */
case BPF_JMP | BPF_JA:
jmp_offset = bpf2a64_offset(i + off, i, ctx);
check_imm26(jmp_offset);
emit(A64_B(jmp_offset), ctx);
break;
/* IF (dst COND src) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
jmp_offset = bpf2a64_offset(i + off, i, ctx);
check_imm19(jmp_offset);
switch (BPF_OP(code)) {
case BPF_JEQ:
jmp_cond = A64_COND_EQ;
break;
case BPF_JGT:
jmp_cond = A64_COND_HI;
break;
case BPF_JGE:
jmp_cond = A64_COND_CS;
break;
case BPF_JNE:
jmp_cond = A64_COND_NE;
break;
case BPF_JSGT:
jmp_cond = A64_COND_GT;
break;
case BPF_JSGE:
jmp_cond = A64_COND_GE;
break;
default:
return -EFAULT;
}
emit(A64_B_(jmp_cond, jmp_offset), ctx);
break;
case BPF_JMP | BPF_JSET | BPF_X:
emit(A64_TST(1, dst, src), ctx);
goto emit_cond_jmp;
/* IF (dst COND imm) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
ctx->tmp_used = 1;
emit_a64_mov_i(1, tmp, imm, ctx);
emit(A64_CMP(1, dst, tmp), ctx);
goto emit_cond_jmp;
case BPF_JMP | BPF_JSET | BPF_K:
ctx->tmp_used = 1;
emit_a64_mov_i(1, tmp, imm, ctx);
emit(A64_TST(1, dst, tmp), ctx);
goto emit_cond_jmp;
/* function call */
case BPF_JMP | BPF_CALL:
{
const u8 r0 = bpf2a64[BPF_REG_0];
const u64 func = (u64)__bpf_call_base + imm;
ctx->tmp_used = 1;
emit_a64_mov_i64(tmp, func, ctx);
emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
emit(A64_MOV(1, A64_FP, A64_SP), ctx);
emit(A64_BLR(tmp), ctx);
emit(A64_MOV(1, r0, A64_R(0)), ctx);
emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
break;
}
/* function return */
case BPF_JMP | BPF_EXIT:
if (i == ctx->prog->len - 1)
break;
jmp_offset = epilogue_offset(ctx);
check_imm26(jmp_offset);
emit(A64_B(jmp_offset), ctx);
break;
/* LDX: dst = *(size *)(src + off) */
case BPF_LDX | BPF_MEM | BPF_W:
case BPF_LDX | BPF_MEM | BPF_H:
case BPF_LDX | BPF_MEM | BPF_B:
case BPF_LDX | BPF_MEM | BPF_DW:
ctx->tmp_used = 1;
emit_a64_mov_i(1, tmp, off, ctx);
switch (BPF_SIZE(code)) {
case BPF_W:
emit(A64_LDR32(dst, src, tmp), ctx);
break;
case BPF_H:
emit(A64_LDRH(dst, src, tmp), ctx);
break;
case BPF_B:
emit(A64_LDRB(dst, src, tmp), ctx);
break;
case BPF_DW:
emit(A64_LDR64(dst, src, tmp), ctx);
break;
}
break;
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_DW:
goto notyet;
/* STX: *(size *)(dst + off) = src */
case BPF_STX | BPF_MEM | BPF_W:
case BPF_STX | BPF_MEM | BPF_H:
case BPF_STX | BPF_MEM | BPF_B:
case BPF_STX | BPF_MEM | BPF_DW:
ctx->tmp_used = 1;
emit_a64_mov_i(1, tmp, off, ctx);
switch (BPF_SIZE(code)) {
case BPF_W:
emit(A64_STR32(src, dst, tmp), ctx);
break;
case BPF_H:
emit(A64_STRH(src, dst, tmp), ctx);
break;
case BPF_B:
emit(A64_STRB(src, dst, tmp), ctx);
break;
case BPF_DW:
emit(A64_STR64(src, dst, tmp), ctx);
break;
}
break;
/* STX XADD: lock *(u32 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_W:
/* STX XADD: lock *(u64 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_DW:
goto notyet;
/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
case BPF_LD | BPF_ABS | BPF_W:
case BPF_LD | BPF_ABS | BPF_H:
case BPF_LD | BPF_ABS | BPF_B:
/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
case BPF_LD | BPF_IND | BPF_W:
case BPF_LD | BPF_IND | BPF_H:
case BPF_LD | BPF_IND | BPF_B:
{
const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
int size;
emit(A64_MOV(1, r1, r6), ctx);
emit_a64_mov_i(0, r2, imm, ctx);
if (BPF_MODE(code) == BPF_IND)
emit(A64_ADD(0, r2, r2, src), ctx);
switch (BPF_SIZE(code)) {
case BPF_W:
size = 4;
break;
case BPF_H:
size = 2;
break;
case BPF_B:
size = 1;
break;
default:
return -EINVAL;
}
emit_a64_mov_i64(r3, size, ctx);
emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx);
emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
emit(A64_MOV(1, A64_FP, A64_SP), ctx);
emit(A64_BLR(r5), ctx);
emit(A64_MOV(1, r0, A64_R(0)), ctx);
emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
jmp_offset = epilogue_offset(ctx);
check_imm19(jmp_offset);
emit(A64_CBZ(1, r0, jmp_offset), ctx);
emit(A64_MOV(1, r5, r0), ctx);
switch (BPF_SIZE(code)) {
case BPF_W:
emit(A64_LDR32(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
emit(A64_REV32(0, r0, r0), ctx);
#endif
break;
case BPF_H:
emit(A64_LDRH(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
emit(A64_REV16(0, r0, r0), ctx);
#endif
break;
case BPF_B:
emit(A64_LDRB(r0, r5, A64_ZR), ctx);
break;
}
break;
}
notyet:
pr_info_once("*** NOT YET: opcode %02x ***\n", code);
return -EFAULT;
default:
pr_err_once("unknown opcode %02x\n", code);
return -EINVAL;
}
return 0;
}
static int build_body(struct jit_ctx *ctx)
{
const struct bpf_prog *prog = ctx->prog;
int i;
for (i = 0; i < prog->len; i++) {
const struct bpf_insn *insn = &prog->insnsi[i];
int ret;
if (ctx->image == NULL)
ctx->offset[i] = ctx->idx;
ret = build_insn(insn, ctx);
if (ret)
return ret;
}
return 0;
}
static inline void bpf_flush_icache(void *start, void *end)
{
flush_icache_range((unsigned long)start, (unsigned long)end);
}
void bpf_jit_compile(struct bpf_prog *prog)
{
/* Nothing to do here. We support Internal BPF. */
}
void bpf_int_jit_compile(struct bpf_prog *prog)
{
struct jit_ctx ctx;
int image_size;
if (!bpf_jit_enable)
return;
if (!prog || !prog->len)
return;
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;
ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
if (ctx.offset == NULL)
return;
/* 1. Initial fake pass to compute ctx->idx. */
/* Fake pass to fill in ctx->offset. */
if (build_body(&ctx))
goto out;
build_prologue(&ctx);
build_epilogue(&ctx);
/* Now we know the actual image size. */
image_size = sizeof(u32) * ctx.idx;
ctx.image = module_alloc(image_size);
if (unlikely(ctx.image == NULL))
goto out;
/* 2. Now, the actual pass. */
ctx.idx = 0;
build_prologue(&ctx);
ctx.body_offset = ctx.idx;
if (build_body(&ctx)) {
module_free(NULL, ctx.image);
goto out;
}
build_epilogue(&ctx);
/* And we're done. */
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, image_size, 2, ctx.image);
bpf_flush_icache(ctx.image, ctx.image + ctx.idx);
prog->bpf_func = (void *)ctx.image;
prog->jited = 1;
out:
kfree(ctx.offset);
}
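
As with the other eBPF JITs, nothing is compiled unless the bpf_jit_enable sysctl is set; writing 2 to /proc/sys/net/core/bpf_jit_enable additionally hex-dumps each generated image through bpf_jit_dump() above, which is handy for checking the emitted instructions against the encodings in bpf_jit.h.
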
void bpf_jit_free(struct bpf_prog *prog)
{
if (prog->jited)
module_free(NULL, prog->bpf_func);
kfree(prog);
}

@ -160,11 +160,10 @@ EXPORT_SYMBOL(of_device_alloc);
* can use Platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE event
* to fix up DMA configuration.
*/
static void of_dma_configure(struct platform_device *pdev)
static void of_dma_configure(struct device *dev)
{
u64 dma_addr, paddr, size;
int ret;
struct device *dev = &pdev->dev;
/*
* Set default dma-mask to 32 bit. Drivers are expected to setup
@ -229,7 +228,7 @@ static struct platform_device *of_platform_device_create_pdata(
if (!dev)
goto err_clear_flag;
of_dma_configure(dev);
of_dma_configure(&dev->dev);
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
@ -291,7 +290,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
}
/* setup generic device info */
dev->dev.coherent_dma_mask = ~0;
dev->dev.of_node = of_node_get(node);
dev->dev.parent = parent;
dev->dev.platform_data = platform_data;
@ -299,6 +297,7 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
dev_set_name(&dev->dev, "%s", bus_id);
else
of_device_make_bus_id(&dev->dev);
of_dma_configure(&dev->dev);
/* Allow the HW Peripheral ID to be overridden */
prop = of_get_property(node, "arm,primecell-periphid", NULL);

@ -3388,7 +3388,7 @@ static inline int is_arm_mapping_symbol(const char *str)
{
if (str[0] == '.' && str[1] == 'L')
return true;
return str[0] == '$' && strchr("atd", str[1])
return str[0] == '$' && strchr("axtd", str[1])
&& (str[2] == '\0' || str[2] == '.');
}

@ -84,7 +84,7 @@ static void usage(void)
*/
static inline int is_arm_mapping_symbol(const char *str)
{
return str[0] == '$' && strchr("atd", str[1])
return str[0] == '$' && strchr("axtd", str[1])
&& (str[2] == '\0' || str[2] == '.');
}

@ -1147,7 +1147,7 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
static inline int is_arm_mapping_symbol(const char *str)
{
return str[0] == '$' && strchr("atd", str[1])
return str[0] == '$' && strchr("axtd", str[1])
&& (str[2] == '\0' || str[2] == '.');
}