mirror of https://github.com/torvalds/linux.git
Merge branch 'for-linus' into for-next
Merging for-linus branch for syncing the latest STAC/IDT codec changes to be affected by the upcoming hda-jack rewrites.
commit 998052b745
Documentation/devicetree/bindings/mfd/tc3589x.txt (new file, 107 lines)
@@ -0,0 +1,107 @@
* Toshiba TC3589x multi-purpose expander

The Toshiba TC3589x series are I2C-based MFD devices which may expose the
following built-in devices: gpio, keypad, rotator (vibrator), PWM (for
e.g. LEDs or vibrators). The included models are:

- TC35890
- TC35892
- TC35893
- TC35894
- TC35895
- TC35896

Required properties:
- compatible : must be "toshiba,tc35890", "toshiba,tc35892", "toshiba,tc35893",
  "toshiba,tc35894", "toshiba,tc35895" or "toshiba,tc35896"
- reg : I2C address of the device
- interrupt-parent : specifies which IRQ controller we're connected to
- interrupts : the interrupt on the parent the controller is connected to
- interrupt-controller : marks the device node as an interrupt controller
- #interrupt-cells : should be <1>, the first cell is the IRQ offset on this
  TC3589x interrupt controller.

Optional nodes:

- GPIO
  This GPIO module inside the TC3589x has 24 (TC35890, TC35892) or 20
  (other models) GPIO lines.
  - compatible : must be "toshiba,tc3589x-gpio"
  - interrupts : interrupt on the parent, which must be the tc3589x MFD device
  - interrupt-controller : marks the device node as an interrupt controller
  - #interrupt-cells : should be <2>, the first cell is the IRQ offset on this
    TC3589x GPIO interrupt controller, the second cell is the interrupt flags
    in accordance with <dt-bindings/interrupt-controller/irq.h>. The following
    flags are valid:
    - IRQ_TYPE_LEVEL_LOW
    - IRQ_TYPE_LEVEL_HIGH
    - IRQ_TYPE_EDGE_RISING
    - IRQ_TYPE_EDGE_FALLING
    - IRQ_TYPE_EDGE_BOTH
  - gpio-controller : marks the device node as a GPIO controller
  - #gpio-cells : should be <2>, the first cell is the GPIO offset on this
    GPIO controller, the second cell is the flags.

- Keypad
  This keypad is the same on all variants, supporting up to 96 different
  keys. The linux-specific properties are modeled on those already existing
  in other input drivers.
  - compatible : must be "toshiba,tc3589x-keypad"
  - debounce-delay-ms : debounce interval in milliseconds
  - keypad,num-rows : number of rows in the matrix, see
    bindings/input/matrix-keymap.txt
  - keypad,num-columns : number of columns in the matrix, see
    bindings/input/matrix-keymap.txt
  - linux,keymap : the definition can be found in
    bindings/input/matrix-keymap.txt
  - linux,no-autorepeat : do not enable the autorepeat feature.
  - linux,wakeup : use any event on the keypad as a wakeup event.

Example:

tc35893@44 {
        compatible = "toshiba,tc35893";
        reg = <0x44>;
        interrupt-parent = <&gpio6>;
        interrupts = <26 IRQ_TYPE_EDGE_RISING>;

        interrupt-controller;
        #interrupt-cells = <1>;

        tc3589x_gpio {
                compatible = "toshiba,tc3589x-gpio";
                interrupts = <0>;

                interrupt-controller;
                #interrupt-cells = <2>;
                gpio-controller;
                #gpio-cells = <2>;
        };
        tc3589x_keypad {
                compatible = "toshiba,tc3589x-keypad";
                interrupts = <6>;
                debounce-delay-ms = <4>;
                keypad,num-columns = <8>;
                keypad,num-rows = <8>;
                linux,no-autorepeat;
                linux,wakeup;
                linux,keymap = <0x0301006b
                                0x04010066
                                0x06040072
                                0x040200d7
                                0x0303006a
                                0x0205000e
                                0x0607008b
                                0x0500001c
                                0x0403000b
                                0x03040034
                                0x05020067
                                0x0305006c
                                0x040500e7
                                0x0005009e
                                0x06020073
                                0x01030039
                                0x07060069
                                0x050500d9>;
        };
};
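As an editorial aside, the packed linux,keymap cells above follow the row/column/key-code layout described in bindings/input/matrix-keymap.txt. The following stand-alone C sketch (not part of the binding or of any kernel API) decodes the first example cell just to make the packing visible:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t cell = 0x0301006b;          /* first keymap entry above */
                unsigned row  = cell >> 24;          /* 0x03 */
                unsigned col  = (cell >> 16) & 0xff; /* 0x01 */
                unsigned code = cell & 0xffff;       /* 0x006b, a Linux key code */

                printf("row %u, column %u, key code 0x%x\n", row, col, code);
                return 0;
        }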
@@ -22,7 +22,7 @@ Optional properties:
 		width of 8 is assumed.

 - ti,nand-ecc-opt:	A string setting the ECC layout to use. One of:
-		"sw"		<deprecated> use "ham1" instead
+		"sw"		1-bit Hamming ecc code via software
 		"hw"		<deprecated> use "ham1" instead
 		"hw-romcode"	<deprecated> use "ham1" instead
 		"ham1"		1-bit Hamming ecc code
@@ -62,7 +62,7 @@ Example:
 			#gpio-cells = <2>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
-			interrupts = <0 32 0x4>;
+			interrupts = <0 16 0x4>;

 			pinctrl-names = "default";
 			pinctrl-0 = <&gsbi5_uart_default>;
@@ -1,7 +1,7 @@
 ADI AXI-SPDIF controller

 Required properties:
- - compatible : Must be "adi,axi-spdif-1.00.a"
+ - compatible : Must be "adi,axi-spdif-tx-1.00.a"
  - reg : Must contain SPDIF core's registers location and length
  - clocks : Pairs of phandle and specifier referencing the controller's clocks.
    The controller expects two clocks, the clock used for the AXI interface and
@@ -56,10 +56,10 @@ The dma_buf buffer sharing API usage contains the following steps:
                                      size_t size, int flags,
                                      const char *exp_name)

-   If this succeeds, dma_buf_export allocates a dma_buf structure, and returns a
-   pointer to the same. It also associates an anonymous file with this buffer,
-   so it can be exported. On failure to allocate the dma_buf object, it returns
-   NULL.
+   If this succeeds, dma_buf_export_named allocates a dma_buf structure, and
+   returns a pointer to the same. It also associates an anonymous file with this
+   buffer, so it can be exported. On failure to allocate the dma_buf object,
+   it returns NULL.

   'exp_name' is the name of exporter - to facilitate information while
   debugging.
@@ -76,7 +76,7 @@ The dma_buf buffer sharing API usage contains the following steps:
      drivers and/or processes.

      Interface:
-        int dma_buf_fd(struct dma_buf *dmabuf)
+        int dma_buf_fd(struct dma_buf *dmabuf, int flags)

      This API installs an fd for the anonymous file associated with this buffer;
      returns either 'fd', or error.
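A minimal exporter-side sketch of handing such a buffer to userspace with the two-argument dma_buf_fd() shown above; the O_CLOEXEC choice and the helper name are assumptions for illustration only:

        #include <linux/dma-buf.h>
        #include <linux/fcntl.h>

        /* hypothetical helper: turn an already-exported dma_buf into an fd */
        static int my_export_fd(struct dma_buf *dmabuf)
        {
                int fd = dma_buf_fd(dmabuf, O_CLOEXEC);

                /* a negative value means installing the anonymous file failed;
                 * otherwise hand this fd to the importing process */
                return fd;
        }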
@@ -157,7 +157,9 @@ to request use of buffer for allocation.
    "dma_buf->ops->" indirection from the users of this interface.

    In struct dma_buf_ops, unmap_dma_buf is defined as
-      void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *);
+      void (*unmap_dma_buf)(struct dma_buf_attachment *,
+                            struct sg_table *,
+                            enum dma_data_direction);

    unmap_dma_buf signifies the end-of-DMA for the attachment provided. Like
    map_dma_buf, this API also must be implemented by the exporter.
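For context, an exporter implementing the extended unmap_dma_buf signature above might look roughly like the following sketch; the callback name and the assumption that the matching map_dma_buf kmalloc'ed the sg_table are illustrative, not taken from the text:

        #include <linux/dma-buf.h>
        #include <linux/dma-mapping.h>
        #include <linux/scatterlist.h>
        #include <linux/slab.h>

        static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
                                     struct sg_table *sgt,
                                     enum dma_data_direction dir)
        {
                /* undo the mapping set up by the exporter's map_dma_buf */
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
                sg_free_table(sgt);
                kfree(sgt);
        }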
@@ -18,7 +18,7 @@ memory image to a dump file on the local disk, or across the network to
 a remote system.

 Kdump and kexec are currently supported on the x86, x86_64, ppc64, ia64,
-and s390x architectures.
+s390x and arm architectures.

 When the system kernel boots, it reserves a small section of memory for
 the dump-capture kernel. This ensures that ongoing Direct Memory Access
@@ -112,7 +112,7 @@ There are two possible methods of using Kdump.
 2) Or use the system kernel binary itself as dump-capture kernel and there is
    no need to build a separate dump-capture kernel. This is possible
    only with the architectures which support a relocatable kernel. As
-   of today, i386, x86_64, ppc64 and ia64 architectures support relocatable
+   of today, i386, x86_64, ppc64, ia64 and arm architectures support relocatable
    kernel.

 Building a relocatable kernel is advantageous from the point of view that
@@ -241,6 +241,13 @@ Dump-capture kernel config options (Arch Dependent, ia64)
   kernel will be aligned to 64Mb, so if the start address is not then
   any space below the alignment point will be wasted.

+Dump-capture kernel config options (Arch Dependent, arm)
+----------------------------------------------------------
+
+- To use a relocatable kernel,
+  Enable "AUTO_ZRELADDR" support under "Boot" options:
+
+    AUTO_ZRELADDR=y
+
 Extended crashkernel syntax
 ===========================
@@ -256,6 +263,10 @@ The syntax is:
     crashkernel=<range1>:<size1>[,<range2>:<size2>,...][@offset]
     range=start-[end]

+Please note, on arm, the offset is required.
+    crashkernel=<range1>:<size1>[,<range2>:<size2>,...]@offset
+    range=start-[end]
+
 'start' is inclusive and 'end' is exclusive.

 For example:
@@ -296,6 +307,12 @@ Boot into System Kernel
    on the memory consumption of the kdump system. In general this is not
    dependent on the memory size of the production system.

+   On arm, use "crashkernel=Y@X". Note that the start address of the kernel
+   will be aligned to 128MiB (0x08000000), so if the start address is not then
+   any space below the alignment point may be overwritten by the dump-capture kernel,
+   which means it is possible that the vmcore is not that precise as expected.
+
+
 Load the Dump-capture Kernel
 ============================
@@ -315,7 +332,8 @@ For ia64:
 	- Use vmlinux or vmlinuz.gz
 For s390x:
 	- Use image or bzImage
+For arm:
+	- Use zImage

 If you are using a uncompressed vmlinux image then use following command
 to load dump-capture kernel.
@@ -331,6 +349,15 @@ to load dump-capture kernel.
    --initrd=<initrd-for-dump-capture-kernel> \
    --append="root=<root-dev> <arch-specific-options>"

+If you are using a compressed zImage, then use following command
+to load dump-capture kernel.
+
+   kexec --type zImage -p <dump-capture-kernel-bzImage> \
+	--initrd=<initrd-for-dump-capture-kernel> \
+	--dtb=<dtb-for-dump-capture-kernel> \
+	--append="root=<root-dev> <arch-specific-options>"
+
+
 Please note, that --args-linux does not need to be specified for ia64.
 It is planned to make this a no-op on that architecture, but for now
 it should be omitted
@@ -347,6 +374,9 @@ For ppc64:
 For s390x:
 	"1 maxcpus=1 cgroup_disable=memory"

+For arm:
+	"1 maxcpus=1 reset_devices"
+
 Notes on loading the dump-capture kernel:

 * By default, the ELF headers are stored in ELF64 format to support
@@ -2,26 +2,26 @@ this_cpu operations
 -------------------

 this_cpu operations are a way of optimizing access to per cpu
-variables associated with the *currently* executing processor through
-the use of segment registers (or a dedicated register where the cpu
-permanently stored the beginning of the per cpu area for a specific
-processor).
+variables associated with the *currently* executing processor. This is
+done through the use of segment registers (or a dedicated register where
+the cpu permanently stored the beginning of the per cpu area for a
+specific processor).

-The this_cpu operations add a per cpu variable offset to the processor
-specific percpu base and encode that operation in the instruction
+this_cpu operations add a per cpu variable offset to the processor
+specific per cpu base and encode that operation in the instruction
 operating on the per cpu variable.

-This means there are no atomicity issues between the calculation of
+This means that there are no atomicity issues between the calculation of
 the offset and the operation on the data. Therefore it is not
-necessary to disable preempt or interrupts to ensure that the
+necessary to disable preemption or interrupts to ensure that the
 processor is not changed between the calculation of the address and
 the operation on the data.

 Read-modify-write operations are of particular interest. Frequently
 processors have special lower latency instructions that can operate
-without the typical synchronization overhead but still provide some
-sort of relaxed atomicity guarantee. The x86 for example can execute
-RMV (Read Modify Write) instructions like inc/dec/cmpxchg without the
+without the typical synchronization overhead, but still provide some
+sort of relaxed atomicity guarantees. The x86, for example, can execute
+RMW (Read Modify Write) instructions like inc/dec/cmpxchg without the
 lock prefix and the associated latency penalty.

 Access to the variable without the lock prefix is not synchronized but
@@ -30,6 +30,38 @@ data specific to the currently executing processor. Only the current
 processor should be accessing that variable and therefore there are no
 concurrency issues with other processors in the system.

+Please note that accesses by remote processors to a per cpu area are
+exceptional situations and may impact performance and/or correctness
+(remote write operations) of local RMW operations via this_cpu_*.
+
+The main use of the this_cpu operations has been to optimize counter
+operations.
+
+The following this_cpu() operations with implied preemption protection
+are defined. These operations can be used without worrying about
+preemption and interrupts.
+
+	this_cpu_add()
+	this_cpu_read(pcp)
+	this_cpu_write(pcp, val)
+	this_cpu_add(pcp, val)
+	this_cpu_and(pcp, val)
+	this_cpu_or(pcp, val)
+	this_cpu_add_return(pcp, val)
+	this_cpu_xchg(pcp, nval)
+	this_cpu_cmpxchg(pcp, oval, nval)
+	this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+	this_cpu_sub(pcp, val)
+	this_cpu_inc(pcp)
+	this_cpu_dec(pcp)
+	this_cpu_sub_return(pcp, val)
+	this_cpu_inc_return(pcp)
+	this_cpu_dec_return(pcp)
+
+
 Inner working of this_cpu operations
 ------------------------------------

 On x86 the fs: or the gs: segment registers contain the base of the
 per cpu area. It is then possible to simply use the segment override
 to relocate a per cpu relative address to the proper per cpu area for
@@ -48,22 +80,21 @@ results in a single instruction
 	mov ax, gs:[x]

 instead of a sequence of calculation of the address and then a fetch
-from that address which occurs with the percpu operations. Before
+from that address which occurs with the per cpu operations. Before
 this_cpu_ops such sequence also required preempt disable/enable to
 prevent the kernel from moving the thread to a different processor
 while the calculation is performed.

-The main use of the this_cpu operations has been to optimize counter
-operations.
+Consider the following this_cpu operation:

 	this_cpu_inc(x)

-results in the following single instruction (no lock prefix!)
+The above results in the following single instruction (no lock prefix!)

 	inc gs:[x]

 instead of the following operations required if there is no segment
-register.
+register:

 	int *y;
 	int cpu;
@@ -73,10 +104,10 @@ register.
 	(*y)++;
 	put_cpu();

-Note that these operations can only be used on percpu data that is
+Note that these operations can only be used on per cpu data that is
 reserved for a specific processor. Without disabling preemption in the
 surrounding code this_cpu_inc() will only guarantee that one of the
-percpu counters is correctly incremented. However, there is no
+per cpu counters is correctly incremented. However, there is no
 guarantee that the OS will not move the process directly before or
 after the this_cpu instruction is executed. In general this means that
 the value of the individual counters for each processor are
@@ -86,9 +117,9 @@ that is of interest.
 Per cpu variables are used for performance reasons. Bouncing cache
 lines can be avoided if multiple processors concurrently go through
 the same code paths. Since each processor has its own per cpu
-variables no concurrent cacheline updates take place. The price that
+variables no concurrent cache line updates take place. The price that
 has to be paid for this optimization is the need to add up the per cpu
-counters when the value of the counter is needed.
+counters when the value of a counter is needed.


 Special operations:
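To make the read-side cost described above concrete, here is a small sketch (the counter name is made up) where the hot path uses a this_cpu operation and a reader folds the per cpu instances into one total:

        #include <linux/cpumask.h>
        #include <linux/percpu.h>

        DEFINE_PER_CPU(unsigned long, pkt_count);

        static void count_packet(void)
        {
                /* cheap, preemption-safe increment of this CPU's instance */
                this_cpu_inc(pkt_count);
        }

        static unsigned long total_packets(void)
        {
                unsigned long sum = 0;
                int cpu;

                /* the price of the optimization: sum over every possible CPU */
                for_each_possible_cpu(cpu)
                        sum += per_cpu(pkt_count, cpu);
                return sum;
        }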
@@ -100,33 +131,39 @@ Takes the offset of a per cpu variable (&x !) and returns the address
 of the per cpu variable that belongs to the currently executing
 processor. this_cpu_ptr avoids multiple steps that the common
 get_cpu/put_cpu sequence requires. No processor number is
-available. Instead the offset of the local per cpu area is simply
-added to the percpu offset.
+available. Instead, the offset of the local per cpu area is simply
+added to the per cpu offset.
+
+Note that this operation is usually used in a code segment when
+preemption has been disabled. The pointer is then used to
+access local per cpu data in a critical section. When preemption
+is re-enabled this pointer is usually no longer useful since it may
+no longer point to per cpu data of the current processor.


 Per cpu variables and offsets
 -----------------------------

-Per cpu variables have *offsets* to the beginning of the percpu
+Per cpu variables have *offsets* to the beginning of the per cpu
 area. They do not have addresses although they look like that in the
 code. Offsets cannot be directly dereferenced. The offset must be
-added to a base pointer of a percpu area of a processor in order to
+added to a base pointer of a per cpu area of a processor in order to
 form a valid address.

 Therefore the use of x or &x outside of the context of per cpu
 operations is invalid and will generally be treated like a NULL
 pointer dereference.

-In the context of per cpu operations
-
 	DEFINE_PER_CPU(int, x);

-x is a per cpu variable. Most this_cpu operations take a cpu
-variable.
+In the context of per cpu operations the above implies that x is a per
+cpu variable. Most this_cpu operations take a cpu variable.

-&x is the *offset* a per cpu variable. this_cpu_ptr() takes
-the offset of a per cpu variable which makes this look a bit
-strange.
+	int __percpu *p = &x;
+
+&x and hence p is the *offset* of a per cpu variable. this_cpu_ptr()
+takes the offset of a per cpu variable which makes this look a bit
+strange.


 Operations on a field of a per cpu structure
@@ -152,7 +189,7 @@ If we have an offset to struct s:

 	struct s __percpu *ps = &p;

-	z = this_cpu_dec(ps->m);
+	this_cpu_dec(ps->m);

 	z = this_cpu_inc_return(ps->n);

@@ -172,29 +209,52 @@ if we do not make use of this_cpu ops later to manipulate fields:
 Variants of this_cpu ops
 -------------------------

-this_cpu ops are interrupt safe. Some architecture do not support
+this_cpu ops are interrupt safe. Some architectures do not support
 these per cpu local operations. In that case the operation must be
 replaced by code that disables interrupts, then does the operations
-that are guaranteed to be atomic and then reenable interrupts. Doing
+that are guaranteed to be atomic and then re-enable interrupts. Doing
 so is expensive. If there are other reasons why the scheduler cannot
 change the processor we are executing on then there is no reason to
-disable interrupts. For that purpose the __this_cpu operations are
-provided. For example.
+disable interrupts. For that purpose the following __this_cpu operations
+are provided.

-	__this_cpu_inc(x);
+These operations have no guarantee against concurrent interrupts or
+preemption. If a per cpu variable is not used in an interrupt context
+and the scheduler cannot preempt, then they are safe. If any interrupts
+still occur while an operation is in progress and if the interrupt too
+modifies the variable, then RMW actions can not be guaranteed to be
+safe.

-Will increment x and will not fallback to code that disables
+	__this_cpu_add()
+	__this_cpu_read(pcp)
+	__this_cpu_write(pcp, val)
+	__this_cpu_add(pcp, val)
+	__this_cpu_and(pcp, val)
+	__this_cpu_or(pcp, val)
+	__this_cpu_add_return(pcp, val)
+	__this_cpu_xchg(pcp, nval)
+	__this_cpu_cmpxchg(pcp, oval, nval)
+	__this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+	__this_cpu_sub(pcp, val)
+	__this_cpu_inc(pcp)
+	__this_cpu_dec(pcp)
+	__this_cpu_sub_return(pcp, val)
+	__this_cpu_inc_return(pcp)
+	__this_cpu_dec_return(pcp)
+
+
+Will increment x and will not fall-back to code that disables
 interrupts on platforms that cannot accomplish atomicity through
 address relocation and a Read-Modify-Write operation in the same
 instruction.



 &this_cpu_ptr(pp)->n vs this_cpu_ptr(&pp->n)
 --------------------------------------------

 The first operation takes the offset and forms an address and then
-adds the offset of the n field.
+adds the offset of the n field. This may result in two add
+instructions emitted by the compiler.

 The second one first adds the two offsets and then does the
 relocation. IMHO the second form looks cleaner and has an easier time
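As a short illustration of the cheaper variants listed above (the variable name is invented): if the only writer already runs with preemption disabled, for example from hard interrupt context, the __this_cpu form is sufficient:

        #include <linux/percpu.h>

        DEFINE_PER_CPU(unsigned long, irq_seen);

        /* called only from the interrupt handler, i.e. with preemption off
         * and with no other context on this CPU updating irq_seen */
        static void note_irq(void)
        {
                __this_cpu_inc(irq_seen);
        }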
@@ -202,4 +262,73 @@ with (). The second form also is consistent with the way
 this_cpu_read() and friends are used.


-Christoph Lameter, April 3rd, 2013
+Remote access to per cpu data
+------------------------------
+
+Per cpu data structures are designed to be used by one cpu exclusively.
+If you use the variables as intended, this_cpu_ops() are guaranteed to
+be "atomic" as no other CPU has access to these data structures.
+
+There are special cases where you might need to access per cpu data
+structures remotely. It is usually safe to do a remote read access
+and that is frequently done to summarize counters. Remote write access
+is something which could be problematic because this_cpu ops do not
+have lock semantics. A remote write may interfere with a this_cpu
+RMW operation.
+
+Remote write accesses to percpu data structures are highly discouraged
+unless absolutely necessary. Please consider using an IPI to wake up
+the remote CPU and perform the update to its per cpu area.
+
+To access per-cpu data structures remotely, typically the per_cpu_ptr()
+function is used:
+
+
+	DEFINE_PER_CPU(struct data, datap);
+
+	struct data *p = per_cpu_ptr(&datap, cpu);
+
+This makes it explicit that we are getting ready to access a percpu
+area remotely.
+
+You can also do the following to convert the datap offset to an address
+
+	struct data *p = this_cpu_ptr(&datap);
+
+but, passing of pointers calculated via this_cpu_ptr to other cpus is
+unusual and should be avoided.
+
+Remote accesses are typically only for reading the status of another cpu's
+per cpu data. Write accesses can cause unique problems due to the
+relaxed synchronization requirements for this_cpu operations.
+
+One example that illustrates some concerns with write operations is
+the following scenario that occurs because two per cpu variables
+share a cache-line but the relaxed synchronization is applied to
+only one process updating the cache-line.
+
+Consider the following example
+
+
+	struct test {
+		atomic_t a;
+		int b;
+	};
+
+	DEFINE_PER_CPU(struct test, onecacheline);
+
+There is some concern about what would happen if the field 'a' is updated
+remotely from one processor and the local processor would use this_cpu ops
+to update field b. Care should be taken that such simultaneous accesses to
+data within the same cache line are avoided. Also costly synchronization
+may be necessary. IPIs are generally recommended in such scenarios instead
+of a remote write to the per cpu area of another processor.
+
+Even in cases where the remote writes are rare, please bear in
+mind that a remote write will evict the cache line from the processor
+that most likely will access it. If the processor wakes up and finds a
+missing local cache line of a per cpu area, its performance and hence
+the wake up times will be affected.
+
+Christoph Lameter, August 4th, 2014
+Pranith Kumar, Aug 2nd, 2014
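Following the advice above to prefer an IPI over a remote write, a minimal sketch (the state variable and helper names are hypothetical) using smp_call_function_single() so that the owning CPU performs its own update:

        #include <linux/percpu.h>
        #include <linux/smp.h>

        DEFINE_PER_CPU(int, my_state);

        /* runs on the target CPU, so a plain local per cpu write is fine */
        static void set_my_state(void *info)
        {
                __this_cpu_write(my_state, *(int *)info);
        }

        static void update_state_on(int cpu, int new_state)
        {
                /* wait=1: return only after the remote CPU has done the write */
                smp_call_function_single(cpu, set_my_state, &new_state, 1);
        }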
MAINTAINERS (13 changed lines)
@@ -1279,8 +1279,13 @@ M: Heiko Stuebner <heiko@sntech.de>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L: linux-rockchip@lists.infradead.org
 S: Maintained
+F: arch/arm/boot/dts/rk3*
 F: arch/arm/mach-rockchip/
+F: drivers/clk/rockchip/
 F: drivers/i2c/busses/i2c-rk3x.c
+F: drivers/*/*rockchip*
+F: drivers/*/*/*rockchip*
+F: sound/soc/rockchip/

 ARM/SAMSUNG ARM ARCHITECTURES
 M: Ben Dooks <ben-linux@fluff.org>
@@ -9557,6 +9562,14 @@ S: Maintained
 F: Documentation/usb/ohci.txt
 F: drivers/usb/host/ohci*

+USB OVER IP DRIVER
+M: Valentina Manea <valentina.manea.m@gmail.com>
+M: Shuah Khan <shuah.kh@samsung.com>
+L: linux-usb@vger.kernel.org
+S: Maintained
+F: drivers/usb/usbip/
+F: tools/usb/usbip/
+
 USB PEGASUS DRIVER
 M: Petko Manolov <petkan@nucleusys.com>
 L: linux-usb@vger.kernel.org
Makefile (2 changed lines)
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Shuffling Zombie Juror

 # *DOCUMENTATION*
@ -500,10 +500,14 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
|
||||
#define outb_p outb
|
||||
#define outw_p outw
|
||||
#define outl_p outl
|
||||
#define readb_relaxed(addr) __raw_readb(addr)
|
||||
#define readw_relaxed(addr) __raw_readw(addr)
|
||||
#define readl_relaxed(addr) __raw_readl(addr)
|
||||
#define readq_relaxed(addr) __raw_readq(addr)
|
||||
#define readb_relaxed(addr) __raw_readb(addr)
|
||||
#define readw_relaxed(addr) __raw_readw(addr)
|
||||
#define readl_relaxed(addr) __raw_readl(addr)
|
||||
#define readq_relaxed(addr) __raw_readq(addr)
|
||||
#define writeb_relaxed(b, addr) __raw_writeb(b, addr)
|
||||
#define writew_relaxed(b, addr) __raw_writew(b, addr)
|
||||
#define writel_relaxed(b, addr) __raw_writel(b, addr)
|
||||
#define writeq_relaxed(b, addr) __raw_writeq(b, addr)
|
||||
|
||||
#define mmiowb()
|
||||
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
#include <uapi/asm/unistd.h>
|
||||
|
||||
#define NR_SYSCALLS 508
|
||||
#define NR_SYSCALLS 511
|
||||
|
||||
#define __ARCH_WANT_OLD_READDIR
|
||||
#define __ARCH_WANT_STAT64
|
||||
|
@ -469,5 +469,8 @@
|
||||
#define __NR_process_vm_writev 505
|
||||
#define __NR_kcmp 506
|
||||
#define __NR_finit_module 507
|
||||
#define __NR_sched_setattr 508
|
||||
#define __NR_sched_getattr 509
|
||||
#define __NR_renameat2 510
|
||||
|
||||
#endif /* _UAPI_ALPHA_UNISTD_H */
|
||||
|
@ -526,6 +526,9 @@ sys_call_table:
|
||||
.quad sys_process_vm_writev /* 505 */
|
||||
.quad sys_kcmp
|
||||
.quad sys_finit_module
|
||||
.quad sys_sched_setattr
|
||||
.quad sys_sched_getattr
|
||||
.quad sys_renameat2 /* 510 */
|
||||
|
||||
.size sys_call_table, . - sys_call_table
|
||||
.type sys_call_table, @object
|
||||
|
@ -581,6 +581,7 @@ void flush_icache_range(unsigned long kstart, unsigned long kend)
|
||||
tot_sz -= sz;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(flush_icache_range);
|
||||
|
||||
/*
|
||||
* General purpose helper to make I and D cache lines consistent.
|
||||
|
@ -1983,8 +1983,6 @@ config XIP_PHYS_ADDR
|
||||
config KEXEC
|
||||
bool "Kexec system call (EXPERIMENTAL)"
|
||||
depends on (!SMP || PM_SLEEP_SMP)
|
||||
select CRYPTO
|
||||
select CRYPTO_SHA256
|
||||
help
|
||||
kexec is a system call that implements the ability to shutdown your
|
||||
current kernel, and to start another kernel. It is like a reboot
|
||||
|
@ -245,7 +245,7 @@
|
||||
gpio-controller;
|
||||
#gpio-cells = <2>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <1>;
|
||||
#interrupt-cells = <2>;
|
||||
};
|
||||
|
||||
gpio2: gpio@48055000 {
|
||||
@ -256,7 +256,7 @@
|
||||
gpio-controller;
|
||||
#gpio-cells = <2>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <1>;
|
||||
#interrupt-cells = <2>;
|
||||
};
|
||||
|
||||
gpio3: gpio@48057000 {
|
||||
@ -267,7 +267,7 @@
|
||||
gpio-controller;
|
||||
#gpio-cells = <2>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <1>;
|
||||
#interrupt-cells = <2>;
|
||||
};
|
||||
|
||||
gpio4: gpio@48059000 {
|
||||
@ -278,7 +278,7 @@
|
||||
gpio-controller;
|
||||
#gpio-cells = <2>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <1>;
|
||||
#interrupt-cells = <2>;
|
||||
};
|
||||
|
||||
gpio5: gpio@4805b000 {
|
||||
@ -289,7 +289,7 @@
|
||||
gpio-controller;
|
||||
#gpio-cells = <2>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <1>;
|
||||
#interrupt-cells = <2>;
|
||||
};
|
||||
|
||||
gpio6: gpio@4805d000 {
|
||||
@ -300,7 +300,7 @@
|
||||
gpio-controller;
|
||||
#gpio-cells = <2>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <1>;
|
||||
#interrupt-cells = <2>;
|
||||
};
|
||||
|
||||
gpio7: gpio@48051000 {
|
||||
@ -311,7 +311,7 @@
|
||||
gpio-controller;
|
||||
#gpio-cells = <2>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <1>;
|
||||
#interrupt-cells = <2>;
|
||||
};
|
||||
|
||||
gpio8: gpio@48053000 {
|
||||
@ -322,7 +322,7 @@
|
||||
gpio-controller;
|
||||
#gpio-cells = <2>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <1>;
|
||||
#interrupt-cells = <2>;
|
||||
};
|
||||
|
||||
uart1: serial@4806a000 {
|
||||
|
@ -28,6 +28,12 @@
|
||||
MX53_PAD_CSI0_DAT9__I2C1_SCL 0x400001ec
|
||||
>;
|
||||
};
|
||||
|
||||
pinctrl_pmic: pmicgrp {
|
||||
fsl,pins = <
|
||||
MX53_PAD_CSI0_DAT5__GPIO5_23 0x1e4 /* IRQ */
|
||||
>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
@ -38,6 +44,8 @@
|
||||
|
||||
pmic: mc34708@8 {
|
||||
compatible = "fsl,mc34708";
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_pmic>;
|
||||
reg = <0x08>;
|
||||
interrupt-parent = <&gpio5>;
|
||||
interrupts = <23 0x8>;
|
||||
|
@ -58,7 +58,7 @@
|
||||
|
||||
sound-spdif {
|
||||
compatible = "fsl,imx-audio-spdif";
|
||||
model = "imx-spdif";
|
||||
model = "On-board SPDIF";
|
||||
/* IMX6 doesn't implement this yet */
|
||||
spdif-controller = <&spdif>;
|
||||
spdif-out;
|
||||
@ -181,11 +181,13 @@
|
||||
};
|
||||
|
||||
&usbh1 {
|
||||
disable-over-current;
|
||||
vbus-supply = <®_usbh1_vbus>;
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
&usbotg {
|
||||
disable-over-current;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_hummingboard_usbotg_id>;
|
||||
vbus-supply = <®_usbotg_vbus>;
|
||||
|
@ -61,7 +61,7 @@
|
||||
|
||||
sound-spdif {
|
||||
compatible = "fsl,imx-audio-spdif";
|
||||
model = "imx-spdif";
|
||||
model = "Integrated SPDIF";
|
||||
/* IMX6 doesn't implement this yet */
|
||||
spdif-controller = <&spdif>;
|
||||
spdif-out;
|
||||
@ -130,16 +130,23 @@
|
||||
fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
|
||||
};
|
||||
|
||||
pinctrl_cubox_i_usbh1: cubox-i-usbh1 {
|
||||
fsl,pins = <MX6QDL_PAD_GPIO_3__USB_H1_OC 0x1b0b0>;
|
||||
};
|
||||
|
||||
pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus {
|
||||
fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x4001b0b0>;
|
||||
};
|
||||
|
||||
pinctrl_cubox_i_usbotg_id: cubox-i-usbotg-id {
|
||||
pinctrl_cubox_i_usbotg: cubox-i-usbotg {
|
||||
/*
|
||||
* The Cubox-i pulls this low, but as it's pointless
|
||||
* The Cubox-i pulls ID low, but as it's pointless
|
||||
* leaving it as a pull-up, even if it is just 10uA.
|
||||
*/
|
||||
fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
|
||||
fsl,pins = <
|
||||
MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059
|
||||
MX6QDL_PAD_KEY_COL4__USB_OTG_OC 0x1b0b0
|
||||
>;
|
||||
};
|
||||
|
||||
pinctrl_cubox_i_usbotg_vbus: cubox-i-usbotg-vbus {
|
||||
@ -173,13 +180,15 @@
|
||||
};
|
||||
|
||||
&usbh1 {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_cubox_i_usbh1>;
|
||||
vbus-supply = <®_usbh1_vbus>;
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
&usbotg {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_cubox_i_usbotg_id>;
|
||||
pinctrl-0 = <&pinctrl_cubox_i_usbotg>;
|
||||
vbus-supply = <®_usbotg_vbus>;
|
||||
status = "okay";
|
||||
};
|
||||
|
@ -17,7 +17,7 @@
|
||||
enet {
|
||||
pinctrl_microsom_enet_ar8035: microsom-enet-ar8035 {
|
||||
fsl,pins = <
|
||||
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
|
||||
MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b8b0
|
||||
MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
|
||||
/* AR8035 reset */
|
||||
MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x130b0
|
||||
|
@ -292,6 +292,7 @@
|
||||
&uart3 {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&uart3_pins>;
|
||||
interrupts-extended = <&intc 74 &omap3_pmx_core OMAP3_UART3_RX>;
|
||||
};
|
||||
|
||||
&gpio1 {
|
||||
|
@ -353,7 +353,7 @@
|
||||
};
|
||||
|
||||
twl_power: power {
|
||||
compatible = "ti,twl4030-power-n900";
|
||||
compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off";
|
||||
ti,use_poweroff;
|
||||
};
|
||||
};
|
||||
|
@ -107,7 +107,7 @@
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
reg = <1 0 0x08000000>;
|
||||
ti,nand-ecc-opt = "ham1";
|
||||
ti,nand-ecc-opt = "sw";
|
||||
nand-bus-width = <8>;
|
||||
gpmc,cs-on-ns = <0>;
|
||||
gpmc,cs-rd-off-ns = <36>;
|
||||
|
@ -367,10 +367,12 @@
|
||||
|
||||
l3_iclk_div: l3_iclk_div {
|
||||
#clock-cells = <0>;
|
||||
compatible = "fixed-factor-clock";
|
||||
compatible = "ti,divider-clock";
|
||||
ti,max-div = <2>;
|
||||
ti,bit-shift = <4>;
|
||||
reg = <0x100>;
|
||||
clocks = <&dpll_core_h12x2_ck>;
|
||||
clock-mult = <1>;
|
||||
clock-div = <1>;
|
||||
ti,index-power-of-two;
|
||||
};
|
||||
|
||||
gpu_l3_iclk: gpu_l3_iclk {
|
||||
@ -383,10 +385,12 @@
|
||||
|
||||
l4_root_clk_div: l4_root_clk_div {
|
||||
#clock-cells = <0>;
|
||||
compatible = "fixed-factor-clock";
|
||||
compatible = "ti,divider-clock";
|
||||
ti,max-div = <2>;
|
||||
ti,bit-shift = <8>;
|
||||
reg = <0x100>;
|
||||
clocks = <&l3_iclk_div>;
|
||||
clock-mult = <1>;
|
||||
clock-div = <1>;
|
||||
ti,index-power-of-two;
|
||||
};
|
||||
|
||||
slimbus1_slimbus_clk: slimbus1_slimbus_clk {
|
||||
|
@ -83,10 +83,6 @@
|
||||
regulator-always-on;
|
||||
};
|
||||
|
||||
clk32kg: regulator-clk32kg {
|
||||
compatible = "ti,twl6030-clk32kg";
|
||||
};
|
||||
|
||||
twl_usb_comparator: usb-comparator {
|
||||
compatible = "ti,twl6030-usb";
|
||||
interrupts = <4>, <10>;
|
||||
|
@ -472,7 +472,6 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
|
||||
"mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
|
||||
"isb \n\t" \
|
||||
"bl v7_flush_dcache_"__stringify(level)" \n\t" \
|
||||
"clrex \n\t" \
|
||||
"mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \
|
||||
"bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
|
||||
"mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
|
||||
|
@ -74,6 +74,7 @@
|
||||
#define ARM_CPU_PART_CORTEX_A12 0x4100c0d0
|
||||
#define ARM_CPU_PART_CORTEX_A17 0x4100c0e0
|
||||
#define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
|
||||
#define ARM_CPU_PART_MASK 0xff00fff0
|
||||
|
||||
#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
|
||||
#define ARM_CPU_XSCALE_ARCH_V1 0x2000
|
||||
@ -179,7 +180,7 @@ static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
|
||||
*/
|
||||
static inline unsigned int __attribute_const__ read_cpuid_part(void)
|
||||
{
|
||||
return read_cpuid_id() & 0xff00fff0;
|
||||
return read_cpuid_id() & ARM_CPU_PART_MASK;
|
||||
}
|
||||
|
||||
static inline unsigned int __attribute_const__ __deprecated read_cpuid_part_number(void)
|
||||
|
@ -50,6 +50,7 @@ typedef struct user_fp elf_fpregset_t;
|
||||
#define R_ARM_ABS32 2
|
||||
#define R_ARM_CALL 28
|
||||
#define R_ARM_JUMP24 29
|
||||
#define R_ARM_TARGET1 38
|
||||
#define R_ARM_V4BX 40
|
||||
#define R_ARM_PREL31 42
|
||||
#define R_ARM_MOVW_ABS_NC 43
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/err.h>
|
||||
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/cputype.h>
|
||||
|
||||
/*
|
||||
@ -25,6 +26,20 @@ static inline bool is_smp(void)
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* smp_cpuid_part() - return part id for a given cpu
|
||||
* @cpu: logical cpu id.
|
||||
*
|
||||
* Return: part id of logical cpu passed as argument.
|
||||
*/
|
||||
static inline unsigned int smp_cpuid_part(int cpu)
|
||||
{
|
||||
struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpu);
|
||||
|
||||
return is_smp() ? cpu_info->cpuid & ARM_CPU_PART_MASK :
|
||||
read_cpuid_part();
|
||||
}
|
||||
|
||||
/* all SMP configurations have the extended CPUID registers */
|
||||
#ifndef CONFIG_MMU
|
||||
#define tlb_ops_need_broadcast() 0
|
||||
|
@ -208,26 +208,21 @@
|
||||
#endif
|
||||
.endif
|
||||
msr spsr_cxsf, \rpsr
|
||||
#if defined(CONFIG_CPU_V6)
|
||||
ldr r0, [sp]
|
||||
strex r1, r2, [sp] @ clear the exclusive monitor
|
||||
ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr
|
||||
#elif defined(CONFIG_CPU_32v6K)
|
||||
clrex @ clear the exclusive monitor
|
||||
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
|
||||
#else
|
||||
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
|
||||
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
|
||||
@ We must avoid clrex due to Cortex-A15 erratum #830321
|
||||
sub r0, sp, #4 @ uninhabited address
|
||||
strex r1, r2, [r0] @ clear the exclusive monitor
|
||||
#endif
|
||||
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
|
||||
.endm
|
||||
|
||||
.macro restore_user_regs, fast = 0, offset = 0
|
||||
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
|
||||
ldr lr, [sp, #\offset + S_PC]! @ get pc
|
||||
msr spsr_cxsf, r1 @ save in spsr_svc
|
||||
#if defined(CONFIG_CPU_V6)
|
||||
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
|
||||
@ We must avoid clrex due to Cortex-A15 erratum #830321
|
||||
strex r1, r2, [sp] @ clear the exclusive monitor
|
||||
#elif defined(CONFIG_CPU_32v6K)
|
||||
clrex @ clear the exclusive monitor
|
||||
#endif
|
||||
.if \fast
|
||||
ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
|
||||
@ -261,7 +256,10 @@
|
||||
.endif
|
||||
ldr lr, [sp, #S_SP] @ top of the stack
|
||||
ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
|
||||
clrex @ clear the exclusive monitor
|
||||
|
||||
@ We must avoid clrex due to Cortex-A15 erratum #830321
|
||||
strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
|
||||
|
||||
stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
|
||||
ldmia sp, {r0 - r12}
|
||||
mov sp, lr
|
||||
@ -282,13 +280,16 @@
|
||||
.endm
|
||||
#else /* ifdef CONFIG_CPU_V7M */
|
||||
.macro restore_user_regs, fast = 0, offset = 0
|
||||
clrex @ clear the exclusive monitor
|
||||
mov r2, sp
|
||||
load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr
|
||||
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
|
||||
ldr lr, [sp, #\offset + S_PC] @ get pc
|
||||
add sp, sp, #\offset + S_SP
|
||||
msr spsr_cxsf, r1 @ save in spsr_svc
|
||||
|
||||
@ We must avoid clrex due to Cortex-A15 erratum #830321
|
||||
strex r1, r2, [sp] @ clear the exclusive monitor
|
||||
|
||||
.if \fast
|
||||
ldmdb sp, {r1 - r12} @ get calling r1 - r12
|
||||
.else
|
||||
|
@ -91,6 +91,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
|
||||
break;
|
||||
|
||||
case R_ARM_ABS32:
|
||||
case R_ARM_TARGET1:
|
||||
*(u32 *)loc += sym->st_value;
|
||||
break;
|
||||
|
||||
|
@ -36,5 +36,4 @@ obj-$(CONFIG_ARCH_BCM_5301X) += bcm_5301x.o
|
||||
|
||||
ifeq ($(CONFIG_ARCH_BRCMSTB),y)
|
||||
obj-y += brcmstb.o
|
||||
obj-$(CONFIG_SMP) += headsmp-brcmstb.o platsmp-brcmstb.o
|
||||
endif
|
||||
|
@ -1,19 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2013-2014 Broadcom Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation version 2.
|
||||
*
|
||||
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
|
||||
* kind, whether express or implied; without even the implied warranty
|
||||
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#ifndef __BRCMSTB_H__
|
||||
#define __BRCMSTB_H__
|
||||
|
||||
void brcmstb_secondary_startup(void);
|
||||
|
||||
#endif /* __BRCMSTB_H__ */
|
@ -1,33 +0,0 @@
|
||||
/*
|
||||
* SMP boot code for secondary CPUs
|
||||
* Based on arch/arm/mach-tegra/headsmp.S
|
||||
*
|
||||
* Copyright (C) 2010 NVIDIA, Inc.
|
||||
* Copyright (C) 2013-2014 Broadcom Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation version 2.
|
||||
*
|
||||
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
|
||||
* kind, whether express or implied; without even the implied warranty
|
||||
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <asm/assembler.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
.section ".text.head", "ax"
|
||||
|
||||
ENTRY(brcmstb_secondary_startup)
|
||||
/*
|
||||
* Ensure CPU is in a sane state by disabling all IRQs and switching
|
||||
* into SVC mode.
|
||||
*/
|
||||
setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r0
|
||||
|
||||
bl v7_invalidate_l1
|
||||
b secondary_startup
|
||||
ENDPROC(brcmstb_secondary_startup)
|
@ -1,363 +0,0 @@
|
||||
/*
|
||||
* Broadcom STB CPU SMP and hotplug support for ARM
|
||||
*
|
||||
* Copyright (C) 2013-2014 Broadcom Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation version 2.
|
||||
*
|
||||
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
|
||||
* kind, whether express or implied; without even the implied warranty
|
||||
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/printk.h>
|
||||
#include <linux/regmap.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/mfd/syscon.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/cp15.h>
|
||||
#include <asm/mach-types.h>
|
||||
#include <asm/smp_plat.h>
|
||||
|
||||
#include "brcmstb.h"
|
||||
|
||||
enum {
|
||||
ZONE_MAN_CLKEN_MASK = BIT(0),
|
||||
ZONE_MAN_RESET_CNTL_MASK = BIT(1),
|
||||
ZONE_MAN_MEM_PWR_MASK = BIT(4),
|
||||
ZONE_RESERVED_1_MASK = BIT(5),
|
||||
ZONE_MAN_ISO_CNTL_MASK = BIT(6),
|
||||
ZONE_MANUAL_CONTROL_MASK = BIT(7),
|
||||
ZONE_PWR_DN_REQ_MASK = BIT(9),
|
||||
ZONE_PWR_UP_REQ_MASK = BIT(10),
|
||||
ZONE_BLK_RST_ASSERT_MASK = BIT(12),
|
||||
ZONE_PWR_OFF_STATE_MASK = BIT(25),
|
||||
ZONE_PWR_ON_STATE_MASK = BIT(26),
|
||||
ZONE_DPG_PWR_STATE_MASK = BIT(28),
|
||||
ZONE_MEM_PWR_STATE_MASK = BIT(29),
|
||||
ZONE_RESET_STATE_MASK = BIT(31),
|
||||
CPU0_PWR_ZONE_CTRL_REG = 1,
|
||||
CPU_RESET_CONFIG_REG = 2,
|
||||
};
|
||||
|
||||
static void __iomem *cpubiuctrl_block;
|
||||
static void __iomem *hif_cont_block;
|
||||
static u32 cpu0_pwr_zone_ctrl_reg;
|
||||
static u32 cpu_rst_cfg_reg;
|
||||
static u32 hif_cont_reg;
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
static DEFINE_PER_CPU_ALIGNED(int, per_cpu_sw_state);
|
||||
|
||||
static int per_cpu_sw_state_rd(u32 cpu)
|
||||
{
|
||||
sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
|
||||
return per_cpu(per_cpu_sw_state, cpu);
|
||||
}
|
||||
|
||||
static void per_cpu_sw_state_wr(u32 cpu, int val)
|
||||
{
|
||||
per_cpu(per_cpu_sw_state, cpu) = val;
|
||||
dmb();
|
||||
sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
|
||||
dsb_sev();
|
||||
}
|
||||
#else
|
||||
static inline void per_cpu_sw_state_wr(u32 cpu, int val) { }
|
||||
#endif
|
||||
|
||||
static void __iomem *pwr_ctrl_get_base(u32 cpu)
|
||||
{
|
||||
void __iomem *base = cpubiuctrl_block + cpu0_pwr_zone_ctrl_reg;
|
||||
base += (cpu_logical_map(cpu) * 4);
|
||||
return base;
|
||||
}
|
||||
|
||||
static u32 pwr_ctrl_rd(u32 cpu)
|
||||
{
|
||||
void __iomem *base = pwr_ctrl_get_base(cpu);
|
||||
return readl_relaxed(base);
|
||||
}
|
||||
|
||||
static void pwr_ctrl_wr(u32 cpu, u32 val)
|
||||
{
|
||||
void __iomem *base = pwr_ctrl_get_base(cpu);
|
||||
writel(val, base);
|
||||
}
|
||||
|
||||
static void cpu_rst_cfg_set(u32 cpu, int set)
|
||||
{
|
||||
u32 val;
|
||||
val = readl_relaxed(cpubiuctrl_block + cpu_rst_cfg_reg);
|
||||
if (set)
|
||||
val |= BIT(cpu_logical_map(cpu));
|
||||
else
|
||||
val &= ~BIT(cpu_logical_map(cpu));
|
||||
writel_relaxed(val, cpubiuctrl_block + cpu_rst_cfg_reg);
|
||||
}
|
||||
|
||||
static void cpu_set_boot_addr(u32 cpu, unsigned long boot_addr)
|
||||
{
|
||||
const int reg_ofs = cpu_logical_map(cpu) * 8;
|
||||
writel_relaxed(0, hif_cont_block + hif_cont_reg + reg_ofs);
|
||||
writel_relaxed(boot_addr, hif_cont_block + hif_cont_reg + 4 + reg_ofs);
|
||||
}
|
||||
|
||||
static void brcmstb_cpu_boot(u32 cpu)
|
||||
{
|
||||
pr_info("SMP: Booting CPU%d...\n", cpu);
|
||||
|
||||
/*
|
||||
* set the reset vector to point to the secondary_startup
|
||||
* routine
|
||||
*/
|
||||
cpu_set_boot_addr(cpu, virt_to_phys(brcmstb_secondary_startup));
|
||||
|
||||
/* unhalt the cpu */
|
||||
cpu_rst_cfg_set(cpu, 0);
|
||||
}
|
||||
|
||||
static void brcmstb_cpu_power_on(u32 cpu)
|
||||
{
|
||||
/*
|
||||
* The secondary cores power was cut, so we must go through
|
||||
* power-on initialization.
|
||||
*/
|
||||
u32 tmp;
|
||||
|
||||
pr_info("SMP: Powering up CPU%d...\n", cpu);
|
||||
|
||||
/* Request zone power up */
|
||||
pwr_ctrl_wr(cpu, ZONE_PWR_UP_REQ_MASK);
|
||||
|
||||
/* Wait for the power up FSM to complete */
|
||||
do {
|
||||
tmp = pwr_ctrl_rd(cpu);
|
||||
} while (!(tmp & ZONE_PWR_ON_STATE_MASK));
|
||||
|
||||
per_cpu_sw_state_wr(cpu, 1);
|
||||
}
|
||||
|
||||
static int brcmstb_cpu_get_power_state(u32 cpu)
|
||||
{
|
||||
int tmp = pwr_ctrl_rd(cpu);
|
||||
return (tmp & ZONE_RESET_STATE_MASK) ? 0 : 1;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
|
||||
static void brcmstb_cpu_die(u32 cpu)
|
||||
{
|
||||
v7_exit_coherency_flush(all);
|
||||
|
||||
/* Prevent all interrupts from reaching this CPU. */
|
||||
arch_local_irq_disable();
|
||||
|
||||
/*
|
||||
* Final full barrier to ensure everything before this instruction has
|
||||
* quiesced.
|
||||
*/
|
||||
isb();
|
||||
dsb();
|
||||
|
||||
per_cpu_sw_state_wr(cpu, 0);
|
||||
|
||||
/* Sit and wait to die */
|
||||
wfi();
|
||||
|
||||
/* We should never get here... */
|
||||
panic("Spurious interrupt on CPU %d received!\n", cpu);
|
||||
}
|
||||
|
||||
static int brcmstb_cpu_kill(u32 cpu)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
pr_info("SMP: Powering down CPU%d...\n", cpu);
|
||||
|
||||
while (per_cpu_sw_state_rd(cpu))
|
||||
;
|
||||
|
||||
/* Program zone reset */
|
||||
pwr_ctrl_wr(cpu, ZONE_RESET_STATE_MASK | ZONE_BLK_RST_ASSERT_MASK |
|
||||
ZONE_PWR_DN_REQ_MASK);
|
||||
|
||||
/* Verify zone reset */
|
||||
tmp = pwr_ctrl_rd(cpu);
|
||||
if (!(tmp & ZONE_RESET_STATE_MASK))
|
||||
pr_err("%s: Zone reset bit for CPU %d not asserted!\n",
|
||||
__func__, cpu);
|
||||
|
||||
/* Wait for power down */
|
||||
do {
|
||||
tmp = pwr_ctrl_rd(cpu);
|
||||
} while (!(tmp & ZONE_PWR_OFF_STATE_MASK));
|
||||
|
||||
/* Settle-time from Broadcom-internal DVT reference code */
|
||||
udelay(7);
|
||||
|
||||
/* Assert reset on the CPU */
|
||||
cpu_rst_cfg_set(cpu, 1);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_HOTPLUG_CPU */
|
||||
|
||||
static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
|
||||
{
|
||||
int rc = 0;
|
||||
char *name;
|
||||
struct device_node *syscon_np = NULL;
|
||||
|
||||
name = "syscon-cpu";
|
||||
|
||||
syscon_np = of_parse_phandle(np, name, 0);
|
||||
if (!syscon_np) {
|
||||
pr_err("can't find phandle %s\n", name);
|
||||
rc = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
cpubiuctrl_block = of_iomap(syscon_np, 0);
|
||||
if (!cpubiuctrl_block) {
|
||||
pr_err("iomap failed for cpubiuctrl_block\n");
|
||||
rc = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
rc = of_property_read_u32_index(np, name, CPU0_PWR_ZONE_CTRL_REG,
|
||||
&cpu0_pwr_zone_ctrl_reg);
|
||||
if (rc) {
|
||||
pr_err("failed to read 1st entry from %s property (%d)\n", name,
|
||||
rc);
|
||||
rc = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
rc = of_property_read_u32_index(np, name, CPU_RESET_CONFIG_REG,
|
||||
&cpu_rst_cfg_reg);
|
||||
if (rc) {
|
||||
pr_err("failed to read 2nd entry from %s property (%d)\n", name,
|
||||
rc);
|
||||
rc = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
cleanup:
|
||||
if (syscon_np)
|
||||
of_node_put(syscon_np);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int __init setup_hifcont_regs(struct device_node *np)
|
||||
{
|
||||
int rc = 0;
|
||||
char *name;
|
||||
struct device_node *syscon_np = NULL;
|
||||
|
||||
name = "syscon-cont";
|
||||
|
||||
syscon_np = of_parse_phandle(np, name, 0);
|
||||
if (!syscon_np) {
|
||||
pr_err("can't find phandle %s\n", name);
|
||||
rc = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
hif_cont_block = of_iomap(syscon_np, 0);
|
||||
if (!hif_cont_block) {
|
||||
pr_err("iomap failed for hif_cont_block\n");
|
||||
rc = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* offset is at top of hif_cont_block */
|
||||
hif_cont_reg = 0;
|
||||
|
||||
cleanup:
|
||||
if (syscon_np)
|
||||
of_node_put(syscon_np);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void __init brcmstb_cpu_ctrl_setup(unsigned int max_cpus)
|
||||
{
|
||||
int rc;
|
||||
struct device_node *np;
|
||||
char *name;
|
||||
|
||||
name = "brcm,brcmstb-smpboot";
|
||||
np = of_find_compatible_node(NULL, NULL, name);
|
||||
if (!np) {
|
||||
pr_err("can't find compatible node %s\n", name);
|
||||
return;
|
||||
}
|
||||
|
||||
rc = setup_hifcpubiuctrl_regs(np);
|
||||
if (rc)
|
||||
return;
|
||||
|
||||
rc = setup_hifcont_regs(np);
|
||||
if (rc)
|
||||
return;
|
||||
}
|
||||
|
||||
static DEFINE_SPINLOCK(boot_lock);
|
||||
|
||||
static void brcmstb_secondary_init(unsigned int cpu)
|
||||
{
|
||||
/*
|
||||
* Synchronise with the boot thread.
|
||||
*/
|
||||
spin_lock(&boot_lock);
|
||||
spin_unlock(&boot_lock);
|
||||
}
|
||||
|
||||
static int brcmstb_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
{
|
||||
/*
|
||||
* set synchronisation state between this boot processor
|
||||
* and the secondary one
|
||||
*/
|
||||
spin_lock(&boot_lock);
|
||||
|
||||
/* Bring up power to the core if necessary */
|
||||
if (brcmstb_cpu_get_power_state(cpu) == 0)
|
||||
brcmstb_cpu_power_on(cpu);
|
||||
|
||||
brcmstb_cpu_boot(cpu);
|
||||
|
||||
/*
|
||||
* now the secondary core is starting up let it run its
|
||||
* calibrations, then wait for it to finish
|
||||
*/
|
||||
spin_unlock(&boot_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct smp_operations brcmstb_smp_ops __initdata = {
|
||||
.smp_prepare_cpus = brcmstb_cpu_ctrl_setup,
|
||||
.smp_secondary_init = brcmstb_secondary_init,
|
||||
.smp_boot_secondary = brcmstb_boot_secondary,
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
.cpu_kill = brcmstb_cpu_kill,
.cpu_die = brcmstb_cpu_die,
#endif
};

CPU_METHOD_OF_DECLARE(brcmstb_smp, "brcm,brahma-b15", &brcmstb_smp_ops);

@@ -43,7 +43,6 @@
"mcr p15, 0, r0, c1, c0, 0 @ set SCTLR\n\t" \
"isb\n\t"\
"bl v7_flush_dcache_"__stringify(level)"\n\t" \
"clrex\n\t"\
"mrc p15, 0, r0, c1, c0, 1 @ get ACTLR\n\t" \
"bic r0, r0, #(1 << 6) @ disable local coherency\n\t" \
/* Dummy Load of a device register to avoid Erratum 799270 */ \

@@ -142,7 +142,7 @@ __init board_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs,
board_nand_data.nr_parts = nr_parts;
board_nand_data.devsize = nand_type;

board_nand_data.ecc_opt = OMAP_ECC_HAM1_CODE_HW;
board_nand_data.ecc_opt = OMAP_ECC_HAM1_CODE_SW;
gpmc_nand_init(&board_nand_data, gpmc_t);
}
#endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */

@@ -49,7 +49,8 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
return 0;

/* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
if (ecc_opt == OMAP_ECC_HAM1_CODE_HW)
if (ecc_opt == OMAP_ECC_HAM1_CODE_HW ||
    ecc_opt == OMAP_ECC_HAM1_CODE_SW)
return 1;
else
return 0;

@@ -1403,8 +1403,11 @@ static int gpmc_probe_nand_child(struct platform_device *pdev,
pr_err("%s: ti,nand-ecc-opt not found\n", __func__);
return -ENODEV;
}
if (!strcmp(s, "ham1") || !strcmp(s, "sw") ||
    !strcmp(s, "hw") || !strcmp(s, "hw-romcode"))

if (!strcmp(s, "sw"))
gpmc_nand_data->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
else if (!strcmp(s, "ham1") ||
         !strcmp(s, "hw") || !strcmp(s, "hw-romcode"))
gpmc_nand_data->ecc_opt =
        OMAP_ECC_HAM1_CODE_HW;
else if (!strcmp(s, "bch4"))

@@ -663,7 +663,7 @@ void __init dra7xxx_check_revision(void)

default:
/* Unknown default to latest silicon rev as default*/
pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%d)\n",
pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%x)\n",
        __func__, idcode, hawkeye, rev);
omap_revision = DRA752_REV_ES1_1;
}

@@ -56,7 +56,7 @@ static void _add_clkdev(struct omap_device *od, const char *clk_alias,

r = clk_get_sys(dev_name(&od->pdev->dev), clk_alias);
if (!IS_ERR(r)) {
dev_warn(&od->pdev->dev,
dev_dbg(&od->pdev->dev,
         "alias %s already exists\n", clk_alias);
clk_put(r);
return;

@@ -2185,6 +2185,8 @@ static int _enable(struct omap_hwmod *oh)
oh->mux->pads_dynamic))) {
omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
_reconfigure_io_chain();
} else if (oh->flags & HWMOD_FORCE_MSTANDBY) {
_reconfigure_io_chain();
}

_add_initiator_dep(oh, mpu_oh);

@@ -2291,6 +2293,8 @@ static int _idle(struct omap_hwmod *oh)
if (oh->mux && oh->mux->pads_dynamic) {
omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE);
_reconfigure_io_chain();
} else if (oh->flags & HWMOD_FORCE_MSTANDBY) {
_reconfigure_io_chain();
}

oh->_state = _HWMOD_STATE_IDLE;

@@ -3345,6 +3349,9 @@ int __init omap_hwmod_register_links(struct omap_hwmod_ocp_if **ois)
if (!ois)
return 0;

if (ois[0] == NULL) /* Empty list */
return 0;

if (!linkspace) {
if (_alloc_linkspace(ois)) {
pr_err("omap_hwmod: could not allocate link space\n");
@@ -35,6 +35,7 @@
#include "i2c.h"
#include "mmc.h"
#include "wd_timer.h"
#include "soc.h"

/* Base offset for all DRA7XX interrupts external to MPUSS */
#define DRA7XX_IRQ_GIC_START 32

@@ -3261,7 +3262,6 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
&dra7xx_l4_per3__usb_otg_ss1,
&dra7xx_l4_per3__usb_otg_ss2,
&dra7xx_l4_per3__usb_otg_ss3,
&dra7xx_l4_per3__usb_otg_ss4,
&dra7xx_l3_main_1__vcp1,
&dra7xx_l4_per2__vcp1,
&dra7xx_l3_main_1__vcp2,

@@ -3270,8 +3270,26 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
NULL,
};

static struct omap_hwmod_ocp_if *dra74x_hwmod_ocp_ifs[] __initdata = {
&dra7xx_l4_per3__usb_otg_ss4,
NULL,
};

static struct omap_hwmod_ocp_if *dra72x_hwmod_ocp_ifs[] __initdata = {
NULL,
};

int __init dra7xx_hwmod_init(void)
{
int ret;

omap_hwmod_init();
return omap_hwmod_register_links(dra7xx_hwmod_ocp_ifs);
ret = omap_hwmod_register_links(dra7xx_hwmod_ocp_ifs);

if (!ret && soc_is_dra74x())
return omap_hwmod_register_links(dra74x_hwmod_ocp_ifs);
else if (!ret && soc_is_dra72x())
return omap_hwmod_register_links(dra72x_hwmod_ocp_ifs);

return ret;
}

@@ -245,6 +245,8 @@ IS_AM_SUBCLASS(437x, 0x437)
#define soc_is_omap54xx() 0
#define soc_is_omap543x() 0
#define soc_is_dra7xx() 0
#define soc_is_dra74x() 0
#define soc_is_dra72x() 0

#if defined(MULTI_OMAP2)
# if defined(CONFIG_ARCH_OMAP2)

@@ -393,7 +395,11 @@ IS_OMAP_TYPE(3430, 0x3430)

#if defined(CONFIG_SOC_DRA7XX)
#undef soc_is_dra7xx
#undef soc_is_dra74x
#undef soc_is_dra72x
#define soc_is_dra7xx() (of_machine_is_compatible("ti,dra7"))
#define soc_is_dra74x() (of_machine_is_compatible("ti,dra74"))
#define soc_is_dra72x() (of_machine_is_compatible("ti,dra72"))
#endif

/* Various silicon revisions for omap2 */
@@ -183,8 +183,8 @@ enum {

static struct clk div4_clks[DIV4_NR] = {
[DIV4_SDH] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 8, 0x0dff, CLK_ENABLE_ON_INIT),
[DIV4_SD0] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 4, 0x1de0, CLK_ENABLE_ON_INIT),
[DIV4_SD1] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 0, 0x1de0, CLK_ENABLE_ON_INIT),
[DIV4_SD0] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 4, 0x1df0, CLK_ENABLE_ON_INIT),
[DIV4_SD1] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 0, 0x1df0, CLK_ENABLE_ON_INIT),
};

/* DIV6 clocks */

@@ -152,7 +152,7 @@ enum {

static struct clk div4_clks[DIV4_NR] = {
[DIV4_SDH] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 8, 0x0dff, CLK_ENABLE_ON_INIT),
[DIV4_SD0] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 4, 0x1de0, CLK_ENABLE_ON_INIT),
[DIV4_SD0] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 4, 0x1df0, CLK_ENABLE_ON_INIT),
};

/* DIV6 clocks */

@@ -644,7 +644,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
CLKDEV_DEV_ID("e6cb0000.serial", &mstp_clks[MSTP207]), /* SCIFA5 */
CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
CLKDEV_DEV_ID("0xe6c3000.serial", &mstp_clks[MSTP206]), /* SCIFB */
CLKDEV_DEV_ID("e6c3000.serial", &mstp_clks[MSTP206]), /* SCIFB */
CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
CLKDEV_DEV_ID("e6c40000.serial", &mstp_clks[MSTP204]), /* SCIFA0 */
CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */

@@ -426,9 +426,15 @@ static int ve_spc_populate_opps(uint32_t cluster)

static int ve_init_opp_table(struct device *cpu_dev)
{
int cluster = topology_physical_package_id(cpu_dev->id);
int idx, ret = 0, max_opp = info->num_opps[cluster];
struct ve_spc_opp *opps = info->opps[cluster];
int cluster;
int idx, ret = 0, max_opp;
struct ve_spc_opp *opps;

cluster = topology_physical_package_id(cpu_dev->id);
cluster = cluster < 0 ? 0 : cluster;

max_opp = info->num_opps[cluster];
opps = info->opps[cluster];

for (idx = 0; idx < max_opp; idx++, opps++) {
ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt);

@@ -537,6 +543,8 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
spc->hw.init = &init;
spc->cluster = topology_physical_package_id(cpu_dev->id);

spc->cluster = spc->cluster < 0 ? 0 : spc->cluster;

init.name = dev_name(cpu_dev);
init.ops = &clk_spc_ops;
init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
@@ -17,12 +17,6 @@
*/
.align 5
ENTRY(v6_early_abort)
#ifdef CONFIG_CPU_V6
sub r1, sp, #4 @ Get unused stack location
strex r0, r1, [r1] @ Clear the exclusive monitor
#elif defined(CONFIG_CPU_32v6K)
clrex
#endif
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
/*

@@ -13,12 +13,6 @@
*/
.align 5
ENTRY(v7_early_abort)
/*
* The effect of data aborts on on the exclusive access monitor are
* UNPREDICTABLE. Do a CLREX to clear the state
*/
clrex

mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR

@@ -68,6 +68,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
);
local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_icache_range);

void hexagon_clean_dcache_range(unsigned long start, unsigned long end)
{
@@ -549,8 +549,6 @@ source "drivers/sn/Kconfig"
config KEXEC
bool "kexec system call"
depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
select CRYPTO
select CRYPTO_SHA256
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot

@@ -91,8 +91,6 @@ config MMU_SUN3
config KEXEC
bool "kexec system call"
depends on M68KCLASSIC
select CRYPTO
select CRYPTO_SHA256
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot

@@ -2396,8 +2396,6 @@ source "kernel/Kconfig.preempt"

config KEXEC
bool "Kexec system call"
select CRYPTO
select CRYPTO_SHA256
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot

@@ -399,8 +399,6 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
config KEXEC
bool "kexec system call"
depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
select CRYPTO
select CRYPTO_SHA256
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot

@@ -48,8 +48,6 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC

config KEXEC
def_bool y
select CRYPTO
select CRYPTO_SHA256

config AUDIT_ARCH
def_bool y
@@ -283,7 +283,10 @@
#define __NR_sched_setattr 345
#define __NR_sched_getattr 346
#define __NR_renameat2 347
#define NR_syscalls 348
#define __NR_seccomp 348
#define __NR_getrandom 349
#define __NR_memfd_create 350
#define NR_syscalls 351

/*
* There are some system calls that are not present on 64 bit, some

@@ -214,3 +214,6 @@ COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, fla
COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags);
COMPAT_SYSCALL_WRAP4(sched_getattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, size, unsigned int, flags);
COMPAT_SYSCALL_WRAP5(renameat2, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, unsigned int, flags);
COMPAT_SYSCALL_WRAP3(seccomp, unsigned int, op, unsigned int, flags, const char __user *, uargs)
COMPAT_SYSCALL_WRAP3(getrandom, char __user *, buf, size_t, count, unsigned int, flags)
COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, flags)

@@ -2060,6 +2060,13 @@ void s390_reset_system(void (*func)(void *), void *data)
S390_lowcore.program_new_psw.addr =
        PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;

/*
* Clear subchannel ID and number to signal new kernel that no CCW or
* SCSI IPL has been done (for kexec and kdump)
*/
S390_lowcore.subchannel_id = 0;
S390_lowcore.subchannel_nr = 0;

/* Store status at absolute zero */
store_status();

@@ -24,6 +24,7 @@
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>

@@ -61,6 +62,7 @@
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/sysinfo.h>
#include "entry.h"

/*

@@ -766,6 +768,7 @@ static void __init setup_hwcaps(void)
#endif

get_cpu_id(&cpu_id);
add_device_randomness(&cpu_id, sizeof(cpu_id));
switch (cpu_id.machine) {
case 0x9672:
#if !defined(CONFIG_64BIT)

@@ -803,6 +806,19 @@ static void __init setup_hwcaps(void)
}
}

/*
* Add system information as device randomness
*/
static void __init setup_randomness(void)
{
struct sysinfo_3_2_2 *vmms;

vmms = (struct sysinfo_3_2_2 *) alloc_page(GFP_KERNEL);
if (vmms && stsi(vmms, 3, 2, 2) == 0 && vmms->count)
add_device_randomness(&vmms, vmms->count);
free_page((unsigned long) vmms);
}

/*
* Setup function called from init/main.c just after the banner
* was printed.

@@ -901,6 +917,9 @@ void __init setup_arch(char **cmdline_p)

/* Setup zfcpdump support */
setup_zfcpdump();

/* Add system specific data to the random pool */
setup_randomness();
}

#ifdef CONFIG_32BIT

@@ -356,3 +356,6 @@ SYSCALL(sys_finit_module,sys_finit_module,compat_sys_finit_module)
SYSCALL(sys_sched_setattr,sys_sched_setattr,compat_sys_sched_setattr) /* 345 */
SYSCALL(sys_sched_getattr,sys_sched_getattr,compat_sys_sched_getattr)
SYSCALL(sys_renameat2,sys_renameat2,compat_sys_renameat2)
SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp)
SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom)
SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
@@ -598,8 +598,6 @@ source kernel/Kconfig.hz
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
depends on SUPERH32 && MMU
select CRYPTO
select CRYPTO_SHA256
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot

@@ -229,6 +229,7 @@ void flush_icache_range(unsigned long start, unsigned long end)

cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{

@@ -191,8 +191,6 @@ source "kernel/Kconfig.hz"

config KEXEC
bool "kexec system call"
select CRYPTO
select CRYPTO_SHA256
---help---
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot

@@ -183,6 +183,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
preempt_enable();
}
}
EXPORT_SYMBOL(flush_icache_range);

/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */

@@ -254,7 +254,8 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,

err |= setup_sigframe(frame, regs, set);
if (err == 0)
err |= setup_return(regs, &ksig->ka, frame->retcode, frame, usig);
err |= setup_return(regs, &ksig->ka, frame->retcode, frame,
                    ksig->sig);

return err;
}

@@ -276,7 +277,8 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
err |= __save_altstack(&frame->sig.uc.uc_stack, regs->UCreg_sp);
err |= setup_sigframe(&frame->sig, regs, set);
if (err == 0)
err |= setup_return(regs, &ksig->ka, frame->sig.retcode, frame, usig);
err |= setup_return(regs, &ksig->ka, frame->sig.retcode, frame,
                    ksig->sig);

if (err == 0) {
/*

@@ -303,7 +305,6 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs,
int syscall)
{
struct thread_info *thread = current_thread_info();
struct task_struct *tsk = current;
sigset_t *oldset = sigmask_to_save();
int usig = ksig->sig;
int ret;

@@ -373,7 +374,7 @@ static void do_signal(struct pt_regs *regs, int syscall)
if (!user_mode(regs))
return;

if (get_signsl(&ksig)) {
if (get_signal(&ksig)) {
handle_signal(&ksig, regs, syscall);
return;
}

@@ -17,6 +17,4 @@ obj-$(CONFIG_IA32_EMULATION) += ia32/
obj-y += platform/
obj-y += net/

ifeq ($(CONFIG_X86_64),y)
obj-$(CONFIG_KEXEC) += purgatory/
endif
obj-$(CONFIG_KEXEC_FILE) += purgatory/
@@ -1585,9 +1585,6 @@ source kernel/Kconfig.hz

config KEXEC
bool "kexec system call"
select BUILD_BIN2C
select CRYPTO
select CRYPTO_SHA256
---help---
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot

@@ -1602,9 +1599,22 @@ config KEXEC
interface is strongly in flux, so no good recommendation can be
made.

config KEXEC_FILE
bool "kexec file based system call"
select BUILD_BIN2C
depends on KEXEC
depends on X86_64
depends on CRYPTO=y
depends on CRYPTO_SHA256=y
---help---
This is new version of kexec system call. This system call is
file based and takes file descriptors as system call argument
for kernel and initramfs as opposed to list of segments as
accepted by previous system call.

config KEXEC_VERIFY_SIG
bool "Verify kernel signature during kexec_file_load() syscall"
depends on KEXEC
depends on KEXEC_FILE
---help---
This option makes kernel signature verification mandatory for
kexec_file_load() syscall. If kernel is signature can not be

@@ -184,11 +184,8 @@ archheaders:
$(Q)$(MAKE) $(build)=arch/x86/syscalls all

archprepare:
ifeq ($(CONFIG_KEXEC),y)
# Build only for 64bit. No loaders for 32bit yet.
ifeq ($(CONFIG_X86_64),y)
ifeq ($(CONFIG_KEXEC_FILE),y)
$(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
endif
endif

###

@@ -254,6 +251,7 @@ archclean:
$(Q)rm -rf $(objtree)/arch/x86_64
$(Q)$(MAKE) $(clean)=$(boot)
$(Q)$(MAKE) $(clean)=arch/x86/tools
$(Q)$(MAKE) $(clean)=arch/x86/purgatory

PHONY += kvmconfig
kvmconfig:
@ -227,6 +227,8 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned
|
||||
|
||||
extern void io_apic_eoi(unsigned int apic, unsigned int vector);
|
||||
|
||||
extern bool mp_should_keep_irq(struct device *dev);
|
||||
|
||||
#else /* !CONFIG_X86_IO_APIC */
|
||||
|
||||
#define io_apic_assign_pci_irqs 0
|
||||
|
@ -131,8 +131,13 @@ static inline int pte_exec(pte_t pte)
|
||||
|
||||
static inline int pte_special(pte_t pte)
|
||||
{
|
||||
return (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_SPECIAL)) ==
|
||||
(_PAGE_PRESENT|_PAGE_SPECIAL);
|
||||
/*
|
||||
* See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h.
|
||||
* On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 ==
|
||||
* __PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL.
|
||||
*/
|
||||
return (pte_flags(pte) & _PAGE_SPECIAL) &&
|
||||
(pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE));
|
||||
}
|
||||
|
||||
static inline unsigned long pte_pfn(pte_t pte)
|
||||
|
@ -71,6 +71,7 @@ obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
|
||||
obj-$(CONFIG_X86_TSC) += trace_clock.o
|
||||
obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
|
||||
obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
|
||||
obj-$(CONFIG_KEXEC_FILE) += kexec-bzimage64.o
|
||||
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
|
||||
obj-y += kprobes/
|
||||
obj-$(CONFIG_MODULES) += module.o
|
||||
@ -118,5 +119,4 @@ ifeq ($(CONFIG_X86_64),y)
|
||||
|
||||
obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
|
||||
obj-y += vsmp_64.o
|
||||
obj-$(CONFIG_KEXEC) += kexec-bzimage64.o
|
||||
endif
|
||||
|
@ -1070,6 +1070,11 @@ static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin,
|
||||
}
|
||||
|
||||
if (flags & IOAPIC_MAP_ALLOC) {
|
||||
/* special handling for legacy IRQs */
|
||||
if (irq < nr_legacy_irqs() && info->count == 1 &&
|
||||
mp_irqdomain_map(domain, irq, pin) != 0)
|
||||
irq = -1;
|
||||
|
||||
if (irq > 0)
|
||||
info->count++;
|
||||
else if (info->count == 0)
|
||||
@ -3896,7 +3901,15 @@ int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq,
|
||||
info->polarity = 1;
|
||||
}
|
||||
info->node = NUMA_NO_NODE;
|
||||
info->set = 1;
|
||||
|
||||
/*
|
||||
* setup_IO_APIC_irqs() programs all legacy IRQs with default
|
||||
* trigger and polarity attributes. Don't set the flag for that
|
||||
* case so the first legacy IRQ user could reprogram the pin
|
||||
* with real trigger and polarity attributes.
|
||||
*/
|
||||
if (virq >= nr_legacy_irqs() || info->count)
|
||||
info->set = 1;
|
||||
}
|
||||
set_io_apic_irq_attr(&attr, ioapic, hwirq, info->trigger,
|
||||
info->polarity);
|
||||
@ -3946,6 +3959,18 @@ int mp_set_gsi_attr(u32 gsi, int trigger, int polarity, int node)
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool mp_should_keep_irq(struct device *dev)
|
||||
{
|
||||
if (dev->power.is_prepared)
|
||||
return true;
|
||||
#ifdef CONFIG_PM_RUNTIME
|
||||
if (dev->power.runtime_status == RPM_SUSPENDING)
|
||||
return true;
|
||||
#endif
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Enable IOAPIC early just for system timer */
|
||||
void __init pre_init_apic_IRQ0(void)
|
||||
{
|
||||
|
@ -182,8 +182,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
|
||||
crash_save_cpu(regs, safe_smp_processor_id());
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
|
||||
#ifdef CONFIG_KEXEC_FILE
|
||||
static int get_nr_ram_ranges_callback(unsigned long start_pfn,
|
||||
unsigned long nr_pfn, void *arg)
|
||||
{
|
||||
@ -696,5 +695,4 @@ int crash_load_segments(struct kimage *image)
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_X86_64 */
|
||||
#endif /* CONFIG_KEXEC_FILE */
|
||||
|
@ -203,7 +203,7 @@ void __init native_init_IRQ(void)
|
||||
set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
|
||||
}
|
||||
|
||||
if (!acpi_ioapic && !of_ioapic)
|
||||
if (!acpi_ioapic && !of_ioapic && nr_legacy_irqs())
|
||||
setup_irq(2, &irq2);
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
|
@ -25,9 +25,11 @@
|
||||
#include <asm/debugreg.h>
|
||||
#include <asm/kexec-bzimage64.h>
|
||||
|
||||
#ifdef CONFIG_KEXEC_FILE
|
||||
static struct kexec_file_ops *kexec_file_loaders[] = {
|
||||
&kexec_bzImage64_ops,
|
||||
};
|
||||
#endif
|
||||
|
||||
static void free_transition_pgtable(struct kimage *image)
|
||||
{
|
||||
@ -178,6 +180,7 @@ static void load_segments(void)
|
||||
);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_KEXEC_FILE
|
||||
/* Update purgatory as needed after various image segments have been prepared */
|
||||
static int arch_update_purgatory(struct kimage *image)
|
||||
{
|
||||
@ -209,6 +212,12 @@ static int arch_update_purgatory(struct kimage *image)
|
||||
|
||||
return ret;
|
||||
}
|
||||
#else /* !CONFIG_KEXEC_FILE */
|
||||
static inline int arch_update_purgatory(struct kimage *image)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_KEXEC_FILE */
|
||||
|
||||
int machine_kexec_prepare(struct kimage *image)
|
||||
{
|
||||
@ -329,6 +338,7 @@ void arch_crash_save_vmcoreinfo(void)
|
||||
|
||||
/* arch-dependent functionality related to kexec file-based syscall */
|
||||
|
||||
#ifdef CONFIG_KEXEC_FILE
|
||||
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
|
||||
unsigned long buf_len)
|
||||
{
|
||||
@ -522,3 +532,4 @@ overflow:
|
||||
(int)ELF64_R_TYPE(rel[i].r_info), value);
|
||||
return -ENOEXEC;
|
||||
}
|
||||
#endif /* CONFIG_KEXEC_FILE */
|
||||
|
@ -68,6 +68,8 @@ static struct irqaction irq0 = {
|
||||
|
||||
void __init setup_default_timer_irq(void)
|
||||
{
|
||||
if (!nr_legacy_irqs())
|
||||
return;
|
||||
setup_irq(0, &irq0);
|
||||
}
|
||||
|
||||
|
@ -229,7 +229,7 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
|
||||
|
||||
static void intel_mid_pci_irq_disable(struct pci_dev *dev)
|
||||
{
|
||||
if (!dev->dev.power.is_prepared && dev->irq > 0)
|
||||
if (!mp_should_keep_irq(&dev->dev) && dev->irq > 0)
|
||||
mp_unmap_irq(dev->irq);
|
||||
}
|
||||
|
||||
|
@ -1256,7 +1256,7 @@ static int pirq_enable_irq(struct pci_dev *dev)
|
||||
|
||||
static void pirq_disable_irq(struct pci_dev *dev)
|
||||
{
|
||||
if (io_apic_assign_pci_irqs && !dev->dev.power.is_prepared &&
|
||||
if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
|
||||
dev->irq) {
|
||||
mp_unmap_irq(dev->irq);
|
||||
dev->irq = 0;
|
||||
|
@ -11,6 +11,7 @@ targets += purgatory.ro
|
||||
# sure how to relocate those. Like kexec-tools, use custom flags.
|
||||
|
||||
KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large
|
||||
KBUILD_CFLAGS += -m$(BITS)
|
||||
|
||||
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
|
||||
$(call if_changed,ld)
|
||||
@ -24,7 +25,4 @@ $(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
|
||||
$(call if_changed,bin2c)
|
||||
|
||||
|
||||
# No loaders for 32bits yet.
|
||||
ifeq ($(CONFIG_X86_64),y)
|
||||
obj-$(CONFIG_KEXEC) += kexec-purgatory.o
|
||||
endif
|
||||
obj-$(CONFIG_KEXEC_FILE) += kexec-purgatory.o
|
||||
|
@ -4,24 +4,23 @@ config ZONE_DMA
|
||||
config XTENSA
|
||||
def_bool y
|
||||
select ARCH_WANT_FRAME_POINTERS
|
||||
select HAVE_IDE
|
||||
select GENERIC_ATOMIC64
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select VIRT_TO_BUS
|
||||
select GENERIC_IRQ_SHOW
|
||||
select GENERIC_SCHED_CLOCK
|
||||
select MODULES_USE_ELF_RELA
|
||||
select GENERIC_PCI_IOMAP
|
||||
select ARCH_WANT_IPC_PARSE_VERSION
|
||||
select ARCH_WANT_OPTIONAL_GPIOLIB
|
||||
select BUILDTIME_EXTABLE_SORT
|
||||
select CLONE_BACKWARDS
|
||||
select IRQ_DOMAIN
|
||||
select HAVE_OPROFILE
|
||||
select COMMON_CLK
|
||||
select GENERIC_ATOMIC64
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select GENERIC_IRQ_SHOW
|
||||
select GENERIC_PCI_IOMAP
|
||||
select GENERIC_SCHED_CLOCK
|
||||
select HAVE_FUNCTION_TRACER
|
||||
select HAVE_IRQ_TIME_ACCOUNTING
|
||||
select HAVE_OPROFILE
|
||||
select HAVE_PERF_EVENTS
|
||||
select COMMON_CLK
|
||||
select IRQ_DOMAIN
|
||||
select MODULES_USE_ELF_RELA
|
||||
select VIRT_TO_BUS
|
||||
help
|
||||
Xtensa processors are 32-bit RISC machines designed by Tensilica
|
||||
primarily for embedded systems. These processors are both
|
||||
@ -62,7 +61,9 @@ config TRACE_IRQFLAGS_SUPPORT
|
||||
def_bool y
|
||||
|
||||
config MMU
|
||||
def_bool n
|
||||
bool
|
||||
default n if !XTENSA_VARIANT_CUSTOM
|
||||
default XTENSA_VARIANT_MMU if XTENSA_VARIANT_CUSTOM
|
||||
|
||||
config VARIANT_IRQ_SWITCH
|
||||
def_bool n
|
||||
@ -102,8 +103,40 @@ config XTENSA_VARIANT_S6000
|
||||
select VARIANT_IRQ_SWITCH
|
||||
select ARCH_REQUIRE_GPIOLIB
|
||||
select XTENSA_CALIBRATE_CCOUNT
|
||||
|
||||
config XTENSA_VARIANT_CUSTOM
|
||||
bool "Custom Xtensa processor configuration"
|
||||
select MAY_HAVE_SMP
|
||||
select HAVE_XTENSA_GPIO32
|
||||
help
|
||||
Select this variant to use a custom Xtensa processor configuration.
|
||||
You will be prompted for a processor variant CORENAME.
|
||||
endchoice
|
||||
|
||||
config XTENSA_VARIANT_CUSTOM_NAME
|
||||
string "Xtensa Processor Custom Core Variant Name"
|
||||
depends on XTENSA_VARIANT_CUSTOM
|
||||
help
|
||||
Provide the name of a custom Xtensa processor variant.
|
||||
This CORENAME selects arch/xtensa/variant/CORENAME.
|
||||
Dont forget you have to select MMU if you have one.
|
||||
|
||||
config XTENSA_VARIANT_NAME
|
||||
string
|
||||
default "dc232b" if XTENSA_VARIANT_DC232B
|
||||
default "dc233c" if XTENSA_VARIANT_DC233C
|
||||
default "fsf" if XTENSA_VARIANT_FSF
|
||||
default "s6000" if XTENSA_VARIANT_S6000
|
||||
default XTENSA_VARIANT_CUSTOM_NAME if XTENSA_VARIANT_CUSTOM
|
||||
|
||||
config XTENSA_VARIANT_MMU
|
||||
bool "Core variant has a Full MMU (TLB, Pages, Protection, etc)"
|
||||
depends on XTENSA_VARIANT_CUSTOM
|
||||
default y
|
||||
help
|
||||
Build a Conventional Kernel with full MMU support,
|
||||
ie: it supports a TLB with auto-loading, page protection.
|
||||
|
||||
config XTENSA_UNALIGNED_USER
|
||||
bool "Unaligned memory access in use space"
|
||||
help
|
||||
@ -156,13 +189,9 @@ config HOTPLUG_CPU
|
||||
|
||||
Say N if you want to disable CPU hotplug.
|
||||
|
||||
config MATH_EMULATION
|
||||
bool "Math emulation"
|
||||
help
|
||||
Can we use information of configuration file?
|
||||
|
||||
config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
|
||||
bool "Initialize Xtensa MMU inside the Linux kernel code"
|
||||
depends on MMU
|
||||
default y
|
||||
help
|
||||
Earlier version initialized the MMU in the exception vector
|
||||
@ -192,6 +221,7 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
|
||||
|
||||
config HIGHMEM
|
||||
bool "High Memory Support"
|
||||
depends on MMU
|
||||
help
|
||||
Linux can use the full amount of RAM in the system by
|
||||
default. However, the default MMUv2 setup only maps the
|
||||
@ -208,6 +238,32 @@ config HIGHMEM
|
||||
|
||||
If unsure, say Y.
|
||||
|
||||
config FAST_SYSCALL_XTENSA
|
||||
bool "Enable fast atomic syscalls"
|
||||
default n
|
||||
help
|
||||
fast_syscall_xtensa is a syscall that can make atomic operations
|
||||
on UP kernel when processor has no s32c1i support.
|
||||
|
||||
This syscall is deprecated. It may have issues when called with
|
||||
invalid arguments. It is provided only for backwards compatibility.
|
||||
Only enable it if your userspace software requires it.
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config FAST_SYSCALL_SPILL_REGISTERS
|
||||
bool "Enable spill registers syscall"
|
||||
default n
|
||||
help
|
||||
fast_syscall_spill_registers is a syscall that spills all active
|
||||
register windows of a calling userspace task onto its stack.
|
||||
|
||||
This syscall is deprecated. It may have issues when called with
|
||||
invalid arguments. It is provided only for backwards compatibility.
|
||||
Only enable it if your userspace software requires it.
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
endmenu
|
||||
|
||||
config XTENSA_CALIBRATE_CCOUNT
|
||||
@ -250,12 +306,14 @@ config XTENSA_PLATFORM_ISS
|
||||
|
||||
config XTENSA_PLATFORM_XT2000
|
||||
bool "XT2000"
|
||||
select HAVE_IDE
|
||||
help
|
||||
XT2000 is the name of Tensilica's feature-rich emulation platform.
|
||||
This hardware is capable of running a full Linux distribution.
|
||||
|
||||
config XTENSA_PLATFORM_S6105
|
||||
bool "S6105"
|
||||
select HAVE_IDE
|
||||
select SERIAL_CONSOLE
|
||||
select NO_IOPORT_MAP
|
||||
|
||||
|
@ -4,6 +4,7 @@
|
||||
# for more details.
|
||||
#
|
||||
# Copyright (C) 2001 - 2005 Tensilica Inc.
|
||||
# Copyright (C) 2014 Cadence Design Systems Inc.
|
||||
#
|
||||
# This file is included by the global makefile so that you can add your own
|
||||
# architecture-specific flags and dependencies. Remember to do have actions
|
||||
@ -13,11 +14,7 @@
|
||||
# Core configuration.
|
||||
# (Use VAR=<xtensa_config> to use another default compiler.)
|
||||
|
||||
variant-$(CONFIG_XTENSA_VARIANT_FSF) := fsf
|
||||
variant-$(CONFIG_XTENSA_VARIANT_DC232B) := dc232b
|
||||
variant-$(CONFIG_XTENSA_VARIANT_DC233C) := dc233c
|
||||
variant-$(CONFIG_XTENSA_VARIANT_S6000) := s6000
|
||||
variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM) := custom
|
||||
variant-y := $(patsubst "%",%,$(CONFIG_XTENSA_VARIANT_NAME))
|
||||
|
||||
VARIANT = $(variant-y)
|
||||
export VARIANT
|
||||
|
@ -4,8 +4,11 @@
|
||||
|
||||
/ {
|
||||
compatible = "cdns,xtensa-kc705";
|
||||
chosen {
|
||||
bootargs = "earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000";
|
||||
};
|
||||
memory@0 {
|
||||
device_type = "memory";
|
||||
reg = <0x00000000 0x08000000>;
|
||||
reg = <0x00000000 0x38000000>;
|
||||
};
|
||||
};
|
||||
|
@ -66,7 +66,6 @@ CONFIG_XTENSA_ARCH_LINUX_BE=y
|
||||
CONFIG_MMU=y
|
||||
# CONFIG_XTENSA_UNALIGNED_USER is not set
|
||||
# CONFIG_PREEMPT is not set
|
||||
# CONFIG_MATH_EMULATION is not set
|
||||
# CONFIG_HIGHMEM is not set
|
||||
|
||||
#
|
||||
|
@ -146,7 +146,6 @@ CONFIG_XTENSA_VARIANT_FSF=y
|
||||
# CONFIG_XTENSA_VARIANT_S6000 is not set
|
||||
# CONFIG_XTENSA_UNALIGNED_USER is not set
|
||||
# CONFIG_PREEMPT is not set
|
||||
# CONFIG_MATH_EMULATION is not set
|
||||
CONFIG_XTENSA_CALIBRATE_CCOUNT=y
|
||||
CONFIG_SERIAL_CONSOLE=y
|
||||
CONFIG_XTENSA_ISS_NETWORK=y
|
||||
@ -308,7 +307,7 @@ CONFIG_MISC_DEVICES=y
|
||||
# EEPROM support
|
||||
#
|
||||
# CONFIG_EEPROM_93CX6 is not set
|
||||
CONFIG_HAVE_IDE=y
|
||||
# CONFIG_HAVE_IDE is not set
|
||||
# CONFIG_IDE is not set
|
||||
|
||||
#
|
||||
|
@ -109,7 +109,6 @@ CONFIG_VARIANT_IRQ_SWITCH=y
|
||||
CONFIG_XTENSA_VARIANT_S6000=y
|
||||
# CONFIG_XTENSA_UNALIGNED_USER is not set
|
||||
CONFIG_PREEMPT=y
|
||||
# CONFIG_MATH_EMULATION is not set
|
||||
# CONFIG_HIGHMEM is not set
|
||||
CONFIG_XTENSA_CALIBRATE_CCOUNT=y
|
||||
CONFIG_SERIAL_CONSOLE=y
|
||||
|
@ -37,6 +37,7 @@
|
||||
* specials for cache aliasing:
|
||||
*
|
||||
* __flush_invalidate_dcache_page_alias(vaddr,paddr)
|
||||
* __invalidate_dcache_page_alias(vaddr,paddr)
|
||||
* __invalidate_icache_page_alias(vaddr,paddr)
|
||||
*/
|
||||
|
||||
@ -62,6 +63,7 @@ extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
|
||||
|
||||
#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
|
||||
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
|
||||
extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
|
||||
#else
|
||||
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
|
||||
unsigned long phys) { }
|
||||
|
@ -23,8 +23,8 @@
|
||||
* Here we define all the compile-time 'special' virtual
|
||||
* addresses. The point is to have a constant address at
|
||||
* compile time, but to set the physical address only
|
||||
* in the boot process. We allocate these special addresses
|
||||
* from the end of the consistent memory region backwards.
|
||||
* in the boot process. We allocate these special addresses
|
||||
* from the start of the consistent memory region upwards.
|
||||
* Also this lets us do fail-safe vmalloc(), we
|
||||
* can guarantee that these special addresses and
|
||||
* vmalloc()-ed addresses never overlap.
|
||||
@ -38,7 +38,8 @@ enum fixed_addresses {
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
/* reserved pte's for temporary kernel mappings */
|
||||
FIX_KMAP_BEGIN,
|
||||
FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
|
||||
FIX_KMAP_END = FIX_KMAP_BEGIN +
|
||||
(KM_TYPE_NR * NR_CPUS * DCACHE_N_COLORS) - 1,
|
||||
#endif
|
||||
__end_of_fixed_addresses
|
||||
};
|
||||
@ -47,7 +48,28 @@ enum fixed_addresses {
|
||||
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
|
||||
#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
|
||||
|
||||
#include <asm-generic/fixmap.h>
|
||||
#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
|
||||
#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
/*
|
||||
* 'index to address' translation. If anyone tries to use the idx
|
||||
* directly without translation, we catch the bug with a NULL-deference
|
||||
* kernel oops. Illegal ranges of incoming indices are caught too.
|
||||
*/
|
||||
static __always_inline unsigned long fix_to_virt(const unsigned int idx)
|
||||
{
|
||||
BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
|
||||
return __fix_to_virt(idx);
|
||||
}
|
||||
|
||||
static inline unsigned long virt_to_fix(const unsigned long vaddr)
|
||||
{
|
||||
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
|
||||
return __virt_to_fix(vaddr);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#define kmap_get_fixmap_pte(vaddr) \
|
||||
pte_offset_kernel( \
|
||||
|
@ -12,19 +12,55 @@
|
||||
#ifndef _XTENSA_HIGHMEM_H
|
||||
#define _XTENSA_HIGHMEM_H
|
||||
|
||||
#include <linux/wait.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/kmap_types.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
#define PKMAP_BASE (FIXADDR_START - PMD_SIZE)
|
||||
#define LAST_PKMAP PTRS_PER_PTE
|
||||
#define PKMAP_BASE ((FIXADDR_START - \
|
||||
(LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
|
||||
#define LAST_PKMAP (PTRS_PER_PTE * DCACHE_N_COLORS)
|
||||
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
|
||||
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
|
||||
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
|
||||
|
||||
#define kmap_prot PAGE_KERNEL
|
||||
|
||||
#if DCACHE_WAY_SIZE > PAGE_SIZE
|
||||
#define get_pkmap_color get_pkmap_color
|
||||
static inline int get_pkmap_color(struct page *page)
|
||||
{
|
||||
return DCACHE_ALIAS(page_to_phys(page));
|
||||
}
|
||||
|
||||
extern unsigned int last_pkmap_nr_arr[];
|
||||
|
||||
static inline unsigned int get_next_pkmap_nr(unsigned int color)
|
||||
{
|
||||
last_pkmap_nr_arr[color] =
|
||||
(last_pkmap_nr_arr[color] + DCACHE_N_COLORS) & LAST_PKMAP_MASK;
|
||||
return last_pkmap_nr_arr[color] + color;
|
||||
}
|
||||
|
||||
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
|
||||
{
|
||||
return pkmap_nr < DCACHE_N_COLORS;
|
||||
}
|
||||
|
||||
static inline int get_pkmap_entries_count(unsigned int color)
|
||||
{
|
||||
return LAST_PKMAP / DCACHE_N_COLORS;
|
||||
}
|
||||
|
||||
extern wait_queue_head_t pkmap_map_wait_arr[];
|
||||
|
||||
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
|
||||
{
|
||||
return pkmap_map_wait_arr + color;
|
||||
}
|
||||
#endif
|
||||
|
||||
extern pte_t *pkmap_page_table;
|
||||
|
||||
void *kmap_high(struct page *page);
|
||||
|
@ -78,7 +78,9 @@
|
||||
# define DCACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
|
||||
#else
|
||||
# define DCACHE_ALIAS_ORDER 0
|
||||
# define DCACHE_ALIAS(a) ((void)(a), 0)
|
||||
#endif
|
||||
#define DCACHE_N_COLORS (1 << DCACHE_ALIAS_ORDER)
|
||||
|
||||
#if ICACHE_WAY_SIZE > PAGE_SIZE
|
||||
# define ICACHE_ALIAS_ORDER (ICACHE_WAY_SHIFT - PAGE_SHIFT)
|
||||
@ -134,6 +136,7 @@ static inline __attribute_const__ int get_order(unsigned long size)
|
||||
#endif
|
||||
|
||||
struct page;
|
||||
struct vm_area_struct;
|
||||
extern void clear_page(void *page);
|
||||
extern void copy_page(void *to, void *from);
|
||||
|
||||
@ -143,8 +146,15 @@ extern void copy_page(void *to, void *from);
|
||||
*/
|
||||
|
||||
#if DCACHE_WAY_SIZE > PAGE_SIZE
|
||||
extern void clear_user_page(void*, unsigned long, struct page*);
|
||||
extern void copy_user_page(void*, void*, unsigned long, struct page*);
|
||||
extern void clear_page_alias(void *vaddr, unsigned long paddr);
|
||||
extern void copy_page_alias(void *to, void *from,
|
||||
unsigned long to_paddr, unsigned long from_paddr);
|
||||
|
||||
#define clear_user_highpage clear_user_highpage
|
||||
void clear_user_highpage(struct page *page, unsigned long vaddr);
|
||||
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
|
||||
void copy_user_highpage(struct page *to, struct page *from,
|
||||
unsigned long vaddr, struct vm_area_struct *vma);
|
||||
#else
|
||||
# define clear_user_page(page, vaddr, pg) clear_page(page)
|
||||
# define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
|
||||
|
@ -67,7 +67,12 @@
|
||||
#define VMALLOC_START 0xC0000000
|
||||
#define VMALLOC_END 0xC7FEFFFF
|
||||
#define TLBTEMP_BASE_1 0xC7FF0000
|
||||
#define TLBTEMP_BASE_2 0xC7FF8000
|
||||
#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
|
||||
#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
|
||||
#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE)
|
||||
#else
|
||||
#define TLBTEMP_SIZE ICACHE_WAY_SIZE
|
||||
#endif
|
||||
|
||||
/*
|
||||
* For the Xtensa architecture, the PTE layout is as follows:
|
||||
|
@ -52,7 +52,12 @@
|
||||
*/
|
||||
.macro get_fs ad, sp
|
||||
GET_CURRENT(\ad,\sp)
|
||||
#if THREAD_CURRENT_DS > 1020
|
||||
addi \ad, \ad, TASK_THREAD
|
||||
l32i \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD
|
||||
#else
|
||||
l32i \ad, \ad, THREAD_CURRENT_DS
|
||||
#endif
|
||||
.endm
|
||||
|
||||
/*
|
||||
|
@ -28,17 +28,17 @@
|
||||
#define TCSETSW 0x5403
|
||||
#define TCSETSF 0x5404
|
||||
|
||||
#define TCGETA _IOR('t', 23, struct termio)
|
||||
#define TCSETA _IOW('t', 24, struct termio)
|
||||
#define TCSETAW _IOW('t', 25, struct termio)
|
||||
#define TCSETAF _IOW('t', 28, struct termio)
|
||||
#define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */
|
||||
#define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */
|
||||
#define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */
|
||||
#define TCSETAF 0x4012741C /* _IOW('t', 28, struct termio) */
|
||||
|
||||
#define TCSBRK _IO('t', 29)
|
||||
#define TCXONC _IO('t', 30)
|
||||
#define TCFLSH _IO('t', 31)
|
||||
|
||||
#define TIOCSWINSZ _IOW('t', 103, struct winsize)
|
||||
#define TIOCGWINSZ _IOR('t', 104, struct winsize)
|
||||
#define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */
|
||||
#define TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */
|
||||
#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
|
||||
#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
|
||||
#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
|
||||
@ -88,7 +88,6 @@
|
||||
#define TIOCSETD _IOW('T', 35, int)
|
||||
#define TIOCGETD _IOR('T', 36, int)
|
||||
#define TCSBRKP _IOW('T', 37, int) /* Needed for POSIX tcsendbreak()*/
|
||||
#define TIOCTTYGSTRUCT _IOR('T', 38, struct tty_struct) /* For debugging only*/
|
||||
#define TIOCSBRK _IO('T', 39) /* BSD compatibility */
|
||||
#define TIOCCBRK _IO('T', 40) /* BSD compatibility */
|
||||
#define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/
|
||||
@ -114,8 +113,10 @@
|
||||
#define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* Get line status reg. */
|
||||
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
|
||||
# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
|
||||
#define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* Get multiport config */
|
||||
#define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */
|
||||
#define TIOCSERGETMULTI 0x80a8545a /* Get multiport config */
|
||||
/* _IOR('T', 90, struct serial_multiport_struct) */
|
||||
#define TIOCSERSETMULTI 0x40a8545b /* Set multiport config */
|
||||
/* _IOW('T', 91, struct serial_multiport_struct) */
|
||||
|
||||
#define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */
|
||||
#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
|
||||
|
@ -739,7 +739,10 @@ __SYSCALL(334, sys_sched_setattr, 2)
|
||||
#define __NR_sched_getattr 335
|
||||
__SYSCALL(335, sys_sched_getattr, 3)
|
||||
|
||||
#define __NR_syscall_count 336
|
||||
#define __NR_renameat2 336
|
||||
__SYSCALL(336, sys_renameat2, 5)
|
||||
|
||||
#define __NR_syscall_count 337
|
||||
|
||||
/*
|
||||
* sysxtensa syscall handler
|
||||
|
@ -8,6 +8,7 @@
|
||||
* this archive for more details.
|
||||
*
|
||||
* Copyright (C) 2001 - 2005 Tensilica, Inc.
|
||||
* Copyright (C) 2014 Cadence Design Systems Inc.
|
||||
*
|
||||
* Rewritten by Chris Zankel <chris@zankel.net>
|
||||
*
|
||||
@ -174,6 +175,10 @@ ENTRY(fast_unaligned)
|
||||
s32i a0, a2, PT_AREG2
|
||||
s32i a3, a2, PT_AREG3
|
||||
|
||||
rsr a3, excsave1
|
||||
movi a4, fast_unaligned_fixup
|
||||
s32i a4, a3, EXC_TABLE_FIXUP
|
||||
|
||||
/* Keep value of SAR in a0 */
|
||||
|
||||
rsr a0, sar
|
||||
@ -225,10 +230,6 @@ ENTRY(fast_unaligned)
|
||||
addx8 a5, a6, a5
|
||||
jx a5 # jump into table
|
||||
|
||||
/* Invalid instruction, CRITICAL! */
|
||||
.Linvalid_instruction_load:
|
||||
j .Linvalid_instruction
|
||||
|
||||
/* Load: Load memory address. */
|
||||
|
||||
.Lload: movi a3, ~3
|
||||
@ -272,18 +273,6 @@ ENTRY(fast_unaligned)
|
||||
/* Set target register. */
|
||||
|
||||
1:
|
||||
|
||||
#if XCHAL_HAVE_LOOPS
|
||||
rsr a5, lend # check if we reached LEND
|
||||
bne a7, a5, 1f
|
||||
rsr a5, lcount # and LCOUNT != 0
|
||||
beqz a5, 1f
|
||||
addi a5, a5, -1 # decrement LCOUNT and set
|
||||
rsr a7, lbeg # set PC to LBEGIN
|
||||
wsr a5, lcount
|
||||
#endif
|
||||
|
||||
1: wsr a7, epc1 # skip load instruction
|
||||
extui a4, a4, INSN_T, 4 # extract target register
|
||||
movi a5, .Lload_table
|
||||
addx8 a4, a4, a5
|
||||
@ -326,6 +315,35 @@ ENTRY(fast_unaligned)
|
||||
mov a3, a14 ; _j 1f; .align 8
|
||||
mov a3, a15 ; _j 1f; .align 8
|
||||
|
||||
/* We cannot handle this exception. */
|
||||
|
||||
.extern _kernel_exception
|
||||
.Linvalid_instruction_load:
|
||||
.Linvalid_instruction_store:
|
||||
|
||||
movi a4, 0
|
||||
rsr a3, excsave1
|
||||
s32i a4, a3, EXC_TABLE_FIXUP
|
||||
|
||||
/* Restore a4...a8 and SAR, set SP, and jump to default exception. */
|
||||
|
||||
l32i a8, a2, PT_AREG8
|
||||
l32i a7, a2, PT_AREG7
|
||||
l32i a6, a2, PT_AREG6
|
||||
l32i a5, a2, PT_AREG5
|
||||
l32i a4, a2, PT_AREG4
|
||||
wsr a0, sar
|
||||
mov a1, a2
|
||||
|
||||
rsr a0, ps
|
||||
bbsi.l a0, PS_UM_BIT, 2f # jump if user mode
|
||||
|
||||
movi a0, _kernel_exception
|
||||
jx a0
|
||||
|
||||
2: movi a0, _user_exception
|
||||
jx a0
|
||||
|
||||
1: # a7: instruction pointer, a4: instruction, a3: value
|
||||
|
||||
movi a6, 0 # mask: ffffffff:00000000
|
||||
@ -353,17 +371,6 @@ ENTRY(fast_unaligned)
|
||||
/* Get memory address */
|
||||
|
||||
1:
|
||||
#if XCHAL_HAVE_LOOPS
|
||||
rsr a4, lend # check if we reached LEND
|
||||
bne a7, a4, 1f
|
||||
rsr a4, lcount # and LCOUNT != 0
|
||||
beqz a4, 1f
|
||||
addi a4, a4, -1 # decrement LCOUNT and set
|
||||
rsr a7, lbeg # set PC to LBEGIN
|
||||
wsr a4, lcount
|
||||
#endif
|
||||
|
||||
1: wsr a7, epc1 # skip store instruction
|
||||
movi a4, ~3
|
||||
and a4, a4, a8 # align memory address
|
||||
|
||||
@ -375,25 +382,25 @@ ENTRY(fast_unaligned)
|
||||
#endif
|
||||
|
||||
__ssa8r a8
|
||||
__src_b a7, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE)
|
||||
__src_b a8, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE)
|
||||
__src_b a6, a6, a5 # hi-mask 0..0F..F (BE) F..F0..0 (LE)
|
||||
#ifdef UNALIGNED_USER_EXCEPTION
|
||||
l32e a5, a4, -8
|
||||
#else
|
||||
l32i a5, a4, 0 # load lower address word
|
||||
#endif
|
||||
and a5, a5, a7 # mask
|
||||
__sh a7, a3 # shift value
|
||||
or a5, a5, a7 # or with original value
|
||||
and a5, a5, a8 # mask
|
||||
__sh a8, a3 # shift value
|
||||
or a5, a5, a8 # or with original value
|
||||
#ifdef UNALIGNED_USER_EXCEPTION
|
||||
s32e a5, a4, -8
|
||||
l32e a7, a4, -4
|
||||
l32e a8, a4, -4
|
||||
#else
|
||||
s32i a5, a4, 0 # store
|
||||
l32i a7, a4, 4 # same for upper address word
|
||||
l32i a8, a4, 4 # same for upper address word
|
||||
#endif
|
||||
__sl a5, a3
|
||||
and a6, a7, a6
|
||||
and a6, a8, a6
|
||||
or a6, a6, a5
|
||||
#ifdef UNALIGNED_USER_EXCEPTION
|
||||
s32e a6, a4, -4
|
||||
@ -401,9 +408,27 @@ ENTRY(fast_unaligned)
|
||||
s32i a6, a4, 4
|
||||
#endif
|
||||
|
||||
/* Done. restore stack and return */
|
||||
|
||||
.Lexit:
|
||||
#if XCHAL_HAVE_LOOPS
|
||||
rsr a4, lend # check if we reached LEND
|
||||
bne a7, a4, 1f
|
||||
rsr a4, lcount # and LCOUNT != 0
|
||||
beqz a4, 1f
|
||||
addi a4, a4, -1 # decrement LCOUNT and set
|
||||
rsr a7, lbeg # set PC to LBEGIN
|
||||
wsr a4, lcount
|
||||
#endif
|
||||
|
||||
1: wsr a7, epc1 # skip emulated instruction
|
||||
|
||||
/* Update icount if we're single-stepping in userspace. */
|
||||
rsr a4, icountlevel
|
||||
beqz a4, 1f
|
||||
bgeui a4, LOCKLEVEL + 1, 1f
|
||||
rsr a4, icount
|
||||
addi a4, a4, 1
|
||||
wsr a4, icount
|
||||
1:
|
||||
movi a4, 0
|
||||
rsr a3, excsave1
|
||||
s32i a4, a3, EXC_TABLE_FIXUP
|
||||
@ -424,31 +449,40 @@ ENTRY(fast_unaligned)
|
||||
l32i a2, a2, PT_AREG2
|
||||
rfe
|
||||
|
||||
/* We cannot handle this exception. */
|
||||
ENDPROC(fast_unaligned)
|
||||
|
||||
.extern _kernel_exception
|
||||
.Linvalid_instruction_store:
|
||||
.Linvalid_instruction:
|
||||
ENTRY(fast_unaligned_fixup)
|
||||
|
||||
/* Restore a4...a8 and SAR, set SP, and jump to default exception. */
|
||||
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
|
||||
wsr a3, excsave1
|
||||
|
||||
l32i a8, a2, PT_AREG8
|
||||
l32i a7, a2, PT_AREG7
|
||||
l32i a6, a2, PT_AREG6
|
||||
l32i a5, a2, PT_AREG5
|
||||
l32i a4, a2, PT_AREG4
|
||||
l32i a0, a2, PT_AREG2
|
||||
xsr a0, depc # restore depc and a0
|
||||
wsr a0, sar
|
||||
mov a1, a2
|
||||
|
||||
rsr a0, exccause
|
||||
s32i a0, a2, PT_DEPC # mark as a regular exception
|
||||
|
||||
rsr a0, ps
|
||||
bbsi.l a2, PS_UM_BIT, 1f # jump if user mode
|
||||
bbsi.l a0, PS_UM_BIT, 1f # jump if user mode
|
||||
|
||||
movi a0, _kernel_exception
|
||||
rsr a0, exccause
|
||||
addx4 a0, a0, a3 # find entry in table
|
||||
l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler
|
||||
l32i a3, a2, PT_AREG3
|
||||
jx a0
|
||||
1:
|
||||
rsr a0, exccause
|
||||
addx4 a0, a0, a3 # find entry in table
|
||||
l32i a0, a0, EXC_TABLE_FAST_USER # load handler
|
||||
l32i a3, a2, PT_AREG3
|
||||
jx a0
|
||||
|
||||
1: movi a0, _user_exception
|
||||
jx a0
|
||||
|
||||
ENDPROC(fast_unaligned)
|
||||
ENDPROC(fast_unaligned_fixup)
|
||||
|
||||
#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
|
||||
|
@ -986,6 +986,8 @@ ENDPROC(fast_syscall_unrecoverable)
|
||||
* j done
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_FAST_SYSCALL_XTENSA
|
||||
|
||||
#define TRY \
|
||||
.section __ex_table, "a"; \
|
||||
.word 66f, 67f; \
|
||||
@ -1001,9 +1003,8 @@ ENTRY(fast_syscall_xtensa)
|
||||
movi a7, 4 # sizeof(unsigned int)
|
||||
access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp
|
||||
|
||||
addi a6, a6, -1 # assuming SYS_XTENSA_ATOMIC_SET = 1
|
||||
_bgeui a6, SYS_XTENSA_COUNT - 1, .Lill
|
||||
_bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp
|
||||
_bgeui a6, SYS_XTENSA_COUNT, .Lill
|
||||
_bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp
|
||||
|
||||
/* Fall through for ATOMIC_CMP_SWP. */
|
||||
|
||||
@ -1015,27 +1016,26 @@ TRY s32i a5, a3, 0 # different, modify value
|
||||
l32i a7, a2, PT_AREG7 # restore a7
|
||||
l32i a0, a2, PT_AREG0 # restore a0
|
||||
movi a2, 1 # and return 1
|
||||
addi a6, a6, 1 # restore a6 (really necessary?)
|
||||
rfe
|
||||
|
||||
1: l32i a7, a2, PT_AREG7 # restore a7
|
||||
l32i a0, a2, PT_AREG0 # restore a0
|
||||
movi a2, 0 # return 0 (note that we cannot set
|
||||
addi a6, a6, 1 # restore a6 (really necessary?)
|
||||
rfe
|
||||
|
||||
.Lnswp: /* Atomic set, add, and exg_add. */
|
||||
|
||||
TRY l32i a7, a3, 0 # orig
|
||||
addi a6, a6, -SYS_XTENSA_ATOMIC_SET
|
||||
add a0, a4, a7 # + arg
|
||||
moveqz a0, a4, a6 # set
|
||||
addi a6, a6, SYS_XTENSA_ATOMIC_SET
|
||||
TRY s32i a0, a3, 0 # write new value
|
||||
|
||||
mov a0, a2
|
||||
mov a2, a7
|
||||
l32i a7, a0, PT_AREG7 # restore a7
|
||||
l32i a0, a0, PT_AREG0 # restore a0
|
||||
addi a6, a6, 1 # restore a6 (really necessary?)
|
||||
rfe
|
||||
|
||||
CATCH
|
||||
@ -1044,13 +1044,25 @@ CATCH
|
||||
movi a2, -EFAULT
|
||||
rfe
|
||||
|
||||
.Lill: l32i a7, a2, PT_AREG0 # restore a7
|
||||
.Lill: l32i a7, a2, PT_AREG7 # restore a7
|
||||
l32i a0, a2, PT_AREG0 # restore a0
|
||||
movi a2, -EINVAL
|
||||
rfe
|
||||
|
||||
ENDPROC(fast_syscall_xtensa)
|
||||
|
||||
#else /* CONFIG_FAST_SYSCALL_XTENSA */
|
||||
|
||||
ENTRY(fast_syscall_xtensa)
|
||||
|
||||
l32i a0, a2, PT_AREG0 # restore a0
|
||||
movi a2, -ENOSYS
|
||||
rfe
|
||||
|
||||
ENDPROC(fast_syscall_xtensa)
|
||||
|
||||
#endif /* CONFIG_FAST_SYSCALL_XTENSA */
|
||||
|
||||
|
||||
/* fast_syscall_spill_registers.
|
||||
*
|
||||
@ -1066,6 +1078,8 @@ ENDPROC(fast_syscall_xtensa)
|
||||
* Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS
|
||||
|
||||
ENTRY(fast_syscall_spill_registers)
|
||||
|
||||
/* Register a FIXUP handler (pass current wb as a parameter) */
|
||||
@ -1400,6 +1414,18 @@ ENTRY(fast_syscall_spill_registers_fixup_return)
|
||||
|
||||
ENDPROC(fast_syscall_spill_registers_fixup_return)
|
||||
|
||||
#else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */
|
||||
|
||||
ENTRY(fast_syscall_spill_registers)
|
||||
|
||||
l32i a0, a2, PT_AREG0 # restore a0
|
||||
movi a2, -ENOSYS
|
||||
rfe
|
||||
|
||||
ENDPROC(fast_syscall_spill_registers)
|
||||
|
||||
#endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
/*
|
||||
* We should never get here. Bail out!
|
||||
@ -1565,7 +1591,7 @@ ENTRY(fast_second_level_miss)
|
||||
rsr a0, excvaddr
|
||||
bltu a0, a3, 2f
|
||||
|
||||
addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
|
||||
addi a1, a0, -TLBTEMP_SIZE
|
||||
bgeu a1, a3, 2f
|
||||
|
||||
/* Check if we have to restore an ITLB mapping. */
|
||||
@ -1820,7 +1846,6 @@ ENTRY(_switch_to)
|
||||
|
||||
entry a1, 16
|
||||
|
||||
mov a10, a2 # preserve 'prev' (a2)
|
||||
mov a11, a3 # and 'next' (a3)
|
||||
|
||||
l32i a4, a2, TASK_THREAD_INFO
|
||||
@ -1828,8 +1853,14 @@ ENTRY(_switch_to)
|
||||
|
||||
save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
|
||||
|
||||
s32i a0, a10, THREAD_RA # save return address
|
||||
s32i a1, a10, THREAD_SP # save stack pointer
|
||||
#if THREAD_RA > 1020 || THREAD_SP > 1020
|
||||
addi a10, a2, TASK_THREAD
|
||||
s32i a0, a10, THREAD_RA - TASK_THREAD # save return address
|
||||
s32i a1, a10, THREAD_SP - TASK_THREAD # save stack pointer
|
||||
#else
|
||||
s32i a0, a2, THREAD_RA # save return address
|
||||
s32i a1, a2, THREAD_SP # save stack pointer
|
||||
#endif
|
||||
|
||||
/* Disable ints while we manipulate the stack pointer. */
|
||||
|
||||
@ -1870,7 +1901,6 @@ ENTRY(_switch_to)
|
||||
load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
|
||||
|
||||
wsr a14, ps
|
||||
mov a2, a10 # return 'prev'
|
||||
rsync
|
||||
|
||||
retw
|
||||
|
@ -49,9 +49,8 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
|
||||
|
||||
/* We currently don't support coherent memory outside KSEG */
|
||||
|
||||
if (ret < XCHAL_KSEG_CACHED_VADDR
|
||||
|| ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE)
|
||||
BUG();
|
||||
BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
|
||||
ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
|
||||
|
||||
|
||||
if (ret != 0) {
|
||||
@ -68,10 +67,11 @@ EXPORT_SYMBOL(dma_alloc_coherent);
|
||||
void dma_free_coherent(struct device *hwdev, size_t size,
|
||||
void *vaddr, dma_addr_t dma_handle)
|
||||
{
|
||||
long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR;
|
||||
unsigned long addr = (unsigned long)vaddr +
|
||||
XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
|
||||
|
||||
if (addr < 0 || addr >= XCHAL_KSEG_SIZE)
|
||||
BUG();
|
||||
BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
|
||||
addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
|
||||
|
||||
free_pages(addr, get_order(size));
|
||||
}
|
||||
|
@ -571,6 +571,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
|
||||
};
|
||||
on_each_cpu(ipi_flush_icache_range, &fd, 1);
|
||||
}
|
||||
EXPORT_SYMBOL(flush_icache_range);
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
|
@ -101,9 +101,8 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
|
||||
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
|
||||
#ifdef CONFIG_XTENSA_UNALIGNED_USER
|
||||
{ EXCCAUSE_UNALIGNED, USER, fast_unaligned },
|
||||
#else
|
||||
{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
|
||||
#endif
|
||||
{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
|
||||
{ EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
|
||||
#endif
|
||||
#ifdef CONFIG_MMU
|
||||
@ -264,7 +263,6 @@ do_illegal_instruction(struct pt_regs *regs)
|
||||
*/
|
||||
|
||||
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
|
||||
#ifndef CONFIG_XTENSA_UNALIGNED_USER
|
||||
void
|
||||
do_unaligned_user (struct pt_regs *regs)
|
||||
{
|
||||
@ -286,7 +284,6 @@ do_unaligned_user (struct pt_regs *regs)
|
||||
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
void
|
||||
do_debug(struct pt_regs *regs)
|
||||
|
@ -454,8 +454,14 @@ _DoubleExceptionVector_WindowOverflow:
|
||||
s32i a0, a2, PT_DEPC
|
||||
|
||||
_DoubleExceptionVector_handle_exception:
|
||||
addi a0, a0, -EXCCAUSE_UNALIGNED
|
||||
beqz a0, 2f
|
||||
addx4 a0, a0, a3
|
||||
l32i a0, a0, EXC_TABLE_FAST_USER
|
||||
l32i a0, a0, EXC_TABLE_FAST_USER + 4 * EXCCAUSE_UNALIGNED
|
||||
xsr a3, excsave1
|
||||
jx a0
|
||||
2:
|
||||
movi a0, user_exception
|
||||
xsr a3, excsave1
|
||||
jx a0
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff.