Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
"The main changes are:
- lockless wakeup support for futexes and IPC message queues
(Davidlohr Bueso, Peter Zijlstra)
- Replace spinlocks with atomics in thread_group_cputimer(), to
improve scalability (Jason Low)
- NUMA balancing improvements (Rik van Riel)
- SCHED_DEADLINE improvements (Wanpeng Li)
- clean up and reorganize preemption helpers (Frederic Weisbecker)
- decouple page fault disabling machinery from the preemption
counter, to improve debuggability and robustness (David
Hildenbrand)
- SCHED_DEADLINE documentation updates (Luca Abeni)
- topology CPU masks cleanups (Bartosz Golaszewski)
- /proc/sched_debug improvements (Srikar Dronamraju)"
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (79 commits)
sched/deadline: Remove needless parameter in dl_runtime_exceeded()
sched: Remove superfluous resetting of the p->dl_throttled flag
sched/deadline: Drop duplicate init_sched_dl_class() declaration
sched/deadline: Reduce rq lock contention by eliminating locking of non-feasible target
sched/deadline: Make init_sched_dl_class() __init
sched/deadline: Optimize pull_dl_task()
sched/preempt: Add static_key() to preempt_notifiers
sched/preempt: Fix preempt notifiers documentation about hlist_del() within unsafe iteration
sched/stop_machine: Fix deadlock between multiple stop_two_cpus()
sched/debug: Add sum_sleep_runtime to /proc/<pid>/sched
sched/debug: Replace vruntime with wait_sum in /proc/sched_debug
sched/debug: Properly format runnable tasks in /proc/sched_debug
sched/numa: Only consider less busy nodes as numa balancing destinations
Revert 095bebf61a ("sched/numa: Do not move past the balance point if unbalanced")
sched/fair: Prevent throttling in early pick_next_task_fair()
preempt: Reorganize the notrace definitions a bit
preempt: Use preempt_schedule_context() as the official tracing preemption point
sched: Make preempt_schedule_context() function-tracing safe
x86: Remove cpu_sibling_mask() and cpu_core_mask()
x86: Replace cpu_**_mask() with topology_**_cpumask()
...
This commit is contained in: commit 23b7776290
@@ -1,6 +1,6 @@
 Export CPU topology info via sysfs. Items (attributes) are similar
-to /proc/cpuinfo.
+to /proc/cpuinfo output of some architectures:
 
 1) /sys/devices/system/cpu/cpuX/topology/physical_package_id:
 
@@ -23,20 +23,35 @@ to /proc/cpuinfo.
 4) /sys/devices/system/cpu/cpuX/topology/thread_siblings:
 
 	internal kernel map of cpuX's hardware threads within the same
-	core as cpuX
+	core as cpuX.
 
-5) /sys/devices/system/cpu/cpuX/topology/core_siblings:
+5) /sys/devices/system/cpu/cpuX/topology/thread_siblings_list:
+
+	human-readable list of cpuX's hardware threads within the same
+	core as cpuX.
+
+6) /sys/devices/system/cpu/cpuX/topology/core_siblings:
 
 	internal kernel map of cpuX's hardware threads within the same
 	physical_package_id.
 
-6) /sys/devices/system/cpu/cpuX/topology/book_siblings:
+7) /sys/devices/system/cpu/cpuX/topology/core_siblings_list:
+
+	human-readable list of cpuX's hardware threads within the same
+	physical_package_id.
+
+8) /sys/devices/system/cpu/cpuX/topology/book_siblings:
 
 	internal kernel map of cpuX's hardware threads within the same
 	book_id.
 
+9) /sys/devices/system/cpu/cpuX/topology/book_siblings_list:
+
+	human-readable list of cpuX's hardware threads within the same
+	book_id.
+
 To implement it in an architecture-neutral way, a new source file,
-drivers/base/topology.c, is to export the 4 or 6 attributes. The two book
+drivers/base/topology.c, is to export the 6 or 9 attributes. The three book
 related sysfs files will only be created if CONFIG_SCHED_BOOK is selected.
 
 For an architecture to support this feature, it must define some of
@@ -44,20 +59,22 @@ these macros in include/asm-XXX/topology.h:
 #define topology_physical_package_id(cpu)
 #define topology_core_id(cpu)
 #define topology_book_id(cpu)
-#define topology_thread_cpumask(cpu)
+#define topology_sibling_cpumask(cpu)
 #define topology_core_cpumask(cpu)
 #define topology_book_cpumask(cpu)
 
-The type of **_id is int.
-The type of siblings is (const) struct cpumask *.
+The type of **_id macros is int.
+The type of **_cpumask macros is (const) struct cpumask *. The latter
+correspond with appropriate **_siblings sysfs attributes (except for
+topology_sibling_cpumask() which corresponds with thread_siblings).
 
 To be consistent on all architectures, include/linux/topology.h
 provides default definitions for any of the above macros that are
 not defined by include/asm-XXX/topology.h:
 1) physical_package_id: -1
 2) core_id: 0
-3) thread_siblings: just the given CPU
-4) core_siblings: just the given CPU
+3) sibling_cpumask: just the given CPU
+4) core_cpumask: just the given CPU
 
 For architectures that don't support books (CONFIG_SCHED_BOOK) there are no
 default definitions for topology_book_id() and topology_book_cpumask().
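To make the renamed accessor concrete, here is a minimal in-kernel sketch (not part of this patch; the helper and its output format are invented for illustration) that walks the hardware threads sharing a core with a given CPU:

    #include <linux/cpumask.h>
    #include <linux/topology.h>
    #include <linux/printk.h>

    /* Hypothetical helper: list cpuX's SMT siblings via the new name. */
    static void print_smt_siblings(int cpu)
    {
            int sibling;

            for_each_cpu(sibling, topology_sibling_cpumask(cpu))
                    pr_info("CPU%d: hardware-thread sibling CPU%d\n",
                            cpu, sibling);
    }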
@@ -8,6 +8,10 @@ CONTENTS
  1. Overview
  2. Scheduling algorithm
  3. Scheduling Real-Time Tasks
+   3.1 Definitions
+   3.2 Schedulability Analysis for Uniprocessor Systems
+   3.3 Schedulability Analysis for Multiprocessor Systems
+   3.4 Relationship with SCHED_DEADLINE Parameters
  4. Bandwidth management
    4.1 System-wide settings
    4.2 Task interface
@@ -43,7 +47,7 @@ CONTENTS
 "deadline", to schedule tasks. A SCHED_DEADLINE task should receive
 "runtime" microseconds of execution time every "period" microseconds, and
 these "runtime" microseconds are available within "deadline" microseconds
-from the beginning of the period. In order to implement this behaviour,
+from the beginning of the period. In order to implement this behavior,
 every time the task wakes up, the scheduler computes a "scheduling deadline"
 consistent with the guarantee (using the CBS[2,3] algorithm). Tasks are then
 scheduled using EDF[1] on these scheduling deadlines (the task with the
@@ -52,7 +56,7 @@ CONTENTS
 "admission control" strategy (see Section "4. Bandwidth management") is used
 (clearly, if the system is overloaded this guarantee cannot be respected).
 
-Summing up, the CBS[2,3] algorithms assigns scheduling deadlines to tasks so
+Summing up, the CBS[2,3] algorithm assigns scheduling deadlines to tasks so
 that each task runs for at most its runtime every period, avoiding any
 interference between different tasks (bandwidth isolation), while the EDF[1]
 algorithm selects the task with the earliest scheduling deadline as the one
@@ -63,7 +67,7 @@ CONTENTS
 In more details, the CBS algorithm assigns scheduling deadlines to
 tasks in the following way:
 
- - Each SCHED_DEADLINE task is characterised by the "runtime",
+ - Each SCHED_DEADLINE task is characterized by the "runtime",
    "deadline", and "period" parameters;
 
  - The state of the task is described by a "scheduling deadline", and
@@ -78,7 +82,7 @@ CONTENTS
 
 then, if the scheduling deadline is smaller than the current time, or
 this condition is verified, the scheduling deadline and the
-remaining runtime are re-initialised as
+remaining runtime are re-initialized as
 
         scheduling deadline = current time + deadline
         remaining runtime = runtime
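As a rough sketch of the CBS wakeup rule shown in this hunk (hypothetical code, not the kernel's sched_dl_entity implementation; the exact overflow comparison lives in kernel/sched/deadline.c and in Section 2 of this file):

    #include <linux/types.h>

    struct cbs_state {
            u64 sched_deadline;     /* absolute scheduling deadline, ns */
            u64 remaining_runtime;  /* ns */
    };

    static void cbs_wakeup(struct cbs_state *s, u64 now, u64 dl_runtime,
                           u64 dl_deadline, u64 dl_period)
    {
            /*
             * Cross-multiplied form of
             *   remaining / (deadline - now) > dl_runtime / dl_period,
             * i.e. the old deadline cannot be kept without breaking
             * bandwidth isolation.
             */
            if (s->sched_deadline <= now ||
                s->remaining_runtime * dl_period >
                dl_runtime * (s->sched_deadline - now)) {
                    s->sched_deadline = now + dl_deadline;
                    s->remaining_runtime = dl_runtime;
            }
    }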
@@ -126,31 +130,37 @@ CONTENTS
 suited for periodic or sporadic real-time tasks that need guarantees on their
 timing behavior, e.g., multimedia, streaming, control applications, etc.
 
+3.1 Definitions
+------------------------
+
 A typical real-time task is composed of a repetition of computation phases
 (task instances, or jobs) which are activated on a periodic or sporadic
 fashion.
-Each job J_j (where J_j is the j^th job of the task) is characterised by an
+Each job J_j (where J_j is the j^th job of the task) is characterized by an
 arrival time r_j (the time when the job starts), an amount of computation
 time c_j needed to finish the job, and a job absolute deadline d_j, which
 is the time within which the job should be finished. The maximum execution
-time max_j{c_j} is called "Worst Case Execution Time" (WCET) for the task.
+time max{c_j} is called "Worst Case Execution Time" (WCET) for the task.
 A real-time task can be periodic with period P if r_{j+1} = r_j + P, or
 sporadic with minimum inter-arrival time P is r_{j+1} >= r_j + P. Finally,
 d_j = r_j + D, where D is the task's relative deadline.
-The utilisation of a real-time task is defined as the ratio between its
+Summing up, a real-time task can be described as
+	Task = (WCET, D, P)
+
+The utilization of a real-time task is defined as the ratio between its
 WCET and its period (or minimum inter-arrival time), and represents
 the fraction of CPU time needed to execute the task.
-If the total utilisation sum_i(WCET_i/P_i) is larger than M (with M equal
+
+If the total utilization U=sum(WCET_i/P_i) is larger than M (with M equal
 to the number of CPUs), then the scheduler is unable to respect all the
 deadlines.
-Note that total utilisation is defined as the sum of the utilisations
+Note that total utilization is defined as the sum of the utilizations
 WCET_i/P_i over all the real-time tasks in the system. When considering
 multiple real-time tasks, the parameters of the i-th task are indicated
 with the "_i" suffix.
-Moreover, if the total utilisation is larger than M, then we risk starving
+Moreover, if the total utilization is larger than M, then we risk starving
 non- real-time tasks by real-time tasks.
-If, instead, the total utilisation is smaller than M, then non real-time
+If, instead, the total utilization is smaller than M, then non real-time
 tasks will not be starved and the system might be able to respect all the
 deadlines.
 As a matter of fact, in this case it is possible to provide an upper bound
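The admission rule introduced here (total utilization no larger than the number of CPUs) is straightforward to check. A user-space C sketch, with the task structure and helper invented for illustration (floating point used for brevity; the kernel itself uses integer arithmetic):

    #include <stdbool.h>

    struct rt_task {
            double wcet;    /* worst-case execution time */
            double period;  /* period, or minimum inter-arrival time */
    };

    /* Necessary condition from above: U = sum(WCET_i / P_i) <= M */
    static bool utilization_admissible(const struct rt_task *t, int n, int m)
    {
            double u = 0.0;
            int i;

            for (i = 0; i < n; i++)
                    u += t[i].wcet / t[i].period;

            return u <= (double)m;
    }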
@@ -159,38 +169,119 @@ CONTENTS
 More precisely, it can be proven that using a global EDF scheduler the
 maximum tardiness of each task is smaller or equal than
 	((M − 1) · WCET_max − WCET_min)/(M − (M − 2) · U_max) + WCET_max
-where WCET_max = max_i{WCET_i} is the maximum WCET, WCET_min=min_i{WCET_i}
-is the minimum WCET, and U_max = max_i{WCET_i/P_i} is the maximum utilisation.
+where WCET_max = max{WCET_i} is the maximum WCET, WCET_min=min{WCET_i}
+is the minimum WCET, and U_max = max{WCET_i/P_i} is the maximum
+utilization[12].
+
+3.2 Schedulability Analysis for Uniprocessor Systems
+------------------------
 
 If M=1 (uniprocessor system), or in case of partitioned scheduling (each
 real-time task is statically assigned to one and only one CPU), it is
 possible to formally check if all the deadlines are respected.
 If D_i = P_i for all tasks, then EDF is able to respect all the deadlines
-of all the tasks executing on a CPU if and only if the total utilisation
+of all the tasks executing on a CPU if and only if the total utilization
 of the tasks running on such a CPU is smaller or equal than 1.
 If D_i != P_i for some task, then it is possible to define the density of
-a task as C_i/min{D_i,T_i}, and EDF is able to respect all the deadlines
-of all the tasks running on a CPU if the sum sum_i C_i/min{D_i,T_i} of the
-densities of the tasks running on such a CPU is smaller or equal than 1
-(notice that this condition is only sufficient, and not necessary).
+a task as WCET_i/min{D_i,P_i}, and EDF is able to respect all the deadlines
+of all the tasks running on a CPU if the sum of the densities of the tasks
+running on such a CPU is smaller or equal than 1:
+	sum(WCET_i / min{D_i, P_i}) <= 1
+It is important to notice that this condition is only sufficient, and not
+necessary: there are task sets that are schedulable, but do not respect the
+condition. For example, consider the task set {Task_1,Task_2} composed by
+Task_1=(50ms,50ms,100ms) and Task_2=(10ms,100ms,100ms).
+EDF is clearly able to schedule the two tasks without missing any deadline
+(Task_1 is scheduled as soon as it is released, and finishes just in time
+to respect its deadline; Task_2 is scheduled immediately after Task_1, hence
+its response time cannot be larger than 50ms + 10ms = 60ms) even if
+	50 / min{50,100} + 10 / min{100, 100} = 50 / 50 + 10 / 100 = 1.1
+Of course it is possible to test the exact schedulability of tasks with
+D_i != P_i (checking a condition that is both sufficient and necessary),
+but this cannot be done by comparing the total utilization or density with
+a constant. Instead, the so called "processor demand" approach can be used,
+computing the total amount of CPU time h(t) needed by all the tasks to
+respect all of their deadlines in a time interval of size t, and comparing
+such a time with the interval size t. If h(t) is smaller than t (that is,
+the amount of time needed by the tasks in a time interval of size t is
+smaller than the size of the interval) for all the possible values of t, then
+EDF is able to schedule the tasks respecting all of their deadlines. Since
+performing this check for all possible values of t is impossible, it has been
+proven[4,5,6] that it is sufficient to perform the test for values of t
+between 0 and a maximum value L. The cited papers contain all of the
+mathematical details and explain how to compute h(t) and L.
+In any case, this kind of analysis is too complex as well as too
+time-consuming to be performed on-line. Hence, as explained in Section
+4 Linux uses an admission test based on the tasks' utilizations.
+
+3.3 Schedulability Analysis for Multiprocessor Systems
+------------------------
 
 On multiprocessor systems with global EDF scheduling (non partitioned
 systems), a sufficient test for schedulability can not be based on the
-utilisations (it can be shown that task sets with utilisations slightly
-larger than 1 can miss deadlines regardless of the number of CPUs M).
-However, as previously stated, enforcing that the total utilisation is smaller
-than M is enough to guarantee that non real-time tasks are not starved and
-that the tardiness of real-time tasks has an upper bound.
+utilizations or densities: it can be shown that even if D_i = P_i task
+sets with utilizations slightly larger than 1 can miss deadlines regardless
+of the number of CPUs.
 
-SCHED_DEADLINE can be used to schedule real-time tasks guaranteeing that
-the jobs' deadlines of a task are respected. In order to do this, a task
-must be scheduled by setting:
+Consider a set {Task_1,...Task_{M+1}} of M+1 tasks on a system with M
+CPUs, with the first task Task_1=(P,P,P) having period, relative deadline
+and WCET equal to P. The remaining M tasks Task_i=(e,P-1,P-1) have an
+arbitrarily small worst case execution time (indicated as "e" here) and a
+period smaller than the one of the first task. Hence, if all the tasks
+activate at the same time t, global EDF schedules these M tasks first
+(because their absolute deadlines are equal to t + P - 1, hence they are
+smaller than the absolute deadline of Task_1, which is t + P). As a
+result, Task_1 can be scheduled only at time t + e, and will finish at
+time t + e + P, after its absolute deadline. The total utilization of the
+task set is U = M · e / (P - 1) + P / P = M · e / (P - 1) + 1, and for small
+values of e this can become very close to 1. This is known as "Dhall's
+effect"[7]. Note: the example in the original paper by Dhall has been
+slightly simplified here (for example, Dhall more correctly computed
+lim_{e->0}U).
+
+More complex schedulability tests for global EDF have been developed in
+real-time literature[8,9], but they are not based on a simple comparison
+between total utilization (or density) and a fixed constant. If all tasks
+have D_i = P_i, a sufficient schedulability condition can be expressed in
+a simple way:
+	sum(WCET_i / P_i) <= M - (M - 1) · U_max
+where U_max = max{WCET_i / P_i}[10]. Notice that for U_max = 1,
+M - (M - 1) · U_max becomes M - M + 1 = 1 and this schedulability condition
+just confirms the Dhall's effect. A more complete survey of the literature
+about schedulability tests for multi-processor real-time scheduling can be
+found in [11].
+
+As seen, enforcing that the total utilization is smaller than M does not
+guarantee that global EDF schedules the tasks without missing any deadline
+(in other words, global EDF is not an optimal scheduling algorithm). However,
+a total utilization smaller than M is enough to guarantee that non real-time
+tasks are not starved and that the tardiness of real-time tasks has an upper
+bound[12] (as previously noted). Different bounds on the maximum tardiness
+experienced by real-time tasks have been developed in various papers[13,14],
+but the theoretical result that is important for SCHED_DEADLINE is that if
+the total utilization is smaller or equal than M then the response times of
+the tasks are limited.
+
+3.4 Relationship with SCHED_DEADLINE Parameters
+------------------------
+
+Finally, it is important to understand the relationship between the
+SCHED_DEADLINE scheduling parameters described in Section 2 (runtime,
+deadline and period) and the real-time task parameters (WCET, D, P)
+described in this section. Note that the tasks' temporal constraints are
+represented by its absolute deadlines d_j = r_j + D described above, while
+SCHED_DEADLINE schedules the tasks according to scheduling deadlines (see
+Section 2).
+If an admission test is used to guarantee that the scheduling deadlines
+are respected, then SCHED_DEADLINE can be used to schedule real-time tasks
+guaranteeing that all the jobs' deadlines of a task are respected.
+In order to do this, a task must be scheduled by setting:
 
   - runtime >= WCET
   - deadline = D
   - period <= P
 
-IOW, if runtime >= WCET and if period is >= P, then the scheduling deadlines
+IOW, if runtime >= WCET and if period is <= P, then the scheduling deadlines
 and the absolute deadlines (d_j) coincide, so a proper admission control
 allows to respect the jobs' absolute deadlines for this task (this is what is
 called "hard schedulability property" and is an extension of Lemma 1 of [2]).
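The parameter mapping above (runtime >= WCET, deadline = D, period <= P) translates directly into a sched_setattr() call. A plausible user-space sketch follows; the 10ms/20ms/100ms figures are arbitrary, and struct sched_attr is declared locally because glibc provides no wrapper for this syscall at this point:

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/sched.h>        /* SCHED_DEADLINE */

    struct sched_attr {
            uint32_t size;
            uint32_t sched_policy;
            uint64_t sched_flags;
            int32_t  sched_nice;            /* SCHED_NORMAL, SCHED_BATCH */
            uint32_t sched_priority;        /* SCHED_FIFO, SCHED_RR */
            uint64_t sched_runtime;         /* SCHED_DEADLINE, all in ns */
            uint64_t sched_deadline;
            uint64_t sched_period;
    };

    /* Reserve 10ms of runtime every 100ms, due within 20ms of each period. */
    static int become_deadline_task(void)
    {
            struct sched_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.sched_policy   = SCHED_DEADLINE;
            attr.sched_runtime  = 10 * 1000 * 1000;   /* >= WCET */
            attr.sched_deadline = 20 * 1000 * 1000;   /* = D */
            attr.sched_period   = 100 * 1000 * 1000;  /* <= P */

            return syscall(__NR_sched_setattr, 0 /* self */, &attr, 0);
    }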
@@ -206,6 +297,39 @@ CONTENTS
    Symposium, 1998. http://retis.sssup.it/~giorgio/paps/1998/rtss98-cbs.pdf
  3 - L. Abeni. Server Mechanisms for Multimedia Applications. ReTiS Lab
      Technical Report. http://disi.unitn.it/~abeni/tr-98-01.pdf
+ 4 - J. Y. Leung and M.L. Merril. A Note on Preemptive Scheduling of
+     Periodic, Real-Time Tasks. Information Processing Letters, vol. 11,
+     no. 3, pp. 115-118, 1980.
+ 5 - S. K. Baruah, A. K. Mok and L. E. Rosier. Preemptively Scheduling
+     Hard-Real-Time Sporadic Tasks on One Processor. Proceedings of the
+     11th IEEE Real-time Systems Symposium, 1990.
+ 6 - S. K. Baruah, L. E. Rosier and R. R. Howell. Algorithms and Complexity
+     Concerning the Preemptive Scheduling of Periodic Real-Time tasks on
+     One Processor. Real-Time Systems Journal, vol. 4, no. 2, pp 301-324,
+     1990.
+ 7 - S. J. Dhall and C. L. Liu. On a real-time scheduling problem. Operations
+     research, vol. 26, no. 1, pp 127-140, 1978.
+ 8 - T. Baker. Multiprocessor EDF and Deadline Monotonic Schedulability
+     Analysis. Proceedings of the 24th IEEE Real-Time Systems Symposium, 2003.
+ 9 - T. Baker. An Analysis of EDF Schedulability on a Multiprocessor.
+     IEEE Transactions on Parallel and Distributed Systems, vol. 16, no. 8,
+     pp 760-768, 2005.
+ 10 - J. Goossens, S. Funk and S. Baruah, Priority-Driven Scheduling of
+      Periodic Task Systems on Multiprocessors. Real-Time Systems Journal,
+      vol. 25, no. 2–3, pp. 187–205, 2003.
+ 11 - R. Davis and A. Burns. A Survey of Hard Real-Time Scheduling for
+      Multiprocessor Systems. ACM Computing Surveys, vol. 43, no. 4, 2011.
+      http://www-users.cs.york.ac.uk/~robdavis/papers/MPSurveyv5.0.pdf
+ 12 - U. C. Devi and J. H. Anderson. Tardiness Bounds under Global EDF
+      Scheduling on a Multiprocessor. Real-Time Systems Journal, vol. 32,
+      no. 2, pp 133-189, 2008.
+ 13 - P. Valente and G. Lipari. An Upper Bound to the Lateness of Soft
+      Real-Time Tasks Scheduled by EDF on Multiprocessors. Proceedings of
+      the 26th IEEE Real-Time Systems Symposium, 2005.
+ 14 - J. Erickson, U. Devi and S. Baruah. Improved tardiness bounds for
+      Global EDF. Proceedings of the 22nd Euromicro Conference on
+      Real-Time Systems, 2010.
+
 
 4. Bandwidth management
 =======================
@@ -218,10 +342,10 @@ CONTENTS
 no guarantee can be given on the actual scheduling of the -deadline tasks.
 
 As already stated in Section 3, a necessary condition to be respected to
-correctly schedule a set of real-time tasks is that the total utilisation
+correctly schedule a set of real-time tasks is that the total utilization
 is smaller than M. When talking about -deadline tasks, this requires that
 the sum of the ratio between runtime and period for all tasks is smaller
-than M. Notice that the ratio runtime/period is equivalent to the utilisation
+than M. Notice that the ratio runtime/period is equivalent to the utilization
 of a "traditional" real-time task, and is also often referred to as
 "bandwidth".
 The interface used to control the CPU bandwidth that can be allocated
@@ -251,7 +375,7 @@ CONTENTS
 The system wide settings are configured under the /proc virtual file system.
 
 For now the -rt knobs are used for -deadline admission control and the
--deadline runtime is accounted against the -rt runtime. We realise that this
+-deadline runtime is accounted against the -rt runtime. We realize that this
 isn't entirely desirable; however, it is better to have a small interface for
 now, and be able to change it easily later. The ideal situation (see 5.) is to
 run -rt tasks from a -deadline server; in which case the -rt bandwidth is a
@@ -23,8 +23,7 @@
 #include <linux/smp.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
-
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);
 
@@ -107,7 +106,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 
     /* If we're in an interrupt context, or have no user context,
        we must not take the fault. */
-    if (!mm || in_atomic())
+    if (!mm || faulthandler_disabled())
        goto no_context;
 
 #ifdef CONFIG_ALPHA_LARGE_VMALLOC
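The predicate replacing in_atomic() in all of these fault handlers is introduced by the same series in include/linux/uaccess.h. Reproduced from memory, so treat it as a sketch rather than a verbatim quote:

    /* include/linux/uaccess.h (after this series), approximately: */
    static inline bool pagefault_disabled(void)
    {
            return current->pagefault_disabled != 0;
    }

    /*
     * The fault handler must not run either because pagefault_disable()
     * was called (per-task counter) or because we are in atomic context.
     */
    #define faulthandler_disabled() (pagefault_disabled() || in_atomic())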
@@ -53,7 +53,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
    if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
        return -EFAULT;
 
-   pagefault_disable();    /* implies preempt_disable() */
+   pagefault_disable();
 
    switch (op) {
    case FUTEX_OP_SET:
@@ -75,7 +75,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
        ret = -ENOSYS;
    }
 
-   pagefault_enable();     /* subsumes preempt_enable() */
+   pagefault_enable();
 
    if (!ret) {
        switch (cmp) {
@@ -104,7 +104,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
    return ret;
 }
 
-/* Compare-xchg with preemption disabled.
+/* Compare-xchg with pagefaults disabled.
  *  Notes:
  *      -Best-Effort: Exchg happens only if compare succeeds.
  *          If compare fails, returns; leaving retry/looping to upper layers
@@ -121,7 +121,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
    if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
        return -EFAULT;
 
-   pagefault_disable();    /* implies preempt_disable() */
+   pagefault_disable();
 
    /* TBD : can use llock/scond */
    __asm__ __volatile__(
@@ -142,7 +142,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
    : "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
    : "cc", "memory");
 
-   pagefault_enable();     /* subsumes preempt_enable() */
+   pagefault_enable();
 
    *uval = val;
    return val;
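The deleted comments are the point of the series: pagefault_disable() no longer implies preempt_disable(). Code that also needs preemption off must now say so explicitly. A minimal sketch of the decoupled pattern (illustrative; dst, uaddr and len are placeholders):

    preempt_disable();      /* only if preemption really must be off */
    pagefault_disable();    /* faults return -EFAULT instead of sleeping */

    ret = __copy_from_user_inatomic(dst, uaddr, len);

    pagefault_enable();
    preempt_enable();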
@@ -86,7 +86,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
     * If we're in an interrupt or have no user
     * context, we must not take the fault..
     */
-   if (in_atomic() || !mm)
+   if (faulthandler_disabled() || !mm)
        goto no_context;
 
    if (user_mode(regs))
@@ -93,6 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
    if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
        return -EFAULT;
 
+   preempt_disable();
    __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
    "1: " TUSER(ldr) " %1, [%4]\n"
    "   teq %1, %2\n"
@@ -104,6 +105,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
    : "cc", "memory");
 
    *uval = val;
+   preempt_enable();
+
    return ret;
 }
 
@@ -124,7 +127,10 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
    if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
        return -EFAULT;
 
-   pagefault_disable();    /* implies preempt_disable() */
+#ifndef CONFIG_SMP
+   preempt_disable();
+#endif
+   pagefault_disable();
 
    switch (op) {
    case FUTEX_OP_SET:
@@ -146,7 +152,10 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
        ret = -ENOSYS;
    }
 
-   pagefault_enable();     /* subsumes preempt_enable() */
+   pagefault_enable();
+#ifndef CONFIG_SMP
+   preempt_enable();
+#endif
 
    if (!ret) {
        switch (cmp) {
@@ -18,7 +18,7 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
 #define topology_physical_package_id(cpu)  (cpu_topology[cpu].socket_id)
 #define topology_core_id(cpu)      (cpu_topology[cpu].core_id)
 #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
-#define topology_thread_cpumask(cpu)   (&cpu_topology[cpu].thread_sibling)
+#define topology_sibling_cpumask(cpu)  (&cpu_topology[cpu].thread_sibling)
 
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
@@ -276,7 +276,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
     * If we're in an interrupt or have no user
     * context, we must not take the fault..
     */
-   if (in_atomic() || !mm)
+   if (faulthandler_disabled() || !mm)
        goto no_context;
 
    if (user_mode(regs))
@@ -59,6 +59,7 @@ void *kmap_atomic(struct page *page)
    void *kmap;
    int type;
 
+   preempt_disable();
    pagefault_disable();
    if (!PageHighMem(page))
        return page_address(page);
@@ -121,6 +122,7 @@ void __kunmap_atomic(void *kvaddr)
        kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
    }
    pagefault_enable();
+   preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
@@ -130,6 +132,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
    int idx, type;
    struct page *page = pfn_to_page(pfn);
 
+   preempt_disable();
    pagefault_disable();
    if (!PageHighMem(page))
        return page_address(page);
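Callers of kmap_atomic() keep the old guarantee: the explicit preempt_disable() added inside preserves the per-CPU fixmap slot for the mapping's lifetime. The usual calling pattern is unchanged, sketched here with placeholder names:

    void *vaddr = kmap_atomic(page);

    memcpy(vaddr + offset, buf, len);   /* must not sleep in between */
    kunmap_atomic(vaddr);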
@@ -58,7 +58,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
    if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
        return -EFAULT;
 
-   pagefault_disable();    /* implies preempt_disable() */
+   pagefault_disable();
 
    switch (op) {
    case FUTEX_OP_SET:
@@ -85,7 +85,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
        ret = -ENOSYS;
    }
 
-   pagefault_enable();     /* subsumes preempt_enable() */
+   pagefault_enable();
 
    if (!ret) {
        switch (cmp) {
@@ -18,7 +18,7 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
 #define topology_physical_package_id(cpu)  (cpu_topology[cpu].cluster_id)
 #define topology_core_id(cpu)      (cpu_topology[cpu].core_id)
 #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
-#define topology_thread_cpumask(cpu)   (&cpu_topology[cpu].thread_sibling)
+#define topology_sibling_cpumask(cpu)  (&cpu_topology[cpu].thread_sibling)
 
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
@@ -211,7 +211,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
     * If we're in an interrupt or have no user context, we must not take
     * the fault.
     */
-   if (in_atomic() || !mm)
+   if (faulthandler_disabled() || !mm)
        goto no_context;
 
    if (user_mode(regs))
@@ -97,7 +97,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space. It supports simple types like char and int, but not larger
@@ -116,7 +117,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space. It supports simple types like char and int, but not larger
@@ -136,7 +138,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space. It supports simple types like char and int, but not larger
@@ -158,7 +161,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space. It supports simple types like char and int, but not larger
@@ -14,11 +14,11 @@
 #include <linux/pagemap.h>
 #include <linux/kdebug.h>
 #include <linux/kprobes.h>
+#include <linux/uaccess.h>
 
 #include <asm/mmu_context.h>
 #include <asm/sysreg.h>
 #include <asm/tlb.h>
-#include <asm/uaccess.h>
 
 #ifdef CONFIG_KPROBES
 static inline int notify_page_fault(struct pt_regs *regs, int trap)
@@ -81,7 +81,7 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
     * If we're in an interrupt or have no user context, we must
     * not take the fault...
     */
-   if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
+   if (faulthandler_disabled() || !mm || regs->sr & SYSREG_BIT(GM))
        goto no_context;
 
    local_irq_enable();
@@ -8,7 +8,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/wait.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <arch/system.h>
 
 extern int find_fixup_code(struct pt_regs *);
@@ -109,11 +109,11 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
    info.si_code = SEGV_MAPERR;
 
    /*
-    * If we're in an interrupt or "atomic" operation or have no
+    * If we're in an interrupt, have pagefaults disabled or have no
     * user context, we must not take the fault.
     */
 
-   if (in_atomic() || !mm)
+   if (faulthandler_disabled() || !mm)
        goto no_context;
 
    if (user_mode(regs))
@@ -19,9 +19,9 @@
 #include <linux/kernel.h>
 #include <linux/ptrace.h>
 #include <linux/hardirq.h>
+#include <linux/uaccess.h>
 
 #include <asm/pgtable.h>
-#include <asm/uaccess.h>
 #include <asm/gdb-stub.h>
 
 /*****************************************************************************/
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
     * If we're in an interrupt or have no user
     * context, we must not take the fault..
     */
-   if (in_atomic() || !mm)
+   if (faulthandler_disabled() || !mm)
        goto no_context;
 
    if (user_mode(__frame))
@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
    unsigned long paddr;
    int type;
 
+   preempt_disable();
    pagefault_disable();
    type = kmap_atomic_idx_push();
    paddr = page_to_phys(page);
@@ -85,5 +86,6 @@ void __kunmap_atomic(void *kvaddr)
    }
    kmap_atomic_idx_pop();
    pagefault_enable();
+   preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
@@ -36,7 +36,8 @@
  * @addr: User space pointer to start of block to check
  * @size: Size of block to check
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Checks if a pointer to a block of memory in user space is valid.
  *
@@ -53,7 +53,7 @@ void build_cpu_to_node_map(void);
 #define topology_physical_package_id(cpu)  (cpu_data(cpu)->socket_id)
 #define topology_core_id(cpu)      (cpu_data(cpu)->core_id)
 #define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
-#define topology_thread_cpumask(cpu)   (&per_cpu(cpu_sibling_map, cpu))
+#define topology_sibling_cpumask(cpu)  (&per_cpu(cpu_sibling_map, cpu))
 #endif
 
 extern void arch_fix_phys_package_id(int num, u32 slot);
@@ -11,10 +11,10 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 #include <linux/prefetch.h>
+#include <linux/uaccess.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
-#include <asm/uaccess.h>
 
 extern int die(char *, struct pt_regs *, long);
 
@@ -96,7 +96,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
    /*
     * If we're in an interrupt or have no user context, we must not take the fault..
     */
-   if (in_atomic() || !mm)
+   if (faulthandler_disabled() || !mm)
        goto no_context;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
@@ -91,7 +91,8 @@ static inline void set_fs(mm_segment_t s)
  * @addr: User space pointer to start of block to check
  * @size: Size of block to check
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Checks if a pointer to a block of memory in user space is valid.
  *
@@ -155,7 +156,8 @@ extern int fixup_exception(struct pt_regs *regs);
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space. It supports simple types like char and int, but not larger
@@ -175,7 +177,8 @@ extern int fixup_exception(struct pt_regs *regs);
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space. It supports simple types like char and int, but not larger
@@ -194,7 +197,8 @@ extern int fixup_exception(struct pt_regs *regs);
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space. It supports simple types like char and int, but not larger
@@ -274,7 +278,8 @@ do { \
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space. It supports simple types like char and int, but not larger
@@ -568,7 +573,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long)
  * @from: Source address, in kernel space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
@@ -588,7 +594,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long)
  * @from: Source address, in kernel space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from kernel space to user space.
  *
@@ -606,7 +613,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long)
  * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
@@ -626,7 +634,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long)
  * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from user space to kernel space.
  *
@@ -677,7 +686,8 @@ unsigned long clear_user(void __user *mem, unsigned long len);
  * strlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Get the size of a NUL-terminated string in user space.
  *
@@ -24,9 +24,9 @@
 #include <linux/vt_kern.h>     /* For unblank_screen() */
 #include <linux/highmem.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 
 #include <asm/m32r.h>
-#include <asm/uaccess.h>
 #include <asm/hardirq.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
@@ -111,10 +111,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
    mm = tsk->mm;
 
    /*
-    * If we're in an interrupt or have no user context or are running in an
-    * atomic region then we must not take the fault..
+    * If we're in an interrupt or have no user context or have pagefaults
+    * disabled then we must not take the fault.
     */
-   if (in_atomic() || !mm)
+   if (faulthandler_disabled() || !mm)
        goto bad_area_nosemaphore;
 
    if (error_code & ACE_USERMODE)
@@ -2,9 +2,6 @@
 #define _M68K_IRQFLAGS_H
 
 #include <linux/types.h>
-#ifdef CONFIG_MMU
-#include <linux/preempt_mask.h>
-#endif
 #include <linux/preempt.h>
 #include <asm/thread_info.h>
 #include <asm/entry.h>
@@ -10,10 +10,10 @@
 #include <linux/ptrace.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 
 #include <asm/setup.h>
 #include <asm/traps.h>
-#include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 
 extern void die_if_kernel(char *, struct pt_regs *, long);
@@ -81,7 +81,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
     * If we're in an interrupt or have no user
     * context, we must not take the fault..
     */
-   if (in_atomic() || !mm)
+   if (faulthandler_disabled() || !mm)
        goto no_context;
 
    if (user_mode(regs))
@@ -105,7 +105,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 
    mm = tsk->mm;
 
-   if (in_atomic() || !mm)
+   if (faulthandler_disabled() || !mm)
        goto no_context;
 
    if (user_mode(regs))
@@ -43,7 +43,7 @@ void *kmap_atomic(struct page *page)
    unsigned long vaddr;
    int type;
 
-   /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+   preempt_disable();
    pagefault_disable();
    if (!PageHighMem(page))
        return page_address(page);
@@ -82,6 +82,7 @@ void __kunmap_atomic(void *kvaddr)
    }
 
    pagefault_enable();
+   preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
@@ -95,6 +96,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
    unsigned long vaddr;
    int type;
 
+   preempt_disable();
    pagefault_disable();
 
    type = kmap_atomic_idx_push();
@@ -178,7 +178,8 @@ extern long __user_bad(void);
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space. It supports simple types like char and int, but not larger
@@ -290,7 +291,8 @@ extern long __user_bad(void);
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space. It supports simple types like char and int, but not larger
@@ -107,14 +107,14 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
    if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
        is_write = 0;
 
-   if (unlikely(in_atomic() || !mm)) {
+   if (unlikely(faulthandler_disabled() || !mm)) {
        if (kernel_mode(regs))
            goto bad_area_nosemaphore;
 
-       /* in_atomic() in user mode is really bad,
+       /* faulthandler_disabled() in user mode is really bad,
           as is current->mm == NULL. */
-       pr_emerg("Page fault in user mode with in_atomic(), mm = %p\n",
-            mm);
+       pr_emerg("Page fault in user mode with faulthandler_disabled(), mm = %p\n",
+            mm);
        pr_emerg("r15 = %lx  MSR = %lx\n",
             regs->r15, regs->msr);
        die("Weird page fault", regs, SIGSEGV);
@@ -37,7 +37,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
    unsigned long vaddr;
    int idx, type;
 
-   /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+   preempt_disable();
    pagefault_disable();
    if (!PageHighMem(page))
        return page_address(page);
@@ -63,6 +63,7 @@ void __kunmap_atomic(void *kvaddr)
 
    if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
        pagefault_enable();
+       preempt_enable();
        return;
    }
 
@@ -84,5 +85,6 @@ void __kunmap_atomic(void *kvaddr)
 #endif
    kmap_atomic_idx_pop();
    pagefault_enable();
+   preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
@@ -15,7 +15,7 @@
 #define topology_physical_package_id(cpu)  (cpu_data[cpu].package)
 #define topology_core_id(cpu)      (cpu_data[cpu].core)
 #define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
-#define topology_thread_cpumask(cpu)   (&cpu_sibling_map[cpu])
+#define topology_sibling_cpumask(cpu)  (&cpu_sibling_map[cpu])
 #endif
 
 #endif /* __ASM_TOPOLOGY_H */
@@ -103,7 +103,8 @@ extern u64 __ua_limit;
  * @addr: User space pointer to start of block to check
  * @size: Size of block to check
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
 *
  * Checks if a pointer to a block of memory in user space is valid.
  *
@@ -138,7 +139,8 @@ extern u64 __ua_limit;
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space. It supports simple types like char and int, but not larger
@@ -157,7 +159,8 @@ extern u64 __ua_limit;
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space. It supports simple types like char and int, but not larger
@@ -177,7 +180,8 @@ extern u64 __ua_limit;
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space. It supports simple types like char and int, but not larger
@@ -199,7 +203,8 @@ extern u64 __ua_limit;
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space. It supports simple types like char and int, but not larger
@@ -498,7 +503,8 @@ extern void __put_user_unknown(void);
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space. It supports simple types like char and int, but not larger
@@ -517,7 +523,8 @@ extern void __put_user_unknown(void);
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space. It supports simple types like char and int, but not larger
@@ -537,7 +544,8 @@ extern void __put_user_unknown(void);
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space. It supports simple types like char and int, but not larger
@@ -559,7 +567,8 @@ extern void __put_user_unknown(void);
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space. It supports simple types like char and int, but not larger
@@ -815,7 +824,8 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
  * @from: Source address, in kernel space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
@@ -888,7 +898,8 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
  * @from: Source address, in kernel space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from kernel space to user space.
  *
@@ -1075,7 +1086,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
  * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
@@ -1107,7 +1119,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
  * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from user space to kernel space.
  *
@@ -1329,7 +1342,8 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
  * strlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Get the size of a NUL-terminated string in user space.
  *
@@ -1398,7 +1412,8 @@ static inline long __strnlen_user(const char __user *s, long n)
  * strnlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Get the size of a NUL-terminated string in user space.
  *
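The reworded kernel-doc states the contract precisely: these helpers may sleep only when pagefaults are enabled. In plain process context that is the normal case, as in this sketch (function name and arguments are hypothetical):

    static long fetch_args(void __user *ubuf, void *kbuf, size_t len)
    {
            /* process context, pagefaults enabled: may sleep to fault
             * the user page in; returns -EFAULT on a bad address */
            if (copy_from_user(kbuf, ubuf, len))
                    return -EFAULT;
            return 0;
    }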
@@ -28,12 +28,7 @@ extern void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
 extern int fpcsr_pending(unsigned int __user *fpcsr);
 
 /* Make sure we will not lose FPU ownership */
-#ifdef CONFIG_PREEMPT
-#define lock_fpu_owner()   preempt_disable()
-#define unlock_fpu_owner() preempt_enable()
-#else
-#define lock_fpu_owner()   pagefault_disable()
-#define unlock_fpu_owner() pagefault_enable()
-#endif
+#define lock_fpu_owner()   ({ preempt_disable(); pagefault_disable(); })
+#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); })
 
 #endif /* __SIGNAL_COMMON_H */
@@ -21,10 +21,10 @@
 #include <linux/module.h>
 #include <linux/kprobes.h>
 #include <linux/perf_event.h>
+#include <linux/uaccess.h>
 
 #include <asm/branch.h>
 #include <asm/mmu_context.h>
-#include <asm/uaccess.h>
 #include <asm/ptrace.h>
 #include <asm/highmem.h>       /* For VMALLOC_END */
 #include <linux/kdebug.h>
@@ -94,7 +94,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
     * If we're in an interrupt or have no user
     * context, we must not take the fault..
     */
-   if (in_atomic() || !mm)
+   if (faulthandler_disabled() || !mm)
        goto bad_area_nosemaphore;
 
    if (user_mode(regs))
@@ -47,7 +47,7 @@ void *kmap_atomic(struct page *page)
    unsigned long vaddr;
    int idx, type;
 
-   /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+   preempt_disable();
    pagefault_disable();
    if (!PageHighMem(page))
        return page_address(page);
@@ -72,6 +72,7 @@ void __kunmap_atomic(void *kvaddr)
 
    if (vaddr < FIXADDR_START) { // FIXME
        pagefault_enable();
+       preempt_enable();
        return;
    }
 
@@ -92,6 +93,7 @@ void __kunmap_atomic(void *kvaddr)
 #endif
    kmap_atomic_idx_pop();
    pagefault_enable();
+   preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
@@ -104,6 +106,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
    unsigned long vaddr;
    int idx, type;
 
+   preempt_disable();
    pagefault_disable();
 
    type = kmap_atomic_idx_push();
@@ -90,6 +90,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 
    BUG_ON(Page_dcache_dirty(page));
 
+   preempt_disable();
    pagefault_disable();
    idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
    idx += in_interrupt() ? FIX_N_COLOURS : 0;
@@ -152,6 +153,7 @@ void kunmap_coherent(void)
    write_c0_entryhi(old_ctx);
    local_irq_restore(flags);
    pagefault_enable();
+   preempt_enable();
 }
 
 void copy_user_highpage(struct page *to, struct page *from,
@ -75,6 +75,7 @@ static inline void *kmap_atomic(struct page *page)
unsigned long vaddr;
int idx, type;

preempt_disable();
pagefault_disable();
if (page < highmem_start_page)
return page_address(page);
@ -98,6 +99,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)

if (vaddr < FIXADDR_START) { /* FIXME */
pagefault_enable();
preempt_enable();
return;
}

@ -122,6 +124,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)

kmap_atomic_idx_pop();
pagefault_enable();
preempt_enable();
}
#endif /* __KERNEL__ */
@ -23,8 +23,8 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/vt_kern.h> /* For unblank_screen() */
#include <linux/uaccess.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>
#include <asm/cpu-regs.h>
@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
if (faulthandler_disabled() || !mm)
goto no_context;

if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
@ -77,7 +77,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
if (faulthandler_disabled() || !mm)
goto bad_area_nosemaphore;

if (user_mode(regs))
@ -142,6 +142,7 @@ static inline void kunmap(struct page *page)

static inline void *kmap_atomic(struct page *page)
{
preempt_disable();
pagefault_disable();
return page_address(page);
}
@ -150,6 +151,7 @@ static inline void __kunmap_atomic(void *addr)
{
flush_kernel_dcache_page_addr(addr);
pagefault_enable();
preempt_enable();
}

#define kmap_atomic_prot(page, prot) kmap_atomic(page)
@ -26,9 +26,9 @@
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>

#include <asm/assembly.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
@ -800,7 +800,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
* unless pagefault_disable() was called before.
*/

if (fault_space == 0 && !in_atomic())
if (fault_space == 0 && !faulthandler_disabled())
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
parisc_terminate("Kernel Fault", regs, code, fault_address);
@ -15,8 +15,8 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/uaccess.h>
#include <asm/traps.h>

/* Various important other fields */
@ -207,7 +207,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
int fault;
unsigned int flags;

if (in_atomic())
if (pagefault_disabled())
goto no_context;

tsk = current;
@ -87,7 +87,7 @@ static inline int prrn_is_enabled(void)
#include <asm/smp.h>

#define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu))
#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
#endif
@ -27,11 +27,11 @@ int enter_vmx_usercopy(void)
if (in_interrupt())
return 0;

/* This acts as preempt_disable() as well and will make
* enable_kernel_altivec(). We need to disable page faults
* as they can call schedule and thus make us lose the VMX
* context. So on page faults, we just fail which will cause
* a fallback to the normal non-vmx copy.
preempt_disable();
/*
* We need to disable page faults as they can call schedule and
* thus make us lose the VMX context. So on page faults, we just
* fail which will cause a fallback to the normal non-vmx copy.
*/
pagefault_disable();

@ -47,6 +47,7 @@ int enter_vmx_usercopy(void)
int exit_vmx_usercopy(void)
{
pagefault_enable();
preempt_enable();
return 0;
}
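The enter/exit pair above brackets every VMX-accelerated copy. A hedged sketch of the calling convention (helper name invented; the early-out follows the in_interrupt() check shown above):

	static int vmx_copy_example(void __user *to, const void *from, size_t n)
	{
		int rc;

		if (!enter_vmx_usercopy())	/* returns 0 in interrupt context */
			return -EAGAIN;		/* caller falls back to the plain copy */

		/* faults fail fast here rather than scheduling the VMX state away */
		rc = __copy_to_user_inatomic(to, from, n) ? -EFAULT : 0;

		return exit_vmx_usercopy() ? : rc;	/* exit returns 0 */
	}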
@ -33,13 +33,13 @@
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>

#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
@ -272,15 +272,16 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
if (!arch_irq_disabled_regs(regs))
local_irq_enable();

if (in_atomic() || mm == NULL) {
if (faulthandler_disabled() || mm == NULL) {
if (!user_mode(regs)) {
rc = SIGSEGV;
goto bail;
}
/* in_atomic() in user mode is really bad,
/* faulthandler_disabled() in user mode is really bad,
as is current->mm == NULL. */
printk(KERN_EMERG "Page fault in user mode with "
"in_atomic() = %d mm = %p\n", in_atomic(), mm);
"faulthandler_disabled() = %d mm = %p\n",
faulthandler_disabled(), mm);
printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
regs->nip, regs->msr);
die("Weird page fault", regs, SIGSEGV);
@ -34,7 +34,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
unsigned long vaddr;
int idx, type;

/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
preempt_disable();
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
@ -59,6 +59,7 @@ void __kunmap_atomic(void *kvaddr)

if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
pagefault_enable();
preempt_enable();
return;
}

@ -82,5 +83,6 @@ void __kunmap_atomic(void *kvaddr)

kmap_atomic_idx_pop();
pagefault_enable();
preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
@ -217,7 +217,7 @@ static DEFINE_RAW_SPINLOCK(tlbivax_lock);
static int mm_is_core_local(struct mm_struct *mm)
{
return cpumask_subset(mm_cpumask(mm),
topology_thread_cpumask(smp_processor_id()));
topology_sibling_cpumask(smp_processor_id()));
}

struct tlb_flush_param {
@ -22,7 +22,8 @@ DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);

#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_topology, cpu).thread_mask)
#define topology_sibling_cpumask(cpu) \
(&per_cpu(cpu_topology, cpu).thread_mask)
#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
@ -98,7 +98,8 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
@ -118,7 +119,8 @@ unsigned long __must_check __copy_from_user(void *to, const void __user *from,
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
@ -264,7 +266,8 @@ int __get_user_bad(void) __attribute__((noreturn));
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space.
*
@ -290,7 +293,8 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to kernel space.
*
@ -348,7 +352,8 @@ static inline unsigned long strnlen_user(const char __user *src, unsigned long n
* strlen_user: - Get the size of a string in user space.
* @str: The string to measure.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Get the size of a NUL-terminated string in user space.
*
@ -399,7 +399,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
if (unlikely(!user_space_fault(regs) || faulthandler_disabled() || !mm))
goto out;

address = trans_exc_code & __FAIL_ADDR_MASK;
@ -36,7 +36,8 @@
* @addr: User space pointer to start of block to check
* @size: Size of block to check
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Checks if a pointer to a block of memory in user space is valid.
*
@ -61,7 +62,8 @@
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
@ -79,7 +81,8 @@
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
@ -98,7 +101,8 @@
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
@ -119,7 +123,8 @@
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
@ -34,6 +34,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>

/*
* This routine handles page faults. It determines the address,
@ -73,7 +74,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
if (pagefault_disabled() || !mm)
goto bad_area_nosemaphore;

if (user_mode(regs))
@ -17,6 +17,7 @@
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
@ -438,9 +439,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,

/*
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
* with pagefaults disabled then we must not take the fault:
*/
if (unlikely(in_atomic() || !mm)) {
if (unlikely(faulthandler_disabled() || !mm)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
@ -41,7 +41,7 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).core_id)
#define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu])
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#endif /* CONFIG_SMP */

extern cpumask_t cpu_core_map[NR_CPUS];
@ -21,6 +21,7 @@
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/pgtable.h>
@ -29,7 +30,6 @@
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/uaccess.h>

#include "mm_32.h"

@ -196,7 +196,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
if (pagefault_disabled() || !mm)
goto no_context;

perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
@ -22,12 +22,12 @@
#include <linux/kdebug.h>
#include <linux/percpu.h>
#include <linux/context_tracking.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/sections.h>
@ -330,7 +330,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
if (faulthandler_disabled() || !mm)
goto intr_or_no_mm;

perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
@ -53,7 +53,7 @@ void *kmap_atomic(struct page *page)
unsigned long vaddr;
long idx, type;

/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
preempt_disable();
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
@ -91,6 +91,7 @@ void __kunmap_atomic(void *kvaddr)

if (vaddr < FIXADDR_START) { // FIXME
pagefault_enable();
preempt_enable();
return;
}

@ -126,5 +127,6 @@ void __kunmap_atomic(void *kvaddr)

kmap_atomic_idx_pop();
pagefault_enable();
preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
@ -2738,7 +2738,7 @@ void hugetlb_setup(struct pt_regs *regs)
struct mm_struct *mm = current->mm;
struct tsb_config *tp;

if (in_atomic() || !mm) {
if (faulthandler_disabled() || !mm) {
const struct exception_table_entry *entry;

entry = search_exception_tables(regs->tpc);
@ -55,7 +55,7 @@ static inline const struct cpumask *cpumask_of_node(int node)
#define topology_physical_package_id(cpu) ((void)(cpu), 0)
#define topology_core_id(cpu) (cpu)
#define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask)
#define topology_thread_cpumask(cpu) cpumask_of(cpu)
#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
#endif

#endif /* _ASM_TILE_TOPOLOGY_H */
@ -78,7 +78,8 @@ int __range_ok(unsigned long addr, unsigned long size);
* @addr: User space pointer to start of block to check
* @size: Size of block to check
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Checks if a pointer to a block of memory in user space is valid.
*
@ -192,7 +193,8 @@ extern int __get_user_bad(void)
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
@ -274,7 +276,8 @@ extern int __put_user_bad(void)
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
@ -330,7 +333,8 @@ extern int __put_user_bad(void)
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
@ -366,7 +370,8 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
@ -437,7 +442,8 @@ static inline unsigned long __must_check copy_from_user(void *to,
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to user space. Caller must check
* the specified blocks with access_ok() before calling this function.
@ -354,9 +354,9 @@ static int handle_page_fault(struct pt_regs *regs,

/*
* If we're in an interrupt, have no user context or are running in an
* atomic region then we must not take the fault.
* region with pagefaults disabled then we must not take the fault.
*/
if (in_atomic() || !mm) {
if (pagefault_disabled() || !mm) {
vma = NULL; /* happy compiler */
goto bad_area_nosemaphore;
}
@ -201,7 +201,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
int idx, type;
pte_t *pte;

/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
preempt_disable();
pagefault_disable();

/* Avoid icache flushes by disallowing atomic executable mappings. */
@ -259,6 +259,7 @@ void __kunmap_atomic(void *kvaddr)
}

pagefault_enable();
preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
@ -7,6 +7,7 @@
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@ -35,10 +36,10 @@ int handle_page_fault(unsigned long address, unsigned long ip,
*code_out = SEGV_MAPERR;

/*
* If the fault was during atomic operation, don't take the fault, just
* If the fault was with pagefaults disabled, don't take the fault, just
* fail.
*/
if (in_atomic())
if (faulthandler_disabled())
goto out_nosemaphore;

if (is_user)
@ -218,7 +218,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
if (faulthandler_disabled() || !mm)
goto no_context;

if (user_mode(regs))
@ -99,11 +99,9 @@ static __always_inline bool should_resched(void)
extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule() asm ("call ___preempt_schedule")
extern asmlinkage void preempt_schedule(void);
# ifdef CONFIG_CONTEXT_TRACKING
extern asmlinkage void ___preempt_schedule_context(void);
# define __preempt_schedule_context() asm ("call ___preempt_schedule_context")
extern asmlinkage void preempt_schedule_context(void);
# endif
extern asmlinkage void ___preempt_schedule_notrace(void);
# define __preempt_schedule_notrace() asm ("call ___preempt_schedule_notrace")
extern asmlinkage void preempt_schedule_notrace(void);
#endif

#endif /* __ASM_PREEMPT_H */
@ -37,16 +37,6 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);

static inline struct cpumask *cpu_sibling_mask(int cpu)
{
return per_cpu(cpu_sibling_map, cpu);
}

static inline struct cpumask *cpu_core_mask(int cpu)
{
return per_cpu(cpu_core_map, cpu);
}

static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
return per_cpu(cpu_llc_shared_map, cpu);
@ -124,7 +124,7 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);

#ifdef ENABLE_TOPO_DEFINES
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#endif

static inline void arch_fix_phys_package_id(int num, u32 slot)
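The topology_thread_cpumask() callers in the rest of this diff are converted mechanically to topology_sibling_cpumask(); the accessor's shape is unchanged. A small, hypothetical helper showing typical use:

	#include <linux/cpumask.h>
	#include <linux/topology.h>

	/* Count the hardware threads sharing @cpu's core (1 when SMT is off). */
	static unsigned int smt_siblings_of(int cpu)
	{
		return cpumask_weight(topology_sibling_cpumask(cpu));
	}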
@ -74,7 +74,8 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
* @addr: User space pointer to start of block to check
* @size: Size of block to check
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Checks if a pointer to a block of memory in user space is valid.
*
@ -145,7 +146,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
@ -240,7 +242,8 @@ extern void __put_user_8(void);
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
@ -455,7 +458,8 @@ struct __large_struct { unsigned long buf[100]; };
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
@ -479,7 +483,8 @@ struct __large_struct { unsigned long buf[100]; };
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
@ -70,7 +70,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
@ -117,7 +118,8 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
@ -2576,7 +2576,7 @@ static void intel_pmu_cpu_starting(int cpu)
if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];

for_each_cpu(i, topology_thread_cpumask(cpu)) {
for_each_cpu(i, topology_sibling_cpumask(cpu)) {
struct intel_shared_regs *pc;

pc = per_cpu(cpu_hw_events, i).shared_regs;
@ -2594,7 +2594,7 @@ static void intel_pmu_cpu_starting(int cpu)
cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];

if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
for_each_cpu(i, topology_thread_cpumask(cpu)) {
for_each_cpu(i, topology_sibling_cpumask(cpu)) {
struct intel_excl_cntrs *c;

c = per_cpu(cpu_hw_events, i).excl_cntrs;
@ -3362,7 +3362,7 @@ static __init int fixup_ht_bug(void)
if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
return 0;

w = cpumask_weight(topology_thread_cpumask(cpu));
w = cpumask_weight(topology_sibling_cpumask(cpu));
if (w > 1) {
pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
return 0;
@ -12,7 +12,8 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
{
#ifdef CONFIG_SMP
seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
seq_printf(m, "siblings\t: %d\n", cpumask_weight(cpu_core_mask(cpu)));
seq_printf(m, "siblings\t: %d\n",
cpumask_weight(topology_core_cpumask(cpu)));
seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
seq_printf(m, "apicid\t\t: %d\n", c->apicid);
@ -40,7 +40,5 @@ EXPORT_SYMBOL(empty_zero_page);

#ifdef CONFIG_PREEMPT
EXPORT_SYMBOL(___preempt_schedule);
#ifdef CONFIG_CONTEXT_TRACKING
EXPORT_SYMBOL(___preempt_schedule_context);
#endif
EXPORT_SYMBOL(___preempt_schedule_notrace);
#endif
@ -445,11 +445,10 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
}

/*
* MONITOR/MWAIT with no hints, used for default default C1 state.
* This invokes MWAIT with interrutps enabled and no flags,
* which is backwards compatible with the original MWAIT implementation.
* MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
* with interrupts enabled and no flags, which is backwards compatible with the
* original MWAIT implementation.
*/

static void mwait_idle(void)
{
if (!current_set_polling_and_test()) {
@ -314,10 +314,10 @@ topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
}

#define link_mask(_m, c1, c2) \
#define link_mask(mfunc, c1, c2) \
do { \
cpumask_set_cpu((c1), cpu_##_m##_mask(c2)); \
cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \
cpumask_set_cpu((c1), mfunc(c2)); \
cpumask_set_cpu((c2), mfunc(c1)); \
} while (0)

static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
@ -398,9 +398,9 @@ void set_cpu_sibling_map(int cpu)
cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

if (!has_mp) {
cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
cpumask_set_cpu(cpu, cpu_core_mask(cpu));
cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
c->booted_cores = 1;
return;
}
@ -409,32 +409,34 @@ void set_cpu_sibling_map(int cpu)
o = &cpu_data(i);

if ((i == cpu) || (has_smt && match_smt(c, o)))
link_mask(sibling, cpu, i);
link_mask(topology_sibling_cpumask, cpu, i);

if ((i == cpu) || (has_mp && match_llc(c, o)))
link_mask(llc_shared, cpu, i);
link_mask(cpu_llc_shared_mask, cpu, i);

}

/*
* This needs a separate iteration over the cpus because we rely on all
* cpu_sibling_mask links to be set-up.
* topology_sibling_cpumask links to be set-up.
*/
for_each_cpu(i, cpu_sibling_setup_mask) {
o = &cpu_data(i);

if ((i == cpu) || (has_mp && match_die(c, o))) {
link_mask(core, cpu, i);
link_mask(topology_core_cpumask, cpu, i);

/*
* Does this new cpu bringup a new core?
*/
if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
if (cpumask_weight(
topology_sibling_cpumask(cpu)) == 1) {
/*
* for each core in package, increment
* the booted_cores for this new cpu
*/
if (cpumask_first(cpu_sibling_mask(i)) == i)
if (cpumask_first(
topology_sibling_cpumask(i)) == i)
c->booted_cores++;
/*
* increment the core count for all
@ -1009,8 +1011,8 @@ static __init void disable_smp(void)
physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
else
physid_set_mask_of_physid(0, &phys_cpu_present_map);
cpumask_set_cpu(0, cpu_sibling_mask(0));
cpumask_set_cpu(0, cpu_core_mask(0));
cpumask_set_cpu(0, topology_sibling_cpumask(0));
cpumask_set_cpu(0, topology_core_cpumask(0));
}

enum {
@ -1293,22 +1295,22 @@ static void remove_siblinginfo(int cpu)
int sibling;
struct cpuinfo_x86 *c = &cpu_data(cpu);

for_each_cpu(sibling, cpu_core_mask(cpu)) {
cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
for_each_cpu(sibling, topology_core_cpumask(cpu)) {
cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
/*/
* last thread sibling in this cpu core going down
*/
if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
cpu_data(sibling).booted_cores--;
}

for_each_cpu(sibling, cpu_sibling_mask(cpu))
cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
for_each_cpu(sibling, topology_sibling_cpumask(cpu))
cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
cpumask_clear(cpu_llc_shared_mask(cpu));
cpumask_clear(cpu_sibling_mask(cpu));
cpumask_clear(cpu_core_mask(cpu));
cpumask_clear(topology_sibling_cpumask(cpu));
cpumask_clear(topology_core_cpumask(cpu));
c->phys_proc_id = 0;
c->cpu_core_id = 0;
cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
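The reworked link_mask() takes the cpumask accessor itself instead of token-pasting cpu_##_m##_mask, so the generic topology_*_cpumask() helpers (and cpu_llc_shared_mask) can be passed directly. Typical expansion, as used in the hunks above:

	/* Each invocation below expands to two symmetric cpumask_set_cpu() calls. */
	link_mask(topology_sibling_cpumask, cpu, i);
	link_mask(cpu_llc_shared_mask, cpu, i);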
@ -113,7 +113,7 @@ static void check_tsc_warp(unsigned int timeout)
*/
static inline unsigned int loop_timeout(int cpu)
{
return (cpumask_weight(cpu_core_mask(cpu)) > 1) ? 2 : 20;
return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
}

/*
@ -75,7 +75,5 @@ EXPORT_SYMBOL(native_load_gs_index);

#ifdef CONFIG_PREEMPT
EXPORT_SYMBOL(___preempt_schedule);
#ifdef CONFIG_CONTEXT_TRACKING
EXPORT_SYMBOL(___preempt_schedule_context);
#endif
EXPORT_SYMBOL(___preempt_schedule_notrace);
#endif
@ -38,8 +38,6 @@

#ifdef CONFIG_PREEMPT
THUNK ___preempt_schedule, preempt_schedule
#ifdef CONFIG_CONTEXT_TRACKING
THUNK ___preempt_schedule_context, preempt_schedule_context
#endif
THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
#endif
@ -49,9 +49,7 @@

#ifdef CONFIG_PREEMPT
THUNK ___preempt_schedule, preempt_schedule
#ifdef CONFIG_CONTEXT_TRACKING
THUNK ___preempt_schedule_context, preempt_schedule_context
#endif
THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
#endif

#if defined(CONFIG_TRACE_IRQFLAGS) \
@ -647,7 +647,8 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space.
*
@ -668,7 +669,8 @@ EXPORT_SYMBOL(_copy_to_user);
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to kernel space.
*
@ -13,6 +13,7 @@
#include <linux/hugetlb.h> /* hstate_index_to_shift */
#include <linux/prefetch.h> /* prefetchw */
#include <linux/context_tracking.h> /* exception_enter(), ... */
#include <linux/uaccess.h> /* faulthandler_disabled() */

#include <asm/traps.h> /* dotraplinkage, ... */
#include <asm/pgalloc.h> /* pgd_*(), ... */
@ -1126,9 +1127,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,

/*
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
* in a region with pagefaults disabled then we must not take the fault
*/
if (unlikely(in_atomic() || !mm)) {
if (unlikely(faulthandler_disabled() || !mm)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
@ -35,7 +35,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
unsigned long vaddr;
int idx, type;

/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
preempt_disable();
pagefault_disable();

if (!PageHighMem(page))
@ -100,6 +100,7 @@ void __kunmap_atomic(void *kvaddr)
#endif

pagefault_enable();
preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
@ -59,6 +59,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
unsigned long vaddr;
int idx, type;

preempt_disable();
pagefault_disable();

type = kmap_atomic_idx_push();
@ -117,5 +118,6 @@ iounmap_atomic(void __iomem *kvaddr)
}

pagefault_enable();
preempt_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);
@ -15,10 +15,10 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs)
/* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm) {
if (faulthandler_disabled() || !mm) {
bad_page_fault(regs, address, SIGSEGV);
return;
}
@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
enum fixed_addresses idx;
unsigned long vaddr;

preempt_disable();
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
@ -79,6 +80,7 @@ void __kunmap_atomic(void *kvaddr)
}

pagefault_enable();
preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
@ -24,7 +24,7 @@ static int get_first_sibling(unsigned int cpu)
{
unsigned int ret;

ret = cpumask_first(topology_thread_cpumask(cpu));
ret = cpumask_first(topology_sibling_cpumask(cpu));
if (ret < nr_cpu_ids)
return ret;
@ -105,7 +105,7 @@ static void round_robin_cpu(unsigned int tsk_index)
mutex_lock(&round_robin_lock);
cpumask_clear(tmp);
for_each_cpu(cpu, pad_busy_cpus)
cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu));
cpumask_andnot(tmp, cpu_online_mask, tmp);
/* avoid HT sibilings if possible */
if (cpumask_empty(tmp))
@ -61,7 +61,7 @@ static DEVICE_ATTR_RO(physical_package_id);
define_id_show_func(core_id);
static DEVICE_ATTR_RO(core_id);

define_siblings_show_func(thread_siblings, thread_cpumask);
define_siblings_show_func(thread_siblings, sibling_cpumask);
static DEVICE_ATTR_RO(thread_siblings);
static DEVICE_ATTR_RO(thread_siblings_list);
@ -699,13 +699,14 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
dmi_check_system(sw_any_bug_dmi_table);
if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
cpumask_copy(policy->cpus, cpu_core_mask(cpu));
cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
}

if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
cpumask_clear(policy->cpus);
cpumask_set_cpu(cpu, policy->cpus);
cpumask_copy(data->freqdomain_cpus, cpu_sibling_mask(cpu));
cpumask_copy(data->freqdomain_cpus,
topology_sibling_cpumask(cpu));
policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
pr_info_once(PFX "overriding BIOS provided _PSD data\n");
}
@ -172,7 +172,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
unsigned int i;

#ifdef CONFIG_SMP
cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu));
#endif

/* Errata workaround */
@ -57,13 +57,6 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);

static struct cpufreq_driver cpufreq_amd64_driver;

#ifndef CONFIG_SMP
static inline const struct cpumask *cpu_core_mask(int cpu)
{
return cpumask_of(0);
}
#endif

/* Return a frequency in MHz, given an input fid */
static u32 find_freq_from_fid(u32 fid)
{
@ -620,7 +613,7 @@ static int fill_powernow_table(struct powernow_k8_data *data,

pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
data->powernow_table = powernow_table;
if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu)
print_basics(data);

for (j = 0; j < data->numps; j++)
@ -784,7 +777,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
CPUFREQ_TABLE_END;
data->powernow_table = powernow_table;

if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu)
print_basics(data);

/* notify BIOS that we exist */
@ -1090,7 +1083,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
if (rc != 0)
goto err_out_exit_acpi;

cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
cpumask_copy(pol->cpus, topology_core_cpumask(pol->cpu));
data->available_cores = pol->cpus;

/* min/max the cpu is capable of */
@ -292,7 +292,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)

/* only run on CPU to be set, or on its sibling */
#ifdef CONFIG_SMP
cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu));
#endif
policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
@ -78,12 +78,14 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
int ret;
struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);

preempt_disable();
pagefault_disable();
enable_kernel_altivec();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
pagefault_enable();

preempt_enable();

ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
return ret;
}
@ -95,10 +97,12 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
if (in_interrupt()) {
crypto_cipher_encrypt_one(ctx->fallback, dst, src);
} else {
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
aes_p8_encrypt(src, dst, &ctx->enc_key);
pagefault_enable();
preempt_enable();
}
}

@ -109,10 +113,12 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
if (in_interrupt()) {
crypto_cipher_decrypt_one(ctx->fallback, dst, src);
} else {
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
aes_p8_decrypt(src, dst, &ctx->dec_key);
pagefault_enable();
preempt_enable();
}
}
@ -79,11 +79,13 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
int ret;
struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);

preempt_disable();
pagefault_disable();
enable_kernel_altivec();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
pagefault_enable();
preempt_enable();

ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
return ret;
@ -106,6 +108,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
if (in_interrupt()) {
ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes);
} else {
preempt_disable();
pagefault_disable();
enable_kernel_altivec();

@ -119,6 +122,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
}

pagefault_enable();
preempt_enable();
}

return ret;
@ -141,6 +145,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
if (in_interrupt()) {
ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
} else {
preempt_disable();
pagefault_disable();
enable_kernel_altivec();

@ -154,6 +159,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
}

pagefault_enable();
preempt_enable();
}

return ret;
@ -114,11 +114,13 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
if (keylen != GHASH_KEY_LEN)
return -EINVAL;

preempt_disable();
pagefault_disable();
enable_kernel_altivec();
enable_kernel_fp();
gcm_init_p8(ctx->htable, (const u64 *) key);
pagefault_enable();
preempt_enable();
return crypto_shash_setkey(ctx->fallback, key, keylen);
}

@ -140,23 +142,27 @@ static int p8_ghash_update(struct shash_desc *desc,
}
memcpy(dctx->buffer + dctx->bytes, src,
GHASH_DIGEST_SIZE - dctx->bytes);
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
enable_kernel_fp();
gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
GHASH_DIGEST_SIZE);
pagefault_enable();
preempt_enable();
src += GHASH_DIGEST_SIZE - dctx->bytes;
srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
dctx->bytes = 0;
}
len = srclen & ~(GHASH_DIGEST_SIZE - 1);
if (len) {
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
enable_kernel_fp();
gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
pagefault_enable();
preempt_enable();
src += len;
srclen -= len;
}
@ -180,12 +186,14 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
if (dctx->bytes) {
for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
dctx->buffer[i] = 0;
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
enable_kernel_fp();
gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
GHASH_DIGEST_SIZE);
pagefault_enable();
preempt_enable();
dctx->bytes = 0;
}
memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
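Every VMX crypto hunk above repeats the same bracket around the AltiVec work. A hypothetical helper pair, not part of the patch, shown only to make the ordering explicit:

	static inline void p8_altivec_begin(void)
	{
		preempt_disable();	/* keep the vector unit on this CPU */
		pagefault_disable();	/* a fault could schedule and clobber VMX state */
		enable_kernel_altivec();
	}

	static inline void p8_altivec_end(void)
	{
		pagefault_enable();
		preempt_enable();	/* strict reverse order of the disables */
	}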
@ -32,6 +32,7 @@
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>
#include <linux/uaccess.h>

#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
@ -465,7 +466,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
}

/* We can't wait for rendering with pagefaults disabled */
if (obj->active && in_atomic())
if (obj->active && pagefault_disabled())
return -EFAULT;

if (use_cpu_reloc(obj))
@ -63,7 +63,8 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)

#ifdef CONFIG_SMP
#define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu))
#define for_each_sibling(i, cpu) \
for_each_cpu(i, topology_sibling_cpumask(cpu))
#else
#define for_each_sibling(i, cpu) for (i = 0; false; )
#endif
@ -1304,7 +1304,7 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
if (!cpumask_test_cpu(cpu, thread_mask)) {
++count;
cpumask_or(thread_mask, thread_mask,
topology_thread_cpumask(cpu));
topology_sibling_cpumask(cpu));
}
}
@ -87,7 +87,7 @@ static void cfs_cpu_core_siblings(int cpu, cpumask_t *mask)
/* return cpumask of HTs in the same core */
static void cfs_cpu_ht_siblings(int cpu, cpumask_t *mask)
{
cpumask_copy(mask, topology_thread_cpumask(cpu));
cpumask_copy(mask, topology_sibling_cpumask(cpu));
}

static void cfs_node_to_cpumask(int node, cpumask_t *mask)
@ -557,7 +557,7 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
* there are.
*/
/* weight is # of HTs */
if (cpumask_weight(topology_thread_cpumask(0)) > 1) {
if (cpumask_weight(topology_sibling_cpumask(0)) > 1) {
/* depress thread factor for hyper-thread */
factor = factor - (factor >> 1) + (factor >> 3);
}
@ -2768,7 +2768,7 @@ int ptlrpc_hr_init(void)

init_waitqueue_head(&ptlrpc_hr.hr_waitq);

weight = cpumask_weight(topology_thread_cpumask(0));
weight = cpumask_weight(topology_sibling_cpumask(0));

cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
hrp->hrp_cpt = i;
@ -8,8 +8,7 @@
#ifndef CONFIG_SMP
/*
* The following implementation only for uniprocessor machines.
* For UP, it's relies on the fact that pagefault_disable() also disables
* preemption to ensure mutual exclusion.
* It relies on preempt_disable() ensuring mutual exclusion.
*
*/

@ -38,6 +37,7 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;

preempt_disable();
pagefault_disable();

ret = -EFAULT;
@ -72,6 +72,7 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)

out_pagefault_enable:
pagefault_enable();
preempt_enable();

if (ret == 0) {
switch (cmp) {
@ -106,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
{
u32 val;

preempt_disable();
if (unlikely(get_user(val, uaddr) != 0))
return -EFAULT;

@ -113,6 +115,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;

*uval = val;
preempt_enable();

return 0;
}
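On UP the asm-generic futex code now states its real requirement: preempt_disable() provides the atomicity, with pagefault_disable() layered on where the user accesses must fail rather than sleep. A hypothetical reduced form of the op_inuser critical section:

	static int futex_up_atomic_add(u32 __user *uaddr, u32 inc, u32 *oldval)
	{
		u32 val;
		int ret = -EFAULT;

		preempt_disable();	/* sole source of mutual exclusion on UP */
		pagefault_disable();	/* the accesses below fail, never sleep */

		if (__get_user(val, uaddr) == 0 &&
		    __put_user(val + inc, uaddr) == 0) {
			*oldval = val;
			ret = 0;
		}

		pagefault_enable();
		preempt_enable();
		return ret;
	}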
@ -79,11 +79,8 @@ static __always_inline bool should_resched(void)
#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()

#ifdef CONFIG_CONTEXT_TRACKING
extern asmlinkage void preempt_schedule_context(void);
#define __preempt_schedule_context() preempt_schedule_context()
#endif
extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPT */

#endif /* __ASM_PREEMPT_H */
Some files were not shown because too many files have changed in this diff.