Merge tag 'sched-core-2021-04-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
- Clean up SCHED_DEBUG: move the decades-old mess of sysctl, procfs and
debugfs interfaces to a unified debugfs interface.
- Signals: Allow caching one sigqueue object per task, to improve
performance & latencies (a simplified sketch of the caching idea follows
this list).
- Improve newidle_balance() irq-off latencies on systems with a large
number of CPU cgroups.
- Improve energy-aware scheduling
- Improve the PELT metrics for certain workloads
- Reintroduce select_idle_smt() to improve load-balancing locality -
but without the previous regressions
- Add 'scheduler latency debugging': warn after long periods of pending
need_resched. This is an opt-in feature that requires the enabling of
the LATENCY_WARN scheduler feature, or the use of the
resched_latency_warn_ms=xx boot parameter.
- CPU hotplug fixes for HP-rollback, and for the 'fail' interface. Fix
remaining balance_push() vs. hotplug holes/races
- PSI fixes, plus allow /proc/pressure/ files to be written by
CAP_SYS_RESOURCE tasks as well
- Fix/improve various load-balancing corner cases vs. capacity margins
- Fix sched topology on systems with NUMA diameter of 3 or above
- Fix PF_KTHREAD vs to_kthread() race
- Minor rseq optimizations
- Misc cleanups, optimizations, fixes and smaller updates
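
The per-task sigqueue cache mentioned above keeps at most one recycled
sigqueue object on each task, so signal delivery can often skip the slab
allocator. Below is a minimal, self-contained C sketch of that one-slot-cache
idea. It is illustrative only: struct task, sigqueue_alloc() and
sigqueue_free() are simplified stand-ins, not the kernel's actual types or
interfaces.

#include <stdlib.h>

struct sigqueue { int signo; };

struct task {
	struct sigqueue *sigqueue_cache;	/* one-slot cache, NULL when empty */
};

static struct sigqueue *sigqueue_alloc(struct task *t)
{
	struct sigqueue *q = t->sigqueue_cache;

	if (q) {				/* fast path: reuse the cached object */
		t->sigqueue_cache = NULL;
		return q;
	}
	return malloc(sizeof(*q));		/* slow path: fall back to the allocator */
}

static void sigqueue_free(struct task *t, struct sigqueue *q)
{
	if (!t->sigqueue_cache)			/* refill the empty slot for next time */
		t->sigqueue_cache = q;
	else
		free(q);
}

int main(void)
{
	struct task t = { .sigqueue_cache = NULL };
	struct sigqueue *q = sigqueue_alloc(&t);	/* slow path: allocator */

	sigqueue_free(&t, q);				/* refills the one-slot cache */
	free(sigqueue_alloc(&t));			/* fast path: reuses q */
	return 0;
}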
* tag 'sched-core-2021-04-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (61 commits)
cpumask/hotplug: Fix cpu_dying() state tracking
kthread: Fix PF_KTHREAD vs to_kthread() race
sched/debug: Fix cgroup_path[] serialization
sched,psi: Handle potential task count underflow bugs more gracefully
sched: Warn on long periods of pending need_resched
sched/fair: Move update_nohz_stats() to the CONFIG_NO_HZ_COMMON block to simplify the code & fix an unused function warning
sched/debug: Rename the sched_debug parameter to sched_verbose
sched,fair: Alternative sched_slice()
sched: Move /proc/sched_debug to debugfs
sched,debug: Convert sysctl sched_domains to debugfs
debugfs: Implement debugfs_create_str()
sched,preempt: Move preempt_dynamic to debug.c
sched: Move SCHED_DEBUG sysctl to debugfs
sched: Don't make LATENCYTOP select SCHED_DEBUG
sched: Remove sched_schedstats sysctl out from under SCHED_DEBUG
sched/numa: Allow runtime enabling/disabling of NUMA balance without SCHED_DEBUG
sched: Use cpu_dying() to fix balance_push vs hotplug-rollback
cpumask: Introduce DYING mask
cpumask: Make cpu_{online,possible,present,active}() inline
rseq: Optimise rseq_get_rseq_cs() and clear_rseq_cs()
...
@@ -84,6 +84,25 @@ static inline struct kthread *to_kthread(struct task_struct *k)
 	return (__force void *)k->set_child_tid;
 }
 
+/*
+ * Variant of to_kthread() that doesn't assume @p is a kthread.
+ *
+ * Per construction; when:
+ *
+ *   (p->flags & PF_KTHREAD) && p->set_child_tid
+ *
+ * the task is both a kthread and struct kthread is persistent. However
+ * PF_KTHREAD on it's own is not, kernel_thread() can exec() (See umh.c and
+ * begin_new_exec()).
+ */
+static inline struct kthread *__to_kthread(struct task_struct *p)
+{
+	void *kthread = (__force void *)p->set_child_tid;
+	if (kthread && !(p->flags & PF_KTHREAD))
+		kthread = NULL;
+	return kthread;
+}
+
 void free_kthread_struct(struct task_struct *k)
 {
 	struct kthread *kthread;
@@ -168,8 +187,9 @@ EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
  */
 void *kthread_func(struct task_struct *task)
 {
-	if (task->flags & PF_KTHREAD)
-		return to_kthread(task)->threadfn;
+	struct kthread *kthread = __to_kthread(task);
+	if (kthread)
+		return kthread->threadfn;
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(kthread_func);
@@ -199,10 +219,11 @@ EXPORT_SYMBOL_GPL(kthread_data);
  */
 void *kthread_probe_data(struct task_struct *task)
 {
-	struct kthread *kthread = to_kthread(task);
+	struct kthread *kthread = __to_kthread(task);
 	void *data = NULL;
 
-	copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
+	if (kthread)
+		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
 	return data;
 }
 
@@ -514,9 +535,9 @@ void kthread_set_per_cpu(struct task_struct *k, int cpu)
 	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
 }
 
-bool kthread_is_per_cpu(struct task_struct *k)
+bool kthread_is_per_cpu(struct task_struct *p)
 {
-	struct kthread *kthread = to_kthread(k);
+	struct kthread *kthread = __to_kthread(p);
 	if (!kthread)
 		return false;
 
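
The first hunk above introduces __to_kthread(), a variant of to_kthread()
that does not assume its argument is a kthread: per the comment in the
patch, a task is only known to have a persistent struct kthread when both
PF_KTHREAD is set and set_child_tid is non-NULL, because a task created with
kernel_thread() can exec() (see umh.c and begin_new_exec()). The later hunks
convert callers that may see arbitrary tasks to the new helper and check its
result for NULL. The self-contained C sketch below illustrates only that
"fetch once and validate, NULL otherwise" pattern; struct task,
task_to_kthread() and the payload field are simplified, hypothetical
stand-ins, not the kernel's data structures.

#include <stdio.h>

#define PF_KTHREAD 0x00200000

struct kthread { int (*threadfn)(void *data); };

struct task {
	unsigned int flags;
	void *payload;		/* stands in for the reused set_child_tid slot */
};

/* NULL unless the task really is (still) a kthread carrying a payload. */
static struct kthread *task_to_kthread(struct task *p)
{
	struct kthread *kthread = p->payload;

	if (kthread && !(p->flags & PF_KTHREAD))
		kthread = NULL;	/* e.g. a kernel thread that exec()ed user code */
	return kthread;
}

static int demo_fn(void *data) { (void)data; return 0; }

int main(void)
{
	struct kthread kt = { .threadfn = demo_fn };
	struct task kworker = { .flags = PF_KTHREAD, .payload = &kt };
	struct task execed  = { .flags = 0,          .payload = &kt };

	printf("kworker -> %p\n", (void *)task_to_kthread(&kworker));	/* valid */
	printf("execed  -> %p\n", (void *)task_to_kthread(&execed));	/* NULL */
	return 0;
}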