Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"The main changes in this cycle were:
- Implement wraparound-safe refcount_t and kref_t types based on
generic atomic primitives (Peter Zijlstra)
- Improve and fix the ww_mutex code (Nicolai Hähnle)
- Add self-tests to the ww_mutex code (Chris Wilson)
- Optimize percpu-rwsems with the 'rcuwait' mechanism (Davidlohr
Bueso)
- Micro-optimize the current-task logic all around the core kernel
(Davidlohr Bueso)
- Tidy up after recent optimizations: remove stale code and APIs,
clean up the code (Waiman Long)
- ... plus misc fixes, updates and cleanups"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (50 commits)
fork: Fix task_struct alignment
locking/spinlock/debug: Remove spinlock lockup detection code
lockdep: Fix incorrect condition to print bug msgs for MAX_LOCKDEP_CHAIN_HLOCKS
lkdtm: Convert to refcount_t testing
kref: Implement 'struct kref' using refcount_t
refcount_t: Introduce a special purpose refcount type
sched/wake_q: Clarify queue reinit comment
sched/wait, rcuwait: Fix typo in comment
locking/mutex: Fix lockdep_assert_held() fail
locking/rtmutex: Flip unlikely() branch to likely() in __rt_mutex_slowlock()
locking/rwsem: Reinit wake_q after use
locking/rwsem: Remove unnecessary atomic_long_t casts
jump_labels: Move header guard #endif down where it belongs
locking/atomic, kref: Implement kref_put_lock()
locking/ww_mutex: Turn off __must_check for now
locking/atomic, kref: Avoid more abuse
locking/atomic, kref: Use kref_get_unless_zero() more
locking/atomic, kref: Kill kref_sub()
locking/atomic, kref: Add kref_read()
locking/atomic, kref: Add KREF_INIT()
...
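
For context on the kref/refcount_t entries in the shortlog: after this series, 'struct kref' is implemented on top of the new refcount_t, which saturates and WARNs on over/underflow instead of silently wrapping, and callers use KREF_INIT() and kref_read() rather than poking at the underlying atomic. A minimal usage sketch follows; the foo object and its helpers are hypothetical, not part of this commit:

	/* Sketch only: "foo" and foo_*() are illustrative. */
	struct foo {
		struct kref ref;
	};

	/* Static initialization via the new KREF_INIT() macro. */
	static struct foo static_foo = { .ref = KREF_INIT(1) };

	static void foo_release(struct kref *ref)
	{
		struct foo *f = container_of(ref, struct foo, ref);

		kfree(f);
	}

	static void foo_put(struct foo *f)
	{
		/* Calls foo_release() when the count drops to zero. */
		kref_put(&f->ref, foo_release);
	}

	static void foo_debug(struct foo *f)
	{
		/* kref_read() replaces open-coded atomic_read() on the counter. */
		pr_debug("foo refs: %u\n", kref_read(&f->ref));
	}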
@@ -55,6 +55,7 @@
 #include <linux/shm.h>
 #include <linux/kcov.h>
 #include <linux/random.h>
+#include <linux/rcuwait.h>
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
@@ -282,6 +283,35 @@ retry:
 	return task;
 }
 
+void rcuwait_wake_up(struct rcuwait *w)
+{
+	struct task_struct *task;
+
+	rcu_read_lock();
+
+	/*
+	 * Order condition vs @task, such that everything prior to the load
+	 * of @task is visible. This is the condition as to why the user called
+	 * rcuwait_trywake() in the first place. Pairs with set_current_state()
+	 * barrier (A) in rcuwait_wait_event().
+	 *
+	 *    WAIT                WAKE
+	 *    [S] tsk = current   [S] cond = true
+	 *        MB (A)              MB (B)
+	 *    [L] cond            [L] tsk
+	 */
+	smp_rmb(); /* (B) */
+
+	/*
+	 * Avoid using task_rcu_dereference() magic as long as we are careful,
+	 * see comment in rcuwait_wait_event() regarding ->exit_state.
+	 */
+	task = rcu_dereference(w->task);
+	if (task)
+		wake_up_process(task);
+	rcu_read_unlock();
+}
+
 struct task_struct *try_get_task_struct(struct task_struct **ptask)
 {
 	struct task_struct *task;
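
The hunk above adds the wake side of the new rcuwait mechanism. The wait side, rcuwait_wait_event() in <linux/rcuwait.h>, publishes the blocking task with set_current_state() (barrier A in the diagram) and then re-checks the condition before sleeping. A simplified sketch of the pairing, under rcuwait's rule that at most one task blocks at a time; the foo_* helpers are hypothetical:

	#include <linux/rcuwait.h>

	struct foo_sync {
		bool done;		/* the condition the waiter sleeps on */
		struct rcuwait wait;	/* at most one task may block here */
	};

	static void foo_init(struct foo_sync *s)
	{
		s->done = false;
		rcuwait_init(&s->wait);
	}

	/* Waiter: sleeps until the condition becomes true. */
	static void foo_wait(struct foo_sync *s)
	{
		rcuwait_wait_event(&s->wait, READ_ONCE(s->done));
	}

	/* Waker: stores the condition first, then wakes; the barrier (B)
	 * in rcuwait_wake_up() orders it against the load of wait.task. */
	static void foo_complete(struct foo_sync *s)
	{
		WRITE_ONCE(s->done, true);
		rcuwait_wake_up(&s->wait);
	}

This is roughly the shape of the percpu-rwsem conversion in this series: the writer sleeps in rcuwait_wait_event() until the active-reader check passes, and readers call rcuwait_wake_up().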
@@ -468,12 +498,12 @@ assign_new_owner:
  * Turn us into a lazy TLB process if we
  * aren't already..
  */
-static void exit_mm(struct task_struct *tsk)
+static void exit_mm(void)
 {
-	struct mm_struct *mm = tsk->mm;
+	struct mm_struct *mm = current->mm;
 	struct core_state *core_state;
 
-	mm_release(tsk, mm);
+	mm_release(current, mm);
 	if (!mm)
 		return;
 	sync_mm_rss(mm);
@@ -491,7 +521,7 @@ static void exit_mm(struct task_struct *tsk)
 
 		up_read(&mm->mmap_sem);
 
-		self.task = tsk;
+		self.task = current;
 		self.next = xchg(&core_state->dumper.next, &self);
 		/*
 		 * Implies mb(), the result of xchg() must be visible
@@ -501,22 +531,22 @@ static void exit_mm(struct task_struct *tsk)
 		complete(&core_state->startup);
 
 		for (;;) {
-			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+			set_current_state(TASK_UNINTERRUPTIBLE);
 			if (!self.task) /* see coredump_finish() */
 				break;
 			freezable_schedule();
 		}
-		__set_task_state(tsk, TASK_RUNNING);
+		__set_current_state(TASK_RUNNING);
 		down_read(&mm->mmap_sem);
 	}
 	atomic_inc(&mm->mm_count);
-	BUG_ON(mm != tsk->active_mm);
+	BUG_ON(mm != current->active_mm);
 	/* more a memory barrier than a real lock */
-	task_lock(tsk);
-	tsk->mm = NULL;
+	task_lock(current);
+	current->mm = NULL;
 	up_read(&mm->mmap_sem);
 	enter_lazy_tlb(mm, current);
-	task_unlock(tsk);
+	task_unlock(current);
 	mm_update_next_owner(mm);
 	mmput(mm);
 	if (test_thread_flag(TIF_MEMDIE))
@@ -823,7 +853,7 @@ void __noreturn do_exit(long code)
 	tsk->exit_code = code;
 	taskstats_exit(tsk, group_dead);
 
-	exit_mm(tsk);
+	exit_mm();
 
 	if (group_dead)
 		acct_process();
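
The exit_mm() hunks show the recurring shape of the "micro-optimize the current-task logic" item from the pull message: a helper that can only ever run against the calling task drops its task_struct argument and uses current directly, making the tsk == current invariant explicit and saving an argument, since current is a cheap per-CPU (or register-backed) lookup on most architectures. A schematic of the pattern; the frob_task() helper and the ->frobbed field are hypothetical:

	/* Before: every caller passed itself. */
	static void frob_task(struct task_struct *tsk)
	{
		task_lock(tsk);
		tsk->frobbed = 1;	/* hypothetical field */
		task_unlock(tsk);
	}
		/* caller: */  frob_task(current);

	/* After: tsk == current is explicit, one argument fewer. */
	static void frob_task(void)
	{
		task_lock(current);
		current->frobbed = 1;
		task_unlock(current);
	}
		/* caller: */  frob_task();

The set_task_state(tsk, ...) to set_current_state(...) change in the hunk above is the same conversion applied to the scheduler-state helpers.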