A sequence counter write side critical section must be protected by some form of locking to serialize writers. A plain seqcount_t does not record which lock must be held when entering a write side critical section.

Use the new seqcount_spinlock_t data type, which associates a spinlock with the sequence counter. This enables lockdep to verify that the spinlock used for writer serialization is held when the write side critical section is entered.

If lockdep is disabled, this lock association is compiled out and has neither storage size nor runtime overhead.

Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200720155530.1173732-19-a.darwish@linutronix.de
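A minimal sketch of the pattern this patch adopts, using a hypothetical struct foo (the seqlock.h APIs shown are the real ones): the writer serializes on the associated spinlock around the seqcount critical section, so lockdep can assert the lock is held, while readers retry until they observe an unchanged sequence:

	#include <linux/seqlock.h>
	#include <linux/spinlock.h>

	struct foo {
		spinlock_t lock;
		seqcount_spinlock_t seq;	/* lockdep ties this to ->lock */
		int a, b;
	};

	static void foo_init(struct foo *f)
	{
		spin_lock_init(&f->lock);
		seqcount_spinlock_init(&f->seq, &f->lock);
	}

	static void foo_write(struct foo *f, int a, int b)
	{
		spin_lock(&f->lock);		/* lockdep checks this is held */
		write_seqcount_begin(&f->seq);
		f->a = a;
		f->b = b;
		write_seqcount_end(&f->seq);
		spin_unlock(&f->lock);
	}

	static void foo_read(struct foo *f, int *a, int *b)
	{
		unsigned int seq;

		do {				/* retry if a writer intervened */
			seq = read_seqcount_begin(&f->seq);
			*a = f->a;
			*b = f->b;
		} while (read_seqcount_retry(&f->seq, seq));
	}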
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/fs_struct.h>
#include "internal.h"

/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_root(struct fs_struct *fs, const struct path *path)
{
	struct path old_root;

	path_get(path);
	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_root = fs->root;
	fs->root = *path;
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);
	if (old_root.dentry)
		path_put(&old_root);
}

/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_pwd(struct fs_struct *fs, const struct path *path)
{
	struct path old_pwd;

	path_get(path);
	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put(&old_pwd);
}

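/*
 * Read side of the two write sections above: lockless readers such as
 * get_fs_root_and_pwd_rcu() in <linux/fs_struct.h> loop until they have
 * copied a snapshot that no writer raced with, roughly:
 *
 *	do {
 *		seq = read_seqcount_begin(&fs->seq);
 *		*root = fs->root;
 *		*pwd = fs->pwd;
 *	} while (read_seqcount_retry(&fs->seq, seq));
 */
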
static inline int replace_path(struct path *p, const struct path *old, const struct path *new)
{
	if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
		return 0;
	*p = *new;
	return 1;
}

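/*
 * Used by sys_pivot_root(): any task whose root or pwd still points at
 * old_root is switched over to new_root. A reference on new_root is taken
 * for every path replaced, and the matching old_root references are
 * dropped once the tasklist walk has finished.
 */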
void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;
	int count = 0;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			int hits = 0;
			spin_lock(&fs->lock);
			write_seqcount_begin(&fs->seq);
			hits += replace_path(&fs->root, old_root, new_root);
			hits += replace_path(&fs->pwd, old_root, new_root);
			write_seqcount_end(&fs->seq);
			while (hits--) {
				count++;
				path_get(new_root);
			}
			spin_unlock(&fs->lock);
		}
		task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	while (count--)
		path_put(old_root);
}

void free_fs_struct(struct fs_struct *fs)
{
	path_put(&fs->root);
	path_put(&fs->pwd);
	kmem_cache_free(fs_cachep, fs);
}

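/*
 * Called from do_exit(): detach tsk->fs under task_lock() and free the
 * fs_struct once its last user is gone.
 */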
void exit_fs(struct task_struct *tsk)
{
	struct fs_struct *fs = tsk->fs;

	if (fs) {
		int kill;
		task_lock(tsk);
		spin_lock(&fs->lock);
		tsk->fs = NULL;
		kill = !--fs->users;
		spin_unlock(&fs->lock);
		task_unlock(tsk);
		if (kill)
			free_fs_struct(fs);
	}
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-)
	 * (it is brand new and not yet visible to any other task) */
	if (fs) {
		fs->users = 1;
		fs->in_exec = 0;
		spin_lock_init(&fs->lock);
		seqcount_spinlock_init(&fs->seq, &fs->lock);
		fs->umask = old->umask;

		spin_lock(&old->lock);
		fs->root = old->root;
		path_get(&fs->root);
		fs->pwd = old->pwd;
		path_get(&fs->pwd);
		spin_unlock(&old->lock);
	}
	return fs;
}

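/*
 * Give the calling task a private copy of its fs_struct, dropping its
 * reference on the old (possibly shared) one. Exported for kernel threads
 * (e.g. nfsd) that must change their umask or root without affecting the
 * process they were forked from.
 */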
int unshare_fs_struct(void)
{
	struct fs_struct *fs = current->fs;
	struct fs_struct *new_fs = copy_fs_struct(fs);
	int kill;

	if (!new_fs)
		return -ENOMEM;

	task_lock(current);
	spin_lock(&fs->lock);
	kill = !--fs->users;
	current->fs = new_fs;
	spin_unlock(&fs->lock);
	task_unlock(current);

	if (kill)
		free_fs_struct(fs);

	return 0;
}
EXPORT_SYMBOL_GPL(unshare_fs_struct);

int current_umask(void)
{
	return current->fs->umask;
}
EXPORT_SYMBOL(current_umask);

/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
	.users		= 1,
	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
	.seq		= SEQCNT_SPINLOCK_ZERO(init_fs.seq, &init_fs.lock),
	.umask		= 0022,
};