commit 48a066e72d
* RCU-delayed freeing of vfsmounts

* vfsmount_lock replaced with a seqlock (mount_lock)

* sequence number from mount_lock is stored in nameidata->m_seq and used when we exit RCU mode

* new vfsmount flag - MNT_SYNC_UMOUNT. Set by umount_tree() when its caller knows that vfsmount will have no surviving references.

* synchronize_rcu() done between unlocking namespace_sem in namespace_unlock() and doing pending mntput().

* new helper: legitimize_mnt(mnt, seq). Checks the mount_lock sequence number against seq, then grabs reference to mnt. Then it rechecks mount_lock again to close the race and either returns success or drops the reference it has acquired. The subtle point is that in case of MNT_SYNC_UMOUNT we can simply decrement the refcount and sod off - aforementioned synchronize_rcu() makes sure that final mntput() won't come until we leave RCU mode. We need that, since we don't want to end up with some lazy pathwalk racing with umount() and stealing the final mntput() from it - caller of umount() may expect it to return only once the fs is shut down and we don't want to break that. In other cases (i.e. with MNT_SYNC_UMOUNT absent) we have to do full-blown mntput() in case of mount_lock sequence number mismatch happening just as we'd grabbed the reference, but in those cases we won't be stealing the final mntput() from anything that would care.

* mntput_no_expire() doesn't lock anything on the fast path now. Incidentally, SMP and UP cases are handled the same way - no ifdefs there.

* normal pathname resolution does *not* do any writes to mount_lock. It does, of course, bump the refcounts of vfsmount and dentry in the very end, but that's it.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
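The legitimize_mnt() bullet above is the heart of the lockless scheme. Below is a minimal sketch of that check, assuming the real_mount() helper and mount_lock seqlock declared in this header plus the mnt_add_count() refcount helper that lives in fs/namespace.c; it mirrors the sequence described in the commit message rather than reproducing the exact mainline body.

/* Sketch: validate a lazily-walked vfsmount when leaving RCU mode. */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;

	/* mount_lock moved since nameidata->m_seq was sampled? Give up early. */
	if (read_seqretry(&mount_lock, seq))
		return false;
	if (bastard == NULL)
		return true;

	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);			/* grab a reference... */
	if (likely(!read_seqretry(&mount_lock, seq)))
		return true;			/* ...and recheck to close the race */

	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		/*
		 * umount_tree() guaranteed a synchronize_rcu() before the
		 * final mntput(), so we cannot be holding the last reference;
		 * dropping the count is enough.
		 */
		mnt_add_count(mnt, -1);
		return false;
	}
	/* No such guarantee: this may be the last reference, do a full mntput(). */
	rcu_read_unlock();
	mntput(bastard);
	rcu_read_lock();
	return false;
}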
112 lines · 2.8 KiB · C
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/poll.h>

struct mnt_namespace {
	atomic_t count;
	unsigned int proc_inum;
	struct mount *root;
	struct list_head list;
	struct user_namespace *user_ns;
	u64 seq;	/* Sequence number to prevent loops */
	wait_queue_head_t poll;
	int event;
};

struct mnt_pcp {
	int mnt_count;
	int mnt_writers;
};

struct mountpoint {
	struct list_head m_hash;
	struct dentry *m_dentry;
	int m_count;
};

struct mount {
	struct list_head mnt_hash;
	struct mount *mnt_parent;
	struct dentry *mnt_mountpoint;
	struct vfsmount mnt;
	struct rcu_head mnt_rcu;
#ifdef CONFIG_SMP
	struct mnt_pcp __percpu *mnt_pcp;
#else
	int mnt_count;
	int mnt_writers;
#endif
	struct list_head mnt_mounts;	/* list of children, anchored here */
	struct list_head mnt_child;	/* and going through their mnt_child */
	struct list_head mnt_instance;	/* mount instance on sb->s_mounts */
	const char *mnt_devname;	/* Name of device e.g. /dev/dsk/hda1 */
	struct list_head mnt_list;
	struct list_head mnt_expire;	/* link in fs-specific expiry list */
	struct list_head mnt_share;	/* circular list of shared mounts */
	struct list_head mnt_slave_list;/* list of slave mounts */
	struct list_head mnt_slave;	/* slave list entry */
	struct mount *mnt_master;	/* slave is on master->mnt_slave_list */
	struct mnt_namespace *mnt_ns;	/* containing namespace */
	struct mountpoint *mnt_mp;	/* where is it mounted */
#ifdef CONFIG_FSNOTIFY
	struct hlist_head mnt_fsnotify_marks;
	__u32 mnt_fsnotify_mask;
#endif
	int mnt_id;			/* mount identifier */
	int mnt_group_id;		/* peer group identifier */
	int mnt_expiry_mark;		/* true if marked for expiry */
	int mnt_pinned;
	struct path mnt_ex_mountpoint;
};

#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */

static inline struct mount *real_mount(struct vfsmount *mnt)
{
	return container_of(mnt, struct mount, mnt);
}

static inline int mnt_has_parent(struct mount *mnt)
{
	return mnt != mnt->mnt_parent;
}

static inline int is_mounted(struct vfsmount *mnt)
{
	/* neither detached nor internal? */
	return !IS_ERR_OR_NULL(real_mount(mnt)->mnt_ns);
}
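real_mount() is the only way back from the embedded vfsmount to its containing struct mount; the reverse direction is just taking the address of the member. A hypothetical round-trip, with embedding_check() being an illustrative name rather than anything in the tree:

/* Hypothetical: the two directions of the struct mount <-> vfsmount embedding. */
static void embedding_check(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);	/* outward: container_of() */
	struct vfsmount *back = &mnt->mnt;	/* inward: address of the member */

	WARN_ON(back != m);			/* always holds for an embedded member */
}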

extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *);
extern struct mount *__lookup_mnt_last(struct vfsmount *, struct dentry *);

extern bool legitimize_mnt(struct vfsmount *, unsigned);

static inline void get_mnt_ns(struct mnt_namespace *ns)
{
	atomic_inc(&ns->count);
}

extern seqlock_t mount_lock;

static inline void lock_mount_hash(void)
{
	write_seqlock(&mount_lock);
}

static inline void unlock_mount_hash(void)
{
	write_sequnlock(&mount_lock);
}
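lock_mount_hash()/unlock_mount_hash() are the write side of the same seqlock the lockless walk samples: every topology change bumps its sequence count, and a reader validates the count it saw before trusting anything it found. The pairing is sketched below with hypothetical change_topology()/lockless_walk() callers; the real callers are spread across fs/namespace.c and fs/namei.c.

/* Hypothetical writer: any mount/umount/move runs under the seqlock. */
static void change_topology(void)
{
	lock_mount_hash();		/* write_seqlock(&mount_lock) bumps the count */
	/* ... hash/unhash mounts, reparent children ... */
	unlock_mount_hash();
}

/* Hypothetical reader: sample once, validate when leaving RCU mode. */
static bool lockless_walk(struct vfsmount *m)
{
	unsigned seq;

	rcu_read_lock();
	seq = read_seqbegin(&mount_lock);	/* what nameidata->m_seq stores */
	/* ... lockless lookups; no writes to mount_lock on this path ... */
	if (!legitimize_mnt(m, seq)) {		/* recheck and grab a reference */
		rcu_read_unlock();
		return false;			/* raced with a writer: fall back and retry */
	}
	rcu_read_unlock();
	return true;				/* we now hold a legitimate reference on m */
}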

struct proc_mounts {
	struct seq_file m;
	struct mnt_namespace *ns;
	struct path root;
	int (*show)(struct seq_file *, struct vfsmount *);
};

#define proc_mounts(p) (container_of((p), struct proc_mounts, m))
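struct proc_mounts embeds the seq_file so that a seq_operations method handed only the struct seq_file can get back to the per-open state; the proc_mounts() macro is the container_of() for that. Below is an illustrative ->show dispatcher in the style of the one behind mounts_op; example_m_show() is a made-up name and the mainline body may differ in detail.

/* Illustrative dispatcher: recover per-open state from the seq_file core. */
static int example_m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = proc_mounts(m);	/* container_of back to proc_mounts */
	struct mount *r = list_entry(v, struct mount, mnt_list);

	return p->show(m, &r->mnt);		/* e.g. one of the show_* helpers in fs/proc_namespace.c */
}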

extern const struct seq_operations mounts_op;