Merge branch 'for-linus' of git://git.infradead.org/users/eparis/notify

* 'for-linus' of git://git.infradead.org/users/eparis/notify:
  fsnotify: allow groups to set freeing_mark to null
  inotify/dnotify: should_send_event shouldn't match on FS_EVENT_ON_CHILD
  dnotify: do not bother to lock entry->lock when reading mask
  dnotify: do not use ?true:false when assigning to a bool
  fsnotify: move events should indicate the event was on a child
  inotify: reimplement inotify using fsnotify
  fsnotify: handle filesystem unmounts with fsnotify marks
  fsnotify: fsnotify marks on inodes pin them in core
  fsnotify: allow groups to add private data to events
  fsnotify: add correlations between events
  fsnotify: include pathnames with entries when possible
  fsnotify: generic notification queue and waitq
  dnotify: reimplement dnotify using fsnotify
  fsnotify: parent event notification
  fsnotify: add marks to inodes so groups can interpret how to handle those inodes
  fsnotify: unified filesystem notification backend
Linus Torvalds, 2009-06-11 14:22:55 -07:00, commit 3bb66d7f8c
23 changed files with 2831 additions and 680 deletions

MAINTAINERS

@@ -1802,10 +1802,10 @@ F: drivers/char/epca*
F: drivers/char/digi*
DIRECTORY NOTIFICATION (DNOTIFY)
P: Stephen Rothwell
M: sfr@canb.auug.org.au
P: Eric Paris
M: eparis@parisplace.org
L: linux-kernel@vger.kernel.org
S: Supported
S: Maintained
F: Documentation/filesystems/dnotify.txt
F: fs/notify/dnotify/
F: include/linux/dnotify.h
@@ -2858,6 +2858,8 @@ P: John McCutchan
M: john@johnmccutchan.com
P: Robert Love
M: rlove@rlove.org
P: Eric Paris
M: eparis@parisplace.org
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/filesystems/inotify.txt

fs/inode.c

@@ -22,6 +22,7 @@
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/inotify.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/async.h>
@@ -189,6 +190,10 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_private = NULL;
inode->i_mapping = mapping;
#ifdef CONFIG_FSNOTIFY
inode->i_fsnotify_mask = 0;
#endif
return inode;
out_free_security:
@@ -221,6 +226,7 @@ void destroy_inode(struct inode *inode)
BUG_ON(inode_has_buffers(inode));
ima_inode_free(inode);
security_inode_free(inode);
fsnotify_inode_delete(inode);
if (inode->i_sb->s_op->destroy_inode)
inode->i_sb->s_op->destroy_inode(inode);
else
@@ -252,6 +258,9 @@ void inode_init_once(struct inode *inode)
INIT_LIST_HEAD(&inode->inotify_watches);
mutex_init(&inode->inotify_mutex);
#endif
#ifdef CONFIG_FSNOTIFY
INIT_HLIST_HEAD(&inode->i_fsnotify_mark_entries);
#endif
}
EXPORT_SYMBOL(inode_init_once);
@@ -398,6 +407,7 @@ int invalidate_inodes(struct super_block *sb)
mutex_lock(&iprune_mutex);
spin_lock(&inode_lock);
inotify_unmount_inodes(&sb->s_inodes);
fsnotify_unmount_inodes(&sb->s_inodes);
busy = invalidate_list(&sb->s_inodes, &throw_away);
spin_unlock(&inode_lock);

fs/notify/Kconfig

@@ -1,2 +1,15 @@
config FSNOTIFY
bool "Filesystem notification backend"
default y
---help---
fsnotify is a backend for filesystem notification. fsnotify does
not provide any userspace interface but does provide the basis
needed for other notification schemes such as dnotify, inotify,
and fanotify.
Say Y here to enable fsnotify support.
If unsure, say Y.
source "fs/notify/dnotify/Kconfig"
source "fs/notify/inotify/Kconfig"

fs/notify/Makefile

@@ -1,2 +1,4 @@
obj-$(CONFIG_FSNOTIFY) += fsnotify.o notification.o group.o inode_mark.o
obj-y += dnotify/
obj-y += inotify/

fs/notify/dnotify/Kconfig

@@ -1,5 +1,6 @@
config DNOTIFY
bool "Dnotify support"
depends on FSNOTIFY
default y
help
Dnotify is a directory-based per-fd file change notification system

fs/notify/dnotify/dnotify.c

@@ -3,6 +3,9 @@
*
* Copyright (C) 2000,2001,2002 Stephen Rothwell
*
* Copyright (C) 2009 Eric Paris <Red Hat Inc>
* dnotify was largely rewritten to use the new fsnotify infrastructure
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
@@ -21,170 +24,417 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
int dir_notify_enable __read_mostly = 1;
static struct kmem_cache *dn_cache __read_mostly;
static struct kmem_cache *dnotify_struct_cache __read_mostly;
static struct kmem_cache *dnotify_mark_entry_cache __read_mostly;
static struct fsnotify_group *dnotify_group __read_mostly;
static DEFINE_MUTEX(dnotify_mark_mutex);
static void redo_inode_mask(struct inode *inode)
{
unsigned long new_mask;
/*
* dnotify will attach one of these to each inode (i_fsnotify_mark_entries) which
* is being watched by dnotify. If multiple userspace applications are watching
* the same directory with dnotify their information is chained in dn
*/
struct dnotify_mark_entry {
struct fsnotify_mark_entry fsn_entry;
struct dnotify_struct *dn;
};
/*
* When a process starts or stops watching an inode the set of events which
* dnotify cares about for that inode may change. This function runs the
* list of everything receiving dnotify events about this directory and calculates
* the set of all those events. After it updates what dnotify is interested in
* it calls the fsnotify function so it can update the set of all events relevant
* to this inode.
*/
static void dnotify_recalc_inode_mask(struct fsnotify_mark_entry *entry)
{
__u32 new_mask, old_mask;
struct dnotify_struct *dn;
struct dnotify_mark_entry *dnentry = container_of(entry,
struct dnotify_mark_entry,
fsn_entry);
assert_spin_locked(&entry->lock);
old_mask = entry->mask;
new_mask = 0;
for (dn = inode->i_dnotify; dn != NULL; dn = dn->dn_next)
new_mask |= dn->dn_mask & ~DN_MULTISHOT;
inode->i_dnotify_mask = new_mask;
}
for (dn = dnentry->dn; dn != NULL; dn = dn->dn_next)
new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT);
entry->mask = new_mask;
void dnotify_flush(struct file *filp, fl_owner_t id)
{
struct dnotify_struct *dn;
struct dnotify_struct **prev;
struct inode *inode;
inode = filp->f_path.dentry->d_inode;
if (!S_ISDIR(inode->i_mode))
if (old_mask == new_mask)
return;
spin_lock(&inode->i_lock);
prev = &inode->i_dnotify;
while ((dn = *prev) != NULL) {
if ((dn->dn_owner == id) && (dn->dn_filp == filp)) {
*prev = dn->dn_next;
redo_inode_mask(inode);
kmem_cache_free(dn_cache, dn);
break;
}
prev = &dn->dn_next;
}
spin_unlock(&inode->i_lock);
if (entry->inode)
fsnotify_recalc_inode_mask(entry->inode);
}
int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
/*
* Main fsnotify call where events are delivered to dnotify.
* Find the dnotify mark on the relevant inode, run the list of dnotify structs
* on that mark and determine which of them has expressed interest in receiving
* events of this type. When one is found, signal the owning process and
* destroy the dnotify struct if it was not registered to receive multiple
* events.
*/
static int dnotify_handle_event(struct fsnotify_group *group,
struct fsnotify_event *event)
{
struct fsnotify_mark_entry *entry = NULL;
struct dnotify_mark_entry *dnentry;
struct inode *to_tell;
struct dnotify_struct *dn;
struct dnotify_struct *odn;
struct dnotify_struct **prev;
struct inode *inode;
fl_owner_t id = current->files;
struct file *f;
int error = 0;
struct fown_struct *fown;
if ((arg & ~DN_MULTISHOT) == 0) {
dnotify_flush(filp, id);
to_tell = event->to_tell;
spin_lock(&to_tell->i_lock);
entry = fsnotify_find_mark_entry(group, to_tell);
spin_unlock(&to_tell->i_lock);
/* unlikely since we already passed dnotify_should_send_event() */
if (unlikely(!entry))
return 0;
}
if (!dir_notify_enable)
return -EINVAL;
inode = filp->f_path.dentry->d_inode;
if (!S_ISDIR(inode->i_mode))
return -ENOTDIR;
dn = kmem_cache_alloc(dn_cache, GFP_KERNEL);
if (dn == NULL)
return -ENOMEM;
spin_lock(&inode->i_lock);
prev = &inode->i_dnotify;
while ((odn = *prev) != NULL) {
if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
odn->dn_fd = fd;
odn->dn_mask |= arg;
inode->i_dnotify_mask |= arg & ~DN_MULTISHOT;
goto out_free;
}
prev = &odn->dn_next;
}
dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
rcu_read_lock();
f = fcheck(fd);
rcu_read_unlock();
/* we'd lost the race with close(), sod off silently */
/* note that inode->i_lock prevents reordering problems
* between accesses to descriptor table and ->i_dnotify */
if (f != filp)
goto out_free;
error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
if (error)
goto out_free;
dn->dn_mask = arg;
dn->dn_fd = fd;
dn->dn_filp = filp;
dn->dn_owner = id;
inode->i_dnotify_mask |= arg & ~DN_MULTISHOT;
dn->dn_next = inode->i_dnotify;
inode->i_dnotify = dn;
spin_unlock(&inode->i_lock);
return 0;
out_free:
spin_unlock(&inode->i_lock);
kmem_cache_free(dn_cache, dn);
return error;
}
void __inode_dir_notify(struct inode *inode, unsigned long event)
{
struct dnotify_struct * dn;
struct dnotify_struct **prev;
struct fown_struct * fown;
int changed = 0;
spin_lock(&inode->i_lock);
prev = &inode->i_dnotify;
spin_lock(&entry->lock);
prev = &dnentry->dn;
while ((dn = *prev) != NULL) {
if ((dn->dn_mask & event) == 0) {
if ((dn->dn_mask & event->mask) == 0) {
prev = &dn->dn_next;
continue;
}
fown = &dn->dn_filp->f_owner;
send_sigio(fown, dn->dn_fd, POLL_MSG);
if (dn->dn_mask & DN_MULTISHOT)
if (dn->dn_mask & FS_DN_MULTISHOT)
prev = &dn->dn_next;
else {
*prev = dn->dn_next;
changed = 1;
kmem_cache_free(dn_cache, dn);
kmem_cache_free(dnotify_struct_cache, dn);
dnotify_recalc_inode_mask(entry);
}
}
if (changed)
redo_inode_mask(inode);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(__inode_dir_notify);
spin_unlock(&entry->lock);
fsnotify_put_mark(entry);
return 0;
}
/*
* This is hopelessly wrong, but unfixable without API changes. At
* least it doesn't oops the kernel...
*
* To safely access ->d_parent we need to keep d_move away from it. Use the
* dentry's d_lock for this.
* Given an inode and mask determine if dnotify would be interested in sending
* userspace notification for that pair.
*/
void dnotify_parent(struct dentry *dentry, unsigned long event)
static bool dnotify_should_send_event(struct fsnotify_group *group,
struct inode *inode, __u32 mask)
{
struct dentry *parent;
struct fsnotify_mark_entry *entry;
bool send;
/* !dir_notify_enable should never get here, don't waste time checking
if (!dir_notify_enable)
return 0; */
/* not a dir, dnotify doesn't care */
if (!S_ISDIR(inode->i_mode))
return false;
spin_lock(&inode->i_lock);
entry = fsnotify_find_mark_entry(group, inode);
spin_unlock(&inode->i_lock);
/* no mark means no dnotify watch */
if (!entry)
return false;
mask = (mask & ~FS_EVENT_ON_CHILD);
send = (mask & entry->mask);
fsnotify_put_mark(entry); /* matches fsnotify_find_mark_entry */
return send;
}
static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
{
struct dnotify_mark_entry *dnentry = container_of(entry,
struct dnotify_mark_entry,
fsn_entry);
BUG_ON(dnentry->dn);
kmem_cache_free(dnotify_mark_entry_cache, dnentry);
}
static struct fsnotify_ops dnotify_fsnotify_ops = {
.handle_event = dnotify_handle_event,
.should_send_event = dnotify_should_send_event,
.free_group_priv = NULL,
.freeing_mark = NULL,
.free_event_priv = NULL,
};
/*
* Called every time a file is closed. Looks first for a dnotify mark on the
* inode. If one is found run all of the ->dn entries attached to that
* mark for one relevant to this process closing the file and remove that
* dnotify_struct. If that was the last dnotify_struct also remove the
* fsnotify_mark_entry.
*/
void dnotify_flush(struct file *filp, fl_owner_t id)
{
struct fsnotify_mark_entry *entry;
struct dnotify_mark_entry *dnentry;
struct dnotify_struct *dn;
struct dnotify_struct **prev;
struct inode *inode;
inode = filp->f_path.dentry->d_inode;
if (!S_ISDIR(inode->i_mode))
return;
spin_lock(&dentry->d_lock);
parent = dentry->d_parent;
if (parent->d_inode->i_dnotify_mask & event) {
dget(parent);
spin_unlock(&dentry->d_lock);
__inode_dir_notify(parent->d_inode, event);
dput(parent);
} else {
spin_unlock(&dentry->d_lock);
spin_lock(&inode->i_lock);
entry = fsnotify_find_mark_entry(dnotify_group, inode);
spin_unlock(&inode->i_lock);
if (!entry)
return;
dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
mutex_lock(&dnotify_mark_mutex);
spin_lock(&entry->lock);
prev = &dnentry->dn;
while ((dn = *prev) != NULL) {
if ((dn->dn_owner == id) && (dn->dn_filp == filp)) {
*prev = dn->dn_next;
kmem_cache_free(dnotify_struct_cache, dn);
dnotify_recalc_inode_mask(entry);
break;
}
prev = &dn->dn_next;
}
spin_unlock(&entry->lock);
/* nothing else could have found us thanks to the dnotify_mark_mutex */
if (dnentry->dn == NULL)
fsnotify_destroy_mark_by_entry(entry);
fsnotify_recalc_group_mask(dnotify_group);
mutex_unlock(&dnotify_mark_mutex);
fsnotify_put_mark(entry);
}
/* this conversion is done only at watch creation */
static __u32 convert_arg(unsigned long arg)
{
__u32 new_mask = FS_EVENT_ON_CHILD;
if (arg & DN_MULTISHOT)
new_mask |= FS_DN_MULTISHOT;
if (arg & DN_DELETE)
new_mask |= (FS_DELETE | FS_MOVED_FROM);
if (arg & DN_MODIFY)
new_mask |= FS_MODIFY;
if (arg & DN_ACCESS)
new_mask |= FS_ACCESS;
if (arg & DN_ATTRIB)
new_mask |= FS_ATTRIB;
if (arg & DN_RENAME)
new_mask |= FS_DN_RENAME;
if (arg & DN_CREATE)
new_mask |= (FS_CREATE | FS_MOVED_TO);
return new_mask;
}
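(For context: the userspace side of this DN_* mapping is the long-standing fcntl(F_NOTIFY) dnotify interface. A minimal consumer might look like the sketch below; it relies only on the documented dnotify API, nothing specific to this patch.)

/* Sketch: watch the current directory with dnotify from userspace.
 * DN_MULTISHOT keeps the watch armed after the first event; convert_arg()
 * above translates these DN_* bits into the FS_* bits fsnotify works with. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t hit;

static void on_sigio(int sig)
{
        hit = 1;
}

int main(void)
{
        int fd = open(".", O_RDONLY);

        if (fd < 0)
                return 1;
        signal(SIGIO, on_sigio);
        if (fcntl(fd, F_NOTIFY, DN_CREATE | DN_DELETE | DN_MODIFY | DN_MULTISHOT) < 0)
                return 1;
        for (;;) {
                pause();
                if (hit) {
                        hit = 0;
                        printf("directory changed\n");
                }
        }
}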
/*
* If multiple processes watch the same inode with dnotify there is only one
* dnotify mark in inode->i_fsnotify_mark_entries but we chain a dnotify_struct
* onto that mark. This function either attaches the new dnotify_struct onto
* that list, or it |= the mask onto an existing dnotify_struct.
*/
static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnentry,
fl_owner_t id, int fd, struct file *filp, __u32 mask)
{
struct dnotify_struct *odn;
odn = dnentry->dn;
while (odn != NULL) {
/* adding more events to existing dnotify_struct? */
if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
odn->dn_fd = fd;
odn->dn_mask |= mask;
return -EEXIST;
}
odn = odn->dn_next;
}
dn->dn_mask = mask;
dn->dn_fd = fd;
dn->dn_filp = filp;
dn->dn_owner = id;
dn->dn_next = dnentry->dn;
dnentry->dn = dn;
return 0;
}
/*
* When a process calls fcntl to attach a dnotify watch to a directory it ends
* up here. Allocate both a mark for fsnotify to add and a dnotify_struct to be
* attached to the fsnotify_mark.
*/
int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
{
struct dnotify_mark_entry *new_dnentry, *dnentry;
struct fsnotify_mark_entry *new_entry, *entry;
struct dnotify_struct *dn;
struct inode *inode;
fl_owner_t id = current->files;
struct file *f;
int destroy = 0, error = 0;
__u32 mask;
/* we use these to tell if we need to kfree */
new_entry = NULL;
dn = NULL;
if (!dir_notify_enable) {
error = -EINVAL;
goto out_err;
}
/* a 0 mask means we are explicitly removing the watch */
if ((arg & ~DN_MULTISHOT) == 0) {
dnotify_flush(filp, id);
error = 0;
goto out_err;
}
/* dnotify only works on directories */
inode = filp->f_path.dentry->d_inode;
if (!S_ISDIR(inode->i_mode)) {
error = -ENOTDIR;
goto out_err;
}
/* expect most fcntl to add new rather than augment old */
dn = kmem_cache_alloc(dnotify_struct_cache, GFP_KERNEL);
if (!dn) {
error = -ENOMEM;
goto out_err;
}
/* new fsnotify mark, we expect most fcntl calls to add a new mark */
new_dnentry = kmem_cache_alloc(dnotify_mark_entry_cache, GFP_KERNEL);
if (!new_dnentry) {
error = -ENOMEM;
goto out_err;
}
/* convert the userspace DN_* "arg" to the internal FS_* defines in fsnotify */
mask = convert_arg(arg);
/* set up the new_entry and new_dnentry */
new_entry = &new_dnentry->fsn_entry;
fsnotify_init_mark(new_entry, dnotify_free_mark);
new_entry->mask = mask;
new_dnentry->dn = NULL;
/* this is needed to prevent the fcntl/close race described below */
mutex_lock(&dnotify_mark_mutex);
/* add the new_entry or find an old one. */
spin_lock(&inode->i_lock);
entry = fsnotify_find_mark_entry(dnotify_group, inode);
spin_unlock(&inode->i_lock);
if (entry) {
dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
spin_lock(&entry->lock);
} else {
fsnotify_add_mark(new_entry, dnotify_group, inode);
spin_lock(&new_entry->lock);
entry = new_entry;
dnentry = new_dnentry;
/* we used new_entry, so don't free it */
new_entry = NULL;
}
rcu_read_lock();
f = fcheck(fd);
rcu_read_unlock();
/* if (f != filp) means that we lost a race and another task/thread
* actually closed the fd we are still playing with before we grabbed
* the dnotify_mark_mutex and entry->lock. Since closing the fd is the
* only time we clean up the mark entries we need to get our mark off
* the list. */
if (f != filp) {
/* if we added ourselves, shoot ourselves, it's possible that
* the flush actually did shoot this entry. That's fine too
* since multiple calls to destroy_mark are perfectly safe, if
* we found a dnentry already attached to the inode, just sod
* off silently as the flush at close time dealt with it.
*/
if (dnentry == new_dnentry)
destroy = 1;
goto out;
}
error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
if (error) {
/* if we added, we must shoot */
if (dnentry == new_dnentry)
destroy = 1;
goto out;
}
error = attach_dn(dn, dnentry, id, fd, filp, mask);
/* !error means that we attached the dn to the dnentry, so don't free it */
if (!error)
dn = NULL;
/* -EEXIST means that we didn't add this new dn and used an old one.
* that isn't an error (and the unused dn should be freed) */
else if (error == -EEXIST)
error = 0;
dnotify_recalc_inode_mask(entry);
out:
spin_unlock(&entry->lock);
if (destroy)
fsnotify_destroy_mark_by_entry(entry);
fsnotify_recalc_group_mask(dnotify_group);
mutex_unlock(&dnotify_mark_mutex);
fsnotify_put_mark(entry);
out_err:
if (new_entry)
fsnotify_put_mark(new_entry);
if (dn)
kmem_cache_free(dnotify_struct_cache, dn);
return error;
}
EXPORT_SYMBOL_GPL(dnotify_parent);
static int __init dnotify_init(void)
{
dn_cache = kmem_cache_create("dnotify_cache",
sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL);
dnotify_struct_cache = KMEM_CACHE(dnotify_struct, SLAB_PANIC);
dnotify_mark_entry_cache = KMEM_CACHE(dnotify_mark_entry, SLAB_PANIC);
dnotify_group = fsnotify_obtain_group(DNOTIFY_GROUP_NUM,
0, &dnotify_fsnotify_ops);
if (IS_ERR(dnotify_group))
panic("unable to allocate fsnotify group for dnotify\n");
return 0;
}

fs/notify/fsnotify.c (new file, 186 lines)

@@ -0,0 +1,186 @@
/*
* Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/srcu.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
/*
* Clear all of the marks on an inode when it is being evicted from core
*/
void __fsnotify_inode_delete(struct inode *inode)
{
fsnotify_clear_marks_by_inode(inode);
}
EXPORT_SYMBOL_GPL(__fsnotify_inode_delete);
/*
* Given an inode, first check if we care what happens to our children. Inotify
* and dnotify both tell their parents about events. If we care about any event
* on a child we run all of our children and set a dentry flag saying that the
* parent cares. Thus when an event happens on a child it can quickly tell
* if there is a need to find a parent and send the event to the parent.
*/
void __fsnotify_update_child_dentry_flags(struct inode *inode)
{
struct dentry *alias;
int watched;
if (!S_ISDIR(inode->i_mode))
return;
/* determine if the children should tell inode about their events */
watched = fsnotify_inode_watches_children(inode);
spin_lock(&dcache_lock);
/* run all of the dentries associated with this inode. Since this is a
* directory, there damn well better only be one item on this list */
list_for_each_entry(alias, &inode->i_dentry, d_alias) {
struct dentry *child;
/* run all of the children of the original inode and fix their
* d_flags to indicate parental interest (their parent is the
* original inode) */
list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
if (!child->d_inode)
continue;
spin_lock(&child->d_lock);
if (watched)
child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
else
child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
spin_unlock(&child->d_lock);
}
}
spin_unlock(&dcache_lock);
}
/* Notify this dentry's parent about a child's events. */
void __fsnotify_parent(struct dentry *dentry, __u32 mask)
{
struct dentry *parent;
struct inode *p_inode;
bool send = false;
bool should_update_children = false;
if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
return;
spin_lock(&dentry->d_lock);
parent = dentry->d_parent;
p_inode = parent->d_inode;
if (fsnotify_inode_watches_children(p_inode)) {
if (p_inode->i_fsnotify_mask & mask) {
dget(parent);
send = true;
}
} else {
/*
* The parent doesn't care about events on its children but
* at least one child thought it did. We need to run all the
* children and update their d_flags to let them know p_inode
* doesn't care about them any more.
*/
dget(parent);
should_update_children = true;
}
spin_unlock(&dentry->d_lock);
if (send) {
/* we are notifying a parent so come up with the new mask which
* specifies these are events which came from a child. */
mask |= FS_EVENT_ON_CHILD;
fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
dentry->d_name.name, 0);
dput(parent);
}
if (unlikely(should_update_children)) {
__fsnotify_update_child_dentry_flags(p_inode);
dput(parent);
}
}
EXPORT_SYMBOL_GPL(__fsnotify_parent);
/*
* This is the main call to fsnotify. The VFS calls into hook specific functions
* in linux/fsnotify.h. Those functions then in turn call here. Here we call
* out to all of the registered fsnotify_groups. Those groups can then use the
* notification event in whatever manner they feel necessary.
*/
void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, const char *file_name, u32 cookie)
{
struct fsnotify_group *group;
struct fsnotify_event *event = NULL;
int idx;
/* global tests shouldn't care about events on a child, only the specific event */
__u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
if (list_empty(&fsnotify_groups))
return;
if (!(test_mask & fsnotify_mask))
return;
if (!(test_mask & to_tell->i_fsnotify_mask))
return;
/*
* SRCU!! the groups list is very very much read only and the path is
* very hot. The VAST majority of events are not going to need to do
* anything other than walk the list so it's crazy to pre-allocate.
*/
idx = srcu_read_lock(&fsnotify_grp_srcu);
list_for_each_entry_rcu(group, &fsnotify_groups, group_list) {
if (test_mask & group->mask) {
if (!group->ops->should_send_event(group, to_tell, mask))
continue;
if (!event) {
event = fsnotify_create_event(to_tell, mask, data, data_is, file_name, cookie);
/* shit, we OOM'd and now we can't tell, maybe
* someday someone else will want to do something
* here */
if (!event)
break;
}
group->ops->handle_event(group, event);
}
}
srcu_read_unlock(&fsnotify_grp_srcu, idx);
/*
* fsnotify_create_event() took a reference so the event can't be cleaned
* up while we are still trying to add it to lists, drop that one.
*/
if (event)
fsnotify_put_event(event);
}
EXPORT_SYMBOL_GPL(fsnotify);
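(To make that call path concrete, here is a sketch of the kind of hook that lives in linux/fsnotify.h and funnels a VFS operation into fsnotify(). It is modelled on this patch's fsnotify_modify() hook but simplified, so treat it as an illustration rather than the verbatim hook.)

/* Sketch of a linux/fsnotify.h-style hook: called by the VFS after a write,
 * it fans the event out first to a watching parent and then to watchers of
 * the inode itself. */
static inline void fsnotify_modify_sketch(struct dentry *dentry)
{
        struct inode *inode = dentry->d_inode;
        __u32 mask = FS_MODIFY;

        if (S_ISDIR(inode->i_mode))
                mask |= FS_IN_ISDIR;

        /* notify a watching parent; FS_EVENT_ON_CHILD is OR'd in there */
        __fsnotify_parent(dentry, mask);
        /* notify watchers of the inode itself */
        fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}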
static __init int fsnotify_init(void)
{
return init_srcu_struct(&fsnotify_grp_srcu);
}
subsys_initcall(fsnotify_init);

fs/notify/fsnotify.h (new file, 34 lines)

@@ -0,0 +1,34 @@
#ifndef __FS_NOTIFY_FSNOTIFY_H_
#define __FS_NOTIFY_FSNOTIFY_H_
#include <linux/list.h>
#include <linux/fsnotify.h>
#include <linux/srcu.h>
#include <linux/types.h>
/* protects reads of fsnotify_groups */
extern struct srcu_struct fsnotify_grp_srcu;
/* all groups which receive fsnotify events */
extern struct list_head fsnotify_groups;
/* bitwise OR of all event types (FS_*) across all fsnotify_groups */
extern __u32 fsnotify_mask;
/* destroy all events sitting in this groups notification queue */
extern void fsnotify_flush_notify(struct fsnotify_group *group);
/* final kfree of a group */
extern void fsnotify_final_destroy_group(struct fsnotify_group *group);
/* run the list of all marks associated with inode and flag them to be freed */
extern void fsnotify_clear_marks_by_inode(struct inode *inode);
/*
* update the dentry->d_flags of all of inode's children to indicate if inode cares
* about events that happen to its children.
*/
extern void __fsnotify_update_child_dentry_flags(struct inode *inode);
/* allocate and destroy an event holder to attach events to notification/access queues */
extern struct fsnotify_event_holder *fsnotify_alloc_event_holder(void);
extern void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder);
#endif /* __FS_NOTIFY_FSNOTIFY_H_ */

fs/notify/group.c (new file, 254 lines)

@@ -0,0 +1,254 @@
/*
* Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/wait.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
#include <asm/atomic.h>
/* protects writes to fsnotify_groups and fsnotify_mask */
static DEFINE_MUTEX(fsnotify_grp_mutex);
/* protects reads while running the fsnotify_groups list */
struct srcu_struct fsnotify_grp_srcu;
/* all groups registered to receive filesystem notifications */
LIST_HEAD(fsnotify_groups);
/* bitwise OR of all events (FS_*) interesting to some group on this system */
__u32 fsnotify_mask;
/*
* When a new group registers or changes its set of interesting events
* this function updates the fsnotify_mask to contain all interesting events
*/
void fsnotify_recalc_global_mask(void)
{
struct fsnotify_group *group;
__u32 mask = 0;
int idx;
idx = srcu_read_lock(&fsnotify_grp_srcu);
list_for_each_entry_rcu(group, &fsnotify_groups, group_list)
mask |= group->mask;
srcu_read_unlock(&fsnotify_grp_srcu, idx);
fsnotify_mask = mask;
}
/*
* Update the group->mask by running all of the marks associated with this
* group and finding the bitwise | of all of the mark->mask. If we change
* the group->mask we need to update the global mask of events interesting
* to the system.
*/
void fsnotify_recalc_group_mask(struct fsnotify_group *group)
{
__u32 mask = 0;
__u32 old_mask = group->mask;
struct fsnotify_mark_entry *entry;
spin_lock(&group->mark_lock);
list_for_each_entry(entry, &group->mark_entries, g_list)
mask |= entry->mask;
spin_unlock(&group->mark_lock);
group->mask = mask;
if (old_mask != mask)
fsnotify_recalc_global_mask();
}
/*
* Take a reference to a group so things found under the fsnotify_grp_mutex
* can't get freed under us
*/
static void fsnotify_get_group(struct fsnotify_group *group)
{
atomic_inc(&group->refcnt);
}
/*
* Final freeing of a group
*/
void fsnotify_final_destroy_group(struct fsnotify_group *group)
{
/* clear the notification queue of all events */
fsnotify_flush_notify(group);
if (group->ops->free_group_priv)
group->ops->free_group_priv(group);
kfree(group);
}
/*
* Trying to get rid of a group. We need to first get rid of any outstanding
* allocations and then free the group. Remember that fsnotify_clear_marks_by_group
* could miss marks that are being freed by inode and those marks could still
* hold a reference to this group (via group->num_marks). If we get into that
* situation, fsnotify_final_destroy_group will get called when that final
* mark is freed.
*/
static void fsnotify_destroy_group(struct fsnotify_group *group)
{
/* clear all inode mark entries for this group */
fsnotify_clear_marks_by_group(group);
/* past the point of no return, matches the initial value of 1 */
if (atomic_dec_and_test(&group->num_marks))
fsnotify_final_destroy_group(group);
}
/*
* Remove this group from the global list of groups that will get events;
* this can be done even if there are still references and things still using
* this group. This just stops the group from getting new events.
*/
static void __fsnotify_evict_group(struct fsnotify_group *group)
{
BUG_ON(!mutex_is_locked(&fsnotify_grp_mutex));
if (group->on_group_list)
list_del_rcu(&group->group_list);
group->on_group_list = 0;
}
/*
* Called when a group is no longer interested in getting events. This can be
* used if a group is misbehaving or if for some reason a group should no longer
* get any filesystem events.
*/
void fsnotify_evict_group(struct fsnotify_group *group)
{
mutex_lock(&fsnotify_grp_mutex);
__fsnotify_evict_group(group);
mutex_unlock(&fsnotify_grp_mutex);
}
/*
* Drop a reference to a group. Free it if it's through.
*/
void fsnotify_put_group(struct fsnotify_group *group)
{
if (!atomic_dec_and_mutex_lock(&group->refcnt, &fsnotify_grp_mutex))
return;
/*
* OK, now we know that there's no other users *and* we hold mutex,
* so no new references will appear
*/
__fsnotify_evict_group(group);
/*
* now it's off the list, so the only thing we might care about is
* srcu access....
*/
mutex_unlock(&fsnotify_grp_mutex);
synchronize_srcu(&fsnotify_grp_srcu);
/* and now it is really dead. _Nothing_ could be seeing it */
fsnotify_recalc_global_mask();
fsnotify_destroy_group(group);
}
/*
* Simply run the fsnotify_groups list and find a group which matches
* the given parameters. If a group is found we take a reference to that
* group.
*/
static struct fsnotify_group *fsnotify_find_group(unsigned int group_num, __u32 mask,
const struct fsnotify_ops *ops)
{
struct fsnotify_group *group_iter;
struct fsnotify_group *group = NULL;
BUG_ON(!mutex_is_locked(&fsnotify_grp_mutex));
list_for_each_entry_rcu(group_iter, &fsnotify_groups, group_list) {
if (group_iter->group_num == group_num) {
if ((group_iter->mask == mask) &&
(group_iter->ops == ops)) {
fsnotify_get_group(group_iter);
group = group_iter;
} else
group = ERR_PTR(-EEXIST);
}
}
return group;
}
/*
* Either finds an existing group which matches the group_num, mask, and ops or
* creates a new group and adds it to the global group list. In either case we
* take a reference for the group returned.
*/
struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num, __u32 mask,
const struct fsnotify_ops *ops)
{
struct fsnotify_group *group, *tgroup;
/* very low use, simpler locking if we just always alloc */
group = kmalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
if (!group)
return ERR_PTR(-ENOMEM);
atomic_set(&group->refcnt, 1);
group->on_group_list = 0;
group->group_num = group_num;
group->mask = mask;
mutex_init(&group->notification_mutex);
INIT_LIST_HEAD(&group->notification_list);
init_waitqueue_head(&group->notification_waitq);
group->q_len = 0;
group->max_events = UINT_MAX;
spin_lock_init(&group->mark_lock);
atomic_set(&group->num_marks, 0);
INIT_LIST_HEAD(&group->mark_entries);
group->ops = ops;
mutex_lock(&fsnotify_grp_mutex);
tgroup = fsnotify_find_group(group_num, mask, ops);
if (tgroup) {
/* group already exists */
mutex_unlock(&fsnotify_grp_mutex);
/* destroy the new one we made */
fsnotify_put_group(group);
return tgroup;
}
/* group not found, add a new one */
list_add_rcu(&group->group_list, &fsnotify_groups);
group->on_group_list = 1;
/* being on the fsnotify_groups list holds one num_marks */
atomic_inc(&group->num_marks);
mutex_unlock(&fsnotify_grp_mutex);
if (mask)
fsnotify_recalc_global_mask();
return group;
}

fs/notify/inode_mark.c (new file, 426 lines)

@@ -0,0 +1,426 @@
/*
* Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* fsnotify inode mark locking/lifetime/and refcnting
*
* REFCNT:
* The mark->refcnt tells how many "things" in the kernel currently are
* referencing this object. The object typically will live inside the kernel
* with a refcnt of 2, one for each list it is on (i_list, g_list). Any task
* which can find this object, holding the appropriate locks, can take a reference
* and the object itself is guaranteed to survive until the reference is dropped.
*
* LOCKING:
* There are 3 spinlocks involved with fsnotify inode marks and they MUST
* be taken in order as follows:
*
* entry->lock
* group->mark_lock
* inode->i_lock
*
* entry->lock protects 2 things, entry->group and entry->inode. You must hold
* that lock to dereference either of these things (they could be NULL even with
* the lock)
*
* group->mark_lock protects the mark_entries list anchored inside a given group
* and each entry is hooked via the g_list. It also sorta protects the
* free_g_list, which when used is anchored by a private list on the stack of the
* task which held the group->mark_lock.
*
* inode->i_lock protects the i_fsnotify_mark_entries list anchored inside a
* given inode and each entry is hooked via the i_list. (and sorta the
* free_i_list)
*
*
* LIFETIME:
* Inode marks survive between when they are added to an inode and when their
* refcnt==0.
*
* The inode mark can be cleared for a number of different reasons including:
* - The inode is unlinked for the last time. (fsnotify_inode_remove)
* - The inode is being evicted from cache. (fsnotify_inode_delete)
* - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes)
* - Something explicitly requests that it be removed. (fsnotify_destroy_mark_by_entry)
* - The fsnotify_group associated with the mark is going away and all such marks
* need to be cleaned up. (fsnotify_clear_marks_by_group)
*
* Worst case we are given an inode and need to clean up all the marks on that
* inode. We take i_lock and walk the i_fsnotify_mark_entries safely. For each
* mark on the list we take a reference (so the mark can't disappear under us).
* We remove that mark from the inode's list of marks and we add this mark to a
* private list anchored on the stack using i_free_list; At this point we no
* longer fear anything finding the mark using the inode's list of marks.
*
* We can safely and locklessly run the private list on the stack of everything
* we just unattached from the original inode. For each mark on the private list
* we grab the mark->lock and can thus dereference mark->group and mark->inode. If
* we see the group and inode are not NULL we take those locks. Now holding all
* 3 locks we can completely remove the mark from other tasks finding it in the
* future. Remember, 10 things might already be referencing this mark, but they
* better be holding a ref. We drop the reference we took before we unhooked it
* from the inode. When the ref hits 0 we can free the mark.
*
* Very similarly for freeing by group, except we use free_g_list.
*
* This has the very interesting property of being able to run concurrently with
* any (or all) other directions.
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/writeback.h> /* for inode_lock */
#include <asm/atomic.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
void fsnotify_get_mark(struct fsnotify_mark_entry *entry)
{
atomic_inc(&entry->refcnt);
}
void fsnotify_put_mark(struct fsnotify_mark_entry *entry)
{
if (atomic_dec_and_test(&entry->refcnt))
entry->free_mark(entry);
}
/*
* Recalculate the mask of events relevant to a given inode, with i_lock already held.
*/
static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
{
struct fsnotify_mark_entry *entry;
struct hlist_node *pos;
__u32 new_mask = 0;
assert_spin_locked(&inode->i_lock);
hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list)
new_mask |= entry->mask;
inode->i_fsnotify_mask = new_mask;
}
/*
* Recalculate the inode->i_fsnotify_mask, or the mask of all FS_* event types
* any notifier is interested in hearing for this inode.
*/
void fsnotify_recalc_inode_mask(struct inode *inode)
{
spin_lock(&inode->i_lock);
fsnotify_recalc_inode_mask_locked(inode);
spin_unlock(&inode->i_lock);
__fsnotify_update_child_dentry_flags(inode);
}
/*
* Any time a mark is getting freed we end up here.
* The caller had better be holding a reference to this mark so we don't actually
* do the final put under the entry->lock
*/
void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry)
{
struct fsnotify_group *group;
struct inode *inode;
spin_lock(&entry->lock);
group = entry->group;
inode = entry->inode;
BUG_ON(group && !inode);
BUG_ON(!group && inode);
/* if !group something else already marked this to die */
if (!group) {
spin_unlock(&entry->lock);
return;
}
/* 1 from caller and 1 for being on i_list/g_list */
BUG_ON(atomic_read(&entry->refcnt) < 2);
spin_lock(&group->mark_lock);
spin_lock(&inode->i_lock);
hlist_del_init(&entry->i_list);
entry->inode = NULL;
list_del_init(&entry->g_list);
entry->group = NULL;
fsnotify_put_mark(entry); /* for i_list and g_list */
/*
* this mark is now off the inode->i_fsnotify_mark_entries list and we
* hold the inode->i_lock, so this is the perfect time to update the
* inode->i_fsnotify_mask
*/
fsnotify_recalc_inode_mask_locked(inode);
spin_unlock(&inode->i_lock);
spin_unlock(&group->mark_lock);
spin_unlock(&entry->lock);
/*
* Some groups like to know that marks are being freed. This is a
* callback to the group function to let it know that this entry
* is being freed.
*/
if (group->ops->freeing_mark)
group->ops->freeing_mark(entry, group);
/*
* __fsnotify_update_child_dentry_flags(inode);
*
* I really want to call that, but we can't, we have no idea if the inode
* still exists the second we drop the entry->lock.
*
* The next time an event arrives at this inode from one of its children
* __fsnotify_parent will see that the inode doesn't care about its
* children and will update all of these flags then. So really this
* is just a lazy update (and could be a perf win...)
*/
iput(inode);
/*
* it's possible that this group tried to destroy itself, but this
* mark was simultaneously being freed by inode. If that's the
* case, we finish freeing the group here.
*/
if (unlikely(atomic_dec_and_test(&group->num_marks)))
fsnotify_final_destroy_group(group);
}
/*
* Given a group, destroy all of the marks associated with that group.
*/
void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
{
struct fsnotify_mark_entry *lentry, *entry;
LIST_HEAD(free_list);
spin_lock(&group->mark_lock);
list_for_each_entry_safe(entry, lentry, &group->mark_entries, g_list) {
list_add(&entry->free_g_list, &free_list);
list_del_init(&entry->g_list);
fsnotify_get_mark(entry);
}
spin_unlock(&group->mark_lock);
list_for_each_entry_safe(entry, lentry, &free_list, free_g_list) {
fsnotify_destroy_mark_by_entry(entry);
fsnotify_put_mark(entry);
}
}
/*
* Given an inode, destroy all of the marks associated with that inode.
*/
void fsnotify_clear_marks_by_inode(struct inode *inode)
{
struct fsnotify_mark_entry *entry, *lentry;
struct hlist_node *pos, *n;
LIST_HEAD(free_list);
spin_lock(&inode->i_lock);
hlist_for_each_entry_safe(entry, pos, n, &inode->i_fsnotify_mark_entries, i_list) {
list_add(&entry->free_i_list, &free_list);
hlist_del_init(&entry->i_list);
fsnotify_get_mark(entry);
}
spin_unlock(&inode->i_lock);
list_for_each_entry_safe(entry, lentry, &free_list, free_i_list) {
fsnotify_destroy_mark_by_entry(entry);
fsnotify_put_mark(entry);
}
}
/*
* given a group and inode, find the mark associated with that combination.
* if found take a reference to that mark and return it, else return NULL
*/
struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group,
struct inode *inode)
{
struct fsnotify_mark_entry *entry;
struct hlist_node *pos;
assert_spin_locked(&inode->i_lock);
hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list) {
if (entry->group == group) {
fsnotify_get_mark(entry);
return entry;
}
}
return NULL;
}
/*
* Nothing fancy, just initialize lists and locks and counters.
*/
void fsnotify_init_mark(struct fsnotify_mark_entry *entry,
void (*free_mark)(struct fsnotify_mark_entry *entry))
{
spin_lock_init(&entry->lock);
atomic_set(&entry->refcnt, 1);
INIT_HLIST_NODE(&entry->i_list);
entry->group = NULL;
entry->mask = 0;
entry->inode = NULL;
entry->free_mark = free_mark;
}
/*
* Attach an initialized mark entry to a given group and inode.
* These marks may be used for the fsnotify backend to determine which
* event types should be delivered to which group and for which inodes.
*/
int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
struct fsnotify_group *group, struct inode *inode)
{
struct fsnotify_mark_entry *lentry;
int ret = 0;
inode = igrab(inode);
if (unlikely(!inode))
return -EINVAL;
/*
* LOCKING ORDER!!!!
* entry->lock
* group->mark_lock
* inode->i_lock
*/
spin_lock(&entry->lock);
spin_lock(&group->mark_lock);
spin_lock(&inode->i_lock);
entry->group = group;
entry->inode = inode;
lentry = fsnotify_find_mark_entry(group, inode);
if (!lentry) {
hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries);
list_add(&entry->g_list, &group->mark_entries);
fsnotify_get_mark(entry); /* for i_list and g_list */
atomic_inc(&group->num_marks);
fsnotify_recalc_inode_mask_locked(inode);
}
spin_unlock(&inode->i_lock);
spin_unlock(&group->mark_lock);
spin_unlock(&entry->lock);
if (lentry) {
ret = -EEXIST;
iput(inode);
fsnotify_put_mark(lentry);
} else {
__fsnotify_update_child_dentry_flags(inode);
}
return ret;
}
/**
* fsnotify_unmount_inodes - an sb is unmounting. handle any watched inodes.
* @list: list of inodes being unmounted (sb->s_inodes)
*
* Called with inode_lock held, protecting the unmounting super block's list
* of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
* We temporarily drop inode_lock, however, and CAN block.
*/
void fsnotify_unmount_inodes(struct list_head *list)
{
struct inode *inode, *next_i, *need_iput = NULL;
list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
struct inode *need_iput_tmp;
/*
* We cannot __iget() an inode in state I_CLEAR, I_FREEING,
* I_WILL_FREE, or I_NEW which is fine because by that point
* the inode cannot have any associated watches.
*/
if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
continue;
/*
* If i_count is zero, the inode cannot have any watches and
* doing an __iget/iput with MS_ACTIVE clear would actually
* evict all inodes with zero i_count from icache which is
* unnecessarily violent and may in fact be illegal to do.
*/
if (!atomic_read(&inode->i_count))
continue;
need_iput_tmp = need_iput;
need_iput = NULL;
/* In case fsnotify_inode_delete() drops a reference. */
if (inode != need_iput_tmp)
__iget(inode);
else
need_iput_tmp = NULL;
/* In case the dropping of a reference would nuke next_i. */
if ((&next_i->i_sb_list != list) &&
atomic_read(&next_i->i_count) &&
!(next_i->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))) {
__iget(next_i);
need_iput = next_i;
}
/*
* We can safely drop inode_lock here because we hold
* references on both inode and next_i. Also no new inodes
* will be added since the umount has begun. Finally,
* iprune_mutex keeps shrink_icache_memory() away.
*/
spin_unlock(&inode_lock);
if (need_iput_tmp)
iput(need_iput_tmp);
/* for each watch, send FS_UNMOUNT and then remove it */
fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
fsnotify_inode_delete(inode);
iput(inode);
spin_lock(&inode_lock);
}
}

fs/notify/inotify/Kconfig

@@ -1,26 +1,30 @@
config INOTIFY
bool "Inotify file change notification support"
default y
default n
---help---
Say Y here to enable inotify support. Inotify is a file change
notification system and a replacement for dnotify. Inotify fixes
numerous shortcomings in dnotify and introduces several new features
including multiple file events, one-shot support, and unmount
notification.
Say Y here to enable legacy in kernel inotify support. Inotify is a
file change notification system. It is a replacement for dnotify.
This option only provides the legacy inotify in kernel API. There
are no in tree kernel users of this interface since it is deprecated.
You only need this if you are loading an out of tree kernel module
that uses inotify.
For more information, see <file:Documentation/filesystems/inotify.txt>
If unsure, say Y.
If unsure, say N.
config INOTIFY_USER
bool "Inotify support for userspace"
depends on INOTIFY
depends on FSNOTIFY
default y
---help---
Say Y here to enable inotify support for userspace, including the
associated system calls. Inotify allows monitoring of both files and
directories via a single open fd. Events are read from the file
descriptor, which is also select()- and poll()-able.
Inotify fixes numerous shortcomings in dnotify and introduces several
new features including multiple file events, one-shot support, and
unmount notification.
For more information, see <file:Documentation/filesystems/inotify.txt>

fs/notify/inotify/Makefile

@@ -1,2 +1,2 @@
obj-$(CONFIG_INOTIFY) += inotify.o
obj-$(CONFIG_INOTIFY_USER) += inotify_user.o
obj-$(CONFIG_INOTIFY_USER) += inotify_fsnotify.o inotify_user.o

fs/notify/inotify/inotify.c

@@ -32,6 +32,7 @@
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>
#include <linux/fsnotify_backend.h>
static atomic_t inotify_cookie;
@@ -905,6 +906,25 @@ EXPORT_SYMBOL_GPL(inotify_rm_watch);
*/
static int __init inotify_setup(void)
{
BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
BUILD_BUG_ON(IN_OPEN != FS_OPEN);
BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
BUILD_BUG_ON(IN_CREATE != FS_CREATE);
BUILD_BUG_ON(IN_DELETE != FS_DELETE);
BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
BUILD_BUG_ON(IN_ISDIR != FS_IN_ISDIR);
BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
atomic_set(&inotify_cookie, 0);
return 0;

fs/notify/inotify/inotify.h (new file, 21 lines)

@@ -0,0 +1,21 @@
#include <linux/fsnotify_backend.h>
#include <linux/inotify.h>
#include <linux/slab.h> /* struct kmem_cache */
extern struct kmem_cache *event_priv_cachep;
struct inotify_event_private_data {
struct fsnotify_event_private_data fsnotify_event_priv_data;
int wd;
};
struct inotify_inode_mark_entry {
/* fsnotify_mark_entry MUST be the first thing */
struct fsnotify_mark_entry fsn_entry;
int wd;
};
extern void inotify_destroy_mark_entry(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
extern void inotify_free_event_priv(struct fsnotify_event_private_data *event_priv);
extern const struct fsnotify_ops inotify_fsnotify_ops;

fs/notify/inotify/inotify_fsnotify.c (new file, 138 lines)

@@ -0,0 +1,138 @@
/*
* fs/inotify_user.c - inotify support for userspace
*
* Authors:
* John McCutchan <ttb@tentacle.dhs.org>
* Robert Love <rml@novell.com>
*
* Copyright (C) 2005 John McCutchan
* Copyright 2006 Hewlett-Packard Development Company, L.P.
*
* Copyright (C) 2009 Eric Paris <Red Hat Inc>
* inotify was largely rewritten to make use of the fsnotify infrastructure
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/inotify.h>
#include <linux/path.h> /* struct path */
#include <linux/slab.h> /* kmem_* */
#include <linux/types.h>
#include "inotify.h"
static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
struct fsnotify_mark_entry *entry;
struct inotify_inode_mark_entry *ientry;
struct inode *to_tell;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
int wd, ret;
to_tell = event->to_tell;
spin_lock(&to_tell->i_lock);
entry = fsnotify_find_mark_entry(group, to_tell);
spin_unlock(&to_tell->i_lock);
/* race with watch removal? We already passed should_send */
if (unlikely(!entry))
return 0;
ientry = container_of(entry, struct inotify_inode_mark_entry,
fsn_entry);
wd = ientry->wd;
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
if (unlikely(!event_priv))
return -ENOMEM;
fsn_event_priv = &event_priv->fsnotify_event_priv_data;
fsn_event_priv->group = group;
event_priv->wd = wd;
ret = fsnotify_add_notify_event(group, event, fsn_event_priv);
/* EEXIST is not an error */
if (ret == -EEXIST)
ret = 0;
/* did event_priv get attached? */
if (list_empty(&fsn_event_priv->event_list))
inotify_free_event_priv(fsn_event_priv);
/*
* If we hold the entry until after the event is on the queue
* IN_IGNORED won't be able to pass this event in the queue
*/
fsnotify_put_mark(entry);
return ret;
}
static void inotify_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
{
inotify_destroy_mark_entry(entry, group);
}
static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask)
{
struct fsnotify_mark_entry *entry;
bool send;
spin_lock(&inode->i_lock);
entry = fsnotify_find_mark_entry(group, inode);
spin_unlock(&inode->i_lock);
if (!entry)
return false;
mask = (mask & ~FS_EVENT_ON_CHILD);
send = (entry->mask & mask);
/* find took a reference */
fsnotify_put_mark(entry);
return send;
}
static int idr_callback(int id, void *p, void *data)
{
BUG();
return 0;
}
static void inotify_free_group_priv(struct fsnotify_group *group)
{
/* ideally the idr is empty and we won't hit the BUG in the callback */
idr_for_each(&group->inotify_data.idr, idr_callback, NULL);
idr_remove_all(&group->inotify_data.idr);
idr_destroy(&group->inotify_data.idr);
}
void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
{
struct inotify_event_private_data *event_priv;
event_priv = container_of(fsn_event_priv, struct inotify_event_private_data,
fsnotify_event_priv_data);
kmem_cache_free(event_priv_cachep, event_priv);
}
const struct fsnotify_ops inotify_fsnotify_ops = {
.handle_event = inotify_handle_event,
.should_send_event = inotify_should_send_event,
.free_group_priv = inotify_free_group_priv,
.free_event_priv = inotify_free_event_priv,
.freeing_mark = inotify_freeing_mark,
};

(file diff suppressed because it is too large)

fs/notify/notification.c (new file, 411 lines)

@@ -0,0 +1,411 @@
/*
* Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* Basic idea behind the notification queue: An fsnotify group (like inotify)
* sends the userspace notification about events asynchronously some time after
* the event happened. When inotify gets an event it will need to add that
* event to the group notify queue. Since a single event might need to be on
* multiple group's notification queues we can't add the event directly to each
* queue and instead add a small "event_holder" to each queue. This event_holder
* has a pointer back to the original event. Since the majority of events are
* going to end up on one, and only one, notification queue we embed one
* event_holder into each event. This means we have a single allocation instead
* of always needing two. If the embedded event_holder is already in use by
* another group a new event_holder (from fsnotify_event_holder_cachep) will be
* allocated and used.
*/
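(For orientation, the two structures this comment describes look roughly like the sketch below. Field names follow this patch's fsnotify_backend.h but details are elided, so read it as a sketch rather than the full definitions.)

struct fsnotify_event_holder {
        struct fsnotify_event *event;   /* the event this holder points at */
        struct list_head event_list;    /* link in a group's notification_list */
};

struct fsnotify_event {
        /* embedded holder, used by the first (and usually only) queue the
         * event lands on; additional queues allocate standalone holders */
        struct fsnotify_event_holder holder;
        spinlock_t lock;                /* protects the holder and private lists */
        struct inode *to_tell;          /* the inode the event happened to */
        __u32 mask;                     /* FS_* bits describing the event */
        atomic_t refcnt;                /* each queued holder takes a reference */
        struct list_head private_data_list;     /* per-group private data */
        /* ... data/path payload, file_name, cookie, etc. elided ... */
};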
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
static struct kmem_cache *fsnotify_event_cachep;
static struct kmem_cache *fsnotify_event_holder_cachep;
/*
* This is a magic event we send when the q is too full. Since it doesn't
* hold real event information we just keep one system wide and use it any time
* it is needed. Its refcnt is set to 1 at kernel init time and will never
* get set to 0 so it will never get 'freed'
*/
static struct fsnotify_event q_overflow_event;
static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
/**
* fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
* Called from fsnotify_move, which is inlined into filesystem modules.
*/
u32 fsnotify_get_cookie(void)
{
return atomic_inc_return(&fsnotify_sync_cookie);
}
EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
/* return true if the notify queue is empty, false otherwise */
bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
{
BUG_ON(!mutex_is_locked(&group->notification_mutex));
return list_empty(&group->notification_list) ? true : false;
}
void fsnotify_get_event(struct fsnotify_event *event)
{
atomic_inc(&event->refcnt);
}
void fsnotify_put_event(struct fsnotify_event *event)
{
if (!event)
return;
if (atomic_dec_and_test(&event->refcnt)) {
if (event->data_type == FSNOTIFY_EVENT_PATH)
path_put(&event->path);
BUG_ON(!list_empty(&event->private_data_list));
kfree(event->file_name);
kmem_cache_free(fsnotify_event_cachep, event);
}
}
struct fsnotify_event_holder *fsnotify_alloc_event_holder(void)
{
return kmem_cache_alloc(fsnotify_event_holder_cachep, GFP_KERNEL);
}
void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder)
{
kmem_cache_free(fsnotify_event_holder_cachep, holder);
}
/*
* Find the private data that the group previously attached to this event when
* the group added the event to the notification queue (fsnotify_add_notify_event)
*/
struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
struct fsnotify_event_private_data *lpriv;
struct fsnotify_event_private_data *priv = NULL;
assert_spin_locked(&event->lock);
list_for_each_entry(lpriv, &event->private_data_list, event_list) {
if (lpriv->group == group) {
priv = lpriv;
list_del(&priv->event_list);
break;
}
}
return priv;
}
/*
* Check if 2 events contain the same information. We do not compare private data
* but at this moment that isn't a problem for any known fsnotify listeners.
*/
static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
{
if ((old->mask == new->mask) &&
(old->to_tell == new->to_tell) &&
(old->data_type == new->data_type)) {
switch (old->data_type) {
case (FSNOTIFY_EVENT_INODE):
if (old->inode == new->inode)
return true;
break;
case (FSNOTIFY_EVENT_PATH):
if ((old->path.mnt == new->path.mnt) &&
(old->path.dentry == new->path.dentry))
return true;
/* a path event that doesn't match must not fall through and
* compare equal to an FSNOTIFY_EVENT_NONE event */
break;
case (FSNOTIFY_EVENT_NONE):
return true;
}
}
return false;
}
/*
* Add an event to the group notification queue. The group can later pull this
* event off the queue to deal with. If the event is successfully added to the
* group's notification queue, a reference is taken on event.
*/
int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
struct fsnotify_event_private_data *priv)
{
struct fsnotify_event_holder *holder = NULL;
struct list_head *list = &group->notification_list;
struct fsnotify_event_holder *last_holder;
struct fsnotify_event *last_event;
/* easy to tell if priv was attached to the event */
if (priv)
INIT_LIST_HEAD(&priv->event_list);
/*
* There is one fsnotify_event_holder embedded inside each fsnotify_event.
* Check if we expect to be able to use that holder. If not alloc a new
* holder.
* For the overflow event it's possible that something will use the embedded
* event holder before we get the lock, so we may need to jump back and
* alloc a new holder; this can't happen for most events...
*/
if (!list_empty(&event->holder.event_list)) {
alloc_holder:
holder = fsnotify_alloc_event_holder();
if (!holder)
return -ENOMEM;
}
mutex_lock(&group->notification_mutex);
if (group->q_len >= group->max_events) {
event = &q_overflow_event;
/* sorry, no private data on the overflow event */
priv = NULL;
}
spin_lock(&event->lock);
if (list_empty(&event->holder.event_list)) {
if (unlikely(holder))
fsnotify_destroy_event_holder(holder);
holder = &event->holder;
} else if (unlikely(!holder)) {
/* between the time we checked above and got the lock the embedded
* event holder was used, go back and get a new one */
spin_unlock(&event->lock);
mutex_unlock(&group->notification_mutex);
goto alloc_holder;
}
if (!list_empty(list)) {
last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
last_event = last_holder->event;
if (event_compare(last_event, event)) {
spin_unlock(&event->lock);
mutex_unlock(&group->notification_mutex);
if (holder != &event->holder)
fsnotify_destroy_event_holder(holder);
return -EEXIST;
}
}
group->q_len++;
holder->event = event;
fsnotify_get_event(event);
list_add_tail(&holder->event_list, list);
if (priv)
list_add_tail(&priv->event_list, &event->private_data_list);
spin_unlock(&event->lock);
mutex_unlock(&group->notification_mutex);
wake_up(&group->notification_waitq);
return 0;
}
/*
* Remove and return the first event from the notification list. There is a
* reference held on this event since it was on the list. It is the responsibility
* of the caller to drop this reference.
*/
struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group)
{
struct fsnotify_event *event;
struct fsnotify_event_holder *holder;
BUG_ON(!mutex_is_locked(&group->notification_mutex));
holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);
event = holder->event;
spin_lock(&event->lock);
holder->event = NULL;
list_del_init(&holder->event_list);
spin_unlock(&event->lock);
/* holder == &event->holder means we are referenced through the embedded event holder */
if (holder != &event->holder)
fsnotify_destroy_event_holder(holder);
group->q_len--;
return event;
}
/*
* This will not remove the event; that must be done with fsnotify_remove_notify_event().
*/
struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group)
{
struct fsnotify_event *event;
struct fsnotify_event_holder *holder;
BUG_ON(!mutex_is_locked(&group->notification_mutex));
holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);
event = holder->event;
return event;
}
/*
* Called when a group is being torn down to clean up any outstanding
* event notifications.
*/
void fsnotify_flush_notify(struct fsnotify_group *group)
{
struct fsnotify_event *event;
struct fsnotify_event_private_data *priv;
mutex_lock(&group->notification_mutex);
while (!fsnotify_notify_queue_is_empty(group)) {
event = fsnotify_remove_notify_event(group);
/* if they don't implement free_event_priv they had better not have attached any */
if (group->ops->free_event_priv) {
spin_lock(&event->lock);
priv = fsnotify_remove_priv_from_event(group, event);
spin_unlock(&event->lock);
if (priv)
group->ops->free_event_priv(priv);
}
fsnotify_put_event(event); /* matches fsnotify_add_notify_event */
}
mutex_unlock(&group->notification_mutex);
}
static void initialize_event(struct fsnotify_event *event)
{
event->holder.event = NULL;
INIT_LIST_HEAD(&event->holder.event_list);
atomic_set(&event->refcnt, 1);
spin_lock_init(&event->lock);
event->path.dentry = NULL;
event->path.mnt = NULL;
event->inode = NULL;
event->data_type = FSNOTIFY_EVENT_NONE;
INIT_LIST_HEAD(&event->private_data_list);
event->to_tell = NULL;
event->file_name = NULL;
event->name_len = 0;
event->sync_cookie = 0;
}
/*
* fsnotify_create_event - Allocate a new event which will be sent to each
* group's handle_event function if the group was interested in this
* particular event.
*
* @to_tell the inode which is supposed to receive the event (sometimes a
* parent of the inode to which the event happened)
* @mask what actually happened
* @data pointer to the object which was actually affected
* @data_type flag indicating whether the data is a file, path, inode, nothing...
* @name the filename, if available
* @cookie value used to correlate related events (e.g. the two halves of a
* rename), 0 if none
*/
struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data,
int data_type, const char *name, u32 cookie)
{
struct fsnotify_event *event;
event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL);
if (!event)
return NULL;
initialize_event(event);
if (name) {
event->file_name = kstrdup(name, GFP_KERNEL);
if (!event->file_name) {
kmem_cache_free(fsnotify_event_cachep, event);
return NULL;
}
event->name_len = strlen(event->file_name);
}
event->sync_cookie = cookie;
event->to_tell = to_tell;
switch (data_type) {
case FSNOTIFY_EVENT_FILE: {
struct file *file = data;
struct path *path = &file->f_path;
event->path.dentry = path->dentry;
event->path.mnt = path->mnt;
path_get(&event->path);
event->data_type = FSNOTIFY_EVENT_PATH;
break;
}
case FSNOTIFY_EVENT_PATH: {
struct path *path = data;
event->path.dentry = path->dentry;
event->path.mnt = path->mnt;
path_get(&event->path);
event->data_type = FSNOTIFY_EVENT_PATH;
break;
}
case FSNOTIFY_EVENT_INODE:
event->inode = data;
event->data_type = FSNOTIFY_EVENT_INODE;
break;
case FSNOTIFY_EVENT_NONE:
event->inode = NULL;
event->path.dentry = NULL;
event->path.mnt = NULL;
break;
default:
BUG();
}
event->mask = mask;
return event;
}
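/*
 * Illustrative sketch only (hypothetical helper): build a stand-alone
 * FS_MODIFY event against an inode, queue it to one group with no private
 * data, and drop the creation reference; on success the queue holds its
 * own reference taken in fsnotify_add_notify_event().
 */
static inline int example_send_modify(struct fsnotify_group *group, struct inode *inode)
{
struct fsnotify_event *event;
int ret;
event = fsnotify_create_event(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
if (!event)
return -ENOMEM;
/* may return -EEXIST if this matches the event already at the tail */
ret = fsnotify_add_notify_event(group, event, NULL);
fsnotify_put_event(event); /* drop the reference from create */
return ret;
}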
__init int fsnotify_notification_init(void)
{
fsnotify_event_cachep = KMEM_CACHE(fsnotify_event, SLAB_PANIC);
fsnotify_event_holder_cachep = KMEM_CACHE(fsnotify_event_holder, SLAB_PANIC);
initialize_event(&q_overflow_event);
q_overflow_event.mask = FS_Q_OVERFLOW;
return 0;
}
subsys_initcall(fsnotify_notification_init);

View File

@ -180,10 +180,12 @@ d_iput: no no no yes
#define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. */
#define DCACHE_UNHASHED 0x0010
#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched */
#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched by inotify */
#define DCACHE_COOKIE 0x0040 /* For use by dcookie subsystem */
#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x0080 /* Parent inode is watched by some fsnotify listener */
extern spinlock_t dcache_lock;
extern seqlock_t rename_lock;

View File

@ -10,7 +10,7 @@
struct dnotify_struct {
struct dnotify_struct * dn_next;
unsigned long dn_mask;
__u32 dn_mask;
int dn_fd;
struct file * dn_filp;
fl_owner_t dn_owner;
@ -21,23 +21,18 @@ struct dnotify_struct {
#ifdef CONFIG_DNOTIFY
extern void __inode_dir_notify(struct inode *, unsigned long);
#define DNOTIFY_ALL_EVENTS (FS_DELETE | FS_DELETE_CHILD |\
FS_MODIFY | FS_MODIFY_CHILD |\
FS_ACCESS | FS_ACCESS_CHILD |\
FS_ATTRIB | FS_ATTRIB_CHILD |\
FS_CREATE | FS_DN_RENAME |\
FS_MOVED_FROM | FS_MOVED_TO)
extern void dnotify_flush(struct file *, fl_owner_t);
extern int fcntl_dirnotify(int, struct file *, unsigned long);
extern void dnotify_parent(struct dentry *, unsigned long);
static inline void inode_dir_notify(struct inode *inode, unsigned long event)
{
if (inode->i_dnotify_mask & (event))
__inode_dir_notify(inode, event);
}
#else
static inline void __inode_dir_notify(struct inode *inode, unsigned long event)
{
}
static inline void dnotify_flush(struct file *filp, fl_owner_t id)
{
}
@ -47,14 +42,6 @@ static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
return -EINVAL;
}
static inline void dnotify_parent(struct dentry *dentry, unsigned long event)
{
}
static inline void inode_dir_notify(struct inode *inode, unsigned long event)
{
}
#endif /* CONFIG_DNOTIFY */
#endif /* __KERNEL__ */

View File

@ -755,9 +755,9 @@ struct inode {
__u32 i_generation;
#ifdef CONFIG_DNOTIFY
unsigned long i_dnotify_mask; /* Directory notify events */
struct dnotify_struct *i_dnotify; /* for directory notifications */
#ifdef CONFIG_FSNOTIFY
__u32 i_fsnotify_mask; /* all events this inode cares about */
struct hlist_head i_fsnotify_mark_entries; /* fsnotify mark entries */
#endif
#ifdef CONFIG_INOTIFY

View File

@ -13,6 +13,7 @@
#include <linux/dnotify.h>
#include <linux/inotify.h>
#include <linux/fsnotify_backend.h>
#include <linux/audit.h>
/*
@ -22,18 +23,44 @@
static inline void fsnotify_d_instantiate(struct dentry *entry,
struct inode *inode)
{
__fsnotify_d_instantiate(entry, inode);
inotify_d_instantiate(entry, inode);
}
/* Notify this dentry's parent about a child's events. */
static inline void fsnotify_parent(struct dentry *dentry, __u32 mask)
{
__fsnotify_parent(dentry, mask);
inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
}
/*
* fsnotify_d_move - entry has been moved
* Called with dcache_lock and entry->d_lock held.
*/
static inline void fsnotify_d_move(struct dentry *entry)
{
/*
* On move we need to update entry->d_flags to indicate if the new parent
* cares about events from this entry.
*/
__fsnotify_update_dcache_flags(entry);
inotify_d_move(entry);
}
/*
* fsnotify_link_count - inode's link count changed
*/
static inline void fsnotify_link_count(struct inode *inode)
{
inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL);
fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
/*
* fsnotify_move - file old_name at old_dir was moved to new_name at new_dir
*/
@ -42,42 +69,62 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
int isdir, struct inode *target, struct dentry *moved)
{
struct inode *source = moved->d_inode;
u32 cookie = inotify_get_cookie();
u32 in_cookie = inotify_get_cookie();
u32 fs_cookie = fsnotify_get_cookie();
__u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM);
__u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO);
if (old_dir == new_dir)
inode_dir_notify(old_dir, DN_RENAME);
else {
inode_dir_notify(old_dir, DN_DELETE);
inode_dir_notify(new_dir, DN_CREATE);
old_dir_mask |= FS_DN_RENAME;
if (isdir) {
isdir = IN_ISDIR;
old_dir_mask |= FS_IN_ISDIR;
new_dir_mask |= FS_IN_ISDIR;
}
if (isdir)
isdir = IN_ISDIR;
inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir,cookie,old_name,
inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir, in_cookie, old_name,
source);
inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, cookie, new_name,
inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, in_cookie, new_name,
source);
fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie);
fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie);
if (target) {
inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL, NULL);
inotify_inode_is_dead(target);
/* this is really a link_count change not a removal */
fsnotify_link_count(target);
}
if (source) {
inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL);
fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
audit_inode_child(new_name, moved, new_dir);
}
/*
* fsnotify_inode_delete - an inode is being evicted from cache; cleanup is needed
*/
static inline void fsnotify_inode_delete(struct inode *inode)
{
__fsnotify_inode_delete(inode);
}
/*
* fsnotify_nameremove - a filename was removed from a directory
*/
static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
{
__u32 mask = FS_DELETE;
if (isdir)
isdir = IN_ISDIR;
dnotify_parent(dentry, DN_DELETE);
inotify_dentry_parent_queue_event(dentry, IN_DELETE|isdir, 0, dentry->d_name.name);
mask |= FS_IN_ISDIR;
fsnotify_parent(dentry, mask);
}
/*
@ -87,14 +134,9 @@ static inline void fsnotify_inoderemove(struct inode *inode)
{
inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL, NULL);
inotify_inode_is_dead(inode);
}
/*
* fsnotify_link_count - inode's link count changed
*/
static inline void fsnotify_link_count(struct inode *inode)
{
inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL);
fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
__fsnotify_inode_delete(inode);
}
/*
@ -102,10 +144,11 @@ static inline void fsnotify_link_count(struct inode *inode)
*/
static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
{
inode_dir_notify(inode, DN_CREATE);
inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name,
dentry->d_inode);
audit_inode_child(dentry->d_name.name, dentry, inode);
fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
}
/*
@ -115,11 +158,12 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
*/
static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry)
{
inode_dir_notify(dir, DN_CREATE);
inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name,
inode);
fsnotify_link_count(inode);
audit_inode_child(new_dentry->d_name.name, new_dentry, dir);
fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, new_dentry->d_name.name, 0);
}
/*
@ -127,10 +171,13 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct
*/
static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
{
inode_dir_notify(inode, DN_CREATE);
inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0,
dentry->d_name.name, dentry->d_inode);
__u32 mask = (FS_CREATE | FS_IN_ISDIR);
struct inode *d_inode = dentry->d_inode;
inotify_inode_queue_event(inode, mask, 0, dentry->d_name.name, d_inode);
audit_inode_child(dentry->d_name.name, dentry, inode);
fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
}
/*
@ -139,14 +186,15 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
static inline void fsnotify_access(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
u32 mask = IN_ACCESS;
__u32 mask = FS_ACCESS;
if (S_ISDIR(inode->i_mode))
mask |= IN_ISDIR;
mask |= FS_IN_ISDIR;
dnotify_parent(dentry, DN_ACCESS);
inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
/*
@ -155,14 +203,15 @@ static inline void fsnotify_access(struct dentry *dentry)
static inline void fsnotify_modify(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
u32 mask = IN_MODIFY;
__u32 mask = FS_MODIFY;
if (S_ISDIR(inode->i_mode))
mask |= IN_ISDIR;
mask |= FS_IN_ISDIR;
dnotify_parent(dentry, DN_MODIFY);
inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
/*
@ -171,13 +220,15 @@ static inline void fsnotify_modify(struct dentry *dentry)
static inline void fsnotify_open(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
u32 mask = IN_OPEN;
__u32 mask = FS_OPEN;
if (S_ISDIR(inode->i_mode))
mask |= IN_ISDIR;
mask |= FS_IN_ISDIR;
inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
/*
@ -187,15 +238,16 @@ static inline void fsnotify_close(struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = dentry->d_inode;
const char *name = dentry->d_name.name;
fmode_t mode = file->f_mode;
u32 mask = (mode & FMODE_WRITE) ? IN_CLOSE_WRITE : IN_CLOSE_NOWRITE;
__u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
if (S_ISDIR(inode->i_mode))
mask |= IN_ISDIR;
mask |= FS_IN_ISDIR;
inotify_dentry_parent_queue_event(dentry, mask, 0, name);
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
}
/*
@ -204,13 +256,15 @@ static inline void fsnotify_close(struct file *file)
static inline void fsnotify_xattr(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
u32 mask = IN_ATTRIB;
__u32 mask = FS_ATTRIB;
if (S_ISDIR(inode->i_mode))
mask |= IN_ISDIR;
mask |= FS_IN_ISDIR;
inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
/*
@ -220,50 +274,37 @@ static inline void fsnotify_xattr(struct dentry *dentry)
static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
{
struct inode *inode = dentry->d_inode;
int dn_mask = 0;
u32 in_mask = 0;
__u32 mask = 0;
if (ia_valid & ATTR_UID)
mask |= FS_ATTRIB;
if (ia_valid & ATTR_GID)
mask |= FS_ATTRIB;
if (ia_valid & ATTR_SIZE)
mask |= FS_MODIFY;
if (ia_valid & ATTR_UID) {
in_mask |= IN_ATTRIB;
dn_mask |= DN_ATTRIB;
}
if (ia_valid & ATTR_GID) {
in_mask |= IN_ATTRIB;
dn_mask |= DN_ATTRIB;
}
if (ia_valid & ATTR_SIZE) {
in_mask |= IN_MODIFY;
dn_mask |= DN_MODIFY;
}
/* both times implies a utime(s) call */
if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME))
{
in_mask |= IN_ATTRIB;
dn_mask |= DN_ATTRIB;
} else if (ia_valid & ATTR_ATIME) {
in_mask |= IN_ACCESS;
dn_mask |= DN_ACCESS;
} else if (ia_valid & ATTR_MTIME) {
in_mask |= IN_MODIFY;
dn_mask |= DN_MODIFY;
}
if (ia_valid & ATTR_MODE) {
in_mask |= IN_ATTRIB;
dn_mask |= DN_ATTRIB;
}
mask |= FS_ATTRIB;
else if (ia_valid & ATTR_ATIME)
mask |= FS_ACCESS;
else if (ia_valid & ATTR_MTIME)
mask |= FS_MODIFY;
if (dn_mask)
dnotify_parent(dentry, dn_mask);
if (in_mask) {
if (ia_valid & ATTR_MODE)
mask |= FS_ATTRIB;
if (mask) {
if (S_ISDIR(inode->i_mode))
in_mask |= IN_ISDIR;
inotify_inode_queue_event(inode, in_mask, 0, NULL, NULL);
inotify_dentry_parent_queue_event(dentry, in_mask, 0,
dentry->d_name.name);
mask |= FS_IN_ISDIR;
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
}
#ifdef CONFIG_INOTIFY /* inotify helpers */
#if defined(CONFIG_INOTIFY) || defined(CONFIG_FSNOTIFY) /* notify helpers */
/*
* fsnotify_oldname_init - save off the old filename before we change it
@ -281,7 +322,7 @@ static inline void fsnotify_oldname_free(const char *old_name)
kfree(old_name);
}
#else /* CONFIG_INOTIFY */
#else /* CONFIG_INOTIFY || CONFIG_FSNOTIFY */
static inline const char *fsnotify_oldname_init(const char *name)
{

View File

@ -0,0 +1,387 @@
/*
* Filesystem access notification for Linux
*
* Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
*/
#ifndef __LINUX_FSNOTIFY_BACKEND_H
#define __LINUX_FSNOTIFY_BACKEND_H
#ifdef __KERNEL__
#include <linux/idr.h> /* inotify uses this */
#include <linux/fs.h> /* struct inode */
#include <linux/list.h>
#include <linux/path.h> /* struct path */
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/atomic.h>
/*
* IN_* from inotify.h lines up EXACTLY with FS_*, this is so we can easily
* convert between them. dnotify only needs conversion at watch creation
* so no perf loss there. fanotify isn't defined yet, so it can use the
* holes if it needs more events.
*/
#define FS_ACCESS 0x00000001 /* File was accessed */
#define FS_MODIFY 0x00000002 /* File was modified */
#define FS_ATTRIB 0x00000004 /* Metadata changed */
#define FS_CLOSE_WRITE 0x00000008 /* Writable file was closed */
#define FS_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */
#define FS_OPEN 0x00000020 /* File was opened */
#define FS_MOVED_FROM 0x00000040 /* File was moved from X */
#define FS_MOVED_TO 0x00000080 /* File was moved to Y */
#define FS_CREATE 0x00000100 /* Subfile was created */
#define FS_DELETE 0x00000200 /* Subfile was deleted */
#define FS_DELETE_SELF 0x00000400 /* Self was deleted */
#define FS_MOVE_SELF 0x00000800 /* Self was moved */
#define FS_UNMOUNT 0x00002000 /* inode on umount fs */
#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
#define FS_IN_IGNORED 0x00008000 /* last inotify event here */
#define FS_IN_ISDIR 0x40000000 /* event occurred against dir */
#define FS_IN_ONESHOT 0x80000000 /* only send event once */
#define FS_DN_RENAME 0x10000000 /* file renamed */
#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */
/* This inode cares about things that happen to its children. Always set for
* dnotify and inotify. */
#define FS_EVENT_ON_CHILD 0x08000000
/* This is a list of all events that may get sent to a parent based on fs
* events happening to inodes inside that directory */
#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\
FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\
FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
FS_DELETE)
/* listeners that hard code group numbers near the top */
#define DNOTIFY_GROUP_NUM UINT_MAX
#define INOTIFY_GROUP_NUM (DNOTIFY_GROUP_NUM-1)
struct fsnotify_group;
struct fsnotify_event;
struct fsnotify_mark_entry;
struct fsnotify_event_private_data;
/*
* Each group must define these ops. The fsnotify infrastructure will call
* these operations for each relevant group.
*
* should_send_event - given a group, inode, and mask this function determines
* if the group is interested in this event.
* handle_event - main call for a group to handle an fs event
* free_group_priv - called when a group refcnt hits 0 to clean up the private union
* freeing_mark - this means that a mark has been flagged to die when everything
* finishes using it. The function is supplied with what must be a
* valid group and inode to use to clean up.
*/
struct fsnotify_ops {
bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode, __u32 mask);
int (*handle_event)(struct fsnotify_group *group, struct fsnotify_event *event);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
void (*free_event_priv)(struct fsnotify_event_private_data *priv);
};
/*
* A group is a "thing" that wants to receive notification about filesystem
* events. The mask holds the subset of event types this group cares about.
* The refcnt on a group is managed by the implementor; the moment it hits 0
* everything will be cleaned up.
*/
struct fsnotify_group {
/*
* global list of all groups receiving events from fsnotify.
* anchored by fsnotify_groups and protected by either fsnotify_grp_mutex
* or fsnotify_grp_srcu depending on write vs read.
*/
struct list_head group_list;
/*
* Defines all of the event types in which this group is interested.
* This mask is a bitwise OR of the FS_* events from above. Each time
* this mask changes for a group (if it changes) the correct functions
* must be called to update the global structures which indicate global
* interest in event types.
*/
__u32 mask;
/*
* How the refcnt is used is up to each group. When the refcnt hits 0
* fsnotify will clean up all of the resources associated with this group.
* As an example, the dnotify group will always have a refcnt=1 and that
* will never change. Inotify, on the other hand, has a group per
* inotify_init() and the refcnt will hit 0 only when that fd has been
* closed.
*/
atomic_t refcnt; /* things with interest in this group */
unsigned int group_num; /* simply prevents accidental group collision */
const struct fsnotify_ops *ops; /* how this group handles things */
/* needed to send notification to userspace */
struct mutex notification_mutex; /* protect the notification_list */
struct list_head notification_list; /* list of event_holder this group needs to send to userspace */
wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */
unsigned int q_len; /* events on the queue */
unsigned int max_events; /* maximum events allowed on the list */
/* stores all fastpath mark entries associated with this group so they can be cleaned up on unregister */
spinlock_t mark_lock; /* protect mark_entries list */
atomic_t num_marks; /* 1 for each mark entry and 1 for not being
* past the point of no return when freeing
* a group */
struct list_head mark_entries; /* all inode mark entries for this group */
/* prevents double list_del of group_list. protected by global fsnotify_grp_mutex */
bool on_group_list;
/* groups can define private fields here or use the void *private */
union {
void *private;
#ifdef CONFIG_INOTIFY_USER
struct inotify_group_private_data {
spinlock_t idr_lock;
struct idr idr;
u32 last_wd;
struct fasync_struct *fa; /* async notification */
struct user_struct *user;
} inotify_data;
#endif
};
};
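/*
 * Minimal illustrative group implementation (hypothetical names, for
 * documentation only): accept every event type in the group's mask and
 * handle events inline. The remaining ops may be left NULL when a group
 * attaches no private data and needs no special cleanup.
 */
static inline bool example_should_send_event(struct fsnotify_group *group,
struct inode *inode, __u32 mask)
{
return group->mask & mask;
}
static inline int example_handle_event(struct fsnotify_group *group,
struct fsnotify_event *event)
{
/* a real group would act on event->mask here, or queue the event for
* userspace with fsnotify_add_notify_event() the way inotify does */
return 0;
}
static const struct fsnotify_ops example_fsnotify_ops = {
.should_send_event = example_should_send_event,
.handle_event = example_handle_event,
};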
/*
* A single event can be queued in multiple group->notification_lists.
*
* each group->notification_list will point to an event_holder which in turn points
* to the actual event that needs to be sent to userspace.
*
* It seemed cheaper to create a refcnt'd event plus a small holder for every
* group than to create a separate event for every group.
*
*/
struct fsnotify_event_holder {
struct fsnotify_event *event;
struct list_head event_list;
};
/*
* Inotify needs to tack data onto an event. This struct lets us later find the
* correct private data of the correct group.
*/
struct fsnotify_event_private_data {
struct fsnotify_group *group;
struct list_head event_list;
};
/*
* all of the information about the original object that we now want to send to
* a group. If you want to carry more info from the accessing task to the
* listener this structure is where you need to be adding fields.
*/
struct fsnotify_event {
/*
* If we create an event we are also likely going to need a holder
* to link it to a group. So embed one holder in the event. This means only
* one allocation for the common case where the event goes to one group.
*/
struct fsnotify_event_holder holder;
spinlock_t lock; /* protection for the associated event_holder and private_list */
/* to_tell may ONLY be dereferenced during handle_event(). */
struct inode *to_tell; /* either the inode the event happened to or its parent */
/*
* depending on the event type we should have either a path or inode
* We hold a reference on path, but NOT on inode. Since we have the ref on
* the path, it may be dereferenced at any point during this object's
* lifetime. That reference is dropped when this object's refcnt hits
* 0. If this event contains an inode instead of a path, the inode may
* ONLY be used during handle_event().
*/
union {
struct path path;
struct inode *inode;
};
/* when calling fsnotify tell it if the data is a path or inode */
#define FSNOTIFY_EVENT_NONE 0
#define FSNOTIFY_EVENT_PATH 1
#define FSNOTIFY_EVENT_INODE 2
#define FSNOTIFY_EVENT_FILE 3
int data_type; /* which of the above union we have */
atomic_t refcnt; /* how many groups still are using/need to send this event */
__u32 mask; /* the type of access, bitwise OR for FS_* event types */
u32 sync_cookie; /* used to correlate events, namely inotify mv events */
char *file_name;
size_t name_len;
struct list_head private_data_list; /* groups can store private data here */
};
/*
* a mark is simply an entry attached to an in-core inode which allows an
* fsnotify listener to indicate it is interested (or no longer interested)
* in events of the types matching mask on that inode.
*
* these are flushed when an inode is evicted from core and may be flushed
* when the inode is modified (as seen by fsnotify_access). Some fsnotify users
* (such as dnotify) will flush these when the open fd is closed and not at
* inode eviction or modification.
*/
struct fsnotify_mark_entry {
__u32 mask; /* mask this mark entry is for */
/* we hold ref for each i_list and g_list. also one ref for each 'thing'
* in kernel that found and may be using this mark. */
atomic_t refcnt; /* active things looking at this mark */
struct inode *inode; /* inode this entry is associated with */
struct fsnotify_group *group; /* group this mark entry is for */
struct hlist_node i_list; /* list of mark_entries by inode->i_fsnotify_mark_entries */
struct list_head g_list; /* list of mark_entries by group->mark_entries */
spinlock_t lock; /* protect group and inode */
struct list_head free_i_list; /* tmp list used when freeing this mark */
struct list_head free_g_list; /* tmp list used when freeing this mark */
void (*free_mark)(struct fsnotify_mark_entry *entry); /* called on final put+free */
};
#ifdef CONFIG_FSNOTIFY
/* called from the vfs helpers */
/* main fsnotify call to send events */
extern void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
const char *name, u32 cookie);
extern void __fsnotify_parent(struct dentry *dentry, __u32 mask);
extern void __fsnotify_inode_delete(struct inode *inode);
extern u32 fsnotify_get_cookie(void);
static inline int fsnotify_inode_watches_children(struct inode *inode)
{
/* FS_EVENT_ON_CHILD is set if the inode may care */
if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD))
return 0;
/* this inode might care about child events, does it care about the
* specific set of events that can happen on a child? */
return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD;
}
/*
* Update the dentry with a flag indicating whether its parent is interested in
* receiving filesystem events that happen to this dentry->d_inode.
*/
static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
{
struct dentry *parent;
assert_spin_locked(&dcache_lock);
assert_spin_locked(&dentry->d_lock);
parent = dentry->d_parent;
if (fsnotify_inode_watches_children(parent->d_inode))
dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
else
dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
}
/*
* fsnotify_d_instantiate - instantiate a dentry for inode
* Called with dcache_lock held.
*/
static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
{
if (!inode)
return;
assert_spin_locked(&dcache_lock);
spin_lock(&dentry->d_lock);
__fsnotify_update_dcache_flags(dentry);
spin_unlock(&dentry->d_lock);
}
/* called from fsnotify listeners, such as fanotify or dnotify */
/* must call when a group changes its ->mask */
extern void fsnotify_recalc_global_mask(void);
/* get a reference to an existing or create a new group */
extern struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num,
__u32 mask,
const struct fsnotify_ops *ops);
/* run all marks associated with this group and update group->mask */
extern void fsnotify_recalc_group_mask(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_obtain_group */
extern void fsnotify_put_group(struct fsnotify_group *group);
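/*
 * Illustrative usage (hypothetical group number, documentation only):
 * get-or-create a group interested in modify events using the example ops
 * above, then drop the reference when done. fsnotify_obtain_group() is
 * assumed here to return an ERR_PTR() value on failure.
 */
static inline int example_setup_group(void)
{
struct fsnotify_group *group;
group = fsnotify_obtain_group(INOTIFY_GROUP_NUM - 1, FS_MODIFY,
&example_fsnotify_ops);
if (IS_ERR(group))
return PTR_ERR(group);
/* ... add marks, receive events ... */
fsnotify_put_group(group);
return 0;
}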
/* take a reference to an event */
extern void fsnotify_get_event(struct fsnotify_event *event);
extern void fsnotify_put_event(struct fsnotify_event *event);
/* find private data previously attached to an event and unlink it */
extern struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnotify_group *group,
struct fsnotify_event *event);
/* attach the event to the group notification queue */
extern int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
struct fsnotify_event_private_data *priv);
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
extern struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group);
/* return AND dequeue the first event on the notification queue */
extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group);
/* functions used to manipulate the marks attached to inodes */
/* run all marks associated with an inode and update inode->i_fsnotify_mask */
extern void fsnotify_recalc_inode_mask(struct inode *inode);
extern void fsnotify_init_mark(struct fsnotify_mark_entry *entry, void (*free_mark)(struct fsnotify_mark_entry *entry));
/* find (and take a reference) to a mark associated with group and inode */
extern struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group, struct inode *inode);
/* attach the mark to both the group and the inode */
extern int fsnotify_add_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group, struct inode *inode);
/* given a mark, flag it to be freed when all references are dropped */
extern void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry);
/* run all the marks in a group, and flag them to be freed */
extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
extern void fsnotify_get_mark(struct fsnotify_mark_entry *entry);
extern void fsnotify_put_mark(struct fsnotify_mark_entry *entry);
extern void fsnotify_unmount_inodes(struct list_head *list);
/* put here because inotify does some weird stuff when destroying watches */
extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
void *data, int data_is, const char *name,
u32 cookie);
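/*
 * Illustrative sketch (hypothetical helper and callback, documentation
 * only): attach a freshly allocated mark to an inode so the given group
 * starts seeing FS_MODIFY events on it. Assumes the mark owns no other
 * resources and is simply kfree()d on final put.
 */
static inline void example_free_mark(struct fsnotify_mark_entry *entry)
{
kfree(entry);
}
static inline int example_watch_inode(struct fsnotify_group *group, struct inode *inode)
{
struct fsnotify_mark_entry *entry;
int ret;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
fsnotify_init_mark(entry, example_free_mark);
entry->mask = FS_MODIFY;
ret = fsnotify_add_mark(entry, group, inode);
if (ret) {
fsnotify_put_mark(entry); /* final put calls example_free_mark */
return ret;
}
/* the group's aggregate mask may have grown to include FS_MODIFY */
fsnotify_recalc_group_mask(group);
return 0;
}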
#else
static inline void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
const char *name, u32 cookie)
{}
static inline void __fsnotify_parent(struct dentry *dentry, __u32 mask)
{}
static inline void __fsnotify_inode_delete(struct inode *inode)
{}
static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
{}
static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
{}
static inline u32 fsnotify_get_cookie(void)
{
return 0;
}
static inline void fsnotify_unmount_inodes(struct list_head *list)
{}
#endif /* CONFIG_FSNOTIFY */
#endif /* __KERNEL__ */
#endif /* __LINUX_FSNOTIFY_BACKEND_H */

View File

@ -302,7 +302,8 @@ config AUDITSYSCALL
config AUDIT_TREE
def_bool y
depends on AUDITSYSCALL && INOTIFY
depends on AUDITSYSCALL
select INOTIFY
menu "RCU Subsystem"