fsnotify: put inode specific fields in an fsnotify_mark in a union
The addition of marks on vfs mounts will be simplified if the inode specific parts of a mark and the vfsmnt specific parts of a mark are actually in a union so naming can be easy. This patch just implements the inode struct and the union.

Signed-off-by: Eric Paris <eparis@redhat.com>
commit 2823e04de4
parent 3a9fb89f4c
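The hunks below touch the dnotify backend, the core inode mark code, the inotify backend, the fsnotify header, and the audit tree code, and are purely mechanical: every entry->inode / entry->i_list / entry->free_i_list access becomes entry->i.inode / entry->i.i_list / entry->i.free_i_list. As a condensed sketch assembled from the header hunk below (not the complete struct), the mark ends up looking roughly like this:

struct fsnotify_inode_mark {
        struct inode *inode;            /* inode this mark is attached to */
        struct hlist_node i_list;       /* on inode->i_fsnotify_mark_entries */
        struct list_head free_i_list;   /* tmp list used when freeing the mark */
};

struct fsnotify_mark_entry {
        /* ... mask, refcnt, group, g_list, lock, ... */
        union {
                struct fsnotify_inode_mark i;   /* inode specific half of the mark */
        };
        /* ... free_g_list, free_mark, ... */
};

/* callers that used to write entry->inode now write entry->i.inode, e.g.: */
if (entry->i.inode)
        fsnotify_recalc_inode_mask(entry->i.inode);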
@@ -70,8 +70,8 @@ static void dnotify_recalc_inode_mask(struct fsnotify_mark_entry *entry)
         if (old_mask == new_mask)
                 return;
 
-        if (entry->inode)
-                fsnotify_recalc_inode_mask(entry->inode);
+        if (entry->i.inode)
+                fsnotify_recalc_inode_mask(entry->i.inode);
 }
 
 /*
@@ -117,7 +117,7 @@ static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
 
         assert_spin_locked(&inode->i_lock);
 
-        hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list)
+        hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i.i_list)
                 new_mask |= entry->mask;
         inode->i_fsnotify_mask = new_mask;
 }
@@ -148,7 +148,7 @@ void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry)
         spin_lock(&entry->lock);
 
         group = entry->group;
-        inode = entry->inode;
+        inode = entry->i.inode;
 
         BUG_ON(group && !inode);
         BUG_ON(!group && inode);
@@ -165,8 +165,8 @@ void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry)
         spin_lock(&group->mark_lock);
         spin_lock(&inode->i_lock);
 
-        hlist_del_init(&entry->i_list);
-        entry->inode = NULL;
+        hlist_del_init(&entry->i.i_list);
+        entry->i.inode = NULL;
 
         list_del_init(&entry->g_list);
         entry->group = NULL;
@@ -248,14 +248,14 @@ void fsnotify_clear_marks_by_inode(struct inode *inode)
         LIST_HEAD(free_list);
 
         spin_lock(&inode->i_lock);
-        hlist_for_each_entry_safe(entry, pos, n, &inode->i_fsnotify_mark_entries, i_list) {
-                list_add(&entry->free_i_list, &free_list);
-                hlist_del_init(&entry->i_list);
+        hlist_for_each_entry_safe(entry, pos, n, &inode->i_fsnotify_mark_entries, i.i_list) {
+                list_add(&entry->i.free_i_list, &free_list);
+                hlist_del_init(&entry->i.i_list);
                 fsnotify_get_mark(entry);
         }
         spin_unlock(&inode->i_lock);
 
-        list_for_each_entry_safe(entry, lentry, &free_list, free_i_list) {
+        list_for_each_entry_safe(entry, lentry, &free_list, i.free_i_list) {
                 fsnotify_destroy_mark_by_entry(entry);
                 fsnotify_put_mark(entry);
         }
@@ -273,7 +273,7 @@ struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group,
 
         assert_spin_locked(&inode->i_lock);
 
-        hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list) {
+        hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i.i_list) {
                 if (entry->group == group) {
                         fsnotify_get_mark(entry);
                         return entry;
@@ -285,7 +285,7 @@ struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group,
 void fsnotify_duplicate_mark(struct fsnotify_mark_entry *new, struct fsnotify_mark_entry *old)
 {
         assert_spin_locked(&old->lock);
-        new->inode = old->inode;
+        new->i.inode = old->i.inode;
         new->group = old->group;
         new->mask = old->mask;
         new->free_mark = old->free_mark;
@@ -299,10 +299,10 @@ void fsnotify_init_mark(struct fsnotify_mark_entry *entry,
 {
         spin_lock_init(&entry->lock);
         atomic_set(&entry->refcnt, 1);
-        INIT_HLIST_NODE(&entry->i_list);
+        INIT_HLIST_NODE(&entry->i.i_list);
         entry->group = NULL;
         entry->mask = 0;
-        entry->inode = NULL;
+        entry->i.inode = NULL;
         entry->free_mark = free_mark;
 }
 
@@ -350,9 +350,9 @@ int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
         lentry = fsnotify_find_mark_entry(group, inode);
         if (!lentry) {
                 entry->group = group;
-                entry->inode = inode;
+                entry->i.inode = inode;
 
-                hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries);
+                hlist_add_head(&entry->i.i_list, &inode->i_fsnotify_mark_entries);
                 list_add(&entry->g_list, &group->mark_entries);
 
                 fsnotify_get_mark(entry); /* for i_list and g_list */
@@ -193,7 +193,7 @@ static int idr_callback(int id, void *p, void *data)
          */
         if (entry)
                 printk(KERN_WARNING "entry->group=%p inode=%p wd=%d\n",
-                        entry->group, entry->inode, ientry->wd);
+                        entry->group, entry->i.inode, ientry->wd);
         return 0;
 }
 
@@ -445,7 +445,7 @@ static void inotify_remove_from_idr(struct fsnotify_group *group,
         if (wd == -1) {
                 WARN_ONCE(1, "%s: ientry=%p ientry->wd=%d ientry->group=%p"
                         " ientry->inode=%p\n", __func__, ientry, ientry->wd,
-                        ientry->fsn_entry.group, ientry->fsn_entry.inode);
+                        ientry->fsn_entry.group, ientry->fsn_entry.i.inode);
                 goto out;
         }
 
@@ -454,7 +454,7 @@ static void inotify_remove_from_idr(struct fsnotify_group *group,
         if (unlikely(!found_ientry)) {
                 WARN_ONCE(1, "%s: ientry=%p ientry->wd=%d ientry->group=%p"
                         " ientry->inode=%p\n", __func__, ientry, ientry->wd,
-                        ientry->fsn_entry.group, ientry->fsn_entry.inode);
+                        ientry->fsn_entry.group, ientry->fsn_entry.i.inode);
                 goto out;
         }
 
@@ -468,9 +468,9 @@ static void inotify_remove_from_idr(struct fsnotify_group *group,
                         "entry->inode=%p found_ientry=%p found_ientry->wd=%d "
                         "found_ientry->group=%p found_ientry->inode=%p\n",
                         __func__, ientry, ientry->wd, ientry->fsn_entry.group,
-                        ientry->fsn_entry.inode, found_ientry, found_ientry->wd,
+                        ientry->fsn_entry.i.inode, found_ientry, found_ientry->wd,
                         found_ientry->fsn_entry.group,
-                        found_ientry->fsn_entry.inode);
+                        found_ientry->fsn_entry.i.inode);
                 goto out;
         }
 
@@ -482,7 +482,7 @@ static void inotify_remove_from_idr(struct fsnotify_group *group,
         if (unlikely(atomic_read(&ientry->fsn_entry.refcnt) < 3)) {
                 printk(KERN_ERR "%s: ientry=%p ientry->wd=%d ientry->group=%p"
                         " ientry->inode=%p\n", __func__, ientry, ientry->wd,
-                        ientry->fsn_entry.group, ientry->fsn_entry.inode);
+                        ientry->fsn_entry.group, ientry->fsn_entry.i.inode);
                 /* we can't really recover with bad ref cnting.. */
                 BUG();
         }
@@ -226,6 +226,15 @@ struct fsnotify_event {
         struct list_head private_data_list;     /* groups can store private data here */
 };
 
+/*
+ * Inode specific fields in an fsnotify_mark_entry
+ */
+struct fsnotify_inode_mark {
+        struct inode *inode;            /* inode this entry is associated with */
+        struct hlist_node i_list;       /* list of mark_entries by inode->i_fsnotify_mark_entries */
+        struct list_head free_i_list;   /* tmp list used when freeing this mark */
+};
+
 /*
  * a mark is simply an entry attached to an in core inode which allows an
  * fsnotify listener to indicate they are either no longer interested in events
@@ -241,12 +250,12 @@ struct fsnotify_mark_entry {
         /* we hold ref for each i_list and g_list. also one ref for each 'thing'
          * in kernel that found and may be using this mark. */
         atomic_t refcnt;                /* active things looking at this mark */
-        struct inode *inode;            /* inode this entry is associated with */
         struct fsnotify_group *group;   /* group this mark entry is for */
-        struct hlist_node i_list;       /* list of mark_entries by inode->i_fsnotify_mark_entries */
         struct list_head g_list;        /* list of mark_entries by group->i_fsnotify_mark_entries */
-        spinlock_t lock;                /* protect group, inode, and killme */
-        struct list_head free_i_list;   /* tmp list used when freeing this mark */
+        spinlock_t lock;                /* protect group and inode */
+        union {
+                struct fsnotify_inode_mark i;
+        };
         struct list_head free_g_list;   /* tmp list used when freeing this mark */
         void (*free_mark)(struct fsnotify_mark_entry *entry); /* called on final put+free */
 };
@@ -179,9 +179,9 @@ static void insert_hash(struct audit_chunk *chunk)
         struct fsnotify_mark_entry *entry = &chunk->mark;
         struct list_head *list;
 
-        if (!entry->inode)
+        if (!entry->i.inode)
                 return;
-        list = chunk_hash(entry->inode);
+        list = chunk_hash(entry->i.inode);
         list_add_rcu(&chunk->hash, list);
 }
 
@@ -193,7 +193,7 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
 
         list_for_each_entry_rcu(p, list, hash) {
                 /* mark.inode may have gone NULL, but who cares? */
-                if (p->mark.inode == inode) {
+                if (p->mark.i.inode == inode) {
                         atomic_long_inc(&p->refs);
                         return p;
                 }
@@ -233,7 +233,7 @@ static void untag_chunk(struct node *p)
         spin_unlock(&hash_lock);
 
         spin_lock(&entry->lock);
-        if (chunk->dead || !entry->inode) {
+        if (chunk->dead || !entry->i.inode) {
                 spin_unlock(&entry->lock);
                 goto out;
         }
@@ -259,7 +259,7 @@ static void untag_chunk(struct node *p)
         if (!new)
                 goto Fallback;
         fsnotify_duplicate_mark(&new->mark, entry);
-        if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, 1)) {
+        if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, 1)) {
                 free_chunk(new);
                 goto Fallback;
         }
@@ -388,7 +388,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
         chunk_entry = &chunk->mark;
 
         spin_lock(&old_entry->lock);
-        if (!old_entry->inode) {
+        if (!old_entry->i.inode) {
                 /* old_entry is being shot, lets just lie */
                 spin_unlock(&old_entry->lock);
                 fsnotify_put_mark(old_entry);
@@ -397,7 +397,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
         }
 
         fsnotify_duplicate_mark(chunk_entry, old_entry);
-        if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, 1)) {
+        if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, 1)) {
                 spin_unlock(&old_entry->lock);
                 free_chunk(chunk);
                 fsnotify_put_mark(old_entry);
@@ -605,7 +605,7 @@ void audit_trim_trees(void)
                 list_for_each_entry(node, &tree->chunks, list) {
                         struct audit_chunk *chunk = find_chunk(node);
                         /* this could be NULL if the watch is dieing else where... */
-                        struct inode *inode = chunk->mark.inode;
+                        struct inode *inode = chunk->mark.i.inode;
                         node->index |= 1U<<31;
                         if (iterate_mounts(compare_root, inode, root_mnt))
                                 node->index &= ~(1U<<31);