// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	refcount_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	unsigned long key;
	struct fsnotify_mark *mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

struct audit_tree_mark {
	struct fsnotify_mark mark;
	struct audit_chunk *chunk;
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list			hash_lock
 * tree.rules anchors rule.rlist				audit_filter_mutex
 * chunk.trees anchors tree.same_root				hash_lock
 * chunk.hash is a hash with middle bits of the key (the address of
 * inode->i_fsnotify_marks) as a hash function.			RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by .refs; the fsnotify mark attached to the inode
 * holds one of those references, and that one is dropped only after an RCU
 * grace period, via audit_mark_put_chunk().
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference. Some.
 */
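/*
 * Illustrative example (editorial, not part of the original source): a
 * tree T watching two inodes has two chunks, A and B, one per inode:
 *
 *	T.chunks threads through A.owners[i].list and B.owners[j].list
 *	A.owners[i].owner == B.owners[j].owner == &T
 *	A.owners[i].index == i and B.owners[j].index == j, with bit 31
 *	clear once the tagging is committed
 */
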
static struct fsnotify_group *audit_tree_group;
static struct kmem_cache *audit_tree_mark_cachep __read_mostly;

static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		refcount_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

/*
 * Drop reference to the chunk that was held by the mark. This is the reference
 * that gets dropped after we've removed the chunk from the hash table and we
 * use it to make sure chunk cannot be freed before RCU grace period expires.
 */
static void audit_mark_put_chunk(struct audit_chunk *chunk)
{
	call_rcu(&chunk->head, __put_chunk);
}
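/*
 * Editorial note on the call_rcu() above: a concurrent audit_tree_lookup()
 * may still be walking the hash bucket this chunk was just removed from.
 * Deferring the final reference drop by one grace period guarantees such a
 * reader either misses the chunk or has taken its own .refs reference
 * before the memory can go away:
 *
 *	CPU0					CPU1
 *	list_del_rcu(&chunk->hash);		rcu_read_lock();
 *	audit_mark_put_chunk(chunk);		p = audit_tree_lookup(inode);
 *	  (freed only after CPU1's		rcu_read_unlock();
 *	   read-side critical section)
 */
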
static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *entry)
{
	return container_of(entry, struct audit_tree_mark, mark);
}

static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
{
	return audit_mark(mark)->chunk;
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = mark_chunk(entry);
	audit_mark_put_chunk(chunk);
	kmem_cache_free(audit_tree_mark_cachep, audit_mark(entry));
}

static struct fsnotify_mark *alloc_mark(void)
{
	struct audit_tree_mark *amark;

	amark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
	if (!amark)
		return NULL;
	fsnotify_init_mark(&amark->mark, audit_tree_group);
	amark->mark.mask = FS_IN_IGNORED;
	return &amark->mark;
}

static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->mark = alloc_mark();
	if (!chunk->mark) {
		kfree(chunk);
		return NULL;
	}
	audit_mark(chunk->mark)->chunk = chunk;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	/* Use address pointed to by connector->obj as the key */
	return (unsigned long)&inode->i_fsnotify_marks;
}

static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}
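/*
 * Worked example (editorial; assumes L1_CACHE_BYTES == 64): for a key of
 * 0xffff888012345900 the bucket is (key / 64) % 128, i.e. bits 6-12 of
 * the address, here 100. Dividing by the cache line size discards low
 * bits that carry little entropy (the keys are addresses of fields inside
 * slab-allocated inodes), leaving the "middle bits" that the comment at
 * the top of this file refers to.
 */
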
/* hash_lock & entry->group->mark_mutex is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list;

	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	WARN_ON_ONCE(!chunk->key);
	list = chunk_hash(chunk->key);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/*
		 * We use a data dependency barrier in READ_ONCE() to make sure
		 * the chunk we see is fully initialized.
		 */
		if (READ_ONCE(p->key) == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}
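/*
 * Caller pattern (editorial sketch; the actual callers live in
 * kernel/auditsc.c): lookups run under rcu_read_lock() and keep the chunk
 * alive via the .refs reference taken above, e.g.:
 *
 *	rcu_read_lock();
 *	chunk = audit_tree_lookup(inode);
 *	rcu_read_unlock();
 *	if (chunk) {
 *		if (audit_tree_match(chunk, rule->tree))
 *			...;
 *		audit_put_chunk(chunk);
 *	}
 */
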
bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */

static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
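/*
 * Worked example (editorial): for the node stored in owners[2] of some
 * chunk, p->index is 2, possibly with bit 31 set while the tagging is
 * provisional. Masking the tag bit recovers the slot number, "p -= 2"
 * steps back to owners[0], and container_of() yields the enclosing
 * audit_chunk. This is why alloc_chunk() seeds owners[i].index = i.
 */
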
static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)
{
	struct audit_tree *owner;
	int i, j;

	new->key = old->key;
	list_splice_init(&old->trees, &new->trees);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	for (i = j = 0; j < old->count; i++, j++) {
		if (!old->owners[j].owner) {
			i--;
			continue;
		}
		owner = old->owners[j].owner;
		new->owners[i].owner = owner;
		new->owners[i].index = old->owners[j].index - j + i;
		if (!owner) /* result of earlier fallback */
			continue;
		get_tree(owner);
		list_replace_init(&old->owners[j].list, &new->owners[i].list);
	}
	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	list_replace_rcu(&old->hash, &new->hash);
}

static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)
{
	struct audit_tree *owner = p->owner;

	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
}

static int chunk_count_trees(struct audit_chunk *chunk)
{
	int i;
	int ret = 0;

	for (i = 0; i < chunk->count; i++)
		if (chunk->owners[i].owner)
			ret++;
	return ret;
}

static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = chunk->mark;
	struct audit_chunk *new = NULL;
	int size;

	remove_chunk_node(chunk, p);
	fsnotify_get_mark(entry);
	spin_unlock(&hash_lock);

	mutex_lock(&entry->group->mark_mutex);
	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->obj getting NULL.
	 */
	if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		mutex_unlock(&entry->group->mark_mutex);
		if (new)
			fsnotify_put_mark(new->mark);
		goto out;
	}

	size = chunk_count_trees(chunk);
	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(entry);
		mutex_unlock(&entry->group->mark_mutex);
		fsnotify_free_mark(entry);
		goto out;
	}

	new = alloc_chunk(size);
	if (!new)
		goto out_mutex;

	if (fsnotify_add_mark_locked(new->mark, entry->connector->obj,
				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
		fsnotify_put_mark(new->mark);
		goto out_mutex;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(new, chunk);
	spin_unlock(&hash_lock);
	fsnotify_detach_mark(entry);
	mutex_unlock(&entry->group->mark_mutex);
	fsnotify_free_mark(entry);
	fsnotify_put_mark(new->mark);	/* drop initial reference */
	goto out;

out_mutex:
	mutex_unlock(&entry->group->mark_mutex);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

/* Call with group->mark_mutex held, releases it */
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);

	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		return -ENOMEM;
	}

	entry = chunk->mark;
	if (fsnotify_add_inode_mark_locked(entry, inode, 0)) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		fsnotify_detach_mark(entry);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_free_mark(entry);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	chunk->key = inode_to_key(inode);
	/*
	 * Inserting into the hash table has to go last as once we do that RCU
	 * readers can see the chunk.
	 */
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	mutex_lock(&audit_tree_group->mark_mutex);
	old_entry = fsnotify_find_mark(&inode->i_fsnotify_marks,
				       audit_tree_group);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = mark_chunk(old_entry);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			mutex_unlock(&audit_tree_group->mark_mutex);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = chunk->mark;

	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->obj getting NULL.
	 */
	if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		/* old_entry is being shot, lets just lie */
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(old_entry);
		fsnotify_put_mark(chunk->mark);
		return -ENOENT;
	}

	if (fsnotify_add_mark_locked(chunk_entry, old_entry->connector->obj,
				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		fsnotify_detach_mark(chunk_entry);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_free_mark(chunk_entry);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	p = &chunk->owners[chunk->count - 1];
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(chunk, old);
	spin_unlock(&hash_lock);
	fsnotify_detach_mark(old_entry);
	mutex_unlock(&audit_tree_group->mark_mutex);
	fsnotify_free_mark(old_entry);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
	return 0;
}
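/*
 * Design note (editorial): chunks are immutable once RCU readers can see
 * them, so tag_chunk() never grows a chunk in place. It builds a new chunk
 * with one extra owner slot, attaches a second fsnotify mark to the same
 * inode, swaps the chunks under hash_lock via replace_chunk(), and only
 * then detaches and frees the old mark.
 */
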
static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	if (!audit_enabled)
		return;
	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder: move marked-for-prune chunks to the front of the list */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
	       (unsigned long)arg;
}

void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying else where... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)(chunk->key),
					   root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}
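/*
 * For orientation (editorial; syntax per auditctl(8)): tree rules are the
 * recursive directory watches configured from userspace along the lines of
 *
 *	auditctl -a always,exit -F dir=/etc -F key=cfg-change
 *
 * which arrives here as an AUDIT_FILTER_EXIT rule carrying an absolute
 * pathname.
 */
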
void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		audit_ctl_lock();
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		audit_ctl_unlock();
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				   "audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}

	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}

	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	audit_ctl_lock();
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	audit_ctl_unlock();
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}

static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   u32 mask, const void *data, int data_type,
				   const unsigned char *file_name, u32 cookie,
				   struct fsnotify_iter_info *iter_info)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry,
				    struct fsnotify_group *group)
{
	struct audit_chunk *chunk = mark_chunk(entry);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(refcount_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
	.free_mark = audit_tree_destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC);

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);