// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	refcount_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	unsigned long key;
	struct fsnotify_mark *mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

struct audit_tree_mark {
	struct fsnotify_mark mark;
	struct audit_chunk *chunk;
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list			hash_lock
 * tree.rules anchors rule.rlist				audit_filter_mutex
 * chunk.trees anchors tree.same_root				hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.						RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded .refs; the mark associated with the chunk
 * holds one reference to the chunk, and that reference is dropped via
 * audit_mark_put_chunk() only after an RCU grace period, so that RCU readers
 * of the hash table cannot see a freed chunk.
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference.  Some.
 */

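/*
 * For instance (illustrative numbers, not from the original source): a tree
 * currently referenced by live rules and tagged into three chunks has
 * tree.count == 4 -- one reference for "some rules refer to it" plus one
 * per owning chunk node.
 */
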
static struct fsnotify_group *audit_tree_group;
static struct kmem_cache *audit_tree_mark_cachep __read_mostly;

static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		refcount_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

/*
 * Drop reference to the chunk that was held by the mark. This is the reference
 * that gets dropped after we've removed the chunk from the hash table and we
 * use it to make sure chunk cannot be freed before RCU grace period expires.
 */
static void audit_mark_put_chunk(struct audit_chunk *chunk)
{
	call_rcu(&chunk->head, __put_chunk);
}

static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *entry)
{
	return container_of(entry, struct audit_tree_mark, mark);
}

static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
{
	return audit_mark(mark)->chunk;
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = mark_chunk(entry);
	audit_mark_put_chunk(chunk);
	kmem_cache_free(audit_tree_mark_cachep, audit_mark(entry));
}

static struct fsnotify_mark *alloc_mark(void)
{
	struct audit_tree_mark *amark;

	amark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
	if (!amark)
		return NULL;
	fsnotify_init_mark(&amark->mark, audit_tree_group);
	amark->mark.mask = FS_IN_IGNORED;
	return &amark->mark;
}

static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->mark = alloc_mark();
	if (!chunk->mark) {
		kfree(chunk);
		return NULL;
	}
	audit_mark(chunk->mark)->chunk = chunk;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	return chunk;
}

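/*
 * Sizing example (derived from the code above): alloc_chunk(2) allocates
 * offsetof(struct audit_chunk, owners) + 2 * sizeof(struct node) bytes --
 * the fixed header plus a two-element owners[] flexible array, initialized
 * with owners[0].index == 0 and owners[1].index == 1.
 */
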
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	/* Use address pointed to by connector->obj as the key */
	return (unsigned long)&inode->i_fsnotify_marks;
}

static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

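/*
 * Illustration (assuming 64-byte cachelines): dividing the key by
 * L1_CACHE_BYTES drops the low 6 bits, which are identical for all
 * similarly-aligned objects, and "% HASH_SIZE" then keeps the next 7 bits,
 * so bits 6..12 of the key select one of the 128 buckets.
 */
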
/* hash_lock & entry->group->mark_mutex is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list;

	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	WARN_ON_ONCE(!chunk->key);
	list = chunk_hash(chunk->key);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/*
		 * We use a data dependency barrier in READ_ONCE() to make sure
		 * the chunk we see is fully initialized.
		 */
		if (READ_ONCE(p->key) == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

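/*
 * Caller-side sketch (illustrative; the real callers live in auditsc.c):
 * the reference taken by audit_tree_lookup() must eventually be dropped
 * with audit_put_chunk() once rule matching is done:
 *
 *	rcu_read_lock();
 *	chunk = audit_tree_lookup(inode);
 *	rcu_read_unlock();
 *	if (chunk) {
 *		... match via audit_tree_match(chunk, tree) ...
 *		audit_put_chunk(chunk);
 *	}
 */
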
bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */

static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

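/*
 * Worked example of the arithmetic above: alloc_chunk() sets
 * owners[i].index = i and replace_chunk() preserves "index == array slot"
 * in the low bits, so for p == &chunk->owners[2] we get index == 2,
 * p -= 2 lands on &chunk->owners[0], and container_of() recovers the chunk.
 */
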
static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old,
			  struct node *skip)
{
	struct audit_tree *owner;
	int i, j;

	new->key = old->key;
	list_splice_init(&old->trees, &new->trees);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	for (i = j = 0; j < old->count; i++, j++) {
		if (&old->owners[j] == skip) {
			i--;
			continue;
		}
		owner = old->owners[j].owner;
		new->owners[i].owner = owner;
		new->owners[i].index = old->owners[j].index - j + i;
		if (!owner) /* result of earlier fallback */
			continue;
		get_tree(owner);
		list_replace_init(&old->owners[j].list, &new->owners[i].list);
	}
	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	list_replace_rcu(&old->hash, &new->hash);
}

static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)
{
	struct audit_tree *owner = p->owner;

	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
}

static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = chunk->mark;
	struct audit_chunk *new = NULL;
	int size = chunk->count - 1;

	remove_chunk_node(chunk, p);
	fsnotify_get_mark(entry);
	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	mutex_lock(&entry->group->mark_mutex);
	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->obj getting NULL.
	 */
	if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		mutex_unlock(&entry->group->mark_mutex);
		if (new)
			fsnotify_put_mark(new->mark);
		goto out;
	}

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(entry);
		mutex_unlock(&entry->group->mark_mutex);
		fsnotify_free_mark(entry);
		goto out;
	}

	if (!new)
		goto out_mutex;

	if (fsnotify_add_mark_locked(new->mark, entry->connector->obj,
				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
		fsnotify_put_mark(new->mark);
		goto out_mutex;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(new, chunk, p);
	spin_unlock(&hash_lock);
	fsnotify_detach_mark(entry);
	mutex_unlock(&entry->group->mark_mutex);
	fsnotify_free_mark(entry);
	fsnotify_put_mark(new->mark);	/* drop initial reference */
	goto out;

out_mutex:
	mutex_unlock(&entry->group->mark_mutex);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

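/*
 * Locking contract worth spelling out: untag_chunk() is entered with
 * hash_lock held, drops it around the allocation and fsnotify calls, and
 * re-takes it before returning, so callers like prune_one() and
 * trim_marked() can keep walking tree->chunks under hash_lock.
 */
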
/* Call with group->mark_mutex held, releases it */
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);

	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		return -ENOMEM;
	}

	entry = chunk->mark;
	if (fsnotify_add_inode_mark_locked(entry, inode, 0)) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		fsnotify_detach_mark(entry);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_free_mark(entry);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	chunk->key = inode_to_key(inode);
	/*
	 * Inserting into the hash table has to go last as once we do that RCU
	 * readers can see the chunk.
	 */
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	mutex_lock(&audit_tree_group->mark_mutex);
	old_entry = fsnotify_find_mark(&inode->i_fsnotify_marks,
				       audit_tree_group);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = mark_chunk(old_entry);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			mutex_unlock(&audit_tree_group->mark_mutex);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = chunk->mark;

	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->obj getting NULL.
	 */
	if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		/* old_entry is being shot, lets just lie */
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(old_entry);
		fsnotify_put_mark(chunk->mark);
		return -ENOENT;
	}

	if (fsnotify_add_mark_locked(chunk_entry, old_entry->connector->obj,
				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		fsnotify_detach_mark(chunk_entry);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_free_mark(chunk_entry);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	p = &chunk->owners[chunk->count - 1];
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(chunk, old, NULL);
	spin_unlock(&hash_lock);
	fsnotify_detach_mark(old_entry);
	mutex_unlock(&audit_tree_group->mark_mutex);
	fsnotify_free_mark(old_entry);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
	return 0;
}

static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	if (!audit_enabled)
		return;
	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);
		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder: put the marked chunks first */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
	       (unsigned long)arg;
}

void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)(chunk->key),
					   root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{

	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		audit_ctl_lock();
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		audit_ctl_unlock();
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

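/*
 * Note: the wakeup above pairs with the TASK_INTERRUPTIBLE sleep in
 * prune_tree_thread(); victims are queued on prune_list before the thread
 * is woken, so a wakeup that races with an already-running pass of the
 * thread is simply absorbed.
 */
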
/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	audit_ctl_lock();
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	audit_ctl_unlock();
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}

static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   u32 mask, const void *data, int data_type,
				   const unsigned char *file_name, u32 cookie,
				   struct fsnotify_iter_info *iter_info)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = mark_chunk(entry);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(refcount_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
	.free_mark = audit_tree_destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC);

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);