#include "audit.h"
#include <linux/inotify.h>
#include <linux/namei.h>
#include <linux/mount.h>
struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};
struct audit_chunk {
	struct list_head hash;
	struct inotify_watch watch;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};
static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list			hash_lock
 * tree.rules anchors rule.rlist				audit_filter_mutex
 * chunk.trees anchors tree.same_root				hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.						RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index lets us get from node.list to the containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */
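/*
 * Rough sketch of the reader side (the audit_inode() paths do roughly
 * this; the locals below are only for illustration):
 *
 *	rcu_read_lock();
 *	chunk = audit_tree_lookup(inode);	(takes a .refs reference)
 *	rcu_read_unlock();
 *	if (chunk) {
 *		matched = audit_tree_match(chunk, rule->tree);
 *		audit_put_chunk(chunk);
 *	}
 */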
static struct inotify_handle *rtree_ih;
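/*
 * alloc_tree() - allocate a tree for one rule pathname.  The path is
 * stored inline after the struct, and the caller gets the initial
 * reference on tree->count.
 */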
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}
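/*
 * Tree refcounting: get_tree()/put_tree() pair up around every pointer
 * kept in a chunk's owners[] slot; the final put frees the tree only
 * after an RCU grace period, since lockless walkers may still be
 * looking at it.
 */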
static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static void __put_tree(struct rcu_head *rcu)
{
	struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
	kfree(tree);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		call_rcu(&tree->head, __put_tree);
}
/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}
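/*
 * alloc_chunk() - allocate a chunk with room for @count owner slots.
 * Every slot gets an empty list head and its own array index stored in
 * .index; the "will prune" MSB starts out clear.
 */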
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	inotify_init_watch(&chunk->watch);
	return chunk;
}
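/*
 * Chunk lifetime: free_chunk() drops the tree references held in the
 * owners[] slots and frees the chunk; audit_put_chunk() calls it once
 * the last .refs reference goes away, and __put_chunk() is the RCU
 * callback used when the embedded watch is finally destroyed.
 */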
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}
void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}
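/*
 * Chunks are hashed by the address of the inode they watch.  Writers
 * (insert_hash() and the list_replace_rcu() calls in the tagging code)
 * hold hash_lock; audit_tree_lookup() walks the buckets under
 * rcu_read_lock() only.
 */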
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}
/* hash_lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list = chunk_hash(chunk->watch.inode);
	list_add_rcu(&chunk->hash, list);
}
/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		if (p->watch.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}
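/*
 * audit_tree_match() - does @chunk carry a tag on behalf of @tree?
 * A plain linear scan of the owners[] slots, used by rule matching
 * once a reference to the chunk has been taken.
 */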
int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}

/* tagging and untagging inodes with trees */
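/*
 * Getting from a node back to its chunk: owners[i].index keeps i in its
 * low bits, so masking off the "will prune" MSB and stepping back that
 * many array elements lands on owners[0], and container_of() does the
 * rest.  E.g. for the node at owners[2], index & ~(1U<<31) == 2 and
 * p - 2 == &chunk->owners[0].
 */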
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
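/*
 * untag_chunk() - drop one owner from a chunk.  Called with hash_lock
 * held and returns with it re-acquired; in between it takes the inode's
 * inotify_mutex, builds a replacement chunk with one slot fewer,
 * switches the hash entry over with list_replace_rcu() and evicts the
 * old watch.  If allocation or watch cloning fails, it falls back to
 * clearing the owner slot in place.
 */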
static void untag_chunk(struct node *p)
	struct audit_chunk *chunk = find_chunk(p);
	struct audit_chunk *new;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	if (!pin_inotify_watch(&chunk->watch)) {
	/*
	 * Filesystem is shutting down; all watches are getting
	 * evicted, just take it off the node list for this
	 * tree and let the eviction logic take care of the rest.
	 */
	if (owner->root == chunk) {
	list_del_init(&owner->same_root);
	list_del_init(&p->list);
	spin_unlock(&hash_lock);
	/*
	 * pin_inotify_watch() succeeded, so the watch won't go away
	 * from under us.
	 */
	mutex_lock(&chunk->watch.inode->inotify_mutex);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
	spin_lock(&hash_lock);
	list_del_init(&chunk->trees);
	if (owner->root == chunk)
	list_del_init(&p->list);
	list_del_rcu(&chunk->hash);
	spin_unlock(&hash_lock);
	inotify_evict_watch(&chunk->watch);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
	put_inotify_watch(&chunk->watch);
	new = alloc_chunk(size);
	if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
	list_del_init(&owner->same_root);
	for (i = j = 0; i < size; i++, j++) {
	struct audit_tree *s;
	if (&chunk->owners[j] == p) {
	list_del_init(&p->list);
	s = chunk->owners[j].owner;
	new->owners[i].owner = s;
	new->owners[i].index = chunk->owners[j].index - j + i;
	if (!s) /* result of earlier fallback */
	list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
	spin_unlock(&hash_lock);
	inotify_evict_watch(&chunk->watch);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
	put_inotify_watch(&chunk->watch);
	/* do the best we can */
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
	list_del_init(&owner->same_root);
	list_del_init(&p->list);
	spin_unlock(&hash_lock);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
	unpin_inotify_watch(&chunk->watch);
	spin_lock(&hash_lock);
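/*
 * create_chunk() - tag an inode that has no chunk yet: allocate a
 * single-owner chunk, add the inotify watch and, under inotify_mutex
 * and hash_lock, hook it into the tree's lists and the hash.  The new
 * tagging starts out with the "will prune" bit set so a failed
 * operation can still be reverted.
 */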
static int create_chunk(struct inode *inode, struct audit_tree *tree)
	struct audit_chunk *chunk = alloc_chunk(1);
	if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
	mutex_lock(&inode->inotify_mutex);
	spin_lock(&hash_lock);
	spin_unlock(&hash_lock);
	inotify_evict_watch(&chunk->watch);
	mutex_unlock(&inode->inotify_mutex);
	put_inotify_watch(&chunk->watch);
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	list_add(&chunk->owners[0].list, &tree->chunks);
	list_add(&tree->same_root, &chunk->trees);
	spin_unlock(&hash_lock);
	mutex_unlock(&inode->inotify_mutex);

/* the first tagged inode becomes root of tree */
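/*
 * tag_chunk() - add @tree as an owner of the chunk on @inode.  If there
 * is no watch yet, fall back to create_chunk(); if the tree already
 * owns a slot, nothing to do; otherwise build a chunk with one more
 * slot, migrate the old owners over, mark the new slot "will prune" and
 * replace the old chunk in the hash.
 */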
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
	struct inotify_watch *watch;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
	return create_chunk(inode, tree);
	old = container_of(watch, struct audit_chunk, watch);
	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
	if (old->owners[n].owner == tree) {
	spin_unlock(&hash_lock);
	put_inotify_watch(watch);
	spin_unlock(&hash_lock);
	chunk = alloc_chunk(old->count + 1);
	mutex_lock(&inode->inotify_mutex);
	if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
	mutex_unlock(&inode->inotify_mutex);
	put_inotify_watch(&old->watch);
	spin_lock(&hash_lock);
	spin_unlock(&hash_lock);
	inotify_evict_watch(&chunk->watch);
	mutex_unlock(&inode->inotify_mutex);
	put_inotify_watch(&old->watch);
	put_inotify_watch(&chunk->watch);
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
	struct audit_tree *s = old->owners[n].owner;
	p->index = old->owners[n].index;
	if (!s) /* result of fallback in untag */
	list_replace_init(&old->owners[n].list, &p->list);
	p->index = (chunk->count - 1) | (1U<<31);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
	list_add(&tree->same_root, &chunk->trees);
	spin_unlock(&hash_lock);
	inotify_evict_watch(&old->watch);
	mutex_unlock(&inode->inotify_mutex);
	put_inotify_watch(&old->watch);
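/*
 * kill_rules() - detach every rule still attached to @tree, emit an
 * AUDIT_CONFIG_CHANGE record naming the rule's directory and key, and
 * free the audit_entry via RCU.  tree->rules is protected by
 * audit_filter_mutex (see the list table at the top of the file).
 */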
static void kill_rules(struct audit_tree *tree)
	struct audit_krule *rule, *next;
	struct audit_entry *entry;
	struct audit_buffer *ab;
	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
	entry = container_of(rule, struct audit_entry, rule);
	list_del_init(&rule->rlist);
	/* not a half-baked one */
	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	audit_log_format(ab, "op=remove rule dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	if (rule->filterkey) {
	audit_log_format(ab, " key=");
	audit_log_untrustedstring(ab, rule->filterkey);
	audit_log_format(ab, " key=(null)");
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	list_del_rcu(&entry->list);
	list_del(&entry->rule.list);
	call_rcu(&entry->rcu, audit_free_rule_rcu);
/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
	p = list_entry(victim->chunks.next, struct node, list);
	spin_unlock(&hash_lock);

/* trim the uncommitted chunks from tree */
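/*
 * trim_marked() does two passes under hash_lock: first every node with
 * the "will prune" bit set is rotated to the front of tree->chunks,
 * then chunks are untagged from the front until the first unmarked node
 * is met.  A tree that lost its root chunk in the process is taken off
 * tree_list under audit_filter_mutex.
 */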
static void trim_marked(struct audit_tree *tree)
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	spin_unlock(&hash_lock);
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
	struct node *node = list_entry(p, struct node, list);
	if (node->index & (1U<<31)) {
	list_add(p, &tree->chunks);
	while (!list_empty(&tree->chunks)) {
	node = list_entry(tree->chunks.next, struct node, list);
	/* have we run out of marked? */
	if (!(node->index & (1U<<31)))
	if (!tree->root && !tree->goner) {
	spin_unlock(&hash_lock);
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	mutex_unlock(&audit_filter_mutex);
	spin_unlock(&hash_lock);

/* called with audit_filter_mutex */
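/*
 * audit_remove_tree_rule() detaches the rule from its tree; if that was
 * the tree's last rule, the tree is moved to prune_list and the prune
 * thread is kicked via audit_schedule_prune().
 */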
int audit_remove_tree_rule(struct audit_krule *rule)
	struct audit_tree *tree;
	spin_lock(&hash_lock);
	list_del_init(&rule->rlist);
	if (list_empty(&tree->rules) && !tree->goner) {
	list_del_init(&tree->same_root);
	list_move(&tree->list, &prune_list);
	spin_unlock(&hash_lock);
	audit_schedule_prune();
	spin_unlock(&hash_lock);
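/*
 * audit_trim_trees() walks tree_list with a cursor entry so that
 * audit_filter_mutex can be dropped while the pathname is resolved and
 * its mounts collected.  Every chunk of the tree is then marked, the
 * mark is cleared for chunks whose inode is still the root of one of
 * the collected mounts, and whatever stays marked gets trimmed.
 */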
void audit_trim_trees(void)
	struct list_head cursor;
	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
	struct audit_tree *tree;
	struct vfsmount *root_mnt;
	struct list_head list;
	tree = container_of(cursor.next, struct audit_tree, list);
	list_add(&cursor, &tree->list);
	mutex_unlock(&audit_filter_mutex);
	err = kern_path(tree->pathname, 0, &path);
	root_mnt = collect_mounts(path.mnt, path.dentry);
	list_add_tail(&list, &root_mnt->mnt_list);
	spin_lock(&hash_lock);
	list_for_each_entry(node, &tree->chunks, list) {
	struct audit_chunk *chunk = find_chunk(node);
	struct inode *inode = chunk->watch.inode;
	struct vfsmount *mnt;
	node->index |= 1U<<31;
	list_for_each_entry(mnt, &list, mnt_list) {
	if (mnt->mnt_root->d_inode == inode) {
	node->index &= ~(1U<<31);
	spin_unlock(&hash_lock);
	list_del_init(&list);
	drop_collected_mounts(root_mnt);
	mutex_lock(&audit_filter_mutex);
	mutex_unlock(&audit_filter_mutex);
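/*
 * is_under() - is the location given by (@mnt, @dentry) inside the
 * subtree rooted at @path?  Climb the mount stack until we either hit
 * a root (not under) or reach @path's mount, then let is_subdir()
 * decide on the dentries.
 */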
static int is_under(struct vfsmount *mnt, struct dentry *dentry,
		    struct path *path)
	if (mnt != path->mnt) {
	if (mnt->mnt_parent == mnt)
	if (mnt->mnt_parent == path->mnt)
	mnt = mnt->mnt_parent;
	dentry = mnt->mnt_mountpoint;
	return is_subdir(dentry, path->dentry);
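/*
 * audit_make_tree() attaches a fresh audit_tree to a rule being built:
 * only absolute paths, only AUDIT_FILTER_EXIT rules, and not combined
 * with an inode, watch or another tree.
 */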
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
	if (pathname[0] != '/' ||
	rule->listnr != AUDIT_FILTER_EXIT ||
	rule->inode_f || rule->watch || rule->tree)
	rule->tree = alloc_tree(pathname);

void audit_put_tree(struct audit_tree *tree)

/* called with audit_filter_mutex */
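/*
 * audit_add_tree_rule() reuses an existing tree if another rule already
 * watches the same pathname; otherwise the new tree goes on tree_list,
 * the mutex is dropped while the path is resolved and its mounts
 * collected, every mount root is tagged with tag_chunk(), and the
 * "will prune" marks left on the chunks are cleared at the end.
 */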
int audit_add_tree_rule(struct audit_krule *rule)
	struct audit_tree *seed = rule->tree, *tree;
	struct vfsmount *mnt, *p;
	struct list_head list;
	list_for_each_entry(tree, &tree_list, list) {
	if (!strcmp(seed->pathname, tree->pathname)) {
	list_add(&rule->rlist, &tree->rules);
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);
	err = kern_path(tree->pathname, 0, &path);
	mnt = collect_mounts(path.mnt, path.dentry);
	list_add_tail(&list, &mnt->mnt_list);
	list_for_each_entry(p, &list, mnt_list) {
	err = tag_chunk(p->mnt_root->d_inode, tree);
	drop_collected_mounts(mnt);
	spin_lock(&hash_lock);
	list_for_each_entry(node, &tree->chunks, list)
	node->index &= ~(1U<<31);
	spin_unlock(&hash_lock);
	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
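/*
 * audit_tag_tree() collects the mounts under @new and, for every tree
 * whose watched path contains @old, tags the root inode of each
 * collected mount.  Cursor and barrier entries on tree_list let
 * audit_filter_mutex be dropped for the path lookups; the second loop
 * clears the temporary "will prune" marks on the chunks just tagged.
 */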
int audit_tag_tree(char *old, char *new)
	struct list_head cursor, barrier;
	struct vfsmount *tagged;
	struct list_head list;
	struct vfsmount *mnt;
	struct dentry *dentry;
	err = kern_path(new, 0, &path);
	tagged = collect_mounts(path.mnt, path.dentry);
	err = kern_path(old, 0, &path);
	drop_collected_mounts(tagged);
	mnt = mntget(path.mnt);
	dentry = dget(path.dentry);
	if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
	follow_up(&mnt, &dentry);
	list_add_tail(&list, &tagged->mnt_list);
	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);
	while (cursor.next != &tree_list) {
	struct audit_tree *tree;
	tree = container_of(cursor.next, struct audit_tree, list);
	list_add(&cursor, &tree->list);
	mutex_unlock(&audit_filter_mutex);
	err = kern_path(tree->pathname, 0, &path);
	mutex_lock(&audit_filter_mutex);
	spin_lock(&vfsmount_lock);
	if (!is_under(mnt, dentry, &path)) {
	spin_unlock(&vfsmount_lock);
	mutex_lock(&audit_filter_mutex);
	spin_unlock(&vfsmount_lock);
	list_for_each_entry(p, &list, mnt_list) {
	failed = tag_chunk(p->mnt_root->d_inode, tree);
	mutex_lock(&audit_filter_mutex);
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	list_del(&tree->list);
	list_add(&tree->list, &tree_list);
	spin_unlock(&hash_lock);
	while (barrier.prev != &tree_list) {
	struct audit_tree *tree;
	tree = container_of(barrier.prev, struct audit_tree, list);
	list_del(&tree->list);
	list_add(&tree->list, &barrier);
	mutex_unlock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	list_for_each_entry(node, &tree->chunks, list)
	node->index &= ~(1U<<31);
	spin_unlock(&hash_lock);
	mutex_lock(&audit_filter_mutex);
	mutex_unlock(&audit_filter_mutex);
	drop_collected_mounts(tagged);
/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread, with audit_cmd_mutex held.
 */
void audit_prune_trees(void)
	mutex_lock(&audit_filter_mutex);
	while (!list_empty(&prune_list)) {
	struct audit_tree *victim;
	victim = list_entry(prune_list.next, struct audit_tree, list);
	list_del_init(&victim->list);
	mutex_unlock(&audit_filter_mutex);
	mutex_lock(&audit_filter_mutex);
	mutex_unlock(&audit_filter_mutex);

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

/* inode->inotify_mutex is locked */
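/*
 * evict_chunk() - the watched inode is going away.  Under
 * audit_filter_mutex and hash_lock it detaches every tree rooted at
 * this chunk, queues them on prune_list and schedules the prune thread,
 * then unhashes the chunk and empties its owner slots.
 */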
static void evict_chunk(struct audit_chunk *chunk)
	struct audit_tree *owner;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
	owner = list_entry(chunk->trees.next,
			   struct audit_tree, same_root);
	list_del_init(&owner->same_root);
	spin_unlock(&hash_lock);
	list_move(&owner->list, &prune_list);
	audit_schedule_prune();
	spin_lock(&hash_lock);
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
	list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
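/*
 * handle_event() only acts on IN_IGNORED, which inotify delivers once
 * the watch is gone (inode evicted or watch removed): the chunk is torn
 * down and the reference held for the pending event is dropped.
 * destroy_watch() then drops the watch's reference to the chunk after
 * an RCU grace period.
 */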
static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
			 u32 cookie, const char *dname, struct inode *inode)
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
	if (mask & IN_IGNORED) {
	put_inotify_watch(watch);

static void destroy_watch(struct inotify_watch *watch)
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
	call_rcu(&chunk->head, __put_chunk);
static const struct inotify_operations rtree_inotify_ops = {
	.handle_event	= handle_event,
	.destroy_watch	= destroy_watch,
};
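/*
 * Boot-time setup: create the inotify handle shared by all rectree
 * watches and initialize the hash buckets.
 */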
static int __init audit_tree_init(void)
{
	int i;

	rtree_ih = inotify_init(&rtree_inotify_ops);
	if (IS_ERR(rtree_ih))
		audit_panic("cannot initialize inotify handle for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);