asedeno.scripts.mit.edu Git - linux.git/commitdiff
fsnotify: convert fsnotify_mark.refcnt from atomic_t to refcount_t
author Elena Reshetova <elena.reshetova@intel.com>
Fri, 20 Oct 2017 10:26:02 +0000 (13:26 +0300)
committer Jan Kara <jack@suse.cz>
Tue, 31 Oct 2017 16:54:56 +0000 (17:54 +0100)
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable fsnotify_mark.refcnt is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: David Windsor <dwindsor@gmail.com>
Reviewed-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
fs/notify/inotify/inotify_user.c
fs/notify/mark.c
include/linux/fsnotify_backend.h
kernel/audit_tree.c

index 7cc7d3fb1862fcb557933e63ec66361cf8bcfb7a..d3c20e0bb046d21c33c0b0a9e712c571f49b690c 100644 (file)
@@ -376,7 +376,7 @@ static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group
 
                fsnotify_get_mark(fsn_mark);
                /* One ref for being in the idr, one ref we just took */
-               BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
+               BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
        }
 
        return i_mark;
@@ -446,7 +446,7 @@ static void inotify_remove_from_idr(struct fsnotify_group *group,
         * One ref for being in the idr
         * one ref grabbed by inotify_idr_find
         */
-       if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 2)) {
+       if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
                printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
                         __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
                /* we can't really recover with bad ref cnting.. */
index f3a32ea15b4959a03c17543908810ac1fe9b34f7..e9191b416434e68d7794bb5dcf8784c1a402f144 100644 (file)
@@ -105,8 +105,8 @@ static DECLARE_WORK(connector_reaper_work, fsnotify_connector_destroy_workfn);
 
 void fsnotify_get_mark(struct fsnotify_mark *mark)
 {
-       WARN_ON_ONCE(!atomic_read(&mark->refcnt));
-       atomic_inc(&mark->refcnt);
+       WARN_ON_ONCE(!refcount_read(&mark->refcnt));
+       refcount_inc(&mark->refcnt);
 }
 
 static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
@@ -201,7 +201,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
 
        /* Catch marks that were actually never attached to object */
        if (!mark->connector) {
-               if (atomic_dec_and_test(&mark->refcnt))
+               if (refcount_dec_and_test(&mark->refcnt))
                        fsnotify_final_mark_destroy(mark);
                return;
        }
@@ -210,7 +210,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
         * We have to be careful so that traversals of obj_list under lock can
         * safely grab mark reference.
         */
-       if (!atomic_dec_and_lock(&mark->refcnt, &mark->connector->lock))
+       if (!refcount_dec_and_lock(&mark->refcnt, &mark->connector->lock))
                return;
 
        conn = mark->connector;
@@ -258,7 +258,7 @@ static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
        if (!mark)
                return true;
 
-       if (atomic_inc_not_zero(&mark->refcnt)) {
+       if (refcount_inc_not_zero(&mark->refcnt)) {
                spin_lock(&mark->lock);
                if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) {
                        /* mark is attached, group is still alive then */
@@ -335,7 +335,7 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark)
 
        WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
        WARN_ON_ONCE(!srcu_read_lock_held(&fsnotify_mark_srcu) &&
-                    atomic_read(&mark->refcnt) < 1 +
+                    refcount_read(&mark->refcnt) < 1 +
                        !!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED));
 
        spin_lock(&mark->lock);
@@ -737,7 +737,7 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
 {
        memset(mark, 0, sizeof(*mark));
        spin_lock_init(&mark->lock);
-       atomic_set(&mark->refcnt, 1);
+       refcount_set(&mark->refcnt, 1);
        fsnotify_get_group(group);
        mark->group = group;
 }
index 744e2b9969fccad69ff9eb349cfdd421d2418794..9bcb43953f4e8517333d6e06dca0ae3736ac33b4 100644 (file)
@@ -242,7 +242,7 @@ struct fsnotify_mark {
        __u32 mask;
        /* We hold one for presence in g_list. Also one ref for each 'thing'
         * in kernel that found and may be using this mark. */
-       atomic_t refcnt;
+       refcount_t refcnt;
        /* Group this mark is for. Set on mark creation, stable until last ref
         * is dropped */
        struct fsnotify_group *group;
index 011d46e5f73fb448634c603fc474f479fe3e790b..45ec960ad5362df0a8dc0eae6bf6c71772a44ee3 100644 (file)
@@ -1007,7 +1007,7 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify
         * We are guaranteed to have at least one reference to the mark from
         * either the inode or the caller of fsnotify_destroy_mark().
         */
-       BUG_ON(atomic_read(&entry->refcnt) < 1);
+       BUG_ON(refcount_read(&entry->refcnt) < 1);
 }
 
 static const struct fsnotify_ops audit_tree_ops = {