aio: cleanup: flatten kill_ioctx()
author		Benjamin LaHaise <bcrl@kvack.org>
		Tue, 29 Apr 2014 16:55:48 +0000 (12:55 -0400)
committer	Benjamin LaHaise <bcrl@kvack.org>
		Tue, 29 Apr 2014 16:55:48 +0000 (12:55 -0400)
There is no need to have most of the code in kill_ioctx() indented.  Flatten
it.

Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
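
The change itself is the usual early-return (guard clause) refactor: test the
failure condition up front and bail out, instead of wrapping the whole success
path in one large if block. A minimal standalone sketch of that pattern, with
made-up names and no kernel dependencies (not the actual kill_ioctx() code),
looks like this; the diff below shows the same transformation applied to
kill_ioctx().

	/*
	 * Sketch only: illustrates flattening by early return.
	 * struct widget and the teardown functions are hypothetical.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct widget {
		bool dead;
		int value;
	};

	/* Before: the entire success path is nested inside one if block. */
	static int teardown_nested(struct widget *w)
	{
		if (!w->dead) {
			w->dead = true;
			w->value = 0;
			return 0;
		}
		return -1;
	}

	/*
	 * After: handle the failure case first, so the real work needs no
	 * extra indentation.  Behaviour is identical.
	 */
	static int teardown_flat(struct widget *w)
	{
		if (w->dead)
			return -1;

		w->dead = true;
		w->value = 0;
		return 0;
	}

	int main(void)
	{
		struct widget a = { .dead = false }, b = { .dead = false };

		printf("%d %d\n", teardown_nested(&a), teardown_flat(&b));
		return 0;
	}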
fs/aio.c

index 044c1c86decc139a71b6e7fc7cdc54242beefbc6..79b7e692f5b30d53dccae609059269744420fd5f 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -730,39 +730,39 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
                struct completion *requests_done)
 {
-       if (!atomic_xchg(&ctx->dead, 1)) {
-               struct kioctx_table *table;
+       struct kioctx_table *table;
 
-               spin_lock(&mm->ioctx_lock);
-               rcu_read_lock();
-               table = rcu_dereference(mm->ioctx_table);
+       if (atomic_xchg(&ctx->dead, 1))
+               return -EINVAL;
 
-               WARN_ON(ctx != table->table[ctx->id]);
-               table->table[ctx->id] = NULL;
-               rcu_read_unlock();
-               spin_unlock(&mm->ioctx_lock);
 
-               /* percpu_ref_kill() will do the necessary call_rcu() */
-               wake_up_all(&ctx->wait);
+       spin_lock(&mm->ioctx_lock);
+       rcu_read_lock();
+       table = rcu_dereference(mm->ioctx_table);
+
+       WARN_ON(ctx != table->table[ctx->id]);
+       table->table[ctx->id] = NULL;
+       rcu_read_unlock();
+       spin_unlock(&mm->ioctx_lock);
 
-               /*
-                * It'd be more correct to do this in free_ioctx(), after all
-                * the outstanding kiocbs have finished - but by then io_destroy
-                * has already returned, so io_setup() could potentially return
-                * -EAGAIN with no ioctxs actually in use (as far as userspace
-                *  could tell).
-                */
-               aio_nr_sub(ctx->max_reqs);
+       /* percpu_ref_kill() will do the necessary call_rcu() */
+       wake_up_all(&ctx->wait);
 
-               if (ctx->mmap_size)
-                       vm_munmap(ctx->mmap_base, ctx->mmap_size);
+       /*
+        * It'd be more correct to do this in free_ioctx(), after all
+        * the outstanding kiocbs have finished - but by then io_destroy
+        * has already returned, so io_setup() could potentially return
+        * -EAGAIN with no ioctxs actually in use (as far as userspace
+        *  could tell).
+        */
+       aio_nr_sub(ctx->max_reqs);
 
-               ctx->requests_done = requests_done;
-               percpu_ref_kill(&ctx->users);
-               return 0;
-       }
+       if (ctx->mmap_size)
+               vm_munmap(ctx->mmap_base, ctx->mmap_size);
 
-       return -EINVAL;
+       ctx->requests_done = requests_done;
+       percpu_ref_kill(&ctx->users);
+       return 0;
 }
 
 /* wait_on_sync_kiocb: