asedeno.scripts.mit.edu Git - linux.git/blobdiff - kernel/exit.c
Merge branches 'pm-core', 'pm-qos', 'pm-domains' and 'pm-opp'
[linux.git] / kernel / exit.c
index aacff8e2aec0b4f9cb41703722e3ba9435c73082..b67c57faa705d991f87b13e6b17bde64afea9131 100644 (file)
@@ -55,8 +55,9 @@
 #include <linux/shm.h>
 #include <linux/kcov.h>
 #include <linux/random.h>
+#include <linux/rcuwait.h>
 
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
@@ -86,7 +87,7 @@ static void __exit_signal(struct task_struct *tsk)
        bool group_dead = thread_group_leader(tsk);
        struct sighand_struct *sighand;
        struct tty_struct *uninitialized_var(tty);
-       cputime_t utime, stime;
+       u64 utime, stime;
 
        sighand = rcu_dereference_check(tsk->sighand,
                                        lockdep_tasklist_lock_is_held());
@@ -282,6 +283,35 @@ struct task_struct *task_rcu_dereference(struct task_struct **ptask)
        return task;
 }
 
+void rcuwait_wake_up(struct rcuwait *w)
+{
+       struct task_struct *task;
+
+       rcu_read_lock();
+
+       /*
+        * Order condition vs @task, such that everything prior to the load
+        * of @task is visible. This is the condition as to why the user called
+        * rcuwait_wake_up() in the first place. Pairs with set_current_state()
+        * barrier (A) in rcuwait_wait_event().
+        *
+        *    WAIT                WAKE
+        *    [S] tsk = current   [S] cond = true
+        *        MB (A)              MB (B)
+        *    [L] cond            [L] tsk
+        */
+       smp_rmb(); /* (B) */
+
+       /*
+        * Avoid using task_rcu_dereference() magic as long as we are careful,
+        * see comment in rcuwait_wait_event() regarding ->exit_state.
+        */
+       task = rcu_dereference(w->task);
+       if (task)
+               wake_up_process(task);
+       rcu_read_unlock();
+}
+
 struct task_struct *try_get_task_struct(struct task_struct **ptask)
 {
        struct task_struct *task;
@@ -468,12 +498,12 @@ void mm_update_next_owner(struct mm_struct *mm)
  * Turn us into a lazy TLB process if we
  * aren't already..
  */
-static void exit_mm(struct task_struct *tsk)
+static void exit_mm(void)
 {
-       struct mm_struct *mm = tsk->mm;
+       struct mm_struct *mm = current->mm;
        struct core_state *core_state;
 
-       mm_release(tsk, mm);
+       mm_release(current, mm);
        if (!mm)
                return;
        sync_mm_rss(mm);
@@ -491,7 +521,7 @@ static void exit_mm(struct task_struct *tsk)
 
                up_read(&mm->mmap_sem);
 
-               self.task = tsk;
+               self.task = current;
                self.next = xchg(&core_state->dumper.next, &self);
                /*
                 * Implies mb(), the result of xchg() must be visible
@@ -501,22 +531,22 @@ static void exit_mm(struct task_struct *tsk)
                        complete(&core_state->startup);
 
                for (;;) {
-                       set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+                       set_current_state(TASK_UNINTERRUPTIBLE);
                        if (!self.task) /* see coredump_finish() */
                                break;
                        freezable_schedule();
                }
-               __set_task_state(tsk, TASK_RUNNING);
+               __set_current_state(TASK_RUNNING);
                down_read(&mm->mmap_sem);
        }
        atomic_inc(&mm->mm_count);
-       BUG_ON(mm != tsk->active_mm);
+       BUG_ON(mm != current->active_mm);
        /* more a memory barrier than a real lock */
-       task_lock(tsk);
-       tsk->mm = NULL;
+       task_lock(current);
+       current->mm = NULL;
        up_read(&mm->mmap_sem);
        enter_lazy_tlb(mm, current);
-       task_unlock(tsk);
+       task_unlock(current);
        mm_update_next_owner(mm);
        mmput(mm);
        if (test_thread_flag(TIF_MEMDIE))
@@ -823,7 +853,7 @@ void __noreturn do_exit(long code)
        tsk->exit_code = code;
        taskstats_exit(tsk, group_dead);
 
-       exit_mm(tsk);
+       exit_mm();
 
        if (group_dead)
                acct_process();
@@ -1091,7 +1121,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
                struct signal_struct *sig = p->signal;
                struct signal_struct *psig = current->signal;
                unsigned long maxrss;
-               cputime_t tgutime, tgstime;
+               u64 tgutime, tgstime;
 
                /*
                 * The resource counters for the group leader are in its