android: binder: drop lru lock in isolate callback
author    Sherry Yang <sherryy@android.com>
          Tue, 3 Oct 2017 23:15:00 +0000 (16:15 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 4 Oct 2017 00:54:24 +0000 (17:54 -0700)
Drop the global lru lock in the isolate callback before calling
zap_page_range(), which calls cond_resched(), and re-acquire the global
lru lock before returning.  Also change the return code to
LRU_REMOVED_RETRY.

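For context, the pattern being adopted looks roughly like the sketch
below.  This is a minimal illustration rather than the binder code
itself: do_sleeping_work() is a hypothetical stand-in for
zap_page_range(), while the callback signature matches the list_lru
walker of this kernel.  The walker holds the lru spinlock across
callbacks, so any sleeping work has to be bracketed by
spin_unlock()/spin_lock(), and LRU_REMOVED_RETRY tells the walker that
the lock was dropped in between.

    #include <linux/kernel.h>
    #include <linux/list_lru.h>
    #include <linux/spinlock.h>

    /* Hypothetical helper standing in for zap_page_range(). */
    static void do_sleeping_work(struct list_head *item)
    {
            might_sleep();
    }

    static enum lru_status example_isolate(struct list_head *item,
                                           struct list_lru_one *lru,
                                           spinlock_t *lock, void *cb_arg)
    {
            /* Unlink the item while the lru lock is still held. */
            list_lru_isolate(lru, item);
            spin_unlock(lock);

            do_sleeping_work(item);     /* may cond_resched() */

            spin_lock(lock);
            /* Report that the lock was dropped and re-acquired. */
            return LRU_REMOVED_RETRY;
    }
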
Use mmput_async() when we fail to acquire the mmap semaphore in an
atomic context.

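As an illustration of that rule (a sketch, not code from this patch;
put_mm_from_any_context() is made up for the example): mmput() may take
the slow path and sleep when it drops the final reference, so a caller
that can run in atomic context has to hand that work off instead:

    #include <linux/preempt.h>
    #include <linux/sched/mm.h>

    /*
     * Hypothetical wrapper; in_atomic() is used only for illustration
     * and is not a reliable test on all configurations.
     */
    static void put_mm_from_any_context(struct mm_struct *mm)
    {
            if (in_atomic())
                    mmput_async(mm);  /* final __mmput() deferred to a workqueue */
            else
                    mmput(mm);        /* may sleep dropping the last reference */
    }
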
Fix "BUG: sleeping function called from invalid context"
errors when CONFIG_DEBUG_ATOMIC_SLEEP is enabled.

Also restore mmput_async, which was initially introduced in commit
ec8d7c14ea14 ("mm, oom_reaper: do not mmput synchronously from the oom
reaper context"), and was removed in commit 212925802454 ("mm: oom: let
oom_reap_task and exit_mmap run concurrently").

Link: http://lkml.kernel.org/r/20170914182231.90908-1-sherryy@android.com
Fixes: f2517eb76f1f2 ("android: binder: Add global lru shrinker to binder")
Signed-off-by: Sherry Yang <sherryy@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reported-by: Kyle Yan <kyan@codeaurora.org>
Acked-by: Arve Hjønnevåg <arve@android.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Martijn Coenen <maco@google.com>
Cc: Todd Kjos <tkjos@google.com>
Cc: Riley Andrews <riandrews@android.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Hoeun Ryu <hoeun.ryu@gmail.com>
Cc: Christopher Lameter <cl@linux.com>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
drivers/android/binder_alloc.c
include/linux/sched/mm.h
kernel/fork.c

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 8fe165844e4708a3646a69384fe55db592fbe87f..064f5e31ec55253e830740b4f99f3ba15ee20f1f 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -913,6 +913,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
        struct binder_alloc *alloc;
        uintptr_t page_addr;
        size_t index;
+       struct vm_area_struct *vma;
 
        alloc = page->alloc;
        if (!mutex_trylock(&alloc->mutex))
@@ -923,16 +924,22 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
-       if (alloc->vma) {
+       vma = alloc->vma;
+       if (vma) {
                mm = get_task_mm(alloc->tsk);
                if (!mm)
                        goto err_get_task_mm_failed;
                if (!down_write_trylock(&mm->mmap_sem))
                        goto err_down_write_mmap_sem_failed;
+       }
+
+       list_lru_isolate(lru, item);
+       spin_unlock(lock);
 
+       if (vma) {
                trace_binder_unmap_user_start(alloc, index);
 
-               zap_page_range(alloc->vma,
+               zap_page_range(vma,
                               page_addr + alloc->user_buffer_offset,
                               PAGE_SIZE);
 
@@ -950,13 +957,12 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
        trace_binder_unmap_kernel_end(alloc, index);
 
-       list_lru_isolate(lru, item);
-
+       spin_lock(lock);
        mutex_unlock(&alloc->mutex);
-       return LRU_REMOVED;
+       return LRU_REMOVED_RETRY;
 
 err_down_write_mmap_sem_failed:
-       mmput(mm);
+       mmput_async(mm);
 err_get_task_mm_failed:
 err_page_already_freed:
        mutex_unlock(&alloc->mutex);
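
(Worth noting, as an observation on the hunk above rather than part of
the changelog: both error labels are reached before spin_unlock(lock),
i.e. with the lru lock still held, which is exactly why the mmput()
there must become the non-sleeping mmput_async().)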
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 3a19c253bdb1c52faaed181db73077d6eec6e989..ae53e413fb1311ddc1a9c826e1634fa86f083599 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -84,6 +84,12 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
 
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
+#ifdef CONFIG_MMU
+/* same as above but performs the slow path from the async context. Can
+ * be called from the atomic context as well
+ */
+void mmput_async(struct mm_struct *);
+#endif
 
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
diff --git a/kernel/fork.c b/kernel/fork.c
index 10646182440fa4c3a3a8da7d9fdb7b45f01aab26..e702cb9ffbd85ab7ff388c17654f729614d0b296 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -946,6 +946,24 @@ void mmput(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(mmput);
 
+#ifdef CONFIG_MMU
+static void mmput_async_fn(struct work_struct *work)
+{
+       struct mm_struct *mm = container_of(work, struct mm_struct,
+                                           async_put_work);
+
+       __mmput(mm);
+}
+
+void mmput_async(struct mm_struct *mm)
+{
+       if (atomic_dec_and_test(&mm->mm_users)) {
+               INIT_WORK(&mm->async_put_work, mmput_async_fn);
+               schedule_work(&mm->async_put_work);
+       }
+}
+#endif
+
 /**
  * set_mm_exe_file - change a reference to the mm's executable file
  *
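
(A note on the restored helper, as an observation rather than part of
the changelog: embedding async_put_work in the mm_struct itself is safe
because mmput_async() only queues the work after atomic_dec_and_test()
has seen mm_users reach zero.  No other reference holder remains to race
for that field, and __mmput() then performs the same teardown the
synchronous path would, just from process context.)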