index 029fc2f3b395054a08595dca3ec38bae63877261..45fc3169e7b0fdb2677db32083615c12bd5f4b7c 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -287,7 +287,7 @@ int vma_is_stack_for_current(struct vm_area_struct *vma)
 }
 
 #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
-void arch_pick_mmap_layout(struct mm_struct *mm)
+void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 {
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
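
The generic helper above now receives the stack rlimit from its caller instead of looking it up itself; architectures with their own arch_pick_mmap_layout() can consult that snapshot when choosing between a bottom-up and a top-down layout. A minimal userspace sketch of the snapshot-then-decide pattern follows, under the assumption that an unlimited stack selects the legacy bottom-up layout (as some architectures do); pick_layout_demo() and TASK_UNMAPPED_BASE_DEMO are illustrative names, not kernel API:

/*
 * Hedged sketch, not kernel code: the caller snapshots RLIMIT_STACK once and
 * hands that snapshot to the layout decision, so a concurrent setrlimit()
 * cannot change the value half-way through. All names are illustrative.
 */
#include <stdio.h>
#include <sys/resource.h>

#define TASK_UNMAPPED_BASE_DEMO 0x10000000UL	/* made-up constant */

static void pick_layout_demo(const struct rlimit *rlim_stack)
{
	/* Assumption: an unlimited stack picks the legacy bottom-up layout. */
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		printf("legacy layout, mmap base = %#lx\n",
		       TASK_UNMAPPED_BASE_DEMO);
	else
		printf("top-down layout, stack limit = %llu bytes\n",
		       (unsigned long long)rlim_stack->rlim_cur);
}

int main(void)
{
	struct rlimit rlim_stack;

	if (getrlimit(RLIMIT_STACK, &rlim_stack) != 0)
		return 1;
	pick_layout_demo(&rlim_stack);	/* pass the snapshot down */
	return 0;
}
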
@@ -297,8 +297,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 /*
  * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
  * back to the regular GUP.
- * If the architecture not support this function, simply return with no
- * page pinned
+ * Note a difference with get_user_pages_fast: this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
+ * If the architecture does not support this function, simply return with no
+ * pages pinned.
  */
 int __weak __get_user_pages_fast(unsigned long start,
                                 int nr_pages, int write, struct page **pages)
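
The reworded comment pins down the contract: the function always reports how many pages it managed to pin, and the __weak stub used when an architecture provides no implementation pins nothing and returns 0, never a negative error. A self-contained userspace analogue of that weak-default pattern, with illustrative names only (demo_gup_fast(), struct demo_page):

/*
 * Hedged sketch, userspace analogue of the __weak default above: the generic
 * definition pins nothing and returns 0; an architecture would override it
 * with a strong definition. demo_gup_fast() and struct demo_page are
 * illustrative names, not the kernel API.
 */
#include <stdio.h>

struct demo_page { int id; };

/* Weak default: no pages pinned, return 0 rather than a negative error. */
__attribute__((weak))
int demo_gup_fast(unsigned long start, int nr_pages, int write,
		  struct demo_page **pages)
{
	(void)start; (void)nr_pages; (void)write; (void)pages;
	return 0;
}

int main(void)
{
	struct demo_page *pages[4] = { NULL };
	int pinned = demo_gup_fast(0x1000, 4, 0, pages);

	/* Callers must handle anything from 0 to nr_pages being pinned. */
	printf("pinned %d of 4 pages\n", pinned);
	return 0;
}
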
@@ -667,6 +669,13 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
                 */
                free += global_node_page_state(NR_SLAB_RECLAIMABLE);
 
+               /*
+                * Part of the kernel memory, which can be released
+                * under memory pressure.
+                */
+               free += global_node_page_state(
+                       NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT;
+
                /*
                 * Leave reserved pages. The pages are not for anonymous pages.
                 */
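
Unlike the surrounding counters, the new NR_INDIRECTLY_RECLAIMABLE_BYTES statistic is kept in bytes, so the overcommit check shifts it down by PAGE_SHIFT before adding it to the page-based free estimate. A tiny sketch of that conversion, assuming the common 4 KiB page size (PAGE_SHIFT of 12):

/*
 * Hedged sketch: the counter above is tracked in bytes, so the overcommit
 * code converts it to pages with a right shift. DEMO_PAGE_SHIFT of 12
 * (4 KiB pages) is an assumption for illustration.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12

int main(void)
{
	unsigned long indirectly_reclaimable_bytes = 3UL << 20;	/* 3 MiB */
	unsigned long pages = indirectly_reclaimable_bytes >> DEMO_PAGE_SHIFT;

	/* 3 MiB of reclaimable kernel memory counts as 768 4 KiB pages. */
	printf("%lu bytes -> %lu pages\n", indirectly_reclaimable_bytes, pages);
	return 0;
}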