mm: /proc/pid/smaps: factor out mem stats gathering
author     Vlastimil Babka <vbabka@suse.cz>
           Wed, 22 Aug 2018 04:52:52 +0000 (21:52 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 22 Aug 2018 17:52:44 +0000 (10:52 -0700)
To prepare for handling /proc/pid/smaps_rollup differently from
/proc/pid/smaps, factor out vma mem stats gathering from show_smap() -
it will be used by both.

Link: http://lkml.kernel.org/r/20180723111933.15443-3-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Daniel Colascione <dancol@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
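
The commit message notes that the factored-out helper will be used by both
/proc/pid/smaps and the upcoming /proc/pid/smaps_rollup handling. As a rough
illustration only, a rollup-style caller could accumulate stats across every
VMA by reusing smap_gather_stats(). The helper name gather_rollup_stats() and
the loop below are hypothetical and not part of this commit; the sketch
assumes the caller already holds mmap_sem for read and that the mm still uses
the vma->vm_next list walk of kernels from this era.

    /*
     * Hypothetical sketch (not part of this commit): accumulate stats for
     * every VMA in an mm by reusing smap_gather_stats(), which only adds
     * into *mss and never touches the seq_file.  Caller must hold
     * mm->mmap_sem for read.
     */
    static void gather_rollup_stats(struct mm_struct *mm,
                                    struct mem_size_stats *mss)
    {
            struct vm_area_struct *vma;

            memset(mss, 0, sizeof(*mss));
            for (vma = mm->mmap; vma; vma = vma->vm_next)
                    smap_gather_stats(vma, mss);
    }
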
fs/proc/task_mmu.c

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index a3f98ca5098199f73321a12709746e14b36f1450..d2ca88c92d9dd441a8b4b024431d7aa186688080 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -702,14 +702,9 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
 }
 #endif /* HUGETLB_PAGE */
 
-#define SEQ_PUT_DEC(str, val) \
-               seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
-static int show_smap(struct seq_file *m, void *v)
+static void smap_gather_stats(struct vm_area_struct *vma,
+                            struct mem_size_stats *mss)
 {
-       struct proc_maps_private *priv = m->private;
-       struct vm_area_struct *vma = v;
-       struct mem_size_stats mss_stack;
-       struct mem_size_stats *mss;
        struct mm_walk smaps_walk = {
                .pmd_entry = smaps_pte_range,
 #ifdef CONFIG_HUGETLB_PAGE
@@ -717,23 +712,6 @@ static int show_smap(struct seq_file *m, void *v)
 #endif
                .mm = vma->vm_mm,
        };
-       int ret = 0;
-       bool rollup_mode;
-       bool last_vma;
-
-       if (priv->rollup) {
-               rollup_mode = true;
-               mss = priv->rollup;
-               if (mss->first) {
-                       mss->first_vma_start = vma->vm_start;
-                       mss->first = false;
-               }
-               last_vma = !m_next_vma(priv, vma);
-       } else {
-               rollup_mode = false;
-               memset(&mss_stack, 0, sizeof(mss_stack));
-               mss = &mss_stack;
-       }
 
        smaps_walk.private = mss;
 
@@ -765,6 +743,35 @@ static int show_smap(struct seq_file *m, void *v)
        walk_page_vma(vma, &smaps_walk);
        if (vma->vm_flags & VM_LOCKED)
                mss->pss_locked += mss->pss;
+}
+
+#define SEQ_PUT_DEC(str, val) \
+               seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
+static int show_smap(struct seq_file *m, void *v)
+{
+       struct proc_maps_private *priv = m->private;
+       struct vm_area_struct *vma = v;
+       struct mem_size_stats mss_stack;
+       struct mem_size_stats *mss;
+       int ret = 0;
+       bool rollup_mode;
+       bool last_vma;
+
+       if (priv->rollup) {
+               rollup_mode = true;
+               mss = priv->rollup;
+               if (mss->first) {
+                       mss->first_vma_start = vma->vm_start;
+                       mss->first = false;
+               }
+               last_vma = !m_next_vma(priv, vma);
+       } else {
+               rollup_mode = false;
+               memset(&mss_stack, 0, sizeof(mss_stack));
+               mss = &mss_stack;
+       }
+
+       smap_gather_stats(vma, mss);
 
        if (!rollup_mode) {
                show_map_vma(m, vma);