diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ca0bc6e6be1316602def0ac42bc4ffddef14cf08..ba9138a4a1de37f744eaf488fc0930b82f60b3e1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -691,11 +691,12 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
        if (mem_cgroup_disabled())
                return;
 
+       __this_cpu_add(memcg->vmstats_local->stat[idx], val);
+
        x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
        if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
                struct mem_cgroup *mi;
 
-               atomic_long_add(x, &memcg->vmstats_local[idx]);
                for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
                        atomic_long_add(x, &mi->vmstats[idx]);
                x = 0;
@@ -745,11 +746,12 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
        __mod_memcg_state(memcg, idx, val);
 
        /* Update lruvec */
+       __this_cpu_add(pn->lruvec_stat_local->count[idx], val);
+
        x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
        if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
                struct mem_cgroup_per_node *pi;
 
-               atomic_long_add(x, &pn->lruvec_stat_local[idx]);
                for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
                        atomic_long_add(x, &pi->lruvec_stat[idx]);
                x = 0;
@@ -771,11 +773,12 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
        if (mem_cgroup_disabled())
                return;
 
+       __this_cpu_add(memcg->vmstats_local->events[idx], count);
+
        x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
        if (unlikely(x > MEMCG_CHARGE_BATCH)) {
                struct mem_cgroup *mi;
 
-               atomic_long_add(x, &memcg->vmevents_local[idx]);
                for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
                        atomic_long_add(x, &mi->vmevents[idx]);
                x = 0;
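
The three hunks above share a single pattern: every update now lands
immediately in an unbatched per-cpu "local" slot (the new
__this_cpu_add() lines), while propagation into the hierarchical
counters stays batched behind MEMCG_CHARGE_BATCH; the events hunk can
drop the abs() test because event counts only ever grow. Below is a
minimal userspace model of that write path, not kernel code: NCPUS,
BATCH, struct group and mod_state() are all illustrative assumptions.

#include <stdatomic.h>
#include <stdlib.h>

#define NCPUS	4
#define BATCH	32			/* stand-in for MEMCG_CHARGE_BATCH */

struct group {
	long local[NCPUS];		/* unbatched per-cpu "local" counter */
	long pending[NCPUS];		/* per-cpu delta not yet propagated */
	atomic_long vmstat;		/* hierarchical counter (self + children) */
	struct group *parent;
};

/* Models __mod_memcg_state(): local update immediate, tree update batched. */
static void mod_state(struct group *g, int cpu, long val)
{
	long x;

	g->local[cpu] += val;		/* the new __this_cpu_add() line */

	x = val + g->pending[cpu];
	if (labs(x) > BATCH) {		/* batch overflow: flush up the chain */
		struct group *gi;

		for (gi = g; gi; gi = gi->parent)
			atomic_fetch_add(&gi->vmstat, x);
		x = 0;
	}
	g->pending[cpu] = x;
}

int main(void)
{
	struct group root = {0}, child = {0};

	child.parent = &root;
	mod_state(&child, 0, BATCH + 1);	/* exceeds BATCH: both levels updated */
	mod_state(&child, 1, 1);		/* small delta: stays pending on cpu 1 */
	return 0;
}

The split buys a hot path that pays one plain per-cpu add for the local
value instead of an atomic read-modify-write, while the shared
hierarchical counters see at most one atomic per BATCH worth of updates.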
@@ -790,7 +793,12 @@ static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
 
 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
 {
-       return atomic_long_read(&memcg->vmevents_local[event]);
+       long x = 0;
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               x += per_cpu(memcg->vmstats_local->events[event], cpu);
+       return x;
 }
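
memcg_events_local() now derives the "local" value by summing every
possible CPU's slot instead of reading one atomic. Continuing the
illustrative model above (same struct group and NCPUS; read_local() is
an assumed name):

/* Models memcg_events_local(): sum the per-cpu slots on demand. */
static long read_local(const struct group *g)
{
	long x = 0;
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)	/* for_each_possible_cpu() */
		x += g->local[cpu];
	return x;
}

Reads become a walk over all possible CPUs, which appears to be the
intended trade for counters that are updated constantly but read only
occasionally.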
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
@@ -2191,11 +2199,9 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
                        long x;
 
                        x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
-                       if (x) {
-                               atomic_long_add(x, &memcg->vmstats_local[i]);
+                       if (x)
                                for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
                                        atomic_long_add(x, &mi->vmstats[i]);
-                       }
 
                        if (i >= NR_VM_NODE_STAT_ITEMS)
                                continue;
@@ -2205,12 +2211,10 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
 
                                pn = mem_cgroup_nodeinfo(memcg, nid);
                                x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
-                               if (x) {
-                                       atomic_long_add(x, &pn->lruvec_stat_local[i]);
+                               if (x)
                                        do {
                                                atomic_long_add(x, &pn->lruvec_stat[i]);
                                        } while ((pn = parent_nodeinfo(pn, nid)));
-                               }
                        }
                }
 
@@ -2218,11 +2222,9 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
                        long x;
 
                        x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
-                       if (x) {
-                               atomic_long_add(x, &memcg->vmevents_local[i]);
+                       if (x)
                                for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
                                        atomic_long_add(x, &mi->vmevents[i]);
-                       }
                }
        }
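
The three hotplug hunks are the flip side of the same change: when a
CPU dies, only its batched, not-yet-propagated delta has to be swapped
to zero and pushed up the hierarchy; its "local" slots can stay in
place, since readers now sum over all possible CPUs anyway. A sketch of
that drain in the same model, with a plain swap standing in for
this_cpu_xchg():

/*
 * Models memcg_hotplug_cpu_dead() for one counter: flush the dead
 * CPU's pending delta into every level of the hierarchy, and leave
 * local[] untouched so read_local() keeps seeing its contribution.
 */
static void drain_dead_cpu(struct group *g, int cpu)
{
	long x = g->pending[cpu];	/* models this_cpu_xchg(..., 0) */

	g->pending[cpu] = 0;
	if (x) {
		struct group *gi;

		for (gi = g; gi; gi = gi->parent)
			atomic_fetch_add(&gi->vmstat, x);
	}
}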
 
@@ -4483,8 +4485,15 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
        if (!pn)
                return 1;
 
+       pn->lruvec_stat_local = alloc_percpu(struct lruvec_stat);
+       if (!pn->lruvec_stat_local) {
+               kfree(pn);
+               return 1;
+       }
+
        pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
        if (!pn->lruvec_stat_cpu) {
+               free_percpu(pn->lruvec_stat_local);
                kfree(pn);
                return 1;
        }
@@ -4506,6 +4515,7 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
                return;
 
        free_percpu(pn->lruvec_stat_cpu);
+       free_percpu(pn->lruvec_stat_local);
        kfree(pn);
 }
 
@@ -4516,6 +4526,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
        for_each_node(node)
                free_mem_cgroup_per_node_info(memcg, node);
        free_percpu(memcg->vmstats_percpu);
+       free_percpu(memcg->vmstats_local);
        kfree(memcg);
 }
 
@@ -4544,6 +4555,10 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
        if (memcg->id.id < 0)
                goto fail;
 
+       memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
+       if (!memcg->vmstats_local)
+               goto fail;
+
        memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu);
        if (!memcg->vmstats_percpu)
                goto fail;
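
The remaining hunks wire up the new percpu area's lifetime: it is
allocated ahead of the existing one, each later failure unwinds
whatever was already allocated, and the free paths release in reverse
order (in mem_cgroup_alloc() the unwinding is centralized behind goto
fail, which is safe because free_percpu() accepts NULL). The same
pattern in plain C, with alloc_node_info()/free_node_info() as
hypothetical stand-ins, calloc() in place of alloc_percpu(), and NCPUS
as above:

#include <stdlib.h>

struct stats { long count[NCPUS]; };

struct node_info {
	struct stats *stat_local;	/* models pn->lruvec_stat_local */
	struct stats *stat_cpu;		/* models pn->lruvec_stat_cpu */
};

/* Models alloc_mem_cgroup_per_node_info(): unwind on every failure path. */
static struct node_info *alloc_node_info(void)
{
	struct node_info *pn = calloc(1, sizeof(*pn));

	if (!pn)
		return NULL;

	pn->stat_local = calloc(1, sizeof(*pn->stat_local));
	if (!pn->stat_local) {
		free(pn);
		return NULL;
	}

	pn->stat_cpu = calloc(1, sizeof(*pn->stat_cpu));
	if (!pn->stat_cpu) {
		free(pn->stat_local);	/* undo the earlier allocation */
		free(pn);
		return NULL;
	}
	return pn;
}

/* Models free_mem_cgroup_per_node_info(): release in reverse order. */
static void free_node_info(struct node_info *pn)
{
	if (!pn)
		return;
	free(pn->stat_cpu);
	free(pn->stat_local);
	free(pn);
}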