asedeno.scripts.mit.edu Git - linux.git/blobdiff - mm/page_counter.c
ovl: Fix ovl_getattr() to get number of blocks from lower
[linux.git] / mm / page_counter.c
index 41937c9a9d11df8aff94d0f94e3e9d7da312b3a5..de31470655f66c3492b1858bd308eedd95917afd 100644 (file)
 #include <linux/bug.h>
 #include <asm/page.h>
 
+/*
+ * propagate_protected_usage - push a counter's protected usage up one level
+ * @c: counter whose usage just changed
+ * @usage: the counter's new usage, in pages
+ *
+ * Recomputes how much of @c's usage is protected by its min/low settings
+ * and folds the change into the parent's children_min_usage and
+ * children_low_usage aggregates.  Callers invoke this after every usage
+ * update, so the parent totals track the children without locking.
+ */
+static void propagate_protected_usage(struct page_counter *c,
+                                     unsigned long usage)
+{
+       unsigned long protected, old_protected;
+       long delta;
+
+       /* The root counter has no parent aggregate to maintain. */
+       if (!c->parent)
+               return;
+
+       /*
+        * Skip the min accounting only when both the configured floor and
+        * the previously recorded protected usage are zero — otherwise a
+        * stale nonzero min_usage must still be drained from the parent.
+        */
+       if (c->min || atomic_long_read(&c->min_usage)) {
+               /* Usage counts as protected only while within the floor. */
+               if (usage <= c->min)
+                       protected = usage;
+               else
+                       protected = 0;
+
+               /*
+                * Swap in the new protected value atomically; the unsigned
+                * difference, read as long, is the signed change to apply
+                * to the parent's aggregate.
+                */
+               old_protected = atomic_long_xchg(&c->min_usage, protected);
+               delta = protected - old_protected;
+               if (delta)
+                       atomic_long_add(delta, &c->parent->children_min_usage);
+       }
+
+       /* Same scheme for the best-effort low protection. */
+       if (c->low || atomic_long_read(&c->low_usage)) {
+               if (usage <= c->low)
+                       protected = usage;
+               else
+                       protected = 0;
+
+               old_protected = atomic_long_xchg(&c->low_usage, protected);
+               delta = protected - old_protected;
+               if (delta)
+                       atomic_long_add(delta, &c->parent->children_low_usage);
+       }
+}
+
 /**
  * page_counter_cancel - take pages out of the local counter
  * @counter: counter
@@ -23,6 +57,7 @@ void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
        long new;
 
        new = atomic_long_sub_return(nr_pages, &counter->usage);
+       propagate_protected_usage(counter, new);
        /* More uncharges than charges? */
        WARN_ON_ONCE(new < 0);
 }
@@ -42,6 +77,7 @@ void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
                long new;
 
                new = atomic_long_add_return(nr_pages, &c->usage);
+               propagate_protected_usage(counter, new);
                /*
                 * This is indeed racy, but we can live with some
                 * inaccuracy in the watermark.
@@ -85,6 +121,7 @@ bool page_counter_try_charge(struct page_counter *counter,
                new = atomic_long_add_return(nr_pages, &c->usage);
                if (new > c->max) {
                        atomic_long_sub(nr_pages, &c->usage);
+                       propagate_protected_usage(counter, new);
                        /*
                         * This is racy, but we can live with some
                         * inaccuracy in the failcnt.
@@ -93,6 +130,7 @@ bool page_counter_try_charge(struct page_counter *counter,
                        *fail = c;
                        goto failed;
                }
+               propagate_protected_usage(counter, new);
                /*
                 * Just like with failcnt, we can live with some
                 * inaccuracy in the watermark.
@@ -164,6 +202,40 @@ int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
        }
 }
 
+/**
+ * page_counter_set_min - set the amount of hard-protected memory
+ * @counter: counter
+ * @nr_pages: new protection floor, in pages
+ *
+ * Updates @counter's min setting and re-propagates the protected usage
+ * of every counter on the path to the root, so the ancestors'
+ * children_min_usage aggregates reflect the new floor.
+ *
+ * The caller must serialize invocations on the same counter.
+ */
+void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
+{
+       struct page_counter *c;
+
+       counter->min = nr_pages;
+
+       /* Re-evaluate protection at each level against its own usage. */
+       for (c = counter; c; c = c->parent)
+               propagate_protected_usage(c, atomic_long_read(&c->usage));
+}
+
+/**
+ * page_counter_set_low - set the amount of best-effort protected memory
+ * @counter: counter
+ * @nr_pages: new low-protection amount, in pages
+ *
+ * Updates @counter's low setting and re-propagates the protected usage
+ * of every counter on the path to the root, so the ancestors'
+ * children_low_usage aggregates reflect the new setting.
+ *
+ * The caller must serialize invocations on the same counter.
+ */
+void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
+{
+       struct page_counter *c;
+
+       counter->low = nr_pages;
+
+       /* Re-evaluate protection at each level against its own usage. */
+       for (c = counter; c; c = c->parent)
+               propagate_protected_usage(c, atomic_long_read(&c->usage));
+}
+
 /**
  * page_counter_memparse - memparse() for page counter limits
  * @buf: string to parse