diff --git a/mm/zswap.c b/mm/zswap.c
index d39581a076c3aed1e9db7b2408c3f8da9ef60415..c004aa4fd3f481e6686ecdfe78f64885637556b9 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -49,6 +49,8 @@
 static u64 zswap_pool_total_size;
 /* The number of compressed pages currently stored in zswap */
 static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
+/* The number of same-value filled pages currently stored in zswap */
+static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
 
 /*
  * The statistics below are not protected from concurrent access for
@@ -116,6 +118,11 @@ module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
 static unsigned int zswap_max_pool_percent = 20;
 module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
 
+/* Enable/disable handling same-value filled pages (enabled by default) */
+static bool zswap_same_filled_pages_enabled = true;
+module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
+                  bool, 0644);
+
 /*********************************
 * data structures
 **********************************/
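
Note: since the parameter is registered via module_param_named() with mode 0644, it should show up as /sys/module/zswap/parameters/same_filled_pages_enabled and be writable at runtime, and it can be set at boot with zswap.same_filled_pages_enabled=<bool> like the other zswap module parameters.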
@@ -145,9 +152,10 @@ struct zswap_pool {
  *            be held while changing the refcount.  Since the lock must
  *            be held, there is no reason to also make refcount atomic.
  * length - the length in bytes of the compressed page data.  Needed during
- *          decompression
+ *          decompression. For a same-value filled page, length is 0.
  * pool - the zswap_pool the entry's data is in
  * handle - zpool allocation handle that stores the compressed page data
+ * value - the value a same-value filled page is filled with (used when length is 0)
  */
 struct zswap_entry {
        struct rb_node rbnode;
@@ -155,7 +163,10 @@ struct zswap_entry {
        int refcount;
        unsigned int length;
        struct zswap_pool *pool;
-       unsigned long handle;
+       union {
+               unsigned long handle;
+               unsigned long value;
+       };
 };
 
 struct zswap_header {
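
The union above means a same-value filled entry reuses the storage of ->handle for ->value, with ->length == 0 acting as the discriminator between the two cases. A minimal userspace sketch of that discriminated-union idea (names below are hypothetical, not kernel API):

#include <stdio.h>

/* Illustration only: mirrors the handle/value overlay in struct zswap_entry. */
struct demo_entry {
	unsigned int length;		/* 0 => same-filled, ->value is live */
	union {
		unsigned long handle;	/* compressed page: zpool allocation handle */
		unsigned long value;	/* same-filled page: the repeated word */
	};
};

int main(void)
{
	struct demo_entry e = { .length = 0, .value = 0x5a5a5a5aUL };

	if (!e.length)
		printf("same-filled entry, word value %#lx\n", e.value);
	else
		printf("compressed entry, handle %#lx\n", e.handle);
	return 0;
}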
@@ -320,8 +331,12 @@ static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
  */
 static void zswap_free_entry(struct zswap_entry *entry)
 {
-       zpool_free(entry->pool->zpool, entry->handle);
-       zswap_pool_put(entry->pool);
+       if (!entry->length)
+               atomic_dec(&zswap_same_filled_pages);
+       else {
+               zpool_free(entry->pool->zpool, entry->handle);
+               zswap_pool_put(entry->pool);
+       }
        zswap_entry_cache_free(entry);
        atomic_dec(&zswap_stored_pages);
        zswap_update_total_size();
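
Because a same-filled entry never obtained a zpool allocation or a zswap_pool reference on the store side, freeing it only has to drop the same_filled_pages counter; the zpool_free()/zswap_pool_put() pair remains necessary for compressed entries.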
@@ -953,6 +968,28 @@ static int zswap_shrink(void)
        return ret;
 }
 
+static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
+{
+       unsigned int pos;
+       unsigned long *page;
+
+       page = (unsigned long *)ptr;
+       for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
+               if (page[pos] != page[0])
+                       return 0;
+       }
+       *value = page[0];
+       return 1;
+}
+
+static void zswap_fill_page(void *ptr, unsigned long value)
+{
+       unsigned long *page;
+
+       page = (unsigned long *)ptr;
+       memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
+}
+
 /*********************************
 * frontswap hooks
 **********************************/
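
A userspace sketch of the two helpers above may help when reasoning about the detection and refill logic; memset_l() is a kernel word-fill helper, so the sketch uses an explicit loop, and DEMO_PAGE_SIZE stands in for PAGE_SIZE:

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL	/* stand-in for PAGE_SIZE */

static int demo_is_page_same_filled(const void *ptr, unsigned long *value)
{
	const unsigned long *page = ptr;
	unsigned int pos;

	/* compare every word against the first one */
	for (pos = 1; pos < DEMO_PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos] != page[0])
			return 0;
	}
	*value = page[0];
	return 1;
}

static void demo_fill_page(void *ptr, unsigned long value)
{
	unsigned long *page = ptr;
	unsigned int pos;

	/* memset_l() in the kernel does this word-at-a-time fill */
	for (pos = 0; pos < DEMO_PAGE_SIZE / sizeof(*page); pos++)
		page[pos] = value;
}

int main(void)
{
	static unsigned long buf[DEMO_PAGE_SIZE / sizeof(unsigned long)];
	unsigned long v = 0;
	int same;

	demo_fill_page(buf, 0x55aa55aaUL);
	same = demo_is_page_same_filled(buf, &v);
	printf("same-filled: %d, value: %#lx\n", same, v);
	return 0;
}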
@@ -964,11 +1001,11 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
        struct zswap_entry *entry, *dupentry;
        struct crypto_comp *tfm;
        int ret;
-       unsigned int dlen = PAGE_SIZE, len;
-       unsigned long handle;
+       unsigned int hlen, dlen = PAGE_SIZE;
+       unsigned long handle, value;
        char *buf;
        u8 *src, *dst;
-       struct zswap_header *zhdr;
+       struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
 
        if (!zswap_enabled || !tree) {
                ret = -ENODEV;
@@ -993,6 +1030,19 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
                goto reject;
        }
 
+       if (zswap_same_filled_pages_enabled) {
+               src = kmap_atomic(page);
+               if (zswap_is_page_same_filled(src, &value)) {
+                       kunmap_atomic(src);
+                       entry->offset = offset;
+                       entry->length = 0;
+                       entry->value = value;
+                       atomic_inc(&zswap_same_filled_pages);
+                       goto insert_entry;
+               }
+               kunmap_atomic(src);
+       }
+
        /* if entry is successfully added, it keeps the reference */
        entry->pool = zswap_pool_current_get();
        if (!entry->pool) {
@@ -1013,8 +1063,8 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
        }
 
        /* store */
-       len = dlen + sizeof(struct zswap_header);
-       ret = zpool_malloc(entry->pool->zpool, len,
+       hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
+       ret = zpool_malloc(entry->pool->zpool, hlen + dlen,
                           __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
                           &handle);
        if (ret == -ENOSPC) {
@@ -1025,10 +1075,9 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
                zswap_reject_alloc_fail++;
                goto put_dstmem;
        }
-       zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
-       zhdr->swpentry = swp_entry(type, offset);
-       buf = (u8 *)(zhdr + 1);
-       memcpy(buf, dst, dlen);
+       buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
+       memcpy(buf, &zhdr, hlen);
+       memcpy(buf + hlen, dst, dlen);
        zpool_unmap_handle(entry->pool->zpool, handle);
        put_cpu_var(zswap_dstmem);
 
@@ -1037,6 +1086,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
        entry->handle = handle;
        entry->length = dlen;
 
+insert_entry:
        /* map */
        spin_lock(&tree->lock);
        do {
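
Two things are worth noting in the store-path hunks above. First, the new insert_entry label is the target of the same-filled short cut: a page whose words all match skips compression, zpool_malloc() and the header copy entirely and goes straight to the red-black tree insertion, with length 0 and only the repeated word kept in ->value. Second, independently of the same-filled work, the zswap_header (which records the swap entry needed for writeback) is now built on the stack and prepended only when zpool_evictable() reports that the backend can evict; for a non-evictable backend hlen is 0, memcpy(buf, &zhdr, 0) is a no-op, and sizeof(struct zswap_header) bytes are saved per stored page.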
@@ -1089,10 +1139,18 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
        }
        spin_unlock(&tree->lock);
 
+       if (!entry->length) {
+               dst = kmap_atomic(page);
+               zswap_fill_page(dst, entry->value);
+               kunmap_atomic(dst);
+               goto freeentry;
+       }
+
        /* decompress */
        dlen = PAGE_SIZE;
-       src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
-                       ZPOOL_MM_RO) + sizeof(struct zswap_header);
+       src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
+       if (zpool_evictable(entry->pool->zpool))
+               src += sizeof(struct zswap_header);
        dst = kmap_atomic(page);
        tfm = *get_cpu_ptr(entry->pool->tfm);
        ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
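
The load side mirrors the store path: a zero-length entry is reconstructed purely by refilling the page with ->value, with no zpool mapping and no decompression, while a compressed entry now skips the zswap_header offset only when the zpool is evictable, matching how the data was laid out at store time.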
@@ -1101,6 +1159,7 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
        zpool_unmap_handle(entry->pool->zpool, entry->handle);
        BUG_ON(ret);
 
+freeentry:
        spin_lock(&tree->lock);
        zswap_entry_put(tree, entry);
        spin_unlock(&tree->lock);
@@ -1209,6 +1268,8 @@ static int __init zswap_debugfs_init(void)
                        zswap_debugfs_root, &zswap_pool_total_size);
        debugfs_create_atomic_t("stored_pages", S_IRUGO,
                        zswap_debugfs_root, &zswap_stored_pages);
+       debugfs_create_atomic_t("same_filled_pages", 0444,
+                       zswap_debugfs_root, &zswap_same_filled_pages);
 
        return 0;
 }
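
With debugfs mounted (commonly at /sys/kernel/debug), the new counter should be readable from /sys/kernel/debug/zswap/same_filled_pages next to the existing stored_pages and pool_total_size files; the literal 0444 is equivalent to the S_IRUGO used for the older entries.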