/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/cpuhotplug.h>

#include "zram_drv.h"
static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static void zram_free_page(struct zram *zram, size_t index);
static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static unsigned long zram_get_handle(struct zram *zram, u32 index)
{
	return zram->table[index].handle;
}

static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{
	zram->table[index].handle = handle;
}

/* flag operations require table entry bit_spin_lock() being held */
static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].value &= ~BIT(flag);
}

static inline void zram_set_element(struct zram *zram, u32 index,
			unsigned long element)
{
	zram->table[index].element = element;
}

static unsigned long zram_get_element(struct zram *zram, u32 index)
{
	return zram->table[index].element;
}

static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
	return zram->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram *zram,
			u32 index, size_t size)
{
	unsigned long flags = zram->table[index].value >> ZRAM_FLAG_SHIFT;

	zram->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
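
/*
 * Rough layout sketch of a table entry's ->value word, as implied by
 * zram_get_obj_size()/zram_set_obj_size() above (exact widths come from
 * ZRAM_FLAG_SHIFT in zram_drv.h): the low ZRAM_FLAG_SHIFT bits hold the
 * compressed object size, and the bits above hold the zram_pageflags,
 * including the ZRAM_ACCESS bit used as a per-entry bit_spinlock.
 */
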
#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return false;
}
#endif
static void zram_revalidate_disk(struct zram *zram)
{
	revalidate_disk(zram->disk);
	/* revalidate_disk() clears BDI_CAP_STABLE_WRITES, so set it again */
	zram->disk->queue->backing_dev_info->capabilities |=
		BDI_CAP_STABLE_WRITES;
}
/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return false;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return false;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return false;

	/* I/O request is valid */
	return true;
}
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	*index += (*offset + bvec->bv_len) / PAGE_SIZE;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
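
/*
 * Example for update_position(): with 4K pages, a 2K bvec applied at a
 * 3K offset advances *index by one page and leaves *offset at 1K, since
 * (3K + 2K) / 4K = 1 and (3K + 2K) % 4K = 1K.
 */
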
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}
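
/*
 * update_used_max() is a lock-free maximum update: if another CPU raced
 * and changed stats.max_used_pages between the read and the cmpxchg,
 * the cmpxchg returns the new value and we retry. The loop ends once
 * the stored maximum is already >= @pages or our update won.
 */
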
static inline void zram_fill_page(char *ptr, unsigned long len,
					unsigned long value)
{
	int i;
	unsigned long *page = (unsigned long *)ptr;

	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));

	if (likely(value == 0)) {
		memset(ptr, 0, len);
	} else {
		for (i = 0; i < len / sizeof(*page); i++)
			page[i] = value;
	}
}
static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned int pos;
	unsigned long *page;
	unsigned long val;

	page = (unsigned long *)ptr;
	val = page[0];

	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (val != page[pos])
			return false;
	}

	*element = val;

	return true;
}
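
/*
 * A page is "same filled" when every word equals the first one; only
 * that single word (e.g. all zeroes) is then kept in the table entry's
 * ->element, and no zsmalloc object is allocated for the page at all.
 */
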
static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}
static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}
static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(zram->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}
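
/*
 * Writing "0" to mem_used_max resets the watermark to the current pool
 * size, e.g.:
 *	echo 0 > /sys/block/zram0/mem_used_max
 * Any other value is rejected with -EINVAL.
 */
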
#ifdef CONFIG_ZRAM_WRITEBACK
static bool zram_wb_enabled(struct zram *zram)
{
	return zram->backing_dev;
}

static void reset_bdev(struct zram *zram)
{
	struct block_device *bdev;

	if (!zram_wb_enabled(zram))
		return;

	bdev = zram->bdev;
	if (zram->old_block_size)
		set_blocksize(bdev, zram->old_block_size);
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	/* hope filp_close() flushes all of the IO */
	filp_close(zram->backing_dev, NULL);
	zram->backing_dev = NULL;
	zram->old_block_size = 0;
	zram->bdev = NULL;

	kvfree(zram->bitmap);
	zram->bitmap = NULL;
}
static ssize_t backing_dev_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct file *file = zram->backing_dev;
	char *p;
	ssize_t ret;

	down_read(&zram->init_lock);
	if (!zram_wb_enabled(zram)) {
		memcpy(buf, "none\n", 5);
		up_read(&zram->init_lock);
		return 5;
	}

	p = file_path(file, buf, PAGE_SIZE - 1);
	if (IS_ERR(p)) {
		ret = PTR_ERR(p);
		goto out;
	}

	ret = strlen(p);
	memmove(buf, p, ret);
	buf[ret++] = '\n';
out:
	up_read(&zram->init_lock);
	return ret;
}
static ssize_t backing_dev_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	char *file_name;
	struct file *backing_dev = NULL;
	struct inode *inode;
	struct address_space *mapping;
	unsigned int bitmap_sz, old_block_size = 0;
	unsigned long nr_pages, *bitmap = NULL;
	struct block_device *bdev = NULL;
	int err;
	struct zram *zram = dev_to_zram(dev);

	file_name = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!file_name)
		return -ENOMEM;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Can't setup backing device for initialized device\n");
		err = -EBUSY;
		goto out;
	}

	strlcpy(file_name, buf, len);

	backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
	if (IS_ERR(backing_dev)) {
		err = PTR_ERR(backing_dev);
		backing_dev = NULL;
		goto out;
	}

	mapping = backing_dev->f_mapping;
	inode = mapping->host;

	/* Support only block device at this moment */
	if (!S_ISBLK(inode->i_mode)) {
		err = -ENOTBLK;
		goto out;
	}

	bdev = bdgrab(I_BDEV(inode));
	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
	if (err < 0)
		goto out;

	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
	bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
	if (!bitmap) {
		err = -ENOMEM;
		goto out;
	}

	old_block_size = block_size(bdev);
	err = set_blocksize(bdev, PAGE_SIZE);
	if (err)
		goto out;

	reset_bdev(zram);
	spin_lock_init(&zram->bitmap_lock);

	zram->old_block_size = old_block_size;
	zram->bdev = bdev;
	zram->backing_dev = backing_dev;
	zram->bitmap = bitmap;
	zram->nr_pages = nr_pages;
	up_write(&zram->init_lock);

	pr_info("setup backing device %s\n", file_name);
	kfree(file_name);
	return len;
out:
	if (bitmap)
		kvfree(bitmap);
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	if (backing_dev)
		filp_close(backing_dev, NULL);
	up_write(&zram->init_lock);
	kfree(file_name);
	return err;
}
static unsigned long get_entry_bdev(struct zram *zram)
{
	unsigned long entry;

	spin_lock(&zram->bitmap_lock);
	/* skip bit 0 so a valid entry is never confused with zram.handle == 0 */
	entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1);
	if (entry == zram->nr_pages) {
		spin_unlock(&zram->bitmap_lock);
		return 0;
	}

	set_bit(entry, zram->bitmap);
	spin_unlock(&zram->bitmap_lock);

	return entry;
}

static void put_entry_bdev(struct zram *zram, unsigned long entry)
{
	int was_set;

	spin_lock(&zram->bitmap_lock);
	was_set = test_and_clear_bit(entry, zram->bitmap);
	spin_unlock(&zram->bitmap_lock);
	WARN_ON_ONCE(!was_set);
}
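
/*
 * The bitmap above is a minimal slot allocator for the backing device:
 * one bit per PAGE_SIZE slot, with bit 0 permanently reserved so that a
 * slot number of 0 can safely mean "no backing-device entry".
 */
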
static void zram_page_end_io(struct bio *bio)
{
	struct page *page = bio->bi_io_vec[0].bv_page;

	page_endio(page, op_is_write(bio_op(bio)),
			blk_status_to_errno(bio->bi_status));
	bio_put(bio);
}
458 * Returns 1 if the submission is successful.
460 static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
461 unsigned long entry, struct bio *parent)
465 bio = bio_alloc(GFP_ATOMIC, 1);
469 bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
470 bio_set_dev(bio, zram->bdev);
471 if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
477 bio->bi_opf = REQ_OP_READ;
478 bio->bi_end_io = zram_page_end_io;
480 bio->bi_opf = parent->bi_opf;
481 bio_chain(bio, parent);
struct zram_work {
	struct work_struct work;
	struct zram *zram;
	unsigned long entry;
	struct bio *bio;
};
#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
{
	struct bio_vec bvec;
	struct zram_work *zw = container_of(work, struct zram_work, work);
	struct zram *zram = zw->zram;
	unsigned long entry = zw->entry;
	struct bio *bio = zw->bio;

	read_from_bdev_async(zram, &bvec, entry, bio);
}
/*
 * The block layer wants only one ->make_request_fn to be active at a
 * time, so chaining IO with the parent IO in the same context would
 * deadlock. To avoid that, do the read from a worker thread context.
 */
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	struct zram_work work;

	work.zram = zram;
	work.entry = entry;
	work.bio = bio;

	INIT_WORK_ONSTACK(&work.work, zram_sync_read);
	queue_work(system_unbound_wq, &work.work);
	flush_work(&work.work);
	destroy_work_on_stack(&work.work);

	return 1;
}
#else
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	WARN_ON(1);
	return -EIO;
}
#endif
static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	if (sync)
		return read_from_bdev_sync(zram, bvec, entry, parent);
	else
		return read_from_bdev_async(zram, bvec, entry, parent);
}
static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
					u32 index, struct bio *parent,
					unsigned long *pentry)
{
	struct bio *bio;
	unsigned long entry;

	bio = bio_alloc(GFP_ATOMIC, 1);
	if (!bio)
		return -ENOMEM;

	entry = get_entry_bdev(zram);
	if (!entry) {
		bio_put(bio);
		return -ENOSPC;
	}

	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
	bio_set_dev(bio, zram->bdev);
	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len,
					bvec->bv_offset)) {
		bio_put(bio);
		put_entry_bdev(zram, entry);
		return -EIO;
	}

	if (!parent) {
		bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
		bio->bi_end_io = zram_page_end_io;
	} else {
		bio->bi_opf = parent->bi_opf;
		bio_chain(bio, parent);
	}

	submit_bio(bio);
	*pentry = entry;

	return 0;
}
static void zram_wb_clear(struct zram *zram, u32 index)
{
	unsigned long entry;

	zram_clear_flag(zram, index, ZRAM_WB);
	entry = zram_get_element(zram, index);
	zram_set_element(zram, index, 0);
	put_entry_bdev(zram, entry);
}
#else
static bool zram_wb_enabled(struct zram *zram) { return false; }
static inline void reset_bdev(struct zram *zram) {}
static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
					u32 index, struct bio *parent,
					unsigned long *pentry)
{
	return -EIO;
}

static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	return -EIO;
}

static void zram_wb_clear(struct zram *zram, u32 index) {}
#endif
/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}
static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return len;
}
static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}
static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	char compressor[ARRAY_SIZE(zram->compressor)];
	size_t sz;

	strlcpy(compressor, buf, sizeof(compressor));
	/* ignore trailing newline */
	sz = strlen(compressor);
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor))
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	strcpy(zram->compressor, compressor);
	up_write(&zram->init_lock);
	return len;
}
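
/*
 * The algorithm must be picked before the device is initialized, e.g.:
 *	echo lz4 > /sys/block/zram0/comp_algorithm
 *	echo 1G > /sys/block/zram0/disksize
 * Writes after disksize has been set fail with -EBUSY.
 */
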
static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	zs_compact(zram->mem_pool);
	up_read(&zram->init_lock);

	return len;
}
static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}
static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->mem_pool);
		zs_pool_stats(zram->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			pool_stats.pages_compacted);
	up_read(&zram->init_lock);

	return ret;
}
static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall));
	up_read(&zram->init_lock);

	return ret;
}
static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);
static void zram_slot_lock(struct zram *zram, u32 index)
{
	bit_spin_lock(ZRAM_ACCESS, &zram->table[index].value);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value);
}
static bool zram_same_page_read(struct zram *zram, u32 index,
				struct page *page,
				unsigned int offset, unsigned int len)
{
	zram_slot_lock(zram, index);
	if (unlikely(!zram_get_handle(zram, index) ||
			zram_test_flag(zram, index, ZRAM_SAME))) {
		void *mem;

		zram_slot_unlock(zram, index);
		mem = kmap_atomic(page);
		zram_fill_page(mem + offset, len,
				zram_get_element(zram, index));
		kunmap_atomic(mem);
		return true;
	}
	zram_slot_unlock(zram, index);

	return false;
}
static void zram_meta_free(struct zram *zram, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++)
		zram_free_page(zram, index);

	zs_destroy_pool(zram->mem_pool);
	vfree(zram->table);
}
static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
	size_t num_pages;

	num_pages = disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table)
		return false;

	zram->mem_pool = zs_create_pool(zram->disk->disk_name);
	if (!zram->mem_pool) {
		vfree(zram->table);
		return false;
	}

	return true;
}
/*
 * To protect concurrent access to the same index entry,
 * caller should hold this table index entry's bit_spinlock to
 * indicate this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	unsigned long handle;

	if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) {
		zram_wb_clear(zram, index);
		atomic64_dec(&zram->stats.pages_stored);
		return;
	}

	/*
	 * No memory is allocated for same element filled pages.
	 * Simply clear same page flag.
	 */
	if (zram_test_flag(zram, index, ZRAM_SAME)) {
		zram_clear_flag(zram, index, ZRAM_SAME);
		zram_set_element(zram, index, 0);
		atomic64_dec(&zram->stats.same_pages);
		atomic64_dec(&zram->stats.pages_stored);
		return;
	}

	handle = zram_get_handle(zram, index);
	if (!handle)
		return;

	zs_free(zram->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(zram, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	zram_set_handle(zram, index, 0);
	zram_set_obj_size(zram, index, 0);
}
static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
				struct bio *bio, bool partial_io)
{
	int ret;
	unsigned long handle;
	unsigned int size;
	void *src, *dst;

	if (zram_wb_enabled(zram)) {
		zram_slot_lock(zram, index);
		if (zram_test_flag(zram, index, ZRAM_WB)) {
			struct bio_vec bvec;

			zram_slot_unlock(zram, index);

			bvec.bv_page = page;
			bvec.bv_len = PAGE_SIZE;
			bvec.bv_offset = 0;
			return read_from_bdev(zram, &bvec,
					zram_get_element(zram, index),
					bio, partial_io);
		}
		zram_slot_unlock(zram, index);
	}

	if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
		return 0;

	zram_slot_lock(zram, index);
	handle = zram_get_handle(zram, index);
	size = zram_get_obj_size(zram, index);

	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE) {
		dst = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);
		ret = 0;
	} else {
		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

		dst = kmap_atomic(page);
		ret = zcomp_decompress(zstrm, src, size, dst);
		kunmap_atomic(dst);
		zcomp_stream_put(zram->comp);
	}
	zs_unmap_object(zram->mem_pool, handle);
	zram_slot_unlock(zram, index);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);

	return ret;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
	}

	ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
	if (unlikely(ret))
		goto out;

	if (is_partial_io(bvec)) {
		void *dst = kmap_atomic(bvec->bv_page);
		void *src = kmap_atomic(page);

		memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
		kunmap_atomic(src);
		kunmap_atomic(dst);
	}
out:
	if (is_partial_io(bvec))
		__free_page(page);

	return ret;
}
static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
				u32 index, struct bio *bio)
{
	int ret = 0;
	unsigned long alloced_pages;
	unsigned long handle = 0;
	unsigned int comp_len = 0;
	void *src, *dst, *mem;
	struct zcomp_strm *zstrm;
	struct page *page = bvec->bv_page;
	unsigned long element = 0;
	enum zram_pageflags flags = 0;
	bool allow_wb = true;

	mem = kmap_atomic(page);
	if (page_same_filled(mem, &element)) {
		kunmap_atomic(mem);
		/* Free memory associated with this sector now. */
		flags = ZRAM_SAME;
		atomic64_inc(&zram->stats.same_pages);
		goto out;
	}
	kunmap_atomic(mem);

compress_again:
	zstrm = zcomp_stream_get(zram->comp);
	src = kmap_atomic(page);
	ret = zcomp_compress(zstrm, src, &comp_len);
	kunmap_atomic(src);

	if (unlikely(ret)) {
		zcomp_stream_put(zram->comp);
		pr_err("Compression failed! err=%d\n", ret);
		zs_free(zram->mem_pool, handle);
		return ret;
	}

	if (unlikely(comp_len > max_zpage_size)) {
		if (zram_wb_enabled(zram) && allow_wb) {
			zcomp_stream_put(zram->comp);
			ret = write_to_bdev(zram, bvec, index, bio, &element);
			if (!ret) {
				flags = ZRAM_WB;
				ret = 1;
				goto out;
			}
			allow_wb = false;
			goto compress_again;
		}
		comp_len = PAGE_SIZE;
	}

	/*
	 * handle allocation has 2 paths:
	 * a) fast path is executed with preemption disabled (for
	 *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
	 *  since we can't sleep;
	 * b) slow path enables preemption and attempts to allocate
	 *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
	 *  put per-cpu compression stream and, thus, to re-do
	 *  the compression once handle is allocated.
	 *
	 * if we have a 'non-null' handle here then we are coming
	 * from the slow path and handle has already been allocated.
	 */
	if (!handle)
		handle = zs_malloc(zram->mem_pool, comp_len,
				__GFP_KSWAPD_RECLAIM |
				__GFP_NOWARN |
				__GFP_HIGHMEM |
				__GFP_MOVABLE);
	if (!handle) {
		zcomp_stream_put(zram->comp);
		atomic64_inc(&zram->stats.writestall);
		handle = zs_malloc(zram->mem_pool, comp_len,
				GFP_NOIO | __GFP_HIGHMEM |
				__GFP_MOVABLE);
		if (handle)
			goto compress_again;
		return -ENOMEM;
	}

	alloced_pages = zs_get_total_pages(zram->mem_pool);
	update_used_max(zram, alloced_pages);

	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zcomp_stream_put(zram->comp);
		zs_free(zram->mem_pool, handle);
		return -ENOMEM;
	}

	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

	src = zstrm->buffer;
	if (comp_len == PAGE_SIZE)
		src = kmap_atomic(page);
	memcpy(dst, src, comp_len);
	if (comp_len == PAGE_SIZE)
		kunmap_atomic(src);

	zcomp_stream_put(zram->comp);
	zs_unmap_object(zram->mem_pool, handle);
	atomic64_add(comp_len, &zram->stats.compr_data_size);
out:
	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	zram_slot_lock(zram, index);
	zram_free_page(zram, index);

	if (flags) {
		zram_set_flag(zram, index, flags);
		zram_set_element(zram, index, element);
	} else {
		zram_set_handle(zram, index, handle);
		zram_set_obj_size(zram, index, comp_len);
	}
	zram_slot_unlock(zram, index);

	/* Update stats */
	atomic64_inc(&zram->stats.pages_stored);
	return ret;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page = NULL;
	void *src;
	struct bio_vec vec;

	vec = *bvec;
	if (is_partial_io(bvec)) {
		void *dst;
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;

		ret = __zram_bvec_read(zram, page, index, bio, true);
		if (ret)
			goto out;

		src = kmap_atomic(bvec->bv_page);
		dst = kmap_atomic(page);
		memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		vec.bv_page = page;
		vec.bv_len = PAGE_SIZE;
		vec.bv_offset = 0;
	}

	ret = __zram_bvec_write(zram, &vec, index, bio);
out:
	if (is_partial_io(bvec))
		__free_page(page);

	return ret;
}
/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical with physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block. Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory. So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		zram_slot_lock(zram, index);
		zram_free_page(zram, index);
		zram_slot_unlock(zram, index);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}
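
/*
 * Example: with 4K pages, a 12K discard starting 512 bytes into a page
 * frees only the two pages it fully covers; the partially covered head
 * and tail are left intact, per the comment above.
 */
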
/*
 * Returns errno if it has some problem. Otherwise return 0 or 1.
 * Returns 0 if IO request was done synchronously
 * Returns 1 if IO request was successfully submitted.
 */
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, bool is_write, struct bio *bio)
{
	unsigned long start_time = jiffies;
	int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
	struct request_queue *q = zram->disk->queue;
	int ret;

	generic_start_io_acct(q, rw_acct, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (!is_write) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		flush_dcache_page(bvec->bv_page);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset, bio);
	}

	generic_end_io_acct(q, rw_acct, &zram->disk->part0, start_time);

	if (unlikely(ret < 0)) {
		if (!is_write)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio);
		return;
	default:
		break;
	}

	bio_for_each_segment(bvec, bio, iter) {
		struct bio_vec bv = bvec;
		unsigned int unwritten = bvec.bv_len;

		do {
			bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
							unwritten);
			if (zram_bvec_rw(zram, &bv, index, offset,
					op_is_write(bio_op(bio)), bio) < 0)
				goto out;

			bv.bv_offset += bv.bv_len;
			unwritten -= bv.bv_len;

			update_position(&index, &offset, &bv);
		} while (unwritten);
	}

	bio_endio(bio);
	return;

out:
	bio_io_error(bio);
}
/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	return BLK_QC_T_NONE;

error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;

	zram_slot_lock(zram, index);
	zram_free_page(zram, index);
	zram_slot_unlock(zram, index);
	atomic64_inc(&zram->stats.notify_free);
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	int offset, ret;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	if (PageTransHuge(page))
		return -ENOTSUPP;
	zram = bdev->bd_disk->private_data;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		ret = -EINVAL;
		goto out;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	ret = zram_bvec_rw(zram, &bv, index, offset, is_write, NULL);
out:
	/*
	 * If I/O fails, just return error (i.e. non-zero) without calling
	 * page_endio. The callers of rw_page (e.g. swap_readpage,
	 * __swap_writepage) will then resubmit the I/O as a bio request,
	 * and bio->bi_end_io handles the error (e.g. SetPageError,
	 * set_page_dirty and extra work).
	 */
	if (unlikely(ret < 0))
		return ret;

	switch (ret) {
	case 0:
		page_endio(page, is_write, 0);
		break;
	case 1:
		ret = 0;
		break;
	default:
		WARN_ON(1);
	}
	return ret;
}
static void zram_reset_device(struct zram *zram)
{
	struct zcomp *comp;
	u64 disksize;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	comp = zram->comp;
	disksize = zram->disksize;
	zram->disksize = 0;

	set_capacity(zram->disk, 0);
	part_stat_set_all(&zram->disk->part0, 0);

	up_write(&zram->init_lock);
	/* I/O operations on all CPUs are done, so it is safe to free */
	zram_meta_free(zram, disksize);
	memset(&zram->stats, 0, sizeof(zram->stats));
	zcomp_destroy(comp);
	reset_bdev(zram);
}
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_unlock;
	}

	disksize = PAGE_ALIGN(disksize);
	if (!zram_meta_alloc(zram, disksize)) {
		err = -ENOMEM;
		goto out_unlock;
	}

	comp = zcomp_create(zram->compressor);
	if (IS_ERR(comp)) {
		pr_err("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_revalidate_disk(zram);
	up_write(&zram->init_lock);

	return len;

out_free_meta:
	zram_meta_free(zram, disksize);
out_unlock:
	up_write(&zram->init_lock);
	return err;
}
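
/*
 * Typical setup from user space (illustrative):
 *	echo 1G > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 * The size is page-aligned and parsed by memparse(), so K/M/G suffixes
 * work.
 */
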
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		return ret;

	if (!do_reset)
		return -EINVAL;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active device or claimed device */
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	/* From now on, no one can open /dev/zram[0-9] */
	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	/* Make sure all the pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	zram_revalidate_disk(zram);
	bdput(bdev);

	mutex_lock(&bdev->bd_mutex);
	zram->claim = false;
	mutex_unlock(&bdev->bd_mutex);

	return len;
}
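
/*
 * Example: tear down a device that is no longer in use (swapoff/umount
 * it first, otherwise the write fails with -EBUSY):
 *	echo 1 > /sys/block/zram0/reset
 */
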
static int zram_open(struct block_device *bdev, fmode_t mode)
{
	int ret = 0;
	struct zram *zram;

	WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

	zram = bdev->bd_disk->private_data;
	/* zram was claimed to reset so open request fails */
	if (zram->claim)
		ret = -EBUSY;

	return ret;
}
static const struct block_device_operations zram_devops = {
	.open = zram_open,
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};
static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
#endif
static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_compact.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
	&dev_attr_backing_dev.attr,
#endif
	&dev_attr_io_stat.attr,
	&dev_attr_mm_stat.attr,
	&dev_attr_debug_stat.attr,
	NULL,
};
static const struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};
/*
 * Allocate and initialize new zram device. The function returns
 * '>= 0' device_id upon success, and negative value otherwise.
 */
static int zram_add(void)
{
	struct zram *zram;
	struct request_queue *queue;
	int ret, device_id;

	zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
	if (!zram)
		return -ENOMEM;

	ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_dev;
	device_id = ret;

	init_rwsem(&zram->init_lock);

	queue = blk_alloc_queue(GFP_KERNEL);
	if (!queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_idr;
	}

	blk_queue_make_request(queue, zram_make_request);

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_err("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = queue;
	zram->disk->queue->queuedata = zram;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZED sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

	/*
	 * zram_bio_discard() will clear all logical blocks if logical block
	 * size is identical with physical block size(PAGE_SIZE). But if it is
	 * different, we will skip discarding some parts of logical blocks in
	 * the part of the request range which isn't aligned to physical block
	 * size. So we can't ensure that all discarded logical blocks are
	 * zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_err("Error creating sysfs group for device %d\n",
				device_id);
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));

	pr_info("Added device: %s\n", zram->disk->disk_name);
	return device_id;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(queue);
out_free_idr:
	idr_remove(&zram_index_idr, device_id);
out_free_dev:
	kfree(zram);
	return ret;
}
static int zram_remove(struct zram *zram)
{
	struct block_device *bdev;

	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	/*
	 * Remove sysfs first, so no one will perform a disksize
	 * store while we destroy the devices. This also helps during
	 * hot_remove -- zram_reset_device() is the last holder of
	 * ->init_lock, no later/concurrent disksize_store() or any
	 * other sysfs handlers are possible.
	 */
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	/* Make sure all the pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	bdput(bdev);

	pr_info("Removed device: %s\n", zram->disk->disk_name);

	blk_cleanup_queue(zram->disk->queue);
	del_gendisk(zram->disk);
	put_disk(zram->disk);
	kfree(zram);
	return 0;
}
/* zram-control sysfs attributes */

/*
 * NOTE: hot_add attribute is not the usual read-only sysfs attribute, in
 * the sense that reading from this file does alter the state of your
 * system -- it creates a new un-initialized zram device and returns back
 * this device's device_id (or an error code if it fails to create a new
 * device).
 */
static ssize_t hot_add_show(struct class *class,
			struct class_attribute *attr,
			char *buf)
{
	int ret;

	mutex_lock(&zram_index_mutex);
	ret = zram_add();
	mutex_unlock(&zram_index_mutex);

	if (ret < 0)
		return ret;
	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static CLASS_ATTR_RO(hot_add);
static ssize_t hot_remove_store(struct class *class,
			struct class_attribute *attr,
			const char *buf,
			size_t count)
{
	struct zram *zram;
	int ret, dev_id;

	/* dev_id is gendisk->first_minor, which is `int' */
	ret = kstrtoint(buf, 10, &dev_id);
	if (ret)
		return ret;
	if (dev_id < 0)
		return -EINVAL;

	mutex_lock(&zram_index_mutex);

	zram = idr_find(&zram_index_idr, dev_id);
	if (zram) {
		ret = zram_remove(zram);
		if (!ret)
			idr_remove(&zram_index_idr, dev_id);
	} else {
		ret = -ENODEV;
	}

	mutex_unlock(&zram_index_mutex);
	return ret ? ret : count;
}
static CLASS_ATTR_WO(hot_remove);
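
/*
 * Example usage of the zram-control class (illustrative):
 *	cat /sys/class/zram-control/hot_add	# prints the new device id
 *	echo 4 > /sys/class/zram-control/hot_remove
 */
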
static struct attribute *zram_control_class_attrs[] = {
	&class_attr_hot_add.attr,
	&class_attr_hot_remove.attr,
	NULL,
};
ATTRIBUTE_GROUPS(zram_control_class);

static struct class zram_control_class = {
	.name		= "zram-control",
	.owner		= THIS_MODULE,
	.class_groups	= zram_control_class_groups,
};
static int zram_remove_cb(int id, void *ptr, void *data)
{
	zram_remove(ptr);
	return 0;
}

static void destroy_devices(void)
{
	class_unregister(&zram_control_class);
	idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
	idr_destroy(&zram_index_idr);
	unregister_blkdev(zram_major, "zram");
	cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}
static int __init zram_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
				      zcomp_cpu_up_prepare, zcomp_cpu_dead);
	if (ret < 0)
		return ret;

	ret = class_register(&zram_control_class);
	if (ret) {
		pr_err("Unable to register zram-control class\n");
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return ret;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_err("Unable to get major number\n");
		class_unregister(&zram_control_class);
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return -EBUSY;
	}

	while (num_devices != 0) {
		mutex_lock(&zram_index_mutex);
		ret = zram_add();
		mutex_unlock(&zram_index_mutex);
		if (ret < 0)
			goto out_error;
		num_devices--;
	}

	return 0;

out_error:
	destroy_devices();
	return ret;
}
static void __exit zram_exit(void)
{
	destroy_devices();
}

module_init(zram_init);
module_exit(zram_exit);
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
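
/*
 * Example: pre-create four devices at module load time (illustrative):
 *	modprobe zram num_devices=4
 */
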
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");