/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/cpuhotplug.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static void zram_free_page(struct zram *zram, size_t index);

static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

/* flag operations require table entry bit_spin_lock() being held */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

static inline void zram_set_element(struct zram_meta *meta, u32 index,
			unsigned long element)
{
	meta->table[index].element = element;
}

static inline void zram_clear_element(struct zram_meta *meta, u32 index)
{
	meta->table[index].element = 0;
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

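/*
 * For reference, a sketch of the table[index].value layout implied by the
 * two helpers above (ZRAM_FLAG_SHIFT is defined in zram_drv.h):
 *
 *	+---------------------+----------------------------+
 *	| zram_pageflags bits | compressed object size     |
 *	+---------------------+----------------------------+
 *	  bits >= ZRAM_FLAG_SHIFT   bits 0..ZRAM_FLAG_SHIFT-1
 */
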
#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return false;
}
#endif

static void zram_revalidate_disk(struct zram *zram)
{
	revalidate_disk(zram->disk);
	/* revalidate_disk() resets BDI_CAP_STABLE_WRITES, so set it again */
	zram->disk->queue->backing_dev_info->capabilities |=
		BDI_CAP_STABLE_WRITES;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return false;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return false;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return false;

	/* I/O request is valid */
	return true;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	*index += (*offset + bvec->bv_len) / PAGE_SIZE;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

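/*
 * Example: with PAGE_SIZE == 4096, *offset == 3072 and bv_len == 2048 the
 * segment crosses a page boundary, so *index advances by
 * (3072 + 2048) / 4096 == 1 and the new *offset is
 * (3072 + 2048) % 4096 == 1024.
 */
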
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

static inline void zram_fill_page(char *ptr, unsigned long len,
					unsigned long value)
{
	int i;
	unsigned long *page = (unsigned long *)ptr;

	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));

	if (likely(value == 0)) {
		memset(ptr, 0, len);
	} else {
		for (i = 0; i < len / sizeof(*page); i++)
			page[i] = value;
	}
}

static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos < PAGE_SIZE / sizeof(*page) - 1; pos++) {
		if (page[pos] != page[pos + 1])
			return false;
	}

	*element = page[pos];

	return true;
}

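/*
 * Note: a page full of zeros (element == 0) is the common same-filled
 * case; such pages are recorded with the ZRAM_SAME flag plus a single
 * unsigned long and need no zsmalloc allocation at all (see
 * zram_same_page_write() below).
 */
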
static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

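/*
 * Example (sketch): cap zram0 at 256M of compressed memory, then lift the
 * cap again -- limit_pages == 0 disables the check in zram_compress():
 *
 *	echo 256M > /sys/block/zram0/mem_limit
 *	echo 0 > /sys/block/zram0/mem_limit
 */
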
static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}

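/*
 * Only "0" is accepted above; writing it resets the max_used_pages
 * watermark to the pool's current size, e.g.:
 *
 *	echo 0 > /sys/block/zram0/mem_used_max
 */
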
/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return len;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	char compressor[CRYPTO_MAX_ALG_NAME];
	size_t sz;

	strlcpy(compressor, buf, sizeof(compressor));
	/* ignore trailing newline */
	sz = strlen(compressor);
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor))
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	strlcpy(zram->compressor, compressor, sizeof(compressor));
	up_write(&zram->init_lock);
	return len;
}

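/*
 * Example (sketch, assuming the lz4 crypto backend is available): the
 * algorithm must be chosen while the device is still un-initialized,
 * i.e. before disksize is set:
 *
 *	echo lz4 > /sys/block/zram0/comp_algorithm
 */
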
static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	meta = zram->meta;
	zs_compact(meta->mem_pool);
	up_read(&zram->init_lock);

	return len;
}

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

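/*
 * io_stat columns, in the order printed above:
 *	failed_reads failed_writes invalid_io notify_free
 */
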
static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->meta->mem_pool);
		zs_pool_stats(zram->meta->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			pool_stats.pages_compacted);
	up_read(&zram->init_lock);

	return ret;
}

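/*
 * mm_stat columns, in the order printed above (sizes in bytes):
 *	orig_data_size compr_data_size mem_used_total mem_limit
 *	mem_used_max same_pages pages_compacted
 */
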
static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall));
	up_read(&zram->init_lock);

	return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);

static bool zram_same_page_read(struct zram *zram, u32 index,
				struct page *page,
				unsigned int offset, unsigned int len)
{
	struct zram_meta *meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_SAME)) {
		void *mem;

		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		mem = kmap_atomic(page);
		zram_fill_page(mem + offset, len, meta->table[index].element);
		kunmap_atomic(mem);
		return true;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	return false;
}

static bool zram_same_page_write(struct zram *zram, u32 index,
					struct page *page)
{
	unsigned long element;
	void *mem = kmap_atomic(page);

	if (page_same_filled(mem, &element)) {
		struct zram_meta *meta = zram->meta;

		kunmap_atomic(mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_SAME);
		zram_set_element(meta, index, element);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.same_pages);
		return true;
	}
	kunmap_atomic(mem);

	return false;
}

static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++) {
		unsigned long handle = meta->table[index].handle;
		/*
		 * No memory is allocated for same element filled pages.
		 * Simply clear same page flag.
		 */
		if (!handle || zram_test_flag(meta, index, ZRAM_SAME))
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return NULL;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto out_error;
	}

	meta->mem_pool = zs_create_pool(pool_name);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto out_error;
	}

	return meta;

out_error:
	vfree(meta->table);
	kfree(meta);
	return NULL;
}

/*
 * To protect concurrent access to the same index entry,
 * caller should hold this table index entry's bit_spinlock to
 * indicate this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	/*
	 * No memory is allocated for same element filled pages.
	 * Simply clear same page flag.
	 */
	if (zram_test_flag(meta, index, ZRAM_SAME)) {
		zram_clear_flag(meta, index, ZRAM_SAME);
		zram_clear_element(meta, index);
		atomic64_dec(&zram->stats.same_pages);
		return;
	}

	if (!handle)
		return;

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}

static int zram_decompress_page(struct zram *zram, struct page *page, u32 index)
{
	int ret;
	unsigned long handle;
	unsigned int size;
	void *src, *dst;
	struct zram_meta *meta = zram->meta;

	if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
		return 0;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	src = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE) {
		dst = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);
		ret = 0;
	} else {
		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

		dst = kmap_atomic(page);
		ret = zcomp_decompress(zstrm, src, size, dst);
		kunmap_atomic(dst);
		zcomp_stream_put(zram->comp);
	}
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);

	return ret;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset)
{
	int ret;
	struct page *page;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
	}

	ret = zram_decompress_page(zram, page, index);
	if (unlikely(ret))
		goto out;

	if (is_partial_io(bvec)) {
		void *dst = kmap_atomic(bvec->bv_page);
		void *src = kmap_atomic(page);

		memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
		kunmap_atomic(src);
		kunmap_atomic(dst);
	}
out:
	if (is_partial_io(bvec))
		__free_page(page);

	return ret;
}

static int zram_compress(struct zram *zram, struct zcomp_strm **zstrm,
			struct page *page,
			unsigned long *out_handle, unsigned int *out_comp_len)
{
	int ret;
	unsigned int comp_len;
	void *src;
	unsigned long alloced_pages;
	unsigned long handle = 0;
	struct zram_meta *meta = zram->meta;

compress_again:
	src = kmap_atomic(page);
	ret = zcomp_compress(*zstrm, src, &comp_len);
	kunmap_atomic(src);

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		if (handle)
			zs_free(meta->mem_pool, handle);
		return ret;
	}

	if (unlikely(comp_len > max_zpage_size))
		comp_len = PAGE_SIZE;

	/*
	 * handle allocation has 2 paths:
	 * a) fast path is executed with preemption disabled (for
	 *    per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
	 *    since we can't sleep;
	 * b) slow path enables preemption and attempts to allocate
	 *    the page with __GFP_DIRECT_RECLAIM bit set. we have to
	 *    put per-cpu compression stream and, thus, to re-do
	 *    the compression once handle is allocated.
	 *
	 * if we have a 'non-null' handle here then we are coming
	 * from the slow path and handle has already been allocated.
	 */
	if (!handle)
		handle = zs_malloc(meta->mem_pool, comp_len,
				__GFP_KSWAPD_RECLAIM |
				__GFP_NOWARN |
				__GFP_HIGHMEM |
				__GFP_MOVABLE);
	if (!handle) {
		zcomp_stream_put(zram->comp);
		atomic64_inc(&zram->stats.writestall);
		handle = zs_malloc(meta->mem_pool, comp_len,
				GFP_NOIO | __GFP_HIGHMEM |
				__GFP_MOVABLE);
		*zstrm = zcomp_stream_get(zram->comp);
		if (handle)
			goto compress_again;
		return -ENOMEM;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	update_used_max(zram, alloced_pages);

	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		return -ENOMEM;
	}

	*out_handle = handle;
	*out_comp_len = comp_len;
	return 0;
}

static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
{
	int ret;
	unsigned long handle;
	unsigned int comp_len;
	void *src, *dst;
	struct zcomp_strm *zstrm;
	struct zram_meta *meta = zram->meta;
	struct page *page = bvec->bv_page;

	if (zram_same_page_write(zram, index, page))
		return 0;

	zstrm = zcomp_stream_get(zram->comp);
	ret = zram_compress(zram, &zstrm, page, &handle, &comp_len);
	if (ret) {
		zcomp_stream_put(zram->comp);
		return ret;
	}

	dst = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	src = zstrm->buffer;
	if (comp_len == PAGE_SIZE)
		src = kmap_atomic(page);
	memcpy(dst, src, comp_len);
	if (comp_len == PAGE_SIZE)
		kunmap_atomic(src);

	zcomp_stream_put(zram->comp);
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, comp_len);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(comp_len, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
	return 0;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset)
{
	int ret;
	struct page *page = NULL;
	void *src;
	struct bio_vec vec;

	vec = *bvec;
	if (is_partial_io(bvec)) {
		void *dst;
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;

		ret = zram_decompress_page(zram, page, index);
		if (ret)
			goto out;

		src = kmap_atomic(bvec->bv_page);
		dst = kmap_atomic(page);
		memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		vec.bv_page = page;
		vec.bv_len = PAGE_SIZE;
		vec.bv_offset = 0;
	}

	ret = __zram_bvec_write(zram, &vec, index);
out:
	if (is_partial_io(bvec))
		__free_page(page);

	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because the logical
	 * block size isn't identical to the physical block size on some
	 * architectures, we could get a discard request pointing to a specific
	 * offset within a certain physical block. Although we can handle this
	 * request by reading that physical block and decompressing and
	 * partially zeroing and re-compressing and then re-storing it, this
	 * isn't reasonable because our intent with a discard request is to
	 * save memory. So skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, bool is_write)
{
	unsigned long start_time = jiffies;
	int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
	int ret;

	generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (!is_write) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
		flush_dcache_page(bvec->bv_page);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (!is_write)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
			(SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio);
		return;
	default:
		break;
	}

	bio_for_each_segment(bvec, bio, iter) {
		struct bio_vec bv = bvec;
		unsigned int unwritten = bvec.bv_len;

		do {
			bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
							unwritten);
			if (zram_bvec_rw(zram, &bv, index, offset,
					op_is_write(bio_op(bio))) < 0)
				goto out;

			bv.bv_offset += bv.bv_len;
			unwritten -= bv.bv_len;

			update_position(&index, &offset, &bv);
		} while (unwritten);
	}

	bio_endio(bio);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	return BLK_QC_T_NONE;

error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}

static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	int offset, err = -EIO;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	zram = bdev->bd_disk->private_data;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		err = -EINVAL;
		goto out;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	err = zram_bvec_rw(zram, &bv, index, offset, is_write);
out:
	/*
	 * If I/O fails, just return the error (i.e. non-zero) without
	 * calling page_endio.
	 * The upper functions of rw_page (e.g. swap_readpage,
	 * __swap_writepage) then resubmit the I/O as a bio request, and
	 * bio->bi_end_io handles the error
	 * (e.g. SetPageError, set_page_dirty and extra works).
	 */
	if (err == 0)
		page_endio(page, is_write, 0);
	return err;
}

static void zram_reset_device(struct zram *zram)
{
	struct zram_meta *meta;
	struct zcomp *comp;
	u64 disksize;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	comp = zram->comp;
	disksize = zram->disksize;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));
	zram->disksize = 0;

	set_capacity(zram->disk, 0);
	part_stat_set_all(&zram->disk->part0, 0);

	up_write(&zram->init_lock);
	/* I/O operations on all CPUs are done, so it's safe to free */
	zram_meta_free(meta, disksize);
	zcomp_destroy(comp);
}

static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(zram->disk->disk_name, disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor);
	if (IS_ERR(comp)) {
		pr_err("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_revalidate_disk(zram);
	up_write(&zram->init_lock);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta, disksize);
	return err;
}

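/*
 * Typical initialization from user space (sketch); sizes go through
 * memparse(), so suffixes like K, M and G are accepted:
 *
 *	echo 1G > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0	(or mkfs + mount)
 */
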
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		return ret;

	if (!do_reset)
		return -EINVAL;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active device or claimed device */
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	/* From now on, no one can open /dev/zram[0-9] */
	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	/* Make sure all the pending I/O are finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	zram_revalidate_disk(zram);
	bdput(bdev);

	mutex_lock(&bdev->bd_mutex);
	zram->claim = false;
	mutex_unlock(&bdev->bd_mutex);

	return len;
}

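/*
 * Example (sketch): once the device is no longer open (e.g. after
 * swapoff or umount), it can be returned to the un-initialized state:
 *
 *	echo 1 > /sys/block/zram0/reset
 *
 * The write fails with -EBUSY while the device is still open.
 */
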
static int zram_open(struct block_device *bdev, fmode_t mode)
{
	int ret = 0;
	struct zram *zram;

	WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

	zram = bdev->bd_disk->private_data;
	/* zram was claimed to reset so open request fails */
	if (zram->claim)
		ret = -EBUSY;

	return ret;
}

static const struct block_device_operations zram_devops = {
	.open = zram_open,
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};

static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_compact.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	&dev_attr_io_stat.attr,
	&dev_attr_mm_stat.attr,
	&dev_attr_debug_stat.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

/*
 * Allocate and initialize a new zram device. The function returns
 * a '>= 0' device_id upon success, and a negative value otherwise.
 */
static int zram_add(void)
{
	struct zram *zram;
	struct request_queue *queue;
	int ret, device_id;

	zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
	if (!zram)
		return -ENOMEM;

	ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_dev;
	device_id = ret;

	init_rwsem(&zram->init_lock);

	queue = blk_alloc_queue(GFP_KERNEL);
	if (!queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_idr;
	}

	blk_queue_make_request(queue, zram_make_request);

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_err("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = queue;
	zram->disk->queue->queuedata = zram;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

	/*
	 * zram_bio_discard() will clear all logical blocks if logical block
	 * size is identical with physical block size(PAGE_SIZE). But if it is
	 * different, we will skip discarding some parts of logical blocks in
	 * the part of the request range which isn't aligned to physical block
	 * size. So we can't ensure that all discarded logical blocks are
	 * zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_err("Error creating sysfs group for device %d\n",
			device_id);
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
	zram->meta = NULL;

	pr_info("Added device: %s\n", zram->disk->disk_name);
	return device_id;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(queue);
out_free_idr:
	idr_remove(&zram_index_idr, device_id);
out_free_dev:
	kfree(zram);
	return ret;
}

static int zram_remove(struct zram *zram)
{
	struct block_device *bdev;

	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	/*
	 * Remove sysfs first, so no one will perform a disksize
	 * store while we destroy the devices. This also helps during
	 * hot_remove -- zram_reset_device() is the last holder of
	 * ->init_lock, no later/concurrent disksize_store() or any
	 * other sysfs handlers are possible.
	 */
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	/* Make sure all the pending I/O are finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	bdput(bdev);

	pr_info("Removed device: %s\n", zram->disk->disk_name);

	blk_cleanup_queue(zram->disk->queue);
	del_gendisk(zram->disk);
	put_disk(zram->disk);
	kfree(zram);

	return 0;
}

/* zram-control sysfs attributes */
static ssize_t hot_add_show(struct class *class,
			struct class_attribute *attr,
			char *buf)
{
	int ret;

	mutex_lock(&zram_index_mutex);
	ret = zram_add();
	mutex_unlock(&zram_index_mutex);

	if (ret < 0)
		return ret;
	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t hot_remove_store(struct class *class,
			struct class_attribute *attr,
			const char *buf,
			size_t count)
{
	struct zram *zram;
	int ret, dev_id;

	/* dev_id is gendisk->first_minor, which is `int' */
	ret = kstrtoint(buf, 10, &dev_id);
	if (ret)
		return ret;
	if (dev_id < 0)
		return -EINVAL;

	mutex_lock(&zram_index_mutex);

	zram = idr_find(&zram_index_idr, dev_id);
	if (zram) {
		ret = zram_remove(zram);
		if (!ret)
			idr_remove(&zram_index_idr, dev_id);
	} else {
		ret = -ENODEV;
	}

	mutex_unlock(&zram_index_mutex);
	return ret ? ret : count;
}

/*
 * NOTE: hot_add attribute is not the usual read-only sysfs attribute, in
 * the sense that reading from this file does alter the state of your
 * system -- it creates a new un-initialized zram device and returns back
 * this device's device_id (or an error code if it fails to create a new
 * device).
 */
static struct class_attribute zram_control_class_attrs[] = {
	__ATTR(hot_add, 0400, hot_add_show, NULL),
	__ATTR_WO(hot_remove),
	__ATTR_NULL,
};

static struct class zram_control_class = {
	.name		= "zram-control",
	.owner		= THIS_MODULE,
	.class_attrs	= zram_control_class_attrs,
};

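/*
 * Usage (sketch): reading hot_add allocates a new device and prints its
 * id; writing an id to hot_remove destroys that device:
 *
 *	cat /sys/class/zram-control/hot_add	(prints e.g. "1")
 *	echo 1 > /sys/class/zram-control/hot_remove
 */
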
static int zram_remove_cb(int id, void *ptr, void *data)
{
	zram_remove(ptr);
	return 0;
}

static void destroy_devices(void)
{
	class_unregister(&zram_control_class);
	idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
	idr_destroy(&zram_index_idr);
	unregister_blkdev(zram_major, "zram");
	cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}

static int __init zram_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
				      zcomp_cpu_up_prepare, zcomp_cpu_dead);
	if (ret < 0)
		return ret;

	ret = class_register(&zram_control_class);
	if (ret) {
		pr_err("Unable to register zram-control class\n");
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return ret;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_err("Unable to get major number\n");
		class_unregister(&zram_control_class);
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return -EBUSY;
	}

	while (num_devices != 0) {
		mutex_lock(&zram_index_mutex);
		ret = zram_add();
		mutex_unlock(&zram_index_mutex);
		if (ret < 0)
			goto out_error;
		num_devices--;
	}

	return 0;

out_error:
	destroy_devices();
	return ret;
}

static void __exit zram_exit(void)
{
	destroy_devices();
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");

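/*
 * Example (sketch): pre-create four devices at module load time:
 *
 *	modprobe zram num_devices=4
 */
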
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");