/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/cpuhotplug.h>

#include "zram_drv.h"
static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
static inline bool init_done(struct zram *zram)
{
        return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
        return (struct zram *)dev_to_disk(dev)->private_data;
}
/* flag operations require table entry bit_spin_lock() being held */
static int zram_test_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].value &= ~BIT(flag);
}
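/*
 * Each table entry packs two things into its 'value' word: the compressed
 * object size in the low ZRAM_FLAG_SHIFT bits (see zram_get_obj_size() and
 * zram_set_obj_size() below) and the zram_pageflags, including the
 * ZRAM_ACCESS bit used as the per-entry bit spinlock, in the bits above it
 * (flag layout defined in zram_drv.h).
 */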
static inline void zram_set_element(struct zram_meta *meta, u32 index,
                        unsigned long element)
{
        meta->table[index].element = element;
}

static inline void zram_clear_element(struct zram_meta *meta, u32 index)
{
        meta->table[index].element = 0;
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
        return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
                        u32 index, size_t size)
{
        unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

        meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
static inline bool is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}
static void zram_revalidate_disk(struct zram *zram)
{
        revalidate_disk(zram->disk);
        /* revalidate_disk resets BDI_CAP_STABLE_WRITES, so set it again */
        zram->disk->queue->backing_dev_info->capabilities |=
                BDI_CAP_STABLE_WRITES;
}
/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
                sector_t start, unsigned int size)
{
        u64 end, bound;

        /* unaligned request */
        if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return false;
        if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return false;

        end = start + (size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))
                return false;

        /* I/O request is valid */
        return true;
}
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        *index += (*offset + bvec->bv_len) / PAGE_SIZE;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
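/*
 * Keep stats.max_used_pages monotonically increasing without a lock: retry
 * the cmpxchg until either our value is published or another writer has
 * already recorded an equal or higher watermark.
 */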
static inline void update_used_max(struct zram *zram,
                                        const unsigned long pages)
{
        unsigned long old_max, cur_max;

        old_max = atomic_long_read(&zram->stats.max_used_pages);

        do {
                cur_max = old_max;
                if (pages > cur_max)
                        old_max = atomic_long_cmpxchg(
                                &zram->stats.max_used_pages, cur_max, pages);
        } while (old_max != cur_max);
}
static inline void zram_fill_page(char *ptr, unsigned long len,
                                        unsigned long value)
{
        int i;
        unsigned long *page = (unsigned long *)ptr;

        WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));

        if (likely(value == 0)) {
                memset(ptr, 0, len);
        } else {
                for (i = 0; i < len / sizeof(*page); i++)
                        page[i] = value;
        }
}
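/*
 * Returns true when every machine word in the page holds the same value and
 * reports that value through @element. Such pages are not stored in zsmalloc
 * at all; only the ZRAM_SAME flag and the element are kept in the table
 * entry (see zram_bvec_write() and zram_fill_page()).
 */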
static bool page_same_filled(void *ptr, unsigned long *element)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos < PAGE_SIZE / sizeof(*page) - 1; pos++) {
                if (page[pos] != page[pos + 1])
                        return false;
        }

        *element = page[pos];

        return true;
}
static void handle_same_page(struct bio_vec *bvec, unsigned long element)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page);
        zram_fill_page(user_mem + bvec->bv_offset, bvec->bv_len, element);
        kunmap_atomic(user_mem);

        flush_dcache_page(page);
}
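/*
 * The device is configured entirely through the sysfs attributes below.
 * A typical setup from userspace (illustrative only) looks like:
 *
 *      echo lzo > /sys/block/zram0/comp_algorithm
 *      echo 512M > /sys/block/zram0/disksize
 *      mkswap /dev/zram0 && swapon /dev/zram0
 *
 * and "echo 1 > /sys/block/zram0/reset" tears the device back down.
 */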
static ssize_t initstate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u32 val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = init_done(zram);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}
static ssize_t disksize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}
static ssize_t mem_limit_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 limit;
        char *tmp;
        struct zram *zram = dev_to_zram(dev);

        limit = memparse(buf, &tmp);
        if (buf == tmp) /* no chars parsed, invalid input */
                return -EINVAL;

        down_write(&zram->init_lock);
        zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
        up_write(&zram->init_lock);

        return len;
}
static ssize_t mem_used_max_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int err;
        unsigned long val;
        struct zram *zram = dev_to_zram(dev);

        err = kstrtoul(buf, 10, &val);
        if (err || val != 0)
                return -EINVAL;

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                struct zram_meta *meta = zram->meta;
                atomic_long_set(&zram->stats.max_used_pages,
                                zs_get_total_pages(meta->mem_pool));
        }
        up_read(&zram->init_lock);

        return len;
}
/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        return len;
}
static ssize_t comp_algorithm_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        size_t sz;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        sz = zcomp_available_show(zram->compressor, buf);
        up_read(&zram->init_lock);

        return sz;
}
static ssize_t comp_algorithm_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);
        char compressor[CRYPTO_MAX_ALG_NAME];
        size_t sz;

        strlcpy(compressor, buf, sizeof(compressor));
        /* ignore trailing newline */
        sz = strlen(compressor);
        if (sz > 0 && compressor[sz - 1] == '\n')
                compressor[sz - 1] = 0x00;

        if (!zcomp_available_algorithm(compressor))
                return -EINVAL;

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                up_write(&zram->init_lock);
                pr_info("Can't change algorithm for initialized device\n");
                return -EBUSY;
        }

        strlcpy(zram->compressor, compressor, sizeof(zram->compressor));
        up_write(&zram->init_lock);
        return len;
}
static ssize_t compact_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);
        struct zram_meta *meta;

        down_read(&zram->init_lock);
        if (!init_done(zram)) {
                up_read(&zram->init_lock);
                return -EINVAL;
        }

        meta = zram->meta;
        zs_compact(meta->mem_pool);
        up_read(&zram->init_lock);

        return len;
}
static ssize_t io_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);
        ssize_t ret;

        down_read(&zram->init_lock);
        ret = scnprintf(buf, PAGE_SIZE,
                        "%8llu %8llu %8llu %8llu\n",
                        (u64)atomic64_read(&zram->stats.failed_reads),
                        (u64)atomic64_read(&zram->stats.failed_writes),
                        (u64)atomic64_read(&zram->stats.invalid_io),
                        (u64)atomic64_read(&zram->stats.notify_free));
        up_read(&zram->init_lock);

        return ret;
}
static ssize_t mm_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);
        struct zs_pool_stats pool_stats;
        u64 orig_size, mem_used = 0;
        long max_used;
        ssize_t ret;

        memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                mem_used = zs_get_total_pages(zram->meta->mem_pool);
                zs_pool_stats(zram->meta->mem_pool, &pool_stats);
        }

        orig_size = atomic64_read(&zram->stats.pages_stored);
        max_used = atomic_long_read(&zram->stats.max_used_pages);

        ret = scnprintf(buf, PAGE_SIZE,
                        "%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
                        orig_size << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.compr_data_size),
                        mem_used << PAGE_SHIFT,
                        zram->limit_pages << PAGE_SHIFT,
                        max_used << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.same_pages),
                        pool_stats.pages_compacted);
        up_read(&zram->init_lock);

        return ret;
}
static ssize_t debug_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int version = 1;
        struct zram *zram = dev_to_zram(dev);
        ssize_t ret;

        down_read(&zram->init_lock);
        ret = scnprintf(buf, PAGE_SIZE,
                        "version: %d\n%8llu\n",
                        version,
                        (u64)atomic64_read(&zram->stats.writestall));
        up_read(&zram->init_lock);

        return ret;
}
static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);
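/*
 * zram_meta holds the per-device lookup table and the zsmalloc pool. It is
 * allocated by zram_meta_alloc() when a disksize is first written and torn
 * down again, freeing every still-allocated handle, by zram_meta_free().
 */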
static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
        size_t num_pages = disksize >> PAGE_SHIFT;
        size_t index;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < num_pages; index++) {
                unsigned long handle = meta->table[index].handle;
                /*
                 * No memory is allocated for same element filled pages.
                 * Simply clear same page flag.
                 */
                if (!handle || zram_test_flag(meta, index, ZRAM_SAME))
                        continue;

                zs_free(meta->mem_pool, handle);
        }

        zs_destroy_pool(meta->mem_pool);
        vfree(meta->table);
        kfree(meta);
}
static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
{
        size_t num_pages;
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

        if (!meta)
                return NULL;

        num_pages = disksize >> PAGE_SHIFT;
        meta->table = vzalloc(num_pages * sizeof(*meta->table));
        if (!meta->table) {
                pr_err("Error allocating zram address table\n");
                goto out_error;
        }

        meta->mem_pool = zs_create_pool(pool_name);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
                goto out_error;
        }

        return meta;

out_error:
        vfree(meta->table);
        kfree(meta);
        return NULL;
}
/*
 * To protect concurrent access to the same index entry, the caller should
 * hold that table entry's bit_spinlock to indicate that the entry is being
 * accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;

        /*
         * No memory is allocated for same element filled pages.
         * Simply clear same page flag.
         */
        if (zram_test_flag(meta, index, ZRAM_SAME)) {
                zram_clear_flag(meta, index, ZRAM_SAME);
                zram_clear_element(meta, index);
                atomic64_dec(&zram->stats.same_pages);
                return;
        }

        if (!handle)
                return;

        zs_free(meta->mem_pool, handle);

        atomic64_sub(zram_get_obj_size(meta, index),
                        &zram->stats.compr_data_size);
        atomic64_dec(&zram->stats.pages_stored);

        meta->table[index].handle = 0;
        zram_set_obj_size(meta, index, 0);
}
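/*
 * Read-path helper: same-filled pages are re-expanded via zram_fill_page(),
 * objects stored uncompressed (size == PAGE_SIZE) are copied as-is, and
 * everything else is decompressed through a per-CPU compression stream.
 */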
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
        int ret = 0;
        unsigned char *cmem;
        struct zram_meta *meta = zram->meta;
        unsigned long handle;
        unsigned int size;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        handle = meta->table[index].handle;
        size = zram_get_obj_size(meta, index);

        if (!handle || zram_test_flag(meta, index, ZRAM_SAME)) {
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                zram_fill_page(mem, PAGE_SIZE, meta->table[index].element);
                return 0;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        if (size == PAGE_SIZE) {
                memcpy(mem, cmem, PAGE_SIZE);
        } else {
                struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

                ret = zcomp_decompress(zstrm, cmem, size, mem);
                zcomp_stream_put(zram->comp);
        }
        zs_unmap_object(meta->mem_pool, handle);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                return ret;
        }

        return 0;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                          u32 index, int offset)
{
        int ret;
        struct page *page;
        unsigned char *user_mem, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        page = bvec->bv_page;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        if (unlikely(!meta->table[index].handle) ||
                        zram_test_flag(meta, index, ZRAM_SAME)) {
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                handle_same_page(bvec, meta->table[index].element);
                return 0;
        }
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        if (is_partial_io(bvec))
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

        user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;

        if (!uncmem) {
                pr_err("Unable to allocate temp memory\n");
                ret = -ENOMEM;
                goto out_cleanup;
        }

        ret = zram_decompress_page(zram, uncmem, index);
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret))
                goto out_cleanup;

        if (is_partial_io(bvec))
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                                bvec->bv_len);

        flush_dcache_page(page);
        ret = 0;
out_cleanup:
        kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
{
        int ret = 0;
        unsigned int clen;
        unsigned long handle = 0;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        struct zcomp_strm *zstrm = NULL;
        unsigned long alloced_pages;
        unsigned long element;

        page = bvec->bv_page;
        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
                if (!uncmem) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
                if (ret)
                        goto out;
        }

compress_again:
        user_mem = kmap_atomic(page);
        if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                       bvec->bv_len);
                kunmap_atomic(user_mem);
                user_mem = NULL;
        } else {
                uncmem = user_mem;
        }

        if (page_same_filled(uncmem, &element)) {
                if (user_mem)
                        kunmap_atomic(user_mem);
                /* Free memory associated with this sector now. */
                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                zram_free_page(zram, index);
                zram_set_flag(meta, index, ZRAM_SAME);
                zram_set_element(meta, index, element);
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

                atomic64_inc(&zram->stats.same_pages);
                ret = 0;
                goto out;
        }

        zstrm = zcomp_stream_get(zram->comp);
        ret = zcomp_compress(zstrm, uncmem, &clen);
        if (!is_partial_io(bvec)) {
                kunmap_atomic(user_mem);
                user_mem = NULL;
                uncmem = NULL;
        }

        if (unlikely(ret)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }

        src = zstrm->buffer;
        if (unlikely(clen > max_zpage_size)) {
                clen = PAGE_SIZE;
                if (is_partial_io(bvec))
                        src = uncmem;
        }

        /*
         * handle allocation has 2 paths:
         * a) fast path is executed with preemption disabled (for
         *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
         *  since we can't sleep;
         * b) slow path enables preemption and attempts to allocate
         *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
         *  put per-cpu compression stream and, thus, to re-do
         *  the compression once handle is allocated.
         *
         * if we have a 'non-null' handle here then we are coming
         * from the slow path and handle has already been allocated.
         */
        if (!handle)
                handle = zs_malloc(meta->mem_pool, clen,
                                __GFP_KSWAPD_RECLAIM |
                                __GFP_NOWARN |
                                __GFP_HIGHMEM |
                                __GFP_MOVABLE);
        if (!handle) {
                zcomp_stream_put(zram->comp);
                zstrm = NULL;

                atomic64_inc(&zram->stats.writestall);

                handle = zs_malloc(meta->mem_pool, clen,
                                GFP_NOIO | __GFP_HIGHMEM |
                                __GFP_MOVABLE);
                if (handle)
                        goto compress_again;

                pr_err("Error allocating memory for compressed page: %u, size=%u\n",
                        index, clen);
                ret = -ENOMEM;
                goto out;
        }

        alloced_pages = zs_get_total_pages(meta->mem_pool);
        update_used_max(zram, alloced_pages);

        if (zram->limit_pages && alloced_pages > zram->limit_pages) {
                zs_free(meta->mem_pool, handle);
                ret = -ENOMEM;
                goto out;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
                src = kmap_atomic(page);
                memcpy(cmem, src, PAGE_SIZE);
                kunmap_atomic(src);
        } else {
                memcpy(cmem, src, clen);
        }

        zcomp_stream_put(zram->comp);
        zstrm = NULL;
        zs_unmap_object(meta->mem_pool, handle);

        /*
         * Free memory associated with this sector
         * before overwriting unused sectors.
         */
        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        zram_free_page(zram, index);

        meta->table[index].handle = handle;
        zram_set_obj_size(meta, index, clen);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        /* Update stats */
        atomic64_add(clen, &zram->stats.compr_data_size);
        atomic64_inc(&zram->stats.pages_stored);
out:
        if (zstrm)
                zcomp_stream_put(zram->comp);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}
/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
                             int offset, struct bio *bio)
{
        size_t n = bio->bi_iter.bi_size;
        struct zram_meta *meta = zram->meta;

        /*
         * zram manages data in physical block size units. Because the
         * logical block size isn't identical to the physical block size on
         * some architectures, we could get a discard request pointing to a
         * specific offset within a certain physical block. Although we can
         * handle this request by reading that physical block and
         * decompressing, partially zeroing, re-compressing and re-storing
         * it, this isn't reasonable because our intent with a discard
         * request is to save memory. So skipping this logical block is
         * appropriate here.
         */
        if (offset) {
                if (n <= (PAGE_SIZE - offset))
                        return;

                n -= (PAGE_SIZE - offset);
                index++;
        }

        while (n >= PAGE_SIZE) {
                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                zram_free_page(zram, index);
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                atomic64_inc(&zram->stats.notify_free);
                index++;
                n -= PAGE_SIZE;
        }
}
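/*
 * Read or write a single bvec, wrapping the operation with generic block
 * layer I/O accounting and updating the failed_reads/failed_writes counters
 * on error.
 */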
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, bool is_write)
{
        unsigned long start_time = jiffies;
        int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
        int ret;

        generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
                        &zram->disk->part0);

        if (!is_write) {
                atomic64_inc(&zram->stats.num_reads);
                ret = zram_bvec_read(zram, bvec, index, offset);
        } else {
                atomic64_inc(&zram->stats.num_writes);
                ret = zram_bvec_write(zram, bvec, index, offset);
        }

        generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);

        if (unlikely(ret)) {
                if (!is_write)
                        atomic64_inc(&zram->stats.failed_reads);
                else
                        atomic64_inc(&zram->stats.failed_writes);
        }

        return ret;
}
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
        int offset;
        u32 index;
        struct bio_vec bvec;
        struct bvec_iter iter;

        index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_iter.bi_sector &
                  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_WRITE_ZEROES:
                zram_bio_discard(zram, index, offset, bio);
                bio_endio(bio);
                return;
        default:
                break;
        }

        bio_for_each_segment(bvec, bio, iter) {
                struct bio_vec bv = bvec;
                unsigned int unwritten = bvec.bv_len;

                do {
                        bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
                                                        unwritten);
                        if (zram_bvec_rw(zram, &bv, index, offset,
                                        op_is_write(bio_op(bio))) < 0)
                                goto out;

                        bv.bv_offset += bv.bv_len;
                        unwritten -= bv.bv_len;

                        update_position(&index, &offset, &bv);
                } while (unwritten);
        }

        bio_endio(bio);
        return;

out:
        bio_io_error(bio);
}
/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        if (!valid_io_request(zram, bio->bi_iter.bi_sector,
                                        bio->bi_iter.bi_size)) {
                atomic64_inc(&zram->stats.invalid_io);
                goto error;
        }

        __zram_make_request(zram, bio);
        return BLK_QC_T_NONE;

error:
        bio_io_error(bio);
        return BLK_QC_T_NONE;
}
static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;
        struct zram_meta *meta;

        zram = bdev->bd_disk->private_data;
        meta = zram->meta;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        zram_free_page(zram, index);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
        atomic64_inc(&zram->stats.notify_free);
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, bool is_write)
{
        int offset, err = -EIO;
        u32 index;
        struct zram *zram;
        struct bio_vec bv;

        zram = bdev->bd_disk->private_data;

        if (!valid_io_request(zram, sector, PAGE_SIZE)) {
                atomic64_inc(&zram->stats.invalid_io);
                err = -EINVAL;
                goto out;
        }

        index = sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        bv.bv_page = page;
        bv.bv_len = PAGE_SIZE;
        bv.bv_offset = 0;

        err = zram_bvec_rw(zram, &bv, index, offset, is_write);
out:
        /*
         * If I/O fails, just return the error (i.e. non-zero) without
         * calling page_endio. The upper callers of rw_page (e.g.
         * swap_readpage, __swap_writepage) will then resubmit the I/O as a
         * regular bio request, and bio->bi_end_io handles the error
         * (SetPageError, set_page_dirty and other cleanup).
         */
        if (err == 0)
                page_endio(page, is_write, 0);
        return err;
}
static void zram_reset_device(struct zram *zram)
{
        struct zram_meta *meta;
        struct zcomp *comp;
        u64 disksize;

        down_write(&zram->init_lock);

        zram->limit_pages = 0;

        if (!init_done(zram)) {
                up_write(&zram->init_lock);
                return;
        }

        meta = zram->meta;
        comp = zram->comp;
        disksize = zram->disksize;

        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));
        zram->disksize = 0;

        set_capacity(zram->disk, 0);
        part_stat_set_all(&zram->disk->part0, 0);

        up_write(&zram->init_lock);
        /* I/O on all CPUs is done, so it is safe to free the old meta */
        zram_meta_free(meta, disksize);
        zcomp_destroy(comp);
}
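/*
 * Initialization order matters below: the meta table and the compression
 * backend are created before taking init_lock, and are only published to
 * the device (making init_done() true) once both have succeeded.
 */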
static ssize_t disksize_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 disksize;
        struct zcomp *comp;
        struct zram_meta *meta;
        struct zram *zram = dev_to_zram(dev);
        int err;

        disksize = memparse(buf, NULL);
        if (!disksize)
                return -EINVAL;

        disksize = PAGE_ALIGN(disksize);
        meta = zram_meta_alloc(zram->disk->disk_name, disksize);
        if (!meta)
                return -ENOMEM;

        comp = zcomp_create(zram->compressor);
        if (IS_ERR(comp)) {
                pr_err("Cannot initialise %s compressing backend\n",
                                zram->compressor);
                err = PTR_ERR(comp);
                goto out_free_meta;
        }

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                pr_info("Cannot change disksize for initialized device\n");
                err = -EBUSY;
                goto out_destroy_comp;
        }

        zram->meta = meta;
        zram->comp = comp;
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
        zram_revalidate_disk(zram);
        up_write(&zram->init_lock);

        return len;

out_destroy_comp:
        up_write(&zram->init_lock);
        zcomp_destroy(comp);
out_free_meta:
        zram_meta_free(meta, disksize);
        return err;
}
static ssize_t reset_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int ret;
        unsigned short do_reset;
        struct zram *zram;
        struct block_device *bdev;

        ret = kstrtou16(buf, 10, &do_reset);
        if (ret)
                return ret;

        if (!do_reset)
                return -EINVAL;

        zram = dev_to_zram(dev);
        bdev = bdget_disk(zram->disk, 0);
        if (!bdev)
                return -ENOMEM;

        mutex_lock(&bdev->bd_mutex);
        /* Do not reset an active or claimed device */
        if (bdev->bd_openers || zram->claim) {
                mutex_unlock(&bdev->bd_mutex);
                bdput(bdev);
                return -EBUSY;
        }

        /* From now on, no one can open /dev/zram[0-9] */
        zram->claim = true;
        mutex_unlock(&bdev->bd_mutex);

        /* Make sure all pending I/O is finished */
        fsync_bdev(bdev);
        zram_reset_device(zram);
        zram_revalidate_disk(zram);
        bdput(bdev);

        mutex_lock(&bdev->bd_mutex);
        zram->claim = false;
        mutex_unlock(&bdev->bd_mutex);

        return len;
}
static int zram_open(struct block_device *bdev, fmode_t mode)
{
        int ret = 0;
        struct zram *zram;

        WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

        zram = bdev->bd_disk->private_data;
        /* zram was claimed to reset, so the open request fails */
        if (zram->claim)
                ret = -EBUSY;

        return ret;
}
static const struct block_device_operations zram_devops = {
        .open = zram_open,
        .swap_slot_free_notify = zram_slot_free_notify,
        .rw_page = zram_rw_page,
        .owner = THIS_MODULE
};
static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
static struct attribute *zram_disk_attrs[] = {
        &dev_attr_disksize.attr,
        &dev_attr_initstate.attr,
        &dev_attr_reset.attr,
        &dev_attr_compact.attr,
        &dev_attr_mem_limit.attr,
        &dev_attr_mem_used_max.attr,
        &dev_attr_max_comp_streams.attr,
        &dev_attr_comp_algorithm.attr,
        &dev_attr_io_stat.attr,
        &dev_attr_mm_stat.attr,
        &dev_attr_debug_stat.attr,
        NULL,
};

static struct attribute_group zram_disk_attr_group = {
        .attrs = zram_disk_attrs,
};
/*
 * Allocate and initialize a new zram device. The function returns
 * '>= 0' device_id upon success, and a negative value otherwise.
 */
static int zram_add(void)
{
        struct zram *zram;
        struct request_queue *queue;
        int ret, device_id;

        zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
        if (!zram)
                return -ENOMEM;

        ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
        if (ret < 0)
                goto out_free_dev;
        device_id = ret;

        init_rwsem(&zram->init_lock);

        queue = blk_alloc_queue(GFP_KERNEL);
        if (!queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out_free_idr;
        }

        blk_queue_make_request(queue, zram_make_request);

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                pr_err("Error allocating disk structure for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out_free_queue;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = queue;
        zram->disk->queue->queuedata = zram;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);
        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZED sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
        zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
        blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

        /*
         * zram_bio_discard() will clear all logical blocks if logical block
         * size is identical with physical block size(PAGE_SIZE). But if it is
         * different, we will skip discarding some parts of logical blocks in
         * the part of the request range which isn't aligned to physical block
         * size. So we can't ensure that all discarded logical blocks are
         * zeroed.
         */
        if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
                blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                                &zram_disk_attr_group);
        if (ret < 0) {
                pr_err("Error creating sysfs group for device %d\n",
                                device_id);
                goto out_free_disk;
        }
        strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
        zram->meta = NULL;

        pr_info("Added device: %s\n", zram->disk->disk_name);
        return device_id;

out_free_disk:
        del_gendisk(zram->disk);
        put_disk(zram->disk);
out_free_queue:
        blk_cleanup_queue(queue);
out_free_idr:
        idr_remove(&zram_index_idr, device_id);
out_free_dev:
        kfree(zram);
        return ret;
}
static int zram_remove(struct zram *zram)
{
        struct block_device *bdev;

        bdev = bdget_disk(zram->disk, 0);
        if (!bdev)
                return -ENOMEM;

        mutex_lock(&bdev->bd_mutex);
        if (bdev->bd_openers || zram->claim) {
                mutex_unlock(&bdev->bd_mutex);
                bdput(bdev);
                return -EBUSY;
        }

        zram->claim = true;
        mutex_unlock(&bdev->bd_mutex);

        /*
         * Remove sysfs first, so no one will perform a disksize
         * store while we destroy the devices. This also helps during
         * hot_remove -- zram_reset_device() is the last holder of
         * ->init_lock, so no later/concurrent disksize_store() or any
         * other sysfs handlers are possible.
         */
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        /* Make sure all pending I/O is finished */
        fsync_bdev(bdev);
        zram_reset_device(zram);
        bdput(bdev);

        pr_info("Removed device: %s\n", zram->disk->disk_name);

        blk_cleanup_queue(zram->disk->queue);
        del_gendisk(zram->disk);
        put_disk(zram->disk);
        kfree(zram);

        return 0;
}
/* zram-control sysfs attributes */
static ssize_t hot_add_show(struct class *class,
                        struct class_attribute *attr,
                        char *buf)
{
        int ret;

        mutex_lock(&zram_index_mutex);
        ret = zram_add();
        mutex_unlock(&zram_index_mutex);

        if (ret < 0)
                return ret;
        return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static ssize_t hot_remove_store(struct class *class,
                        struct class_attribute *attr,
                        const char *buf,
                        size_t count)
{
        struct zram *zram;
        int ret, dev_id;

        /* dev_id is gendisk->first_minor, which is `int' */
        ret = kstrtoint(buf, 10, &dev_id);
        if (ret)
                return ret;
        if (dev_id < 0)
                return -EINVAL;

        mutex_lock(&zram_index_mutex);

        zram = idr_find(&zram_index_idr, dev_id);
        if (zram) {
                ret = zram_remove(zram);
                if (!ret)
                        idr_remove(&zram_index_idr, dev_id);
        } else {
                ret = -ENODEV;
        }

        mutex_unlock(&zram_index_mutex);
        return ret ? ret : count;
}
/*
 * NOTE: hot_add attribute is not the usual read-only sysfs attribute, in the
 * sense that reading from this file does alter the state of your system -- it
 * creates a new un-initialized zram device and returns back this device's
 * device_id (or an error code if it fails to create a new device).
 */
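/*
 * Illustrative usage from userspace:
 *
 *      cat /sys/class/zram-control/hot_add       (prints the new device id)
 *      echo <id> > /sys/class/zram-control/hot_remove
 */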
static struct class_attribute zram_control_class_attrs[] = {
        __ATTR(hot_add, 0400, hot_add_show, NULL),
        __ATTR_WO(hot_remove),
        __ATTR_NULL,
};

static struct class zram_control_class = {
        .name           = "zram-control",
        .owner          = THIS_MODULE,
        .class_attrs    = zram_control_class_attrs,
};
static int zram_remove_cb(int id, void *ptr, void *data)
{
        zram_remove(ptr);
        return 0;
}
static void destroy_devices(void)
{
        class_unregister(&zram_control_class);
        idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
        idr_destroy(&zram_index_idr);
        unregister_blkdev(zram_major, "zram");
        cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}
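/*
 * Module init: register the CPU hotplug state used by the per-CPU
 * compression streams, the zram-control class and the block major, then
 * pre-create num_devices devices. destroy_devices() undoes this in reverse.
 */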
static int __init zram_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
                                      zcomp_cpu_up_prepare, zcomp_cpu_dead);
        if (ret < 0)
                return ret;

        ret = class_register(&zram_control_class);
        if (ret) {
                pr_err("Unable to register zram-control class\n");
                cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
                return ret;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_err("Unable to get major number\n");
                class_unregister(&zram_control_class);
                cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
                return -EBUSY;
        }

        while (num_devices != 0) {
                mutex_lock(&zram_index_mutex);
                ret = zram_add();
                mutex_unlock(&zram_index_mutex);
                if (ret < 0)
                        goto out_error;
                num_devices--;
        }

        return 0;

out_error:
        destroy_devices();
        return ret;
}
static void __exit zram_exit(void)
{
        destroy_devices();
}

module_init(zram_init);
module_exit(zram_exit);
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
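/* e.g. "modprobe zram num_devices=4" pre-creates four devices at load time */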
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");