/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>
#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS	32768
#define DEFAULT_JOURNAL_SIZE_FACTOR	7
#define DEFAULT_SECTORS_PER_BITMAP_BIT	32768
#define DEFAULT_BUFFER_SECTORS		128
#define DEFAULT_JOURNAL_WATERMARK	50
#define DEFAULT_SYNC_MSEC		10000
#define DEFAULT_MAX_JOURNAL_SECTORS	131072
#define MIN_LOG2_INTERLEAVE_SECTORS	3
#define MAX_LOG2_INTERLEAVE_SECTORS	31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16
#define RECALC_SECTORS			8192
#define RECALC_WRITE_SUPER		16
#define BITMAP_BLOCK_SIZE		4096	/* don't change it */
#define BITMAP_FLUSH_INTERVAL		(10 * HZ)
/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel.
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY
/*
 * On disk structures
 */

#define SB_MAGIC			"integrt"
#define SB_VERSION_1			1
#define SB_VERSION_2			2
#define SB_VERSION_3			3
#define SB_SECTORS			8
#define MAX_SECTORS_PER_BLOCK		8
struct superblock {
	__u8 magic[8];
	__u8 version;
	__u8 log2_interleave_sectors;
	__u16 integrity_tag_size;
	__u32 journal_sections;
	__u64 provided_data_sectors;	/* userspace uses this value */
	__u32 flags;
	__u8 log2_sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;
	__u8 pad[2];
	__u64 recalc_sector;
};
#define SB_FLAG_HAVE_JOURNAL_MAC	0x1
#define SB_FLAG_RECALCULATING		0x2
#define SB_FLAG_DIRTY_BITMAP		0x4
#define JOURNAL_ENTRY_ROUNDUP		8

typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR		8

struct journal_entry {
	union {
		struct {
			__u32 sector_lo;
			__u32 sector_hi;
		} s;
		__u64 sector;
	} u;
	commit_id_t last_bytes[0];
	/* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je)	((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])
#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)	do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x)	do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
#define journal_entry_get_sector(je)	le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je)	((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)	do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)	((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)
#define JOURNAL_BLOCK_SECTORS		8
#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
	__u8 mac[JOURNAL_MAC_PER_SECTOR];
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS	8

#define N_COMMIT_IDS			4
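
/*
 * The journal cycles through N_COMMIT_IDS commit ids; every sector of a
 * committed section is stamped with the current id (see
 * dm_integrity_commit_id below), so replay can tell which generation of
 * writes a section belongs to and detect torn or stale sections.
 */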
static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}
/*
 * In-memory structures
 * (there is no on-disk format of those structures)
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct alg_spec {
	char *alg_string;
	char *key_string;
	__u8 *key;
	unsigned key_size;
};
struct dm_integrity_c {
	struct dm_dev *dev;
	struct dm_dev *meta_dev;
	unsigned tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned journal_pages;
	unsigned n_bitmap_blocks;

	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;
	struct page_list *recalc_bitmap;
	struct page_list *may_write_bitmap;
	struct bitmap_block_status *bbs;
	unsigned bitmap_flush_interval;
	int synchronous_mode;
	struct bio_list synchronous_bios;
	struct delayed_work bitmap_flush_work;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned journal_sections;
	unsigned journal_entries;
	sector_t data_device_sectors;
	sector_t meta_device_sectors;
	unsigned initial_sectors;
	unsigned metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;

	unsigned char mode;

	int failed;

	struct crypto_shash *internal_hash;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	struct list_head wait_list;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned committed_section;
	unsigned n_committed_sections;

	unsigned uncommitted_section;
	unsigned n_uncommitted_sections;

	unsigned free_section;
	unsigned char free_section_entry;
	unsigned free_sectors;

	unsigned free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct workqueue_struct *recalc_wq;
	struct work_struct recalc_work;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool journal_uptodate;

	bool recalculate_flag;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;

	atomic64_t number_of_mismatches;

	struct notifier_block reboot_notifier;
};
struct dm_integrity_range {
	sector_t logical_sector;
	sector_t n_sectors;
	bool waiting;
	union {
		struct rb_node node;
		struct {
			struct task_struct *task;
			struct list_head wait_entry;
		};
	};
};
struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	bool write;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned metadata_offset;

	atomic_t in_flight;
	blk_status_t bi_status;

	struct completion *completion;

	struct gendisk *orig_bi_disk;
	u8 orig_bi_partno;
	bio_end_io_t *orig_bi_end_io;
	struct bio_integrity_payload *orig_bi_integrity;
	struct bvec_iter orig_bi_iter;
};
struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};
struct bitmap_block_status {
	struct work_struct work;
	struct dm_integrity_c *ic;
	unsigned long *bitmap;
	struct bio_list bio_queue;
	spinlock_t bio_queue_lock;
};
static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL	32
#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)			printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
	va_list args;
	va_start(args, msg);
	vprintk(msg, args);
	va_end(args);
	if (len)
		pr_cont(":");
	while (len) {
		pr_cont(" %02x", *bytes);
		bytes++;
		len--;
	}
	pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...)	__DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...)			do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
#endif
/*
 * DM Integrity profile, protection is performed in the layer above (dm-crypt).
 */
static const struct blk_integrity_profile dm_integrity_profile = {
	.name			= "DM-DIF-EXT-TAG",
	.generate_fn		= NULL,
	.verify_fn		= NULL,
};
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);
static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (err == -EILSEQ)
		atomic64_inc(&ic->number_of_mismatches);
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}
static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return READ_ONCE(ic->failed);
}
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
					  unsigned j, unsigned char seq)
{
	/*
	 * Xor the number with the section and sector, so that if a piece of
	 * the journal is written in the wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}
static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	if (!ic->meta_dev) {
		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
		*area = data_sector >> log2_interleave_sectors;
		*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
	} else {
		*area = 0;
		*offset = data_sector;
	}
}
#define sector_to_block(ic, n)						\
do {									\
	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
	(n) >>= (ic)->sb->log2_sectors_per_block;			\
} while (0)
static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned *metadata_offset)
{
	__u64 ms;
	unsigned mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}
static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	if (ic->meta_dev)
		return offset;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	result += ic->start;

	return result;
}
static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}
static void sb_set_version(struct dm_integrity_c *ic)
{
	if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
		ic->sb->version = SB_VERSION_3;
	else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
		ic->sb->version = SB_VERSION_2;
	else
		ic->sb->version = SB_VERSION_1;
}
static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	if (op == REQ_OP_WRITE)
		sb_set_version(ic);

	return dm_io(&io_req, 1, &io_loc, NULL);
}
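
/*
 * Operations on the in-memory block bitmap used in bitmap mode ('B'):
 * test whether every bit covering a sector range is set or clear, or
 * set/clear the whole range. One bitmap bit covers
 * (1 << (log2_sectors_per_block + log2_blocks_per_bitmap_bit)) sectors.
 */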
#define BITMAP_OP_TEST_ALL_SET		0
#define BITMAP_OP_TEST_ALL_CLEAR	1
#define BITMAP_OP_SET			2
#define BITMAP_OP_CLEAR			3
static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
			    sector_t sector, sector_t n_sectors, int mode)
{
	unsigned long bit, end_bit, this_end_bit, page, end_page;
	unsigned long *data;

	if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
		DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
			(unsigned long long)sector,
			(unsigned long long)n_sectors,
			ic->sb->log2_sectors_per_block,
			ic->log2_blocks_per_bitmap_bit,
			mode);
		BUG();
	}

	if (unlikely(!n_sectors))
		return true;

	bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	end_bit = (sector + n_sectors - 1) >>
		(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

repeat:
	page = bit / (PAGE_SIZE * 8);
	bit %= PAGE_SIZE * 8;

	end_page = end_bit / (PAGE_SIZE * 8);
	end_bit %= PAGE_SIZE * 8;

	if (page < end_page)
		this_end_bit = PAGE_SIZE * 8 - 1;
	else
		this_end_bit = end_bit;

	data = lowmem_page_address(bitmap[page].page);

	if (mode == BITMAP_OP_TEST_ALL_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != -1)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (!test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != 0)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = -1;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__set_bit(bit, data);
			bit++;
		}
	} else if (mode == BITMAP_OP_CLEAR) {
		if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
			clear_page(data);
		else while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = 0;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__clear_bit(bit, data);
			bit++;
		}
	} else {
		BUG();
	}

	if (unlikely(page < end_page)) {
		bit = 0;
		page++;
		goto repeat;
	}

	return true;
}
static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
	unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
	unsigned i;

	for (i = 0; i < n_bitmap_pages; i++) {
		unsigned long *dst_data = lowmem_page_address(dst[i].page);
		unsigned long *src_data = lowmem_page_address(src[i].page);
		copy_page(dst_data, src_data);
	}
}
static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
	unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
	return &ic->bbs[bitmap_block];
}
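
/*
 * The in-memory journal lives in a vector of page_list entries. The
 * helpers below translate (section, offset-within-section) journal
 * coordinates into a page index and byte offset in that page list, and
 * return pointers to journal sectors and journal entries.
 */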
static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
		       function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}
static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			       unsigned *pl_index, unsigned *pl_offset)
{
	unsigned sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}
static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned section, unsigned offset, unsigned *n_sectors)
{
	unsigned pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}
static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}
static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	unsigned rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}
static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}
static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned j, size;

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);
		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[HASH_MAX_DIGESTSIZE];

		if (WARN_ON(size > sizeof(digest))) {
			dm_integrity_io_error(ic, "digest_size", -EINVAL);
			goto err;
		}
		r = crypto_shash_final(desc, digest);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}
static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
		}
	}
}
static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;
	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}
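
/*
 * Journal "encryption" via XOR: the keystream was precomputed into
 * ic->journal_xor, so encrypting and decrypting are the same operation.
 * The XOR itself is submitted through the async_tx API (async_xor), which
 * can use XOR offload engines; completions are counted in comp->in_flight.
 */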
static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			unsigned n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned dummy;
			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			if (!n_sections)
				break;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}
static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
	struct journal_completion *comp = req->data;
	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}
static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}
static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			  unsigned n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}
static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			    unsigned n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}
static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;
	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}
static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
			       unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}
static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
		       unsigned n_sections, struct journal_completion *comp)
{
	unsigned sector, n_sectors;

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
}
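
/*
 * Write a run of journal sections to disk, encrypting or MAC-ing them
 * first. If the run wraps past the end of the journal, it is split into
 * two I/Os, and the encryption of the second part is overlapped with the
 * write of the first part when possible.
 */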
static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned i;

	io_comp.ic = ic;
	init_completion(&io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
			   commit_sections, &io_comp);
	} else {
		unsigned to_end;
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				reinit_completion(&crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				init_completion(&crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}
static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = REQ_OP_WRITE;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}
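
/*
 * Range locking: sector ranges with I/O in progress are kept in the
 * ic->in_progress rb-tree; ranges that could not be added because they
 * overlap an in-progress range wait on ic->wait_list. Both structures are
 * protected by endio_wait.lock.
 */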
static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
}
static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

	if (likely(check_waiting)) {
		struct dm_integrity_range *range;
		list_for_each_entry(range, &ic->wait_list, wait_entry) {
			if (unlikely(ranges_overlap(range, new_range)))
				return false;
		}
	}

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
			n = &range->node.rb_left;
		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
			n = &range->node.rb_right;
		} else {
			return false;
		}
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}
static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	while (unlikely(!list_empty(&ic->wait_list))) {
		struct dm_integrity_range *last_range =
			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
		struct task_struct *last_range_task;
		last_range_task = last_range->task;
		list_del(&last_range->wait_entry);
		if (!add_new_range(ic, last_range, false)) {
			last_range->task = last_range_task;
			list_add(&last_range->wait_entry, &ic->wait_list);
			break;
		}
		last_range->waiting = false;
		wake_up_process(last_range_task);
	}
}
static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}
static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	new_range->waiting = true;
	list_add_tail(&new_range->wait_entry, &ic->wait_list);
	new_range->task = current;
	do {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ic->endio_wait.lock);
		io_schedule();
		spin_lock_irq(&ic->endio_wait.lock);
	} while (unlikely(new_range->waiting));
}
static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	if (unlikely(!add_new_range(ic, new_range, true)))
		wait_and_add_new_range(ic, new_range);
}
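
/*
 * The journal tree (one journal_node per journal entry) maps logical
 * sectors to journal entries, keyed in an rb-tree by sector, so that reads
 * can be serviced from journal data that has not been written back yet.
 */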
static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}
static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;
		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}
static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}
#define NOT_FOUND	(-1U)

static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned found = NOT_FOUND;
	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);
		if (sector == j->sector) {
			found = j - ic->journal_tree;
		}
		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else {
			n = j->node.rb_right;
		}
	}

	return found;
}
static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}
static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}
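
/*
 * Read, write or compare a run of integrity tags in the metadata area,
 * going through dm-bufio. For TAG_CMP, a nonzero return value is the
 * number of tag bytes remaining, counted from the first mismatching byte;
 * the caller uses it to compute the sector where verification failed.
 */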
#define TAG_READ	0
#define TAG_WRITE	1
#define TAG_CMP		2

static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned *metadata_offset, unsigned total_size, int op)
{
	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (IS_ERR(data))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			memcpy(dp, tag, to_copy);
			dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
		} else {
			/* e.g.: op == TAG_CMP */
			if (unlikely(memcmp(dp, tag, to_copy))) {
				unsigned i;

				for (i = 0; i < to_copy; i++) {
					if (dp[i] != tag[i])
						break;
					total_size--;
				}
				dm_bufio_release(b);
				return total_size;
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}
		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
}
static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
{
	int r;
	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);
}
static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);
	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}
static void autocommit_fn(struct timer_list *t)
{
	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}
static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}
static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);

	queue_work(ic->commit_wq, &ic->commit_work);
}
static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_status)
		bio->bi_status = errno_to_blk_status(r);
	if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
		unsigned long flags;
		spin_lock_irqsave(&ic->endio_wait.lock, flags);
		bio_list_add(&ic->synchronous_bios, bio);
		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
		return;
	}
	bio_endio(bio);
}
static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}
static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (unlikely(dio->write))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

		if (unlikely(dio->bi_status) && !bio->bi_status)
			bio->bi_status = dio->bi_status;
		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}
static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	bio->bi_iter = dio->orig_bi_iter;
	bio->bi_disk = dio->orig_bi_disk;
	bio->bi_partno = dio->orig_bi_partno;
	if (dio->orig_bi_integrity) {
		bio->bi_integrity = dio->orig_bi_integrity;
		bio->bi_opf |= REQ_INTEGRITY;
	}
	bio->bi_end_io = dio->orig_bi_end_io;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}
static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__u64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned digest_size;

	req->tfm = ic->internal_hash;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* this shouldn't happen anyway, the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
}
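
/*
 * Metadata worker: with an internal hash, compute per-block checksums over
 * the bio data and compare (reads) or store (writes) them in the metadata
 * area; without an internal hash, transfer the tags between the bio's
 * integrity payload and the metadata area.
 */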
static void integrity_metadata(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	struct dm_integrity_c *ic = dio->ic;

	int r;

	if (ic->internal_hash) {
		struct bvec_iter iter;
		struct bio_vec bv;
		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		char *checksums;
		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
		char checksums_onstack[HASH_MAX_DIGESTSIZE];
		unsigned sectors_to_process = dio->range.n_sectors;
		sector_t sector = dio->range.logical_sector;

		if (unlikely(ic->mode == 'R'))
			goto skip_io;

		checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
				    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		if (!checksums) {
			checksums = checksums_onstack;
			if (WARN_ON(extra_space &&
				    digest_size > sizeof(checksums_onstack))) {
				r = -EINVAL;
				goto error;
			}
		}

		__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
			unsigned pos;
			char *mem, *checksums_ptr;

again:
			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
			pos = 0;
			checksums_ptr = checksums;
			do {
				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
				checksums_ptr += ic->tag_size;
				sectors_to_process -= ic->sectors_per_block;
				pos += ic->sectors_per_block << SECTOR_SHIFT;
				sector += ic->sectors_per_block;
			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
			kunmap_atomic(mem);

			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
						checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
			if (unlikely(r)) {
				if (r > 0) {
					DMERR_LIMIT("Checksum failed at sector 0x%llx",
						    (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
					r = -EILSEQ;
					atomic64_inc(&ic->number_of_mismatches);
				}
				if (likely(checksums != checksums_onstack))
					kfree(checksums);
				goto error;
			}

			if (!sectors_to_process)
				break;

			if (unlikely(pos < bv.bv_len)) {
				bv.bv_offset += pos;
				bv.bv_len -= pos;
				goto again;
			}
		}

		if (likely(checksums != checksums_onstack))
			kfree(checksums);
	} else {
		struct bio_integrity_payload *bip = dio->orig_bi_integrity;

		if (bip) {
			struct bio_vec biv;
			struct bvec_iter iter;
			unsigned data_to_process = dio->range.n_sectors;
			sector_to_block(ic, data_to_process);
			data_to_process *= ic->tag_size;

			bip_for_each_vec(biv, bip, iter) {
				unsigned char *tag;
				unsigned this_len;

				BUG_ON(PageHighMem(biv.bv_page));
				tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
				this_len = min(biv.bv_len, data_to_process);
				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
							this_len, !dio->write ? TAG_READ : TAG_WRITE);
				if (unlikely(r))
					goto error;
				data_to_process -= this_len;
				if (!data_to_process)
					break;
			}
		}
	}
skip_io:
	dec_in_flight(dio);
	return;
error:
	dio->bi_status = errno_to_blk_status(r);
	dec_in_flight(dio);
}
static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_integrity_c *ic = ti->private;
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
	struct bio_integrity_payload *bip;

	sector_t area, offset;

	dio->ic = ic;
	dio->bi_status = 0;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		submit_flush_bio(ic, dio);
		return DM_MAPIO_SUBMITTED;
	}

	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	dio->write = bio_op(bio) == REQ_OP_WRITE;
	dio->fua = dio->write && bio->bi_opf & REQ_FUA;
	if (unlikely(dio->fua)) {
		/*
		 * Don't pass down the FUA flag because we have to flush
		 * the disk cache anyway.
		 */
		bio->bi_opf &= ~REQ_FUA;
	}
	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
		      (unsigned long long)ic->provided_data_sectors);
		return DM_MAPIO_KILL;
	}
	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
		      ic->sectors_per_block,
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
		return DM_MAPIO_KILL;
	}

	if (ic->sectors_per_block > 1) {
		struct bvec_iter iter;
		struct bio_vec bv;
		bio_for_each_segment(bv, bio, iter) {
			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
					bv.bv_offset, bv.bv_len, ic->sectors_per_block);
				return DM_MAPIO_KILL;
			}
		}
	}

	bip = bio_integrity(bio);
	if (!ic->internal_hash) {
		if (bip) {
			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
			if (ic->log2_tag_size >= 0)
				wanted_tag_size <<= ic->log2_tag_size;
			else
				wanted_tag_size *= ic->tag_size;
			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
				DMERR("Invalid integrity data size %u, expected %u",
				      bip->bip_iter.bi_size, wanted_tag_size);
				return DM_MAPIO_KILL;
			}
		}
	} else {
		if (unlikely(bip != NULL)) {
			DMERR("Unexpected integrity data when using internal hash");
			return DM_MAPIO_KILL;
		}
	}

	if (unlikely(ic->mode == 'R') && unlikely(dio->write))
		return DM_MAPIO_KILL;

	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);

	dm_integrity_map_continue(dio, true);
	return DM_MAPIO_SUBMITTED;
}
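
/*
 * Copy data between the bio and the in-memory journal (journal mode reads
 * and writes). Returns true if the bio could not be processed completely
 * and the caller must retry with the remainder of the bio.
 */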
static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
				 unsigned journal_section, unsigned journal_entry)
{
	struct dm_integrity_c *ic = dio->ic;
	sector_t logical_sector;
	unsigned n_sectors;

	logical_sector = dio->range.logical_sector;
	n_sectors = dio->range.n_sectors;
	do {
		struct bio_vec bv = bio_iovec(bio);
		char *mem;

		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
			bv.bv_len = n_sectors << SECTOR_SHIFT;
		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
		mem = kmap_atomic(bv.bv_page);
		if (likely(dio->write))
			flush_dcache_page(bv.bv_page);

		do {
			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);

			if (unlikely(!dio->write)) {
				struct journal_sector *js;
				char *mem_ptr;
				unsigned s;

				if (unlikely(journal_entry_is_inprogress(je))) {
					flush_dcache_page(bv.bv_page);
					kunmap_atomic(mem);

					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
					goto retry_kmap;
				}
				smp_rmb();
				BUG_ON(journal_entry_get_sector(je) != logical_sector);
				js = access_journal_data(ic, journal_section, journal_entry);
				mem_ptr = mem + bv.bv_offset;
				s = 0;
				do {
					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
					js++;
					mem_ptr += 1 << SECTOR_SHIFT;
				} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
				if (ic->internal_hash) {
					char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];

					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
						DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
							    (unsigned long long)logical_sector);
					}
				}
#endif
			}

			if (!ic->internal_hash) {
				struct bio_integrity_payload *bip = bio_integrity(bio);
				unsigned tag_todo = ic->tag_size;
				char *tag_ptr = journal_entry_tag(ic, je);

				if (bip) do {
					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
					unsigned tag_now = min(biv.bv_len, tag_todo);
					char *tag_addr;
					BUG_ON(PageHighMem(biv.bv_page));
					tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
					if (likely(dio->write))
						memcpy(tag_ptr, tag_addr, tag_now);
					else
						memcpy(tag_addr, tag_ptr, tag_now);
					bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
					tag_ptr += tag_now;
					tag_todo -= tag_now;
				} while (unlikely(tag_todo)); else {
					if (likely(dio->write))
						memset(tag_ptr, 0, tag_todo);
				}
			}

			if (likely(dio->write)) {
				struct journal_sector *js;
				unsigned s;

				js = access_journal_data(ic, journal_section, journal_entry);
				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);

				s = 0;
				do {
					je->last_bytes[s] = js[s].commit_id;
				} while (++s < ic->sectors_per_block);

				if (ic->internal_hash) {
					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
					if (unlikely(digest_size > ic->tag_size)) {
						char checksums_onstack[HASH_MAX_DIGESTSIZE];
						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
					} else
						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
				}

				journal_entry_set_sector(je, logical_sector);
			}
			logical_sector += ic->sectors_per_block;

			journal_entry++;
			if (unlikely(journal_entry == ic->journal_section_entries)) {
				journal_entry = 0;
				journal_section++;
				wraparound_section(ic, &journal_section);
			}

			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);

		if (unlikely(!dio->write))
			flush_dcache_page(bv.bv_page);
		kunmap_atomic(mem);
	} while (n_sectors);

	if (likely(dio->write)) {
		smp_mb();
		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
			wake_up(&ic->copy_to_journal_wait);
		if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
			queue_work(ic->commit_wq, &ic->commit_work);
		else
			schedule_autocommit(ic);
	} else {
		remove_range(ic, &dio->range);
	}

	if (unlikely(bio->bi_iter.bi_size)) {
		sector_t area, offset;

		dio->range.logical_sector = logical_sector;
		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
		return true;
	}

	return false;
}
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
{
	struct dm_integrity_c *ic = dio->ic;
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	unsigned journal_section, journal_entry;
	unsigned journal_read_pos;
	struct completion read_comp;
	bool need_sync_io = ic->internal_hash && !dio->write;

	if (need_sync_io && from_map) {
		INIT_WORK(&dio->work, integrity_bio_wait);
		queue_work(ic->metadata_wq, &dio->work);
		return;
	}

lock_retry:
	spin_lock_irq(&ic->endio_wait.lock);
retry:
	if (unlikely(dm_integrity_failed(ic))) {
		spin_unlock_irq(&ic->endio_wait.lock);
		do_endio(ic, bio);
		return;
	}
	dio->range.n_sectors = bio_sectors(bio);
	journal_read_pos = NOT_FOUND;
	if (likely(ic->mode == 'J')) {
		if (dio->write) {
			unsigned next_entry, i, pos;
			unsigned ws, we, range_sectors;

			dio->range.n_sectors = min(dio->range.n_sectors,
						   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
			if (unlikely(!dio->range.n_sectors)) {
				if (from_map)
					goto offload_to_thread;
				sleep_on_endio_wait(ic);
				goto retry;
			}
			range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
			ic->free_sectors -= range_sectors;
			journal_section = ic->free_section;
			journal_entry = ic->free_section_entry;

			next_entry = ic->free_section_entry + range_sectors;
			ic->free_section_entry = next_entry % ic->journal_section_entries;
			ic->free_section += next_entry / ic->journal_section_entries;
			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
			wraparound_section(ic, &ic->free_section);

			pos = journal_section * ic->journal_section_entries + journal_entry;
			ws = journal_section;
			we = journal_entry;
			i = 0;
			do {
				struct journal_entry *je;

				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
				pos++;
				if (unlikely(pos >= ic->journal_entries))
					pos = 0;

				je = access_journal_entry(ic, ws, we);
				BUG_ON(!journal_entry_is_unused(je));
				journal_entry_set_inprogress(je);
				we++;
				if (unlikely(we == ic->journal_section_entries)) {
					we = 0;
					ws++;
					wraparound_section(ic, &ws);
				}
			} while ((i += ic->sectors_per_block) < dio->range.n_sectors);

			spin_unlock_irq(&ic->endio_wait.lock);
			goto journal_read_write;
		} else {
			sector_t next_sector;
			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
			if (likely(journal_read_pos == NOT_FOUND)) {
				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
					dio->range.n_sectors = next_sector - dio->range.logical_sector;
			} else {
				unsigned i;
				unsigned jp = journal_read_pos + 1;
				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
						break;
				}
				dio->range.n_sectors = i;
			}
		}
	}
	if (unlikely(!add_new_range(ic, &dio->range, true))) {
		/*
		 * We must not sleep in the request routine because it could
		 * stall bios on current->bio_list.
		 * So, we offload the bio to a workqueue if we have to sleep.
		 */
		if (from_map) {
offload_to_thread:
			spin_unlock_irq(&ic->endio_wait.lock);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		}
		wait_and_add_new_range(ic, &dio->range);
	}
	spin_unlock_irq(&ic->endio_wait.lock);

	if (unlikely(journal_read_pos != NOT_FOUND)) {
		journal_section = journal_read_pos / ic->journal_section_entries;
		journal_entry = journal_read_pos % ic->journal_section_entries;
		goto journal_read_write;
	}

	if (ic->mode == 'B' && dio->write) {
		if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
				     dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
			struct bitmap_block_status *bbs;

			bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
			spin_lock(&bbs->bio_queue_lock);
			bio_list_add(&bbs->bio_queue, bio);
			spin_unlock(&bbs->bio_queue_lock);
			queue_work(ic->writer_wq, &bbs->work);
			return;
		}
	}

	dio->in_flight = (atomic_t)ATOMIC_INIT(2);

	if (need_sync_io) {
		init_completion(&read_comp);
		dio->completion = &read_comp;
	} else
		dio->completion = NULL;

	dio->orig_bi_iter = bio->bi_iter;

	dio->orig_bi_disk = bio->bi_disk;
	dio->orig_bi_partno = bio->bi_partno;
	bio_set_dev(bio, ic->dev->bdev);

	dio->orig_bi_integrity = bio_integrity(bio);
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;

	dio->orig_bi_end_io = bio->bi_end_io;
	bio->bi_end_io = integrity_end_io;

	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
	generic_make_request(bio);

	if (need_sync_io) {
		wait_for_completion_io(&read_comp);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
		    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
			goto skip_check;
		if (ic->mode == 'B') {
			if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
					     dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
				goto skip_check;
		}

		if (likely(!bio->bi_status))
			integrity_metadata(&dio->work);
		else
skip_check:
			dec_in_flight(dio);
	} else {
		INIT_WORK(&dio->work, integrity_metadata);
		queue_work(ic->metadata_wq, &dio->work);
	}

	return;

journal_read_write:
	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
		goto lock_retry;

	do_endio_flush(ic, dio);
}
static void integrity_bio_wait(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);

	dm_integrity_map_continue(dio, false);
}
static void pad_uncommitted(struct dm_integrity_c *ic)
{
	if (ic->free_section_entry) {
		ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
		ic->free_section_entry = 0;
		ic->free_section++;
		wraparound_section(ic, &ic->free_section);
		ic->n_uncommitted_sections++;
	}
	if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
		    (ic->n_uncommitted_sections + ic->n_committed_sections) *
		    ic->journal_section_entries + ic->free_sectors)) {
		DMCRIT("journal_sections %u, journal_section_entries %u, "
		       "n_uncommitted_sections %u, n_committed_sections %u, "
		       "journal_section_entries %u, free_sectors %u",
		       ic->journal_sections, ic->journal_section_entries,
		       ic->n_uncommitted_sections, ic->n_committed_sections,
		       ic->journal_section_entries, ic->free_sectors);
	}
}
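
/*
 * Commit work: wait for in-progress journal entries, stamp the uncommitted
 * sections with the current commit id and write them out with FUA; queued
 * flush bios are completed once the journal is safely on disk.
 */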
static void integrity_commit(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
	unsigned commit_start, commit_sections;
	unsigned i, j, n;
	struct bio *flushes;

	del_timer(&ic->autocommit_timer);

	spin_lock_irq(&ic->endio_wait.lock);
	flushes = bio_list_get(&ic->flush_bio_list);
	if (unlikely(ic->mode != 'J')) {
		spin_unlock_irq(&ic->endio_wait.lock);
		dm_integrity_flush_buffers(ic);
		goto release_flush_bios;
	}

	pad_uncommitted(ic);
	commit_start = ic->uncommitted_section;
	commit_sections = ic->n_uncommitted_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!commit_sections)
		goto release_flush_bios;

	i = commit_start;
	for (n = 0; n < commit_sections; n++) {
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je;
			je = access_journal_entry(ic, i, j);
			io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
		}
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js;
			js = access_journal(ic, i, j);
			js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			ic->commit_seq = next_commit_seq(ic->commit_seq);
		wraparound_section(ic, &i);
	}
	smp_rmb();

	write_journal(ic, commit_start, commit_sections);

	spin_lock_irq(&ic->endio_wait.lock);
	ic->uncommitted_section += commit_sections;
	wraparound_section(ic, &ic->uncommitted_section);
	ic->n_uncommitted_sections -= commit_sections;
	ic->n_committed_sections += commit_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
		queue_work(ic->writer_wq, &ic->writer_work);

release_flush_bios:
	while (flushes) {
		struct bio *next = flushes->bi_next;
		flushes->bi_next = NULL;
		do_endio(ic, flushes);
		flushes = next;
	}
}
static void complete_copy_from_journal(unsigned long error, void *context)
{
	struct journal_io *io = context;
	struct journal_completion *comp = io->comp;
	struct dm_integrity_c *ic = comp->ic;
	remove_range(ic, &io->range);
	mempool_free(io, &ic->journal_io_mempool);
	if (unlikely(error != 0))
		dm_integrity_io_error(ic, "copying from journal", -EIO);
	complete_journal_op(comp);
}
static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
			       struct journal_entry *je)
{
	unsigned s = 0;
	do {
		js->commit_id = je->last_bytes[s];
		js++;
	} while (++s < ic->sectors_per_block);
}
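
/*
 * Write back committed journal entries to their final location on the
 * data device and write the corresponding tags to the metadata area.
 * Runs of adjacent entries are merged into one copy; entries superseded
 * by a newer committed entry for the same sector are skipped.
 */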
2141 static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
2142 unsigned write_sections, bool from_replay)
2145 struct journal_completion comp;
2146 struct blk_plug plug;
2148 blk_start_plug(&plug);
2151 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2152 init_completion(&comp.comp);
2155 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2156 #ifndef INTERNAL_VERIFY
2157 if (unlikely(from_replay))
2159 rw_section_mac(ic, i, false);
2160 for (j = 0; j < ic->journal_section_entries; j++) {
2161 struct journal_entry *je = access_journal_entry(ic, i, j);
2162 sector_t sec, area, offset;
2163 unsigned k, l, next_loop;
2164 sector_t metadata_block;
2165 unsigned metadata_offset;
2166 struct journal_io *io;
2168 if (journal_entry_is_unused(je))
2169 continue;
2170 BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
2171 sec = journal_entry_get_sector(je);
2172 if (unlikely(from_replay)) {
2173 if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
2174 dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2175 sec &= ~(sector_t)(ic->sectors_per_block - 1);
2178 get_area_and_offset(ic, sec, &area, &offset);
2179 restore_last_bytes(ic, access_journal_data(ic, i, j), je);
2180 for (k = j + 1; k < ic->journal_section_entries; k++) {
2181 struct journal_entry *je2 = access_journal_entry(ic, i, k);
2182 sector_t sec2, area2, offset2;
2183 if (journal_entry_is_unused(je2))
2184 break;
2185 BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
2186 sec2 = journal_entry_get_sector(je2);
2187 get_area_and_offset(ic, sec2, &area2, &offset2);
2188 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
2189 break;
2190 restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
2192 next_loop = k - 1;
2194 io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
2195 io->comp = &comp;
2196 io->range.logical_sector = sec;
2197 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
2199 spin_lock_irq(&ic->endio_wait.lock);
2200 add_new_range_and_wait(ic, &io->range);
2202 if (likely(!from_replay)) {
2203 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2205 /* don't write if there is newer committed sector */
2206 while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2207 struct journal_entry *je2 = access_journal_entry(ic, i, j);
2209 journal_entry_set_unused(je2);
2210 remove_journal_node(ic, &section_node[j]);
2211 j++;
2212 sec += ic->sectors_per_block;
2213 offset += ic->sectors_per_block;
2215 while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2216 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2218 journal_entry_set_unused(je2);
2219 remove_journal_node(ic, &section_node[k - 1]);
2220 k--;
2222 if (j == k) {
2223 remove_range_unlocked(ic, &io->range);
2224 spin_unlock_irq(&ic->endio_wait.lock);
2225 mempool_free(io, &ic->journal_io_mempool);
2226 goto skip_io;
2228 for (l = j; l < k; l++) {
2229 remove_journal_node(ic, &section_node[l]);
2232 spin_unlock_irq(&ic->endio_wait.lock);
2234 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2235 for (l = j; l < k; l++) {
2237 struct journal_entry *je2 = access_journal_entry(ic, i, l);
2239 if (
2240 #ifndef INTERNAL_VERIFY
2241 unlikely(from_replay) &&
2242 #endif
2243 ic->internal_hash) {
2244 char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2246 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2247 (char *)access_journal_data(ic, i, l), test_tag);
2248 if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
2249 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2252 journal_entry_set_unused(je2);
2253 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2254 ic->tag_size, TAG_WRITE);
2255 if (unlikely(r)) {
2256 dm_integrity_io_error(ic, "reading tags", r);
2260 atomic_inc(&comp.in_flight);
2261 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2262 (k - j) << ic->sb->log2_sectors_per_block,
2263 get_data_sector(ic, area, offset),
2264 complete_copy_from_journal, io);
2265 skip_io:
2266 j = next_loop;
2270 dm_bufio_write_dirty_buffers_async(ic->bufio);
2272 blk_finish_plug(&plug);
2274 complete_journal_op(&comp);
2275 wait_for_completion_io(&comp.comp);
2277 dm_integrity_flush_buffers(ic);
2280 static void integrity_writer(struct work_struct *w)
2282 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2283 unsigned write_start, write_sections;
2285 unsigned prev_free_sectors;
2287 /* the following test is not needed, but it tests the replay code */
2288 if (READ_ONCE(ic->suspending) && !ic->meta_dev)
2289 return;
2291 spin_lock_irq(&ic->endio_wait.lock);
2292 write_start = ic->committed_section;
2293 write_sections = ic->n_committed_sections;
2294 spin_unlock_irq(&ic->endio_wait.lock);
2296 if (!write_sections)
2297 return;
2299 do_journal_write(ic, write_start, write_sections, false);
2301 spin_lock_irq(&ic->endio_wait.lock);
2303 ic->committed_section += write_sections;
2304 wraparound_section(ic, &ic->committed_section);
2305 ic->n_committed_sections -= write_sections;
2307 prev_free_sectors = ic->free_sectors;
2308 ic->free_sectors += write_sections * ic->journal_section_entries;
2309 if (unlikely(!prev_free_sectors))
2310 wake_up_locked(&ic->endio_wait);
2312 spin_unlock_irq(&ic->endio_wait.lock);
2315 static void recalc_write_super(struct dm_integrity_c *ic)
2319 dm_integrity_flush_buffers(ic);
2320 if (dm_integrity_failed(ic))
2321 return;
2323 r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
2324 if (unlikely(r))
2325 dm_integrity_io_error(ic, "writing superblock", r);
2328 static void integrity_recalc(struct work_struct *w)
2330 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2331 struct dm_integrity_range range;
2332 struct dm_io_request io_req;
2333 struct dm_io_region io_loc;
2334 sector_t area, offset;
2335 sector_t metadata_block;
2336 unsigned metadata_offset;
2337 sector_t logical_sector, n_sectors;
2341 unsigned super_counter = 0;
2343 DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2345 spin_lock_irq(&ic->endio_wait.lock);
2347 next_chunk:
2349 if (unlikely(READ_ONCE(ic->suspending)))
2350 goto unlock_ret;
2352 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2353 if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2354 if (ic->mode == 'B') {
2355 DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
2356 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2358 goto unlock_ret;
2361 get_area_and_offset(ic, range.logical_sector, &area, &offset);
2362 range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
2363 if (!ic->meta_dev)
2364 range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
2366 add_new_range_and_wait(ic, &range);
2367 spin_unlock_irq(&ic->endio_wait.lock);
2368 logical_sector = range.logical_sector;
2369 n_sectors = range.n_sectors;
2371 if (ic->mode == 'B') {
2372 if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
2373 goto advance_and_next;
2375 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2376 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2377 logical_sector += ic->sectors_per_block;
2378 n_sectors -= ic->sectors_per_block;
2381 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2382 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2383 n_sectors -= ic->sectors_per_block;
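/*
 * Blocks whose recalc bits are all clear need no recalculation; the two
 * loops above trim such blocks from the head and tail of the chunk so
 * only blocks still marked in the bitmap are re-read and re-checksummed.
 */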
2386 get_area_and_offset(ic, logical_sector, &area, &offset);
2389 DEBUG_print("recalculating: %lx, %lx\n", logical_sector, n_sectors);
2391 if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2392 recalc_write_super(ic);
2393 if (ic->mode == 'B') {
2394 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2399 if (unlikely(dm_integrity_failed(ic)))
2400 goto err;
2402 io_req.bi_op = REQ_OP_READ;
2403 io_req.bi_op_flags = 0;
2404 io_req.mem.type = DM_IO_VMA;
2405 io_req.mem.ptr.addr = ic->recalc_buffer;
2406 io_req.notify.fn = NULL;
2407 io_req.client = ic->io;
2408 io_loc.bdev = ic->dev->bdev;
2409 io_loc.sector = get_data_sector(ic, area, offset);
2410 io_loc.count = n_sectors;
2412 r = dm_io(&io_req, 1, &io_loc, NULL);
2413 if (unlikely(r)) {
2414 dm_integrity_io_error(ic, "reading data", r);
2415 goto err;
2418 t = ic->recalc_tags;
2419 for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2420 integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
2421 t += ic->tag_size;
2424 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2426 r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
2427 if (unlikely(r)) {
2428 dm_integrity_io_error(ic, "writing tags", r);
2429 goto err;
2433 advance_and_next:
2434 cond_resched();
2435 spin_lock_irq(&ic->endio_wait.lock);
2436 remove_range_unlocked(ic, &range);
2437 ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2438 goto next_chunk;
2440 err:
2441 remove_range(ic, &range);
2442 return;
2444 unlock_ret:
2445 spin_unlock_irq(&ic->endio_wait.lock);
2447 recalc_write_super(ic);
2450 static void bitmap_block_work(struct work_struct *w)
2452 struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
2453 struct dm_integrity_c *ic = bbs->ic;
2455 struct bio_list bio_queue;
2456 struct bio_list waiting;
2458 bio_list_init(&waiting);
2460 spin_lock(&bbs->bio_queue_lock);
2461 bio_queue = bbs->bio_queue;
2462 bio_list_init(&bbs->bio_queue);
2463 spin_unlock(&bbs->bio_queue_lock);
2465 while ((bio = bio_list_pop(&bio_queue))) {
2466 struct dm_integrity_io *dio;
2468 dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2470 if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2471 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2472 remove_range(ic, &dio->range);
2473 INIT_WORK(&dio->work, integrity_bio_wait);
2474 queue_work(ic->wait_wq, &dio->work);
2476 block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2477 dio->range.n_sectors, BITMAP_OP_SET);
2478 bio_list_add(&waiting, bio);
2482 if (bio_list_empty(&waiting))
2483 return;
2485 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
2486 bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
2487 BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
2489 while ((bio = bio_list_pop(&waiting))) {
2490 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2492 block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2493 dio->range.n_sectors, BITMAP_OP_SET);
2495 remove_range(ic, &dio->range);
2496 INIT_WORK(&dio->work, integrity_bio_wait);
2497 queue_work(ic->wait_wq, &dio->work);
2500 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2503 static void bitmap_flush_work(struct work_struct *work)
2505 struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2506 struct dm_integrity_range range;
2507 unsigned long limit;
2510 dm_integrity_flush_buffers(ic);
2512 range.logical_sector = 0;
2513 range.n_sectors = ic->provided_data_sectors;
2515 spin_lock_irq(&ic->endio_wait.lock);
2516 add_new_range_and_wait(ic, &range);
2517 spin_unlock_irq(&ic->endio_wait.lock);
2519 dm_integrity_flush_buffers(ic);
2521 blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL);
2523 limit = ic->provided_data_sectors;
2524 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2525 limit = le64_to_cpu(ic->sb->recalc_sector)
2526 >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2527 << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
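/*
 * Round the recalc position down to a whole bitmap-bit boundary: e.g.
 * with log2_sectors_per_block + log2_blocks_per_bitmap_bit == 15 (32768
 * sectors per bit), a recalc_sector of 50000 yields limit 32768, so a
 * partially recalculated region keeps its journal bit set.
 */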
2529 /*DEBUG_print("zeroing journal\n");*/
2530 block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2531 block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2533 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2534 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2536 spin_lock_irq(&ic->endio_wait.lock);
2537 remove_range_unlocked(ic, &range);
2538 while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2539 bio_endio(bio);
2540 spin_unlock_irq(&ic->endio_wait.lock);
2541 spin_lock_irq(&ic->endio_wait.lock);
2543 spin_unlock_irq(&ic->endio_wait.lock);
2547 static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
2548 unsigned n_sections, unsigned char commit_seq)
2555 for (n = 0; n < n_sections; n++) {
2556 i = start_section + n;
2557 wraparound_section(ic, &i);
2558 for (j = 0; j < ic->journal_section_sectors; j++) {
2559 struct journal_sector *js = access_journal(ic, i, j);
2560 memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
2561 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2563 for (j = 0; j < ic->journal_section_entries; j++) {
2564 struct journal_entry *je = access_journal_entry(ic, i, j);
2565 journal_entry_set_unused(je);
2569 write_journal(ic, start_section, n_sections);
2572 static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2575 for (k = 0; k < N_COMMIT_IDS; k++) {
2576 if (dm_integrity_commit_id(ic, i, j, k) == id)
2577 return k;
2579 dm_integrity_io_error(ic, "journal commit id", -EIO);
2580 return -1;
2583 static void replay_journal(struct dm_integrity_c *ic)
2586 bool used_commit_ids[N_COMMIT_IDS];
2587 unsigned max_commit_id_sections[N_COMMIT_IDS];
2588 unsigned write_start, write_sections;
2589 unsigned continue_section;
2591 unsigned char unused, last_used, want_commit_seq;
2593 if (ic->mode == 'R')
2594 return;
2596 if (ic->journal_uptodate)
2597 return;
2602 if (!ic->just_formatted) {
2603 DEBUG_print("reading journal\n");
2604 rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
2606 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2607 if (ic->journal_io) {
2608 struct journal_completion crypt_comp;
2610 init_completion(&crypt_comp.comp);
2611 crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2612 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2613 wait_for_completion(&crypt_comp.comp);
2615 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2618 if (dm_integrity_failed(ic))
2621 journal_empty = true;
2622 memset(used_commit_ids, 0, sizeof used_commit_ids);
2623 memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
2624 for (i = 0; i < ic->journal_sections; i++) {
2625 for (j = 0; j < ic->journal_section_sectors; j++) {
2627 struct journal_sector *js = access_journal(ic, i, j);
2628 k = find_commit_seq(ic, i, j, js->commit_id);
2629 if (k < 0)
2630 goto clear_journal;
2631 used_commit_ids[k] = true;
2632 max_commit_id_sections[k] = i;
2634 if (journal_empty) {
2635 for (j = 0; j < ic->journal_section_entries; j++) {
2636 struct journal_entry *je = access_journal_entry(ic, i, j);
2637 if (!journal_entry_is_unused(je)) {
2638 journal_empty = false;
2639 break;
2645 if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2646 unused = N_COMMIT_IDS - 1;
2647 while (unused && !used_commit_ids[unused - 1])
2648 unused--;
2649 } else {
2650 for (unused = 0; unused < N_COMMIT_IDS; unused++)
2651 if (!used_commit_ids[unused])
2652 break;
2653 if (unused == N_COMMIT_IDS) {
2654 dm_integrity_io_error(ic, "journal commit ids", -EIO);
2655 goto clear_journal;
2658 DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
2659 unused, used_commit_ids[0], used_commit_ids[1],
2660 used_commit_ids[2], used_commit_ids[3]);
2662 last_used = prev_commit_seq(unused);
2663 want_commit_seq = prev_commit_seq(last_used);
2665 if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
2666 journal_empty = true;
2668 write_start = max_commit_id_sections[last_used] + 1;
2669 if (unlikely(write_start >= ic->journal_sections))
2670 want_commit_seq = next_commit_seq(want_commit_seq);
2671 wraparound_section(ic, &write_start);
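/*
 * 'unused' is a commit seq absent from the media, so last_used =
 * prev(unused) is the seq of the newest pass over the ring; replay
 * resumes just after the last section written with it and expects the
 * older want_commit_seq for the remainder of the ring.
 */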
2673 i = write_start;
2674 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2675 for (j = 0; j < ic->journal_section_sectors; j++) {
2676 struct journal_sector *js = access_journal(ic, i, j);
2678 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2680 * This could be caused by a crash during writing.
2681 * We won't replay the inconsistent part of the journal.
2684 DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
2685 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2686 goto brk;
2688 i++;
2690 if (unlikely(i >= ic->journal_sections))
2691 want_commit_seq = next_commit_seq(want_commit_seq);
2692 wraparound_section(ic, &i);
2694 brk:
2696 if (!journal_empty) {
2697 DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
2698 write_sections, write_start, want_commit_seq);
2699 do_journal_write(ic, write_start, write_sections, true);
2702 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
2703 continue_section = write_start;
2704 ic->commit_seq = want_commit_seq;
2705 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
2708 unsigned char erase_seq;
2709 clear_journal:
2710 DEBUG_print("clearing journal\n");
2712 erase_seq = prev_commit_seq(prev_commit_seq(last_used));
2713 s = write_start;
2714 init_journal(ic, s, 1, erase_seq);
2715 s++;
2716 wraparound_section(ic, &s);
2717 if (ic->journal_sections >= 2) {
2718 init_journal(ic, s, ic->journal_sections - 2, erase_seq);
2719 s += ic->journal_sections - 2;
2720 wraparound_section(ic, &s);
2721 init_journal(ic, s, 1, erase_seq);
2724 continue_section = 0;
2725 ic->commit_seq = next_commit_seq(erase_seq);
2728 ic->committed_section = continue_section;
2729 ic->n_committed_sections = 0;
2731 ic->uncommitted_section = continue_section;
2732 ic->n_uncommitted_sections = 0;
2734 ic->free_section = continue_section;
2735 ic->free_section_entry = 0;
2736 ic->free_sectors = ic->journal_entries;
2738 ic->journal_tree_root = RB_ROOT;
2739 for (i = 0; i < ic->journal_entries; i++)
2740 init_journal_node(&ic->journal_tree[i]);
2743 static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
2745 DEBUG_print("dm_integrity_enter_synchronous_mode\n");
2747 if (ic->mode == 'B') {
2748 ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
2749 ic->synchronous_mode = 1;
2751 cancel_delayed_work_sync(&ic->bitmap_flush_work);
2752 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2753 flush_workqueue(ic->commit_wq);
2757 static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
2759 struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
2761 DEBUG_print("dm_integrity_reboot\n");
2763 dm_integrity_enter_synchronous_mode(ic);
2768 static void dm_integrity_postsuspend(struct dm_target *ti)
2770 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2773 WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
2775 del_timer_sync(&ic->autocommit_timer);
2777 WRITE_ONCE(ic->suspending, 1);
2779 if (ic->recalc_wq)
2780 drain_workqueue(ic->recalc_wq);
2782 if (ic->mode == 'B')
2783 cancel_delayed_work_sync(&ic->bitmap_flush_work);
2785 queue_work(ic->commit_wq, &ic->commit_work);
2786 drain_workqueue(ic->commit_wq);
2788 if (ic->mode == 'J') {
2790 queue_work(ic->writer_wq, &ic->writer_work);
2791 drain_workqueue(ic->writer_wq);
2792 dm_integrity_flush_buffers(ic);
2795 if (ic->mode == 'B') {
2796 dm_integrity_flush_buffers(ic);
2797 #if 1
2798 /* set to 0 to test bitmap replay code */
2799 init_journal(ic, 0, ic->journal_sections, 0);
2800 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
2801 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2802 if (unlikely(r))
2803 dm_integrity_io_error(ic, "writing superblock", r);
2804 #endif
2807 WRITE_ONCE(ic->suspending, 0);
2809 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
2811 ic->journal_uptodate = true;
2814 static void dm_integrity_resume(struct dm_target *ti)
2816 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2818 DEBUG_print("resume\n");
2820 if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
2821 DEBUG_print("resume dirty_bitmap\n");
2822 rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
2823 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2824 if (ic->mode == 'B') {
2825 if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
2826 block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
2827 block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
2828 if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
2829 BITMAP_OP_TEST_ALL_CLEAR)) {
2830 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
2831 ic->sb->recalc_sector = cpu_to_le64(0);
2834 DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
2835 ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
2836 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
2837 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
2838 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
2839 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
2840 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2841 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2842 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
2843 ic->sb->recalc_sector = cpu_to_le64(0);
2846 if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
2847 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
2848 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
2849 ic->sb->recalc_sector = cpu_to_le64(0);
2851 init_journal(ic, 0, ic->journal_sections, 0);
2852 replay_journal(ic);
2853 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
2855 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2856 if (unlikely(r))
2857 dm_integrity_io_error(ic, "writing superblock", r);
2858 } else {
2859 replay_journal(ic);
2860 if (ic->mode == 'B') {
2862 ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
2863 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
2864 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2865 if (unlikely(r))
2866 dm_integrity_io_error(ic, "writing superblock", r);
2868 mode = ic->recalculate_flag ? BITMAP_OP_SET : BITMAP_OP_CLEAR;
2869 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, mode);
2870 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, mode);
2871 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, mode);
2872 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2873 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2877 DEBUG_print("testing recalc: %x\n", ic->sb->flags);
2878 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2879 __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
2880 DEBUG_print("recalc pos: %lx / %lx\n", (long)recalc_pos, ic->provided_data_sectors);
2881 if (recalc_pos < ic->provided_data_sectors) {
2882 queue_work(ic->recalc_wq, &ic->recalc_work);
2883 } else if (recalc_pos > ic->provided_data_sectors) {
2884 ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
2885 recalc_write_super(ic);
2889 ic->reboot_notifier.notifier_call = dm_integrity_reboot;
2890 ic->reboot_notifier.next = NULL;
2891 ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */
2892 WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
2894 #if 0
2895 /* set to 1 to stress test synchronous mode */
2896 dm_integrity_enter_synchronous_mode(ic);
2897 #endif
2900 static void dm_integrity_status(struct dm_target *ti, status_type_t type,
2901 unsigned status_flags, char *result, unsigned maxlen)
2903 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2908 case STATUSTYPE_INFO:
2909 DMEMIT("%llu %llu",
2910 (unsigned long long)atomic64_read(&ic->number_of_mismatches),
2911 (unsigned long long)ic->provided_data_sectors);
2912 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
2913 DMEMIT(" %llu", (unsigned long long)le64_to_cpu(ic->sb->recalc_sector));
2918 case STATUSTYPE_TABLE: {
2919 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
2920 watermark_percentage += ic->journal_entries / 2;
2921 do_div(watermark_percentage, ic->journal_entries);
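/*
 * This inverts the ctr calculation free_sectors_threshold =
 * journal_entries * (100 - journal_watermark) / 100, rounding to
 * nearest: e.g. 1000 entries and a threshold of 500 report back
 * journal_watermark:50.
 */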
2922 arg_count = 3;
2923 arg_count += !!ic->meta_dev;
2924 arg_count += ic->sectors_per_block != 1;
2925 arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
2926 arg_count += ic->mode == 'J';
2927 arg_count += ic->mode == 'J';
2928 arg_count += ic->mode == 'B';
2929 arg_count += ic->mode == 'B';
2930 arg_count += !!ic->internal_hash_alg.alg_string;
2931 arg_count += !!ic->journal_crypt_alg.alg_string;
2932 arg_count += !!ic->journal_mac_alg.alg_string;
2933 DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
2934 ic->tag_size, ic->mode, arg_count);
2935 if (ic->meta_dev)
2936 DMEMIT(" meta_device:%s", ic->meta_dev->name);
2937 if (ic->sectors_per_block != 1)
2938 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
2939 if (ic->recalculate_flag)
2940 DMEMIT(" recalculate");
2941 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
2942 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
2943 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
2944 if (ic->mode == 'J') {
2945 DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
2946 DMEMIT(" commit_time:%u", ic->autocommit_msec);
2948 if (ic->mode == 'B') {
2949 DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
2950 DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
2953 #define EMIT_ALG(a, n) \
2955 if (ic->a.alg_string) { \
2956 DMEMIT(" %s:%s", n, ic->a.alg_string); \
2957 if (ic->a.key_string) \
2958 DMEMIT(":%s", ic->a.key_string);\
2961 EMIT_ALG(internal_hash_alg, "internal_hash");
2962 EMIT_ALG(journal_crypt_alg, "journal_crypt");
2963 EMIT_ALG(journal_mac_alg, "journal_mac");
2969 static int dm_integrity_iterate_devices(struct dm_target *ti,
2970 iterate_devices_callout_fn fn, void *data)
2972 struct dm_integrity_c *ic = ti->private;
2974 if (!ic->meta_dev)
2975 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
2976 else
2977 return fn(ti, ic->dev, 0, ti->len, data);
2980 static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
2982 struct dm_integrity_c *ic = ti->private;
2984 if (ic->sectors_per_block > 1) {
2985 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
2986 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
2987 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
2991 static void calculate_journal_section_size(struct dm_integrity_c *ic)
2993 unsigned sector_space = JOURNAL_SECTOR_DATA;
2995 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
2996 ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
2997 JOURNAL_ENTRY_ROUNDUP);
2999 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3000 sector_space -= JOURNAL_MAC_PER_SECTOR;
3001 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3002 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
3003 ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
3004 ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
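/*
 * Worked example (assuming 512-byte sectors, a 4-byte tag such as
 * crc32c, one sector per block and a journal MAC): sector_space =
 * 504 - 8 = 496 bytes, journal_entry_size = roundup(16 + 4, 8) = 24,
 * so 20 entries fit per sector, 160 per section, and each section
 * occupies 160 data sectors + 8 metadata sectors = 168 sectors.
 */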
3007 static int calculate_device_limits(struct dm_integrity_c *ic)
3009 __u64 initial_sectors;
3011 calculate_journal_section_size(ic);
3012 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
3013 if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
3015 ic->initial_sectors = initial_sectors;
3017 if (!ic->meta_dev) {
3018 sector_t last_sector, last_area, last_offset;
3020 ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3021 (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
3022 if (!(ic->metadata_run & (ic->metadata_run - 1)))
3023 ic->log2_metadata_run = __ffs(ic->metadata_run);
3025 ic->log2_metadata_run = -1;
3027 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3028 last_sector = get_data_sector(ic, last_area, last_offset);
3029 if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3032 __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
3033 meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3034 >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3035 meta_size <<= ic->log2_buffer_sectors;
3036 if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3037 ic->initial_sectors + meta_size > ic->meta_device_sectors)
3039 ic->metadata_run = 1;
3040 ic->log2_metadata_run = 0;
3046 static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
3048 unsigned journal_sections;
3051 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
3052 memcpy(ic->sb->magic, SB_MAGIC, 8);
3053 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
3054 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
3055 if (ic->journal_mac_alg.alg_string)
3056 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3058 calculate_journal_section_size(ic);
3059 journal_sections = journal_sectors / ic->journal_section_sectors;
3060 if (!journal_sections)
3061 journal_sections = 1;
3063 if (!ic->meta_dev) {
3064 ic->sb->journal_sections = cpu_to_le32(journal_sections);
3065 if (!interleave_sectors)
3066 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3067 ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3068 ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3069 ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3071 ic->provided_data_sectors = 0;
3072 for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3073 __u64 prev_data_sectors = ic->provided_data_sectors;
3075 ic->provided_data_sectors |= (sector_t)1 << test_bit;
3076 if (calculate_device_limits(ic))
3077 ic->provided_data_sectors = prev_data_sectors;
3079 if (!ic->provided_data_sectors)
3082 ic->sb->log2_interleave_sectors = 0;
3083 ic->provided_data_sectors = ic->data_device_sectors;
3084 ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
3086 try_smaller_buffer:
3087 ic->sb->journal_sections = cpu_to_le32(0);
3088 for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
3089 __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3090 __u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
3091 if (test_journal_sections > journal_sections)
3093 ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3094 if (calculate_device_limits(ic))
3095 ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
3098 if (!le32_to_cpu(ic->sb->journal_sections)) {
3099 if (ic->log2_buffer_sectors > 3) {
3100 ic->log2_buffer_sectors--;
3101 goto try_smaller_buffer;
3107 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3114 static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
3116 struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
3117 struct blk_integrity bi;
3119 memset(&bi, 0, sizeof(bi));
3120 bi.profile = &dm_integrity_profile;
3121 bi.tuple_size = ic->tag_size;
3122 bi.tag_size = bi.tuple_size;
3123 bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
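/*
 * interval_exp is the log2 of the protected block size in bytes:
 * 9 for the default 512-byte blocks, 12 with block_size:4096.
 */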
3125 blk_integrity_register(disk, &bi);
3126 blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
3129 static void dm_integrity_free_page_list(struct page_list *pl)
3133 if (!pl)
3134 return;
3135 for (i = 0; pl[i].page; i++)
3136 __free_page(pl[i].page);
3137 kvfree(pl);
3140 static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
3142 struct page_list *pl;
3145 pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
3149 for (i = 0; i < n_pages; i++) {
3150 pl[i].page = alloc_page(GFP_KERNEL);
3151 if (!pl[i].page) {
3152 dm_integrity_free_page_list(pl);
3153 return NULL;
3155 if (i)
3156 pl[i - 1].next = &pl[i];
3164 static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
3167 for (i = 0; i < ic->journal_sections; i++)
3168 kvfree(sl[i]);
3169 kvfree(sl);
3172 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
3173 struct page_list *pl)
3175 struct scatterlist **sl;
3178 sl = kvmalloc_array(ic->journal_sections,
3179 sizeof(struct scatterlist *),
3180 GFP_KERNEL | __GFP_ZERO);
3181 if (!sl)
3182 return NULL;
3184 for (i = 0; i < ic->journal_sections; i++) {
3185 struct scatterlist *s;
3186 unsigned start_index, start_offset;
3187 unsigned end_index, end_offset;
3191 page_list_location(ic, i, 0, &start_index, &start_offset);
3192 page_list_location(ic, i, ic->journal_section_sectors - 1,
3193 &end_index, &end_offset);
3195 n_pages = (end_index - start_index + 1);
3197 s = kvmalloc_array(n_pages, sizeof(struct scatterlist), GFP_KERNEL);
3199 if (!s) {
3200 dm_integrity_free_journal_scatterlist(ic, sl);
3201 return NULL;
3204 sg_init_table(s, n_pages);
3205 for (idx = start_index; idx <= end_index; idx++) {
3206 char *va = lowmem_page_address(pl[idx].page);
3207 unsigned start = 0, end = PAGE_SIZE;
3208 if (idx == start_index)
3209 start = start_offset;
3210 if (idx == end_index)
3211 end = end_offset + (1 << SECTOR_SHIFT);
3212 sg_set_buf(&s[idx - start_index], va + start, end - start);
3221 static void free_alg(struct alg_spec *a)
3223 kzfree(a->alg_string);
3224 kzfree(a->key);
3225 memset(a, 0, sizeof *a);
3228 static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
3234 a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
3235 if (!a->alg_string)
3236 goto nomem;
3238 k = strchr(a->alg_string, ':');
3239 if (k) {
3240 *k = 0;
3241 a->key_string = k + 1;
3242 if (strlen(a->key_string) & 1)
3243 goto inval;
3245 a->key_size = strlen(a->key_string) / 2;
3246 a->key = kmalloc(a->key_size, GFP_KERNEL);
3247 if (!a->key)
3248 goto nomem;
3249 if (hex2bin(a->key, a->key_string, a->key_size))
3250 goto inval;
3252 return 0;
3254 inval:
3255 *error = error_inval;
3256 return -EINVAL;
3257 nomem:
3258 *error = "Out of memory for an argument";
3259 return -ENOMEM;
3262 static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
3263 char *error_alg, char *error_key)
3267 if (a->alg_string) {
3268 *hash = crypto_alloc_shash(a->alg_string, 0, 0);
3269 if (IS_ERR(*hash)) {
3270 *error = error_alg;
3271 r = PTR_ERR(*hash);
3272 *hash = NULL;
3273 return r;
3276 if (a->key) {
3277 r = crypto_shash_setkey(*hash, a->key, a->key_size);
3278 if (r) {
3279 *error = error_key;
3280 return r;
3282 } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
3283 *error = error_key;
3284 return -ENOKEY;
3291 static int create_journal(struct dm_integrity_c *ic, char **error)
3295 __u64 journal_pages, journal_desc_size, journal_tree_size;
3296 unsigned char *crypt_data = NULL, *crypt_iv = NULL;
3297 struct skcipher_request *req = NULL;
3299 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
3300 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
3301 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
3302 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
3304 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
3305 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
3306 journal_desc_size = journal_pages * sizeof(struct page_list);
3307 if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
3308 *error = "Journal doesn't fit into memory";
3312 ic->journal_pages = journal_pages;
3314 ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
3316 *error = "Could not allocate memory for journal";
3320 if (ic->journal_crypt_alg.alg_string) {
3321 unsigned ivsize, blocksize;
3322 struct journal_completion comp;
3325 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
3326 if (IS_ERR(ic->journal_crypt)) {
3327 *error = "Invalid journal cipher";
3328 r = PTR_ERR(ic->journal_crypt);
3329 ic->journal_crypt = NULL;
3332 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
3333 blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
3335 if (ic->journal_crypt_alg.key) {
3336 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
3337 ic->journal_crypt_alg.key_size);
3339 *error = "Error setting encryption key";
3343 DEBUG_print("cipher %s, block size %u iv size %u\n",
3344 ic->journal_crypt_alg.alg_string, blocksize, ivsize);
3346 ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
3347 if (!ic->journal_io) {
3348 *error = "Could not allocate memory for journal io";
3353 if (blocksize == 1) {
3354 struct scatterlist *sg;
3356 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3358 *error = "Could not allocate crypt request";
3363 crypt_iv = kzalloc(ivsize, GFP_KERNEL);
3365 *error = "Could not allocate iv";
3370 ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
3371 if (!ic->journal_xor) {
3372 *error = "Could not allocate memory for journal xor";
3377 sg = kvmalloc_array(ic->journal_pages + 1,
3378 sizeof(struct scatterlist),
3381 *error = "Unable to allocate sg list";
3385 sg_init_table(sg, ic->journal_pages + 1);
3386 for (i = 0; i < ic->journal_pages; i++) {
3387 char *va = lowmem_page_address(ic->journal_xor[i].page);
3389 sg_set_buf(&sg[i], va, PAGE_SIZE);
3391 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
3393 skcipher_request_set_crypt(req, sg, sg,
3394 PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
3395 init_completion(&comp.comp);
3396 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3397 if (do_crypt(true, req, &comp))
3398 wait_for_completion(&comp.comp);
3400 r = dm_integrity_failed(ic);
3402 *error = "Unable to encrypt journal";
3405 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
3407 crypto_free_skcipher(ic->journal_crypt);
3408 ic->journal_crypt = NULL;
3410 unsigned crypt_len = roundup(ivsize, blocksize);
3412 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3414 *error = "Could not allocate crypt request";
3419 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
3421 *error = "Could not allocate iv";
3426 crypt_data = kmalloc(crypt_len, GFP_KERNEL);
3428 *error = "Unable to allocate crypt data";
3433 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
3434 if (!ic->journal_scatterlist) {
3435 *error = "Unable to allocate sg list";
3439 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
3440 if (!ic->journal_io_scatterlist) {
3441 *error = "Unable to allocate sg list";
3445 ic->sk_requests = kvmalloc_array(ic->journal_sections,
3446 sizeof(struct skcipher_request *),
3447 GFP_KERNEL | __GFP_ZERO);
3448 if (!ic->sk_requests) {
3449 *error = "Unable to allocate sk requests";
3453 for (i = 0; i < ic->journal_sections; i++) {
3454 struct scatterlist sg;
3455 struct skcipher_request *section_req;
3456 __u32 section_le = cpu_to_le32(i);
3458 memset(crypt_iv, 0x00, ivsize);
3459 memset(crypt_data, 0x00, crypt_len);
3460 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
3462 sg_init_one(&sg, crypt_data, crypt_len);
3463 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
3464 init_completion(&comp.comp);
3465 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3466 if (do_crypt(true, req, &comp))
3467 wait_for_completion(&comp.comp);
3469 r = dm_integrity_failed(ic);
3471 *error = "Unable to generate iv";
3475 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3477 *error = "Unable to allocate crypt request";
3481 section_req->iv = kmalloc_array(ivsize, 2, GFP_KERNEL);
3483 if (!section_req->iv) {
3484 skcipher_request_free(section_req);
3485 *error = "Unable to allocate iv";
3489 memcpy(section_req->iv + ivsize, crypt_data, ivsize);
3490 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
3491 ic->sk_requests[i] = section_req;
3492 DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
3497 for (i = 0; i < N_COMMIT_IDS; i++) {
3500 for (j = 0; j < i; j++) {
3501 if (ic->commit_ids[j] == ic->commit_ids[i]) {
3502 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
3503 goto retest_commit_id;
3506 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
3509 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
3510 if (journal_tree_size > ULONG_MAX) {
3511 *error = "Journal doesn't fit into memory";
3515 ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
3516 if (!ic->journal_tree) {
3517 *error = "Could not allocate memory for journal tree";
3523 skcipher_request_free(req);
3529 * Construct an integrity mapping
3533 * offset from the start of the device
3535 * D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
3536 * number of optional arguments
3537 * optional arguments:
3539 * interleave_sectors
3546 * bitmap_flush_interval
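*
* For example (illustrative), a journaled mapping with 4-byte crc32c
* tags could be loaded with a table line such as:
* 0 1000000 integrity /dev/sdb 0 4 J 1 internal_hash:crc32c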
3552 static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3554 struct dm_integrity_c *ic;
3557 unsigned extra_args;
3558 struct dm_arg_set as;
3559 static const struct dm_arg _args[] = {
3560 {0, 9, "Invalid number of feature args"},
3562 unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
3563 bool should_write_sb;
3565 unsigned long long start;
3566 __s8 log2_sectors_per_bitmap_bit = -1;
3567 __s8 log2_blocks_per_bitmap_bit;
3568 __u64 bits_in_journal;
3569 __u64 n_bitmap_bits;
3571 #define DIRECT_ARGUMENTS 4
3573 if (argc <= DIRECT_ARGUMENTS) {
3574 ti->error = "Invalid argument count";
3575 return -EINVAL;
3578 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
3579 if (!ic) {
3580 ti->error = "Cannot allocate integrity context";
3581 return -ENOMEM;
3583 ti->private = ic;
3584 ti->per_io_data_size = sizeof(struct dm_integrity_io);
3586 ic->in_progress = RB_ROOT;
3587 INIT_LIST_HEAD(&ic->wait_list);
3588 init_waitqueue_head(&ic->endio_wait);
3589 bio_list_init(&ic->flush_bio_list);
3590 init_waitqueue_head(&ic->copy_to_journal_wait);
3591 init_completion(&ic->crypto_backoff);
3592 atomic64_set(&ic->number_of_mismatches, 0);
3593 ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
3595 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
3597 ti->error = "Device lookup failed";
3598 goto bad;
3601 if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
3602 ti->error = "Invalid starting offset";
3603 r = -EINVAL;
3604 goto bad;
3606 ic->start = start;
3608 if (strcmp(argv[2], "-")) {
3609 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
3610 ti->error = "Invalid tag size";
3616 if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
3617 !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
3618 ic->mode = argv[3][0];
3620 ti->error = "Invalid mode (expecting J, B, D, R)";
3625 journal_sectors = 0;
3626 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3627 buffer_sectors = DEFAULT_BUFFER_SECTORS;
3628 journal_watermark = DEFAULT_JOURNAL_WATERMARK;
3629 sync_msec = DEFAULT_SYNC_MSEC;
3630 ic->sectors_per_block = 1;
3632 as.argc = argc - DIRECT_ARGUMENTS;
3633 as.argv = argv + DIRECT_ARGUMENTS;
3634 r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
3638 while (extra_args--) {
3639 const char *opt_string;
3641 unsigned long long llval;
3642 opt_string = dm_shift_arg(&as);
3645 ti->error = "Not enough feature arguments";
3648 if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
3649 journal_sectors = val ? val : 1;
3650 else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
3651 interleave_sectors = val;
3652 else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
3653 buffer_sectors = val;
3654 else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
3655 journal_watermark = val;
3656 else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
3657 sync_msec = val;
3658 else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
3660 dm_put_device(ti, ic->meta_dev);
3661 ic->meta_dev = NULL;
3663 r = dm_get_device(ti, strchr(opt_string, ':') + 1,
3664 dm_table_get_mode(ti->table), &ic->meta_dev);
3666 ti->error = "Device lookup failed";
3669 } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
3670 if (val < 1 << SECTOR_SHIFT ||
3671 val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
3672 (val & (val - 1))) {
3673 r = -EINVAL;
3674 ti->error = "Invalid block_size argument";
3675 goto bad;
3677 ic->sectors_per_block = val >> SECTOR_SHIFT;
3678 } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
3679 log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
3680 } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
3681 if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
3682 r = -EINVAL;
3683 ti->error = "Invalid bitmap_flush_interval argument";
3684 goto bad;
3685 ic->bitmap_flush_interval = msecs_to_jiffies(val);
3686 } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
3687 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
3688 "Invalid internal_hash argument");
3691 } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
3692 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
3693 "Invalid journal_crypt argument");
3696 } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
3697 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
3698 "Invalid journal_mac argument");
3701 } else if (!strcmp(opt_string, "recalculate")) {
3702 ic->recalculate_flag = true;
3705 ti->error = "Invalid argument";
3710 ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
3712 ic->meta_device_sectors = ic->data_device_sectors;
3714 ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
3716 if (!journal_sectors) {
3717 journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
3718 ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
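/*
 * The default journal is 1/128 of the data device, capped at 131072
 * sectors (64 MiB): e.g. a 1 TiB data device (2^31 sectors) would ask
 * for 2^24 sectors and get the 64 MiB cap.
 */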
3721 if (!buffer_sectors)
3723 ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
3725 r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
3726 "Invalid internal hash", "Error setting internal hash key");
3730 r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
3731 "Invalid journal mac", "Error setting journal mac key");
3735 if (!ic->tag_size) {
3736 if (!ic->internal_hash) {
3737 ti->error = "Unknown tag size";
3741 ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
3743 if (ic->tag_size > MAX_TAG_SIZE) {
3744 ti->error = "Too big tag size";
3748 if (!(ic->tag_size & (ic->tag_size - 1)))
3749 ic->log2_tag_size = __ffs(ic->tag_size);
3751 ic->log2_tag_size = -1;
3753 if (ic->mode == 'B' && !ic->internal_hash) {
3754 r = -EINVAL;
3755 ti->error = "Bitmap mode can only be used with internal hash";
3756 goto bad;
3759 ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
3760 ic->autocommit_msec = sync_msec;
3761 timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
3763 ic->io = dm_io_client_create();
3764 if (IS_ERR(ic->io)) {
3765 r = PTR_ERR(ic->io);
3767 ti->error = "Cannot allocate dm io";
3771 r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
3773 ti->error = "Cannot allocate mempool";
3777 ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
3778 WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
3779 if (!ic->metadata_wq) {
3780 ti->error = "Cannot allocate workqueue";
3786 * If this workqueue were percpu, it would cause bio reordering
3787 * and reduced performance.
3789 ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
3791 ti->error = "Cannot allocate workqueue";
3796 ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
3797 if (!ic->commit_wq) {
3798 ti->error = "Cannot allocate workqueue";
3802 INIT_WORK(&ic->commit_work, integrity_commit);
3804 if (ic->mode == 'J' || ic->mode == 'B') {
3805 ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
3806 if (!ic->writer_wq) {
3807 ti->error = "Cannot allocate workqueue";
3811 INIT_WORK(&ic->writer_work, integrity_writer);
3814 ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
3817 ti->error = "Cannot allocate superblock area";
3821 r = sync_rw_sb(ic, REQ_OP_READ, 0);
3823 ti->error = "Error reading superblock";
3826 should_write_sb = false;
3827 if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
3828 if (ic->mode != 'R') {
3829 if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
3831 ti->error = "The device is not initialized";
3836 r = initialize_superblock(ic, journal_sectors, interleave_sectors);
3838 ti->error = "Could not initialize superblock";
3841 if (ic->mode != 'R')
3842 should_write_sb = true;
3845 if (!ic->sb->version || ic->sb->version > SB_VERSION_3) {
3847 ti->error = "Unknown version";
3850 if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
3852 ti->error = "Tag size doesn't match the information in superblock";
3855 if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
3857 ti->error = "Block size doesn't match the information in superblock";
3860 if (!le32_to_cpu(ic->sb->journal_sections)) {
3862 ti->error = "Corrupted superblock, journal_sections is 0";
3865 /* make sure that ti->max_io_len doesn't overflow */
3866 if (!ic->meta_dev) {
3867 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
3868 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
3870 ti->error = "Invalid interleave_sectors in the superblock";
3874 if (ic->sb->log2_interleave_sectors) {
3876 ti->error = "Invalid interleave_sectors in the superblock";
3880 ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
3881 if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) {
3882 /* test for overflow */
3884 ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors";
3887 if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
3889 ti->error = "Journal mac mismatch";
3893 try_smaller_buffer:
3894 r = calculate_device_limits(ic);
3897 if (ic->log2_buffer_sectors > 3) {
3898 ic->log2_buffer_sectors--;
3899 goto try_smaller_buffer;
3902 ti->error = "The device is too small";
3906 if (log2_sectors_per_bitmap_bit < 0)
3907 log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
3908 if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
3909 log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
3911 bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
3912 if (bits_in_journal > UINT_MAX)
3913 bits_in_journal = UINT_MAX;
3914 while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
3915 log2_sectors_per_bitmap_bit++;
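/*
 * Grow the bitmap granularity until one bit per region fits in the
 * journal area: e.g. 2^21 data sectors (1 GiB) at the default 2^15
 * sectors per bit need only 64 bits, so the default granularity
 * usually stands.
 */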
3917 log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
3918 ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
3919 if (should_write_sb) {
3920 ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
3922 n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
3923 + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
3924 ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
3927 ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
3929 if (ti->len > ic->provided_data_sectors) {
3931 ti->error = "Not enough provided sectors for requested mapping size";
3936 threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
3938 do_div(threshold, 100);
3939 ic->free_sectors_threshold = threshold;
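/*
 * E.g. the default journal_watermark of 50 with 1000 journal entries
 * gives a threshold of 500: once free journal entries fall to that
 * level, integrity_commit queues the writer to free journal space.
 */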
3941 DEBUG_print("initialized:\n");
3942 DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
3943 DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size);
3944 DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
3945 DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
3946 DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
3947 DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
3948 DEBUG_print(" journal_entries %u\n", ic->journal_entries);
3949 DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
3950 DEBUG_print(" data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
3951 DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
3952 DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
3953 DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
3954 DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
3955 (unsigned long long)ic->provided_data_sectors);
3956 DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
3957 DEBUG_print(" bits_in_journal %llu\n", (unsigned long long)bits_in_journal);
3959 if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
3960 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3961 ic->sb->recalc_sector = cpu_to_le64(0);
3964 if (ic->internal_hash) {
3965 ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
3966 if (!ic->recalc_wq) {
3967 ti->error = "Cannot allocate workqueue";
3971 INIT_WORK(&ic->recalc_work, integrity_recalc);
3972 ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
3973 if (!ic->recalc_buffer) {
3974 ti->error = "Cannot allocate buffer for recalculating";
3978 ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
3979 ic->tag_size, GFP_KERNEL);
3980 if (!ic->recalc_tags) {
3981 ti->error = "Cannot allocate tags for recalculating";
3987 ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
3988 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
3989 if (IS_ERR(ic->bufio)) {
3990 r = PTR_ERR(ic->bufio);
3991 ti->error = "Cannot initialize dm-bufio";
3995 dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
3997 if (ic->mode != 'R') {
3998 r = create_journal(ic, &ti->error);
4004 if (ic->mode == 'B') {
4006 unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
4008 ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4009 if (!ic->recalc_bitmap) {
4013 ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4014 if (!ic->may_write_bitmap) {
4018 ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
4023 INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
4024 for (i = 0; i < ic->n_bitmap_blocks; i++) {
4025 struct bitmap_block_status *bbs = &ic->bbs[i];
4026 unsigned sector, pl_index, pl_offset;
4028 INIT_WORK(&bbs->work, bitmap_block_work);
4031 bio_list_init(&bbs->bio_queue);
4032 spin_lock_init(&bbs->bio_queue_lock);
4034 sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
4035 pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
4036 pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
4038 bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
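/*
 * With 4 KiB pages each BITMAP_BLOCK_SIZE block maps to exactly one
 * journal page: block i starts at sector i * 8, so pl_index == i and
 * pl_offset == 0.
 */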
4042 if (should_write_sb) {
4045 init_journal(ic, 0, ic->journal_sections, 0);
4046 r = dm_integrity_failed(ic);
4048 ti->error = "Error initializing journal";
4051 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
4053 ti->error = "Error initializing superblock";
4056 ic->just_formatted = true;
4059 if (!ic->meta_dev) {
4060 r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
4064 if (ic->mode == 'B') {
4065 unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
4066 if (!max_io_len)
4067 max_io_len = 1U << 31;
4068 DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
4069 if (!ti->max_io_len || ti->max_io_len > max_io_len) {
4070 r = dm_set_target_max_io_len(ti, max_io_len);
4076 if (!ic->internal_hash)
4077 dm_integrity_set(ti, ic);
4079 ti->num_flush_bios = 1;
4080 ti->flush_supported = true;
4085 dm_integrity_dtr(ti);
4089 static void dm_integrity_dtr(struct dm_target *ti)
4091 struct dm_integrity_c *ic = ti->private;
4093 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
4094 BUG_ON(!list_empty(&ic->wait_list));
4096 if (ic->metadata_wq)
4097 destroy_workqueue(ic->metadata_wq);
4098 if (ic->wait_wq)
4099 destroy_workqueue(ic->wait_wq);
4100 if (ic->commit_wq)
4101 destroy_workqueue(ic->commit_wq);
4102 if (ic->writer_wq)
4103 destroy_workqueue(ic->writer_wq);
4104 if (ic->recalc_wq)
4105 destroy_workqueue(ic->recalc_wq);
4106 vfree(ic->recalc_buffer);
4107 kvfree(ic->recalc_tags);
4109 if (ic->bufio)
4110 dm_bufio_client_destroy(ic->bufio);
4111 mempool_exit(&ic->journal_io_mempool);
4112 if (ic->io)
4113 dm_io_client_destroy(ic->io);
4114 if (ic->dev)
4115 dm_put_device(ti, ic->dev);
4116 if (ic->meta_dev)
4117 dm_put_device(ti, ic->meta_dev);
4118 dm_integrity_free_page_list(ic->journal);
4119 dm_integrity_free_page_list(ic->journal_io);
4120 dm_integrity_free_page_list(ic->journal_xor);
4121 dm_integrity_free_page_list(ic->recalc_bitmap);
4122 dm_integrity_free_page_list(ic->may_write_bitmap);
4123 if (ic->journal_scatterlist)
4124 dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
4125 if (ic->journal_io_scatterlist)
4126 dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
4127 if (ic->sk_requests) {
4130 for (i = 0; i < ic->journal_sections; i++) {
4131 struct skcipher_request *req = ic->sk_requests[i];
4134 skcipher_request_free(req);
4137 kvfree(ic->sk_requests);
4139 kvfree(ic->journal_tree);
4141 free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
4143 if (ic->internal_hash)
4144 crypto_free_shash(ic->internal_hash);
4145 free_alg(&ic->internal_hash_alg);
4147 if (ic->journal_crypt)
4148 crypto_free_skcipher(ic->journal_crypt);
4149 free_alg(&ic->journal_crypt_alg);
4151 if (ic->journal_mac)
4152 crypto_free_shash(ic->journal_mac);
4153 free_alg(&ic->journal_mac_alg);
4158 static struct target_type integrity_target = {
4159 .name = "integrity",
4160 .version = {1, 3, 0},
4161 .module = THIS_MODULE,
4162 .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
4163 .ctr = dm_integrity_ctr,
4164 .dtr = dm_integrity_dtr,
4165 .map = dm_integrity_map,
4166 .postsuspend = dm_integrity_postsuspend,
4167 .resume = dm_integrity_resume,
4168 .status = dm_integrity_status,
4169 .iterate_devices = dm_integrity_iterate_devices,
4170 .io_hints = dm_integrity_io_hints,
4173 static int __init dm_integrity_init(void)
4177 journal_io_cache = kmem_cache_create("integrity_journal_io",
4178 sizeof(struct journal_io), 0, 0, NULL);
4179 if (!journal_io_cache) {
4180 DMERR("can't allocate journal io cache");
4184 r = dm_register_target(&integrity_target);
4187 DMERR("register failed %d", r);
4192 static void __exit dm_integrity_exit(void)
4194 dm_unregister_target(&integrity_target);
4195 kmem_cache_destroy(journal_io_cache);
4198 module_init(dm_integrity_init);
4199 module_exit(dm_integrity_exit);
4201 MODULE_AUTHOR("Milan Broz");
4202 MODULE_AUTHOR("Mikulas Patocka");
4203 MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
4204 MODULE_LICENSE("GPL");