/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS	32768
#define DEFAULT_JOURNAL_SIZE_FACTOR	7
#define DEFAULT_BUFFER_SECTORS		128
#define DEFAULT_JOURNAL_WATERMARK	50
#define DEFAULT_SYNC_MSEC		10000
#define DEFAULT_MAX_JOURNAL_SECTORS	131072
#define MIN_LOG2_INTERLEAVE_SECTORS	3
#define MAX_LOG2_INTERLEAVE_SECTORS	31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16
/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
 */
//#define INTERNAL_VERIFY
#define SB_MAGIC			"integrt"
#define MAX_SECTORS_PER_BLOCK	8
	__u8 log2_interleave_sectors;
	__u16 integrity_tag_size;
	__u32 journal_sections;
	__u64 provided_data_sectors;	/* userspace uses this value */
	__u8 log2_sectors_per_block;
#define SB_FLAG_HAVE_JOURNAL_MAC	0x1

#define	JOURNAL_ENTRY_ROUNDUP		8

typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR		8
struct journal_entry {
	union {
		struct {
			__u32 sector_lo;
			__u32 sector_hi;
		} s;
		__u64 sector;
	} u;
	commit_id_t last_bytes[0];
};

#define journal_entry_tag(ic, je)	((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])
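/*
 * Layout note (implied by the surrounding code): each journal entry records
 * the target sector in je->u and, in last_bytes[], the last 8 bytes of every
 * data sector held in the journal, because those bytes are displaced on disk
 * by the per-sector commit_id (see struct journal_sector below). The
 * integrity tag follows last_bytes[], which is what journal_entry_tag()
 * computes.
 */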
#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)	do { smp_wmb(); ACCESS_ONCE((je)->u.sector) = cpu_to_le64(x); } while (0)
#define journal_entry_get_sector(je)	le64_to_cpu((je)->u.sector)
#elif defined(CONFIG_LBDAF)
#define journal_entry_set_sector(je, x)	do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32((x) >> 32); } while (0)
#define journal_entry_get_sector(je)	le64_to_cpu((je)->u.sector)
#else
#define journal_entry_set_sector(je, x)	do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32(0); } while (0)
#define journal_entry_get_sector(je)	le32_to_cpu((je)->u.s.sector_lo)
#endif
#define journal_entry_is_unused(je)	((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)	do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)	((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)
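/*
 * Ordering note: journal_entry_set_sector() writes sector_hi last, after an
 * smp_wmb(), so a concurrent reader that sees a valid sector_hi (neither the
 * "unused" -1 nor the "in progress" -2 marker) is guaranteed to also see the
 * complete sector number. This is why only sector_hi carries the state
 * markers tested by journal_entry_is_unused()/journal_entry_is_inprogress().
 */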
#define JOURNAL_BLOCK_SECTORS		8
#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)
struct journal_sector {
	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
	__u8 mac[JOURNAL_MAC_PER_SECTOR];
	commit_id_t commit_id;
};
#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS	8

#define N_COMMIT_IDS			4
static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}
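/*
 * The commit sequence number cycles through N_COMMIT_IDS values; with
 * N_COMMIT_IDS == 4, next_commit_seq steps 0 -> 1 -> 2 -> 3 -> 0 and
 * prev_commit_seq steps 0 -> 3 -> 2 -> 1 -> 0. Each journal section is
 * stamped with the commit id of the current sequence number, so replay can
 * tell how far a wrapped journal was written (see replay_journal()).
 */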
/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};
struct dm_integrity_c {
	mempool_t *journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned journal_pages;
	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned journal_sections;
	unsigned journal_entries;
	sector_t device_sectors;
	unsigned initial_sectors;
	unsigned metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;

	struct crypto_shash *internal_hash;
	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned committed_section;
	unsigned n_committed_sections;

	unsigned uncommitted_section;
	unsigned n_uncommitted_sections;

	unsigned free_section;
	unsigned char free_section_entry;
	unsigned free_sectors;

	unsigned free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool journal_uptodate;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;
};
struct dm_integrity_range {
	sector_t logical_sector;
	unsigned n_sectors;
	struct rb_node node;
};
struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	bool write;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned metadata_offset;

	atomic_t in_flight;
	int bi_error;

	struct completion *completion;

	struct block_device *orig_bi_bdev;
	bio_end_io_t *orig_bi_end_io;
	struct bio_integrity_payload *orig_bi_integrity;
	struct bvec_iter orig_bi_iter;
};
struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};
static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL	32
#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)	printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
	va_list args;
	va_start(args, msg);
	vprintk(msg, args);
	va_end(args);
	while (len) {
		pr_cont(" %02x", *bytes);
		bytes++;
		len--;
	}
	pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...)	__DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...)			do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
#endif
/*
 * DM Integrity profile, protection is performed a layer above (dm-crypt)
 */
static struct blk_integrity_profile dm_integrity_profile = {
	.name			= "DM-DIF-EXT-TAG",
};
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);
static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return ACCESS_ONCE(ic->failed);
}
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
					  unsigned j, unsigned char seq)
{
	/*
	 * Xor the number with section and sector, so that if a piece of
	 * journal is written at wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}
static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;

	*area = data_sector >> log2_interleave_sectors;
	*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
}
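/*
 * Illustrative example (numbers not taken from this file): with
 * log2_interleave_sectors == 15 (32768-sector areas), data_sector 100000
 * yields *area = 100000 >> 15 = 3 and *offset = 100000 - 3 * 32768 = 1696.
 */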
#define sector_to_block(ic, n)						\
do {									\
	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
	(n) >>= (ic)->sb->log2_sectors_per_block;			\
} while (0)
static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned *metadata_offset)
{
	__u64 ms;
	unsigned mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}
static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	return result;
}
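/*
 * On-disk layout implied by the two helpers above: the device begins with
 * the superblock and journal (ic->initial_sectors), followed by repeating
 * [metadata run][data area] pairs. get_data_sector() adds (area + 1)
 * metadata runs because the metadata run for an area precedes that area's
 * data.
 */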
static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}
static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	return dm_io(&io_req, 1, &io_loc, NULL);
}
static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		printk(KERN_CRIT "%s: invalid access at (%u,%u), limit (%u,%u)\n",
			function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}
static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			       unsigned *pl_index, unsigned *pl_offset)
{
	unsigned sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}
static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned section, unsigned offset, unsigned *n_sectors)
{
	unsigned pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}
static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}
static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	unsigned rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}
static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}
static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned j, size;

	desc->tfm = ic->journal_mac;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);
		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[size];
		r = crypto_shash_final(desc, digest);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;

err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}
static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
		}
	}
}
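/*
 * The section MAC is striped across the section's sectors: each of the
 * JOURNAL_BLOCK_SECTORS sectors carries JOURNAL_MAC_PER_SECTOR (8) bytes of
 * the JOURNAL_MAC_SIZE (64-byte) result in its js->mac field, written on
 * commit and verified on replay.
 */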
static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;
	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}
static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			unsigned n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned dummy;
			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			if (!n_sections)
				break;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	async_tx_issue_pending_all();
}
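/*
 * xor_journal() implements journal "encryption" as an XOR with a
 * pre-generated keystream held in ic->journal_xor, offloaded page by page to
 * the async_tx XOR engine; comp->in_flight is pre-charged with the number of
 * pages so complete_journal_op() fires only after the last async_xor()
 * callback has run.
 */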
static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
	struct journal_completion *comp = req->data;
	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}
static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}
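/*
 * do_crypt() returns true if the request completed (or will complete)
 * asynchronously: -EINPROGRESS means the callback will run later, and -EBUSY
 * means the request was backlogged, so we wait on crypto_backoff (signalled
 * by complete_journal_encrypt()) before treating it as queued.
 */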
static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			  unsigned n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}
static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			    unsigned n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}
static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;
	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}
static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
		       unsigned n_sections, struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned sector, n_sectors, pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}
static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned i;

	io_comp.ic = ic;
	io_comp.comp = COMPLETION_INITIALIZER_ONSTACK(io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, commit_sections, &io_comp);
	} else {
		unsigned to_end;
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				crypt_comp_2.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}
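/*
 * When the committed range wraps past the end of the journal, write_journal()
 * issues two dm-io requests (hence io_comp.in_flight is initialized to 2)
 * and, if the journal is encrypted, tries to overlap the second range's
 * encryption with the first range's write.
 */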
static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = REQ_OP_WRITE;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = ic->start + target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}
static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
			n = &range->node.rb_left;
		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
			n = &range->node.rb_right;
		} else {
			return false;
		}
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}
static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	wake_up_locked(&ic->endio_wait);
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}
static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}
static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;
		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}
static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}
#define NOT_FOUND	(-1U)

static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned found = NOT_FOUND;
	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);
		if (sector == j->sector) {
			found = j - ic->journal_tree;
		}
		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else {
			n = j->node.rb_right;
		}
	}

	return found;
}
static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}
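/*
 * ic->journal_tree mirrors the in-memory journal: one journal_node per
 * journal entry, indexed like the entries themselves and keyed by logical
 * sector in an rbtree. Duplicate sectors are allowed; test_journal_node()
 * returns true only for the newest node with the given sector (no successor
 * with the same key), which is the copy a read must use.
 */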
static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}
#define TAG_READ	0
#define TAG_WRITE	1
#define TAG_CMP		2

static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned *metadata_offset, unsigned total_size, int op)
{
	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (unlikely(IS_ERR(data)))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			memcpy(dp, tag, to_copy);
			dm_bufio_mark_buffer_dirty(b);
		} else {
			/* e.g.: op == TAG_CMP */
			if (unlikely(memcmp(dp, tag, to_copy))) {
				unsigned i;

				for (i = 0; i < to_copy; i++) {
					if (dp[i] != tag[i])
						break;
					total_size--;
				}
				dm_bufio_release(b);
				return total_size;
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}
		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
}
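/*
 * Tags live in the metadata area and are accessed through dm-bufio in
 * buffers of (1 << log2_buffer_sectors) sectors; the loop above walks buffer
 * by buffer, and for TAG_CMP it returns the number of bytes remaining from
 * the first mismatching byte so the caller can compute which sector failed
 * verification.
 */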
static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
{
	int r;
	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);
}
static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);
	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}
static void autocommit_fn(unsigned long data)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)data;

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}

static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}
static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;
	spin_lock_irq(&ic->endio_wait.lock);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irq(&ic->endio_wait.lock);
	queue_work(ic->commit_wq, &ic->commit_work);
}
static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_error)
		bio->bi_error = r;
	bio_endio(bio);
}

static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_error) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}
static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (unlikely(dio->write))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

		if (unlikely(dio->bi_error) && !bio->bi_error)
			bio->bi_error = dio->bi_error;
		if (likely(!bio->bi_error) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}
static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	bio->bi_iter = dio->orig_bi_iter;
	bio->bi_bdev = dio->orig_bi_bdev;
	if (dio->orig_bi_integrity) {
		bio->bi_integrity = dio->orig_bi_integrity;
		bio->bi_opf |= REQ_INTEGRITY;
	}
	bio->bi_end_io = dio->orig_bi_end_io;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}
static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__u64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned digest_size;

	req->tfm = ic->internal_hash;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* this shouldn't happen anyway, the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
}
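/*
 * The per-sector checksum is shash(sector_le || data), truncated or
 * zero-padded to ic->tag_size. On the (unexpected) error path the result is
 * filled with random bytes so that a failed computation can never be
 * mistaken for a matching tag.
 */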
static void integrity_metadata(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	struct dm_integrity_c *ic = dio->ic;

	int r;

	if (ic->internal_hash) {
		struct bvec_iter iter;
		struct bio_vec bv;
		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		char *checksums;
		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
		char checksums_onstack[ic->tag_size + extra_space];
		unsigned sectors_to_process = dio->range.n_sectors;
		sector_t sector = dio->range.logical_sector;

		if (unlikely(ic->mode == 'R'))
			goto skip_io;

		checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
				    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		if (!checksums)
			checksums = checksums_onstack;

		__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
			unsigned pos;
			char *mem, *checksums_ptr;

again:
			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
			pos = 0;
			checksums_ptr = checksums;
			do {
				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
				checksums_ptr += ic->tag_size;
				sectors_to_process -= ic->sectors_per_block;
				pos += ic->sectors_per_block << SECTOR_SHIFT;
				sector += ic->sectors_per_block;
			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
			kunmap_atomic(mem);

			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
						checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
			if (unlikely(r)) {
				if (r > 0) {
					DMERR("Checksum failed at sector 0x%llx",
					      (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
					r = -EILSEQ;
				}
				if (likely(checksums != checksums_onstack))
					kfree(checksums);
				goto error;
			}

			if (!sectors_to_process)
				break;

			if (unlikely(pos < bv.bv_len)) {
				bv.bv_offset += pos;
				bv.bv_len -= pos;
				goto again;
			}
		}

		if (likely(checksums != checksums_onstack))
			kfree(checksums);
	} else {
		struct bio_integrity_payload *bip = dio->orig_bi_integrity;

		if (bip) {
			struct bio_vec biv;
			struct bvec_iter iter;
			unsigned data_to_process = dio->range.n_sectors;
			sector_to_block(ic, data_to_process);
			data_to_process *= ic->tag_size;

			bip_for_each_vec(biv, bip, iter) {
				unsigned char *tag;
				unsigned this_len;

				BUG_ON(PageHighMem(biv.bv_page));
				tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
				this_len = min(biv.bv_len, data_to_process);
				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
							this_len, !dio->write ? TAG_READ : TAG_WRITE);
				if (unlikely(r))
					goto error;
				data_to_process -= this_len;
				if (!data_to_process)
					break;
			}
		}
	}
skip_io:
	dec_in_flight(dio);
	return;
error:
	dio->bi_error = r;
	dec_in_flight(dio);
}
static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_integrity_c *ic = ti->private;
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
	struct bio_integrity_payload *bip;

	sector_t area, offset;

	dio->ic = ic;
	dio->bi_error = 0;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		submit_flush_bio(ic, dio);
		return DM_MAPIO_SUBMITTED;
	}

	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	dio->write = bio_op(bio) == REQ_OP_WRITE;
	dio->fua = dio->write && bio->bi_opf & REQ_FUA;
	if (unlikely(dio->fua)) {
		/*
		 * Don't pass down the FUA flag because we have to flush
		 * disk cache anyway.
		 */
		bio->bi_opf &= ~REQ_FUA;
	}
	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
		      (unsigned long long)ic->provided_data_sectors);
		return -EIO;
	}
	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
		      ic->sectors_per_block,
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
		return -EIO;
	}

	if (ic->sectors_per_block > 1) {
		struct bvec_iter iter;
		struct bio_vec bv;
		bio_for_each_segment(bv, bio, iter) {
			if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
					bv.bv_offset, bv.bv_len, ic->sectors_per_block);
				return -EIO;
			}
		}
	}

	bip = bio_integrity(bio);
	if (!ic->internal_hash) {
		if (bip) {
			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
			if (ic->log2_tag_size >= 0)
				wanted_tag_size <<= ic->log2_tag_size;
			else
				wanted_tag_size *= ic->tag_size;
			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
				DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size);
				return -EIO;
			}
		}
	} else {
		if (unlikely(bip != NULL)) {
			DMERR("Unexpected integrity data when using internal hash");
			return -EIO;
		}
	}

	if (unlikely(ic->mode == 'R') && unlikely(dio->write))
		return -EIO;

	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);

	dm_integrity_map_continue(dio, true);
	return DM_MAPIO_SUBMITTED;
}
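/*
 * Validation done, the bio is handed to dm_integrity_map_continue(), which
 * decides between the journal path (mode 'J' writes, and reads that hit
 * uncommitted journal data) and the direct path with tag verification or
 * update in integrity_metadata().
 */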
static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
				 unsigned journal_section, unsigned journal_entry)
{
	struct dm_integrity_c *ic = dio->ic;
	sector_t logical_sector;
	unsigned n_sectors;

	logical_sector = dio->range.logical_sector;
	n_sectors = dio->range.n_sectors;
	do {
		struct bio_vec bv = bio_iovec(bio);
		char *mem;

		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
			bv.bv_len = n_sectors << SECTOR_SHIFT;
		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
		mem = kmap_atomic(bv.bv_page);
		if (likely(dio->write))
			flush_dcache_page(bv.bv_page);

		do {
			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);

			if (unlikely(!dio->write)) {
				struct journal_sector *js;
				char *mem_ptr;
				unsigned s;

				if (unlikely(journal_entry_is_inprogress(je))) {
					flush_dcache_page(bv.bv_page);
					kunmap_atomic(mem);

					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
					goto retry_kmap;
				}
				smp_rmb();
				BUG_ON(journal_entry_get_sector(je) != logical_sector);
				js = access_journal_data(ic, journal_section, journal_entry);
				mem_ptr = mem + bv.bv_offset;
				s = 0;
				do {
					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
					js++;
					mem_ptr += 1 << SECTOR_SHIFT;
				} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
				if (ic->internal_hash) {
					char checksums_onstack[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];

					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
						DMERR("Checksum failed when reading from journal, at sector 0x%llx",
						      (unsigned long long)logical_sector);
					}
				}
#endif
			}

			if (!ic->internal_hash) {
				struct bio_integrity_payload *bip = bio_integrity(bio);
				unsigned tag_todo = ic->tag_size;
				char *tag_ptr = journal_entry_tag(ic, je);

				if (bip) do {
					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
					unsigned tag_now = min(biv.bv_len, tag_todo);
					char *tag_addr;
					BUG_ON(PageHighMem(biv.bv_page));
					tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
					if (likely(dio->write))
						memcpy(tag_ptr, tag_addr, tag_now);
					else
						memcpy(tag_addr, tag_ptr, tag_now);
					bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
					tag_ptr += tag_now;
					tag_todo -= tag_now;
				} while (unlikely(tag_todo)); else {
					if (likely(dio->write))
						memset(tag_ptr, 0, tag_todo);
				}
			}

			if (likely(dio->write)) {
				struct journal_sector *js;
				unsigned s;

				js = access_journal_data(ic, journal_section, journal_entry);
				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);

				s = 0;
				do {
					je->last_bytes[s] = js[s].commit_id;
				} while (++s < ic->sectors_per_block);

				if (ic->internal_hash) {
					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
					if (unlikely(digest_size > ic->tag_size)) {
						char checksums_onstack[digest_size];
						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
					} else
						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
				}

				journal_entry_set_sector(je, logical_sector);
			}

			logical_sector += ic->sectors_per_block;

			journal_entry++;
			if (unlikely(journal_entry == ic->journal_section_entries)) {
				journal_entry = 0;
				journal_section++;
				wraparound_section(ic, &journal_section);
			}

			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);

		if (unlikely(!dio->write))
			flush_dcache_page(bv.bv_page);
		kunmap_atomic(mem);
	} while (n_sectors);

	if (likely(dio->write)) {
		smp_mb();
		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
			wake_up(&ic->copy_to_journal_wait);
		if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
			queue_work(ic->commit_wq, &ic->commit_work);
		} else {
			schedule_autocommit(ic);
		}
	} else {
		remove_range(ic, &dio->range);
	}

	if (unlikely(bio->bi_iter.bi_size)) {
		sector_t area, offset;

		dio->range.logical_sector = logical_sector;
		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
		return true;
	}

	return false;
}
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
{
	struct dm_integrity_c *ic = dio->ic;
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	unsigned journal_section, journal_entry;
	unsigned journal_read_pos;
	struct completion read_comp;
	bool need_sync_io = ic->internal_hash && !dio->write;

	if (need_sync_io && from_map) {
		INIT_WORK(&dio->work, integrity_bio_wait);
		queue_work(ic->metadata_wq, &dio->work);
		return;
	}

lock_retry:
	spin_lock_irq(&ic->endio_wait.lock);
retry:
	if (unlikely(dm_integrity_failed(ic))) {
		spin_unlock_irq(&ic->endio_wait.lock);
		do_endio(ic, bio);
		return;
	}
	dio->range.n_sectors = bio_sectors(bio);
	journal_read_pos = NOT_FOUND;
	if (likely(ic->mode == 'J')) {
		if (dio->write) {
			unsigned next_entry, i, pos;
			unsigned ws, we;

			dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors);
			if (unlikely(!dio->range.n_sectors))
				goto sleep;
			ic->free_sectors -= dio->range.n_sectors;
			journal_section = ic->free_section;
			journal_entry = ic->free_section_entry;

			next_entry = ic->free_section_entry + dio->range.n_sectors;
			ic->free_section_entry = next_entry % ic->journal_section_entries;
			ic->free_section += next_entry / ic->journal_section_entries;
			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
			wraparound_section(ic, &ic->free_section);

			pos = journal_section * ic->journal_section_entries + journal_entry;
			ws = journal_section;
			we = journal_entry;
			i = 0;
			do {
				struct journal_entry *je;

				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
				pos++;
				if (unlikely(pos >= ic->journal_entries))
					pos = 0;

				je = access_journal_entry(ic, ws, we);
				BUG_ON(!journal_entry_is_unused(je));
				journal_entry_set_inprogress(je);
				we++;
				if (unlikely(we == ic->journal_section_entries)) {
					we = 0;
					ws++;
					wraparound_section(ic, &ws);
				}
			} while ((i += ic->sectors_per_block) < dio->range.n_sectors);

			spin_unlock_irq(&ic->endio_wait.lock);
			goto journal_read_write;
		} else {
			sector_t next_sector;
			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
			if (likely(journal_read_pos == NOT_FOUND)) {
				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
					dio->range.n_sectors = next_sector - dio->range.logical_sector;
			} else {
				unsigned i;
				unsigned jp = journal_read_pos + 1;
				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
						break;
				}
				dio->range.n_sectors = i;
			}
		}
	}
	if (unlikely(!add_new_range(ic, &dio->range))) {
		/*
		 * We must not sleep in the request routine because it could
		 * stall bios on current->bio_list.
		 * So, we offload the bio to a workqueue if we have to sleep.
		 */
sleep:
		if (from_map) {
			spin_unlock_irq(&ic->endio_wait.lock);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		} else {
			sleep_on_endio_wait(ic);
			goto retry;
		}
	}
	spin_unlock_irq(&ic->endio_wait.lock);

	if (unlikely(journal_read_pos != NOT_FOUND)) {
		journal_section = journal_read_pos / ic->journal_section_entries;
		journal_entry = journal_read_pos % ic->journal_section_entries;
		goto journal_read_write;
	}

	dio->in_flight = (atomic_t)ATOMIC_INIT(2);

	if (need_sync_io) {
		read_comp = COMPLETION_INITIALIZER_ONSTACK(read_comp);
		dio->completion = &read_comp;
	} else
		dio->completion = NULL;

	dio->orig_bi_iter = bio->bi_iter;

	dio->orig_bi_bdev = bio->bi_bdev;
	bio->bi_bdev = ic->dev->bdev;

	dio->orig_bi_integrity = bio_integrity(bio);
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;

	dio->orig_bi_end_io = bio->bi_end_io;
	bio->bi_end_io = integrity_end_io;

	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
	bio->bi_iter.bi_sector += ic->start;
	generic_make_request(bio);

	if (need_sync_io) {
		wait_for_completion_io(&read_comp);
		integrity_metadata(&dio->work);
	} else {
		INIT_WORK(&dio->work, integrity_metadata);
		queue_work(ic->metadata_wq, &dio->work);
	}

	return;

journal_read_write:
	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
		goto lock_retry;

	do_endio_flush(ic, dio);
}
static void integrity_bio_wait(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);

	dm_integrity_map_continue(dio, false);
}
static void pad_uncommitted(struct dm_integrity_c *ic)
{
	if (ic->free_section_entry) {
		ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
		ic->free_section_entry = 0;
		ic->free_section++;
		wraparound_section(ic, &ic->free_section);
		ic->n_uncommitted_sections++;
	}
}
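/*
 * Commit works on whole sections, so a partially filled section is padded
 * out: the unused entries are given up from free_sectors and the section is
 * queued as uncommitted. The corresponding journal entries were never
 * allocated, remain in the "unused" state, and are skipped by
 * do_journal_write().
 */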
static void integrity_commit(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
	unsigned commit_start, commit_sections;
	unsigned i, j, n;
	struct bio *flushes;

	del_timer(&ic->autocommit_timer);

	spin_lock_irq(&ic->endio_wait.lock);
	flushes = bio_list_get(&ic->flush_bio_list);
	if (unlikely(ic->mode != 'J')) {
		spin_unlock_irq(&ic->endio_wait.lock);
		dm_integrity_flush_buffers(ic);
		goto release_flush_bios;
	}

	pad_uncommitted(ic);
	commit_start = ic->uncommitted_section;
	commit_sections = ic->n_uncommitted_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!commit_sections)
		goto release_flush_bios;

	i = commit_start;
	for (n = 0; n < commit_sections; n++) {
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je;
			je = access_journal_entry(ic, i, j);
			io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
		}
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js;
			js = access_journal(ic, i, j);
			js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			ic->commit_seq = next_commit_seq(ic->commit_seq);
		wraparound_section(ic, &i);
	}

	write_journal(ic, commit_start, commit_sections);

	spin_lock_irq(&ic->endio_wait.lock);
	ic->uncommitted_section += commit_sections;
	wraparound_section(ic, &ic->uncommitted_section);
	ic->n_uncommitted_sections -= commit_sections;
	ic->n_committed_sections += commit_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
		queue_work(ic->writer_wq, &ic->writer_work);

release_flush_bios:
	while (flushes) {
		struct bio *next = flushes->bi_next;
		flushes->bi_next = NULL;
		do_endio(ic, flushes);
		flushes = next;
	}
}
static void complete_copy_from_journal(unsigned long error, void *context)
{
	struct journal_io *io = context;
	struct journal_completion *comp = io->comp;
	struct dm_integrity_c *ic = comp->ic;
	remove_range(ic, &io->range);
	mempool_free(io, ic->journal_io_mempool);
	if (unlikely(error != 0))
		dm_integrity_io_error(ic, "copying from journal", -EIO);
	complete_journal_op(comp);
}
static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
			       struct journal_entry *je)
{
	unsigned s = 0;
	do {
		js->commit_id = je->last_bytes[s];
		js++;
	} while (++s < ic->sectors_per_block);
}
static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
			     unsigned write_sections, bool from_replay)
{
	unsigned i, j, n;
	struct journal_completion comp;

	comp.ic = ic;
	comp.in_flight = (atomic_t)ATOMIC_INIT(1);
	comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);

	i = write_start;
	for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
#ifndef INTERNAL_VERIFY
		if (unlikely(from_replay))
#endif
			rw_section_mac(ic, i, false);
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);
			sector_t sec, area, offset;
			unsigned k, l, next_loop;
			sector_t metadata_block;
			unsigned metadata_offset;
			struct journal_io *io;

			if (journal_entry_is_unused(je))
				continue;
			BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
			sec = journal_entry_get_sector(je);
			if (unlikely(from_replay)) {
				if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
					dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
					sec &= ~(sector_t)(ic->sectors_per_block - 1);
				}
			}
			get_area_and_offset(ic, sec, &area, &offset);
			restore_last_bytes(ic, access_journal_data(ic, i, j), je);
			for (k = j + 1; k < ic->journal_section_entries; k++) {
				struct journal_entry *je2 = access_journal_entry(ic, i, k);
				sector_t sec2, area2, offset2;
				if (journal_entry_is_unused(je2))
					break;
				BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
				sec2 = journal_entry_get_sector(je2);
				get_area_and_offset(ic, sec2, &area2, &offset2);
				if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
					break;
				restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
			}
			next_loop = k - 1;

			io = mempool_alloc(ic->journal_io_mempool, GFP_NOIO);
			io->comp = &comp;
			io->range.logical_sector = sec;
			io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;

			spin_lock_irq(&ic->endio_wait.lock);
			while (unlikely(!add_new_range(ic, &io->range)))
				sleep_on_endio_wait(ic);

			if (likely(!from_replay)) {
				struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];

				/* don't write if there is newer committed sector */
				while (j < k && find_newer_committed_node(ic, &section_node[j])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, j);

					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[j]);
					j++;
					sec += ic->sectors_per_block;
					offset += ic->sectors_per_block;
				}
				while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);

					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[k - 1]);
					k--;
				}
				if (j == k) {
					remove_range_unlocked(ic, &io->range);
					spin_unlock_irq(&ic->endio_wait.lock);
					mempool_free(io, ic->journal_io_mempool);
					goto skip_io;
				}
				for (l = j; l < k; l++) {
					remove_journal_node(ic, &section_node[l]);
				}
			}
			spin_unlock_irq(&ic->endio_wait.lock);

			metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
			for (l = j; l < k; l++) {
				int r;
				struct journal_entry *je2 = access_journal_entry(ic, i, l);

				if (
#ifndef INTERNAL_VERIFY
				    unlikely(from_replay) &&
#endif
				    ic->internal_hash) {
					char test_tag[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];

					integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
								  (char *)access_journal_data(ic, i, l), test_tag);
					if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
						dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
				}

				journal_entry_set_unused(je2);
				r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
							ic->tag_size, TAG_WRITE);
				if (unlikely(r)) {
					dm_integrity_io_error(ic, "reading tags", r);
				}
			}

			atomic_inc(&comp.in_flight);
			copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
					  (k - j) << ic->sb->log2_sectors_per_block,
					  get_data_sector(ic, area, offset),
					  complete_copy_from_journal, io);
skip_io:
			j = next_loop;
		}
	}

	dm_bufio_write_dirty_buffers_async(ic->bufio);

	complete_journal_op(&comp);
	wait_for_completion_io(&comp.comp);

	dm_integrity_flush_buffers(ic);
}
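/*
 * Journal write-back, used both in normal operation and for replay: runs of
 * consecutive entries are coalesced into one dm-io copy, entries superseded
 * by a newer committed copy of the same sector are dropped (normal operation
 * only), tags are written to the metadata area, and the data is copied from
 * the journal to its final location.
 */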
static void integrity_writer(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
	unsigned write_start, write_sections;

	unsigned prev_free_sectors;

	/* the following test is not needed, but it tests the replay code */
	if (ACCESS_ONCE(ic->suspending))
		return;

	spin_lock_irq(&ic->endio_wait.lock);
	write_start = ic->committed_section;
	write_sections = ic->n_committed_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!write_sections)
		return;

	do_journal_write(ic, write_start, write_sections, false);

	spin_lock_irq(&ic->endio_wait.lock);

	ic->committed_section += write_sections;
	wraparound_section(ic, &ic->committed_section);
	ic->n_committed_sections -= write_sections;

	prev_free_sectors = ic->free_sectors;
	ic->free_sectors += write_sections * ic->journal_section_entries;
	if (unlikely(!prev_free_sectors))
		wake_up_locked(&ic->endio_wait);

	spin_unlock_irq(&ic->endio_wait.lock);
}
static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
			 unsigned n_sections, unsigned char commit_seq)
{
	unsigned i, j, n;

	if (!n_sections)
		return;

	for (n = 0; n < n_sections; n++) {
		i = start_section + n;
		wraparound_section(ic, &i);
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);
			memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
			js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
		}
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);
			journal_entry_set_unused(je);
		}
	}

	write_journal(ic, start_section, n_sections);
}
static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
{
	unsigned char k;
	for (k = 0; k < N_COMMIT_IDS; k++) {
		if (dm_integrity_commit_id(ic, i, j, k) == id)
			return k;
	}
	dm_integrity_io_error(ic, "journal commit id", -EIO);
	return -1;
}
static void replay_journal(struct dm_integrity_c *ic)
{
	unsigned i, j;
	bool used_commit_ids[N_COMMIT_IDS];
	unsigned max_commit_id_sections[N_COMMIT_IDS];
	unsigned write_start, write_sections;
	unsigned continue_section;
	bool journal_empty;
	unsigned char unused, last_used, want_commit_seq;

	if (ic->mode == 'R')
		return;

	if (ic->journal_uptodate)
		return;

	last_used = 0;
	write_start = 0;

	if (!ic->just_formatted) {
		DEBUG_print("reading journal\n");
		rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
		if (ic->journal_io)
			DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
		if (ic->journal_io) {
			struct journal_completion crypt_comp;
			crypt_comp.ic = ic;
			crypt_comp.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp.comp);
			crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
			wait_for_completion(&crypt_comp.comp);
		}
		DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
	}

	if (dm_integrity_failed(ic))
		goto clear_journal;

	journal_empty = true;
	memset(used_commit_ids, 0, sizeof used_commit_ids);
	memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
	for (i = 0; i < ic->journal_sections; i++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			int k;
			struct journal_sector *js = access_journal(ic, i, j);
			k = find_commit_seq(ic, i, j, js->commit_id);
			if (k < 0)
				goto clear_journal;
			used_commit_ids[k] = true;
			max_commit_id_sections[k] = i;
		}
		if (journal_empty) {
			for (j = 0; j < ic->journal_section_entries; j++) {
				struct journal_entry *je = access_journal_entry(ic, i, j);
				if (!journal_entry_is_unused(je)) {
					journal_empty = false;
					break;
				}
			}
		}
	}

	if (!used_commit_ids[N_COMMIT_IDS - 1]) {
		unused = N_COMMIT_IDS - 1;
		while (unused && !used_commit_ids[unused - 1])
			unused--;
	} else {
		for (unused = 0; unused < N_COMMIT_IDS; unused++)
			if (!used_commit_ids[unused])
				break;
		if (unused == N_COMMIT_IDS) {
			dm_integrity_io_error(ic, "journal commit ids", -EIO);
			goto clear_journal;
		}
	}
	DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
		    unused, used_commit_ids[0], used_commit_ids[1],
		    used_commit_ids[2], used_commit_ids[3]);

	last_used = prev_commit_seq(unused);
	want_commit_seq = prev_commit_seq(last_used);

	if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
		journal_empty = true;

	write_start = max_commit_id_sections[last_used] + 1;
	if (unlikely(write_start >= ic->journal_sections))
		want_commit_seq = next_commit_seq(want_commit_seq);
	wraparound_section(ic, &write_start);

	i = write_start;
	for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);

			if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
				/*
				 * This could be caused by a crash during writing.
				 * We won't replay the inconsistent part of the
				 * journal.
				 */
				DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
					    i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
				goto brk;
			}
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			want_commit_seq = next_commit_seq(want_commit_seq);
		wraparound_section(ic, &i);
	}
brk:

	if (!journal_empty) {
		DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
			    write_sections, write_start, want_commit_seq);
		do_journal_write(ic, write_start, write_sections, true);
	}

	if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
		continue_section = write_start;
		ic->commit_seq = want_commit_seq;
		DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
	} else {
		unsigned s;
		unsigned char erase_seq;
clear_journal:
		DEBUG_print("clearing journal\n");

		erase_seq = prev_commit_seq(prev_commit_seq(last_used));
		s = write_start;
		init_journal(ic, s, 1, erase_seq);
		s++;
		wraparound_section(ic, &s);
		if (ic->journal_sections >= 2) {
			init_journal(ic, s, ic->journal_sections - 2, erase_seq);
			s += ic->journal_sections - 2;
			wraparound_section(ic, &s);
			init_journal(ic, s, 1, erase_seq);
		}

		continue_section = 0;
		ic->commit_seq = next_commit_seq(erase_seq);
	}

	ic->committed_section = continue_section;
	ic->n_committed_sections = 0;

	ic->uncommitted_section = continue_section;
	ic->n_uncommitted_sections = 0;

	ic->free_section = continue_section;
	ic->free_section_entry = 0;
	ic->free_sectors = ic->journal_entries;

	ic->journal_tree_root = RB_ROOT;
	for (i = 0; i < ic->journal_entries; i++)
		init_journal_node(&ic->journal_tree[i]);
}

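/*
 * A note on ordering (a reading of the code below, not authoritative): the
 * autocommit timer is stopped first, then a final commit is queued and the
 * commit workqueue is drained; in journal mode the writer workqueue is also
 * drained and the buffers flushed, so nothing remains uncommitted or
 * unwritten before the journal is declared up to date.
 */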
static void dm_integrity_postsuspend(struct dm_target *ti)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;

	del_timer_sync(&ic->autocommit_timer);

	ic->suspending = true;

	queue_work(ic->commit_wq, &ic->commit_work);
	drain_workqueue(ic->commit_wq);

	if (ic->mode == 'J') {
		drain_workqueue(ic->writer_wq);
		dm_integrity_flush_buffers(ic);
	}

	ic->suspending = false;

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));

	ic->journal_uptodate = true;
}

static void dm_integrity_resume(struct dm_target *ti)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;

	replay_journal(ic);
}

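/*
 * Hypothetical example of a STATUSTYPE_TABLE line produced below (device
 * name and values are illustrative, not taken from a real setup):
 *
 *   /dev/sdb 0 32 J 5 journal_sectors:8192 interleave_sectors:32768
 *   buffer_sectors:128 journal_watermark:50 commit_time:10000
 */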
static void dm_integrity_status(struct dm_target *ti, status_type_t type,
				unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
	unsigned arg_count;
	size_t sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE: {
		__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
		watermark_percentage += ic->journal_entries / 2;
		do_div(watermark_percentage, ic->journal_entries);
		arg_count = 5;
		arg_count += ic->sectors_per_block != 1;
		arg_count += !!ic->internal_hash_alg.alg_string;
		arg_count += !!ic->journal_crypt_alg.alg_string;
		arg_count += !!ic->journal_mac_alg.alg_string;
		DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
		       ic->tag_size, ic->mode, arg_count);
		DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
		DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
		DMEMIT(" commit_time:%u", ic->autocommit_msec);
		if (ic->sectors_per_block != 1)
			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);

#define EMIT_ALG(a, n)							\
		do {							\
			if (ic->a.alg_string) {				\
				DMEMIT(" %s:%s", n, ic->a.alg_string);	\
				if (ic->a.key_string)			\
					DMEMIT(":%s", ic->a.key_string);\
			}						\
		} while (0)
		EMIT_ALG(internal_hash_alg, "internal_hash");
		EMIT_ALG(journal_crypt_alg, "journal_crypt");
		EMIT_ALG(journal_mac_alg, "journal_mac");
		break;
	}
	}
}

static int dm_integrity_iterate_devices(struct dm_target *ti,
					iterate_devices_callout_fn fn, void *data)
{
	struct dm_integrity_c *ic = ti->private;

	return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
}

static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_integrity_c *ic = ti->private;

	if (ic->sectors_per_block > 1) {
		limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
	}
}

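/*
 * Worked example of the geometry computed below (illustrative numbers, not
 * taken from a real device): with tag_size = 32, one sector per block and no
 * journal MAC, journal_entry_size = roundup(16 + 32, 8) = 48 bytes, so
 * journal_entries_per_sector = 504 / 48 = 10, journal_section_entries =
 * 10 * 8 = 80 and journal_section_sectors = 80 + 8 = 88 (80 data sectors
 * plus JOURNAL_BLOCK_SECTORS metadata sectors per section).
 */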
static void calculate_journal_section_size(struct dm_integrity_c *ic)
{
	unsigned sector_space = JOURNAL_SECTOR_DATA;

	ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
	ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
					 JOURNAL_ENTRY_ROUNDUP);

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
		sector_space -= JOURNAL_MAC_PER_SECTOR;
	ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
	ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
	ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
	ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
}

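/*
 * On-disk layout implied by the computation below (an informal reading):
 * the superblock and the journal occupy the first initial_sectors of the
 * device; after that, metadata and data alternate in areas of metadata_run
 * sectors of tags (rounded up to a multiple of 2^METADATA_PADDING_SECTORS
 * sectors) followed by 2^log2_interleave_sectors sectors of data.
 */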
static int calculate_device_limits(struct dm_integrity_c *ic)
{
	__u64 initial_sectors;
	sector_t last_sector, last_area, last_offset;

	calculate_journal_section_size(ic);
	initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
	if (initial_sectors + METADATA_PADDING_SECTORS >= ic->device_sectors || initial_sectors > UINT_MAX)
		return -EINVAL;
	ic->initial_sectors = initial_sectors;

	ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
				   (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
	if (!(ic->metadata_run & (ic->metadata_run - 1)))
		ic->log2_metadata_run = __ffs(ic->metadata_run);
	else
		ic->log2_metadata_run = -1;

	get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
	last_sector = get_data_sector(ic, last_area, last_offset);

	if (ic->start + last_sector < last_sector || ic->start + last_sector >= ic->device_sectors)
		return -EINVAL;

	return 0;
}

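/*
 * Format a brand new superblock.  provided_data_sectors is maximized
 * greedily: starting from the highest bit of the device size, each bit is
 * tentatively set and kept only if calculate_device_limits() still reports
 * that data, metadata and journal fit on the underlying device.
 */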
static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
{
	unsigned journal_sections;
	int test_bit;

	memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
	memcpy(ic->sb->magic, SB_MAGIC, 8);
	ic->sb->version = SB_VERSION;
	ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
	ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
	if (ic->journal_mac_alg.alg_string)
		ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);

	calculate_journal_section_size(ic);
	journal_sections = journal_sectors / ic->journal_section_sectors;
	if (!journal_sections)
		journal_sections = 1;
	ic->sb->journal_sections = cpu_to_le32(journal_sections);

	if (!interleave_sectors)
		interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
	ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
	ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
	ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);

	ic->provided_data_sectors = 0;
	for (test_bit = fls64(ic->device_sectors) - 1; test_bit >= 3; test_bit--) {
		__u64 prev_data_sectors = ic->provided_data_sectors;

		ic->provided_data_sectors |= (sector_t)1 << test_bit;
		if (calculate_device_limits(ic))
			ic->provided_data_sectors = prev_data_sectors;
	}

	if (!ic->provided_data_sectors)
		return -EINVAL;

	ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);

	return 0;
}

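/*
 * Register a blk_integrity profile for the exported device, so that the
 * block layer allocates integrity payloads (ic->tag_size bytes per
 * 2^interval_exp bytes of data) for bios submitted to this target.  Judging
 * from the call site in the constructor, this is only done when no internal
 * hash is configured.
 */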
static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
{
	struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
	struct blk_integrity bi;

	memset(&bi, 0, sizeof(bi));
	bi.profile = &dm_integrity_profile;
	bi.tuple_size = ic->tag_size;
	bi.tag_size = bi.tuple_size;
	bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
}

/* FIXME: use new kvmalloc */
static void *dm_integrity_kvmalloc(size_t size, gfp_t gfp)
{
	void *ptr = NULL;

	if (size <= PAGE_SIZE)
		ptr = kmalloc(size, GFP_KERNEL | gfp);
	if (!ptr && size <= KMALLOC_MAX_SIZE)
		ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | gfp);
	if (!ptr)
		ptr = __vmalloc(size, GFP_KERNEL | gfp, PAGE_KERNEL);

	return ptr;
}

static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
{
	unsigned i;

	if (!pl)
		return;
	for (i = 0; i < ic->journal_pages; i++)
		if (pl[i].page)
			__free_page(pl[i].page);
	kvfree(pl);
}

static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
{
	size_t page_list_desc_size = ic->journal_pages * sizeof(struct page_list);
	struct page_list *pl;
	unsigned i;

	pl = dm_integrity_kvmalloc(page_list_desc_size, __GFP_ZERO);
	if (!pl)
		return NULL;

	for (i = 0; i < ic->journal_pages; i++) {
		pl[i].page = alloc_page(GFP_KERNEL);
		if (!pl[i].page) {
			dm_integrity_free_page_list(ic, pl);
			return NULL;
		}
		if (i)
			pl[i - 1].next = &pl[i];
	}

	return pl;
}

static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
{
	unsigned i;
	for (i = 0; i < ic->journal_sections; i++)
		kvfree(sl[i]);
	kvfree(sl);
}

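/*
 * Build one scatterlist per journal section, covering the section's sectors
 * in the journal page list (a section may straddle page boundaries).  These
 * scatterlists appear to be what the per-section skcipher requests operate
 * on when a journal section is encrypted or decrypted in place.
 */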
static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
{
	struct scatterlist **sl;
	unsigned i;

	sl = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), __GFP_ZERO);
	if (!sl)
		return NULL;

	for (i = 0; i < ic->journal_sections; i++) {
		struct scatterlist *s;
		unsigned start_index, start_offset;
		unsigned end_index, end_offset;
		unsigned n_pages;
		unsigned idx;

		page_list_location(ic, i, 0, &start_index, &start_offset);
		page_list_location(ic, i, ic->journal_section_sectors - 1, &end_index, &end_offset);

		n_pages = (end_index - start_index + 1);

		s = dm_integrity_kvmalloc(n_pages * sizeof(struct scatterlist), 0);
		if (!s) {
			dm_integrity_free_journal_scatterlist(ic, sl);
			return NULL;
		}

		sg_init_table(s, n_pages);
		for (idx = start_index; idx <= end_index; idx++) {
			char *va = lowmem_page_address(pl[idx].page);
			unsigned start = 0, end = PAGE_SIZE;
			if (idx == start_index)
				start = start_offset;
			if (idx == end_index)
				end = end_offset + (1 << SECTOR_SHIFT);
			sg_set_buf(&s[idx - start_index], va + start, end - start);
		}

		sl[i] = s;
	}

	return sl;
}

static void free_alg(struct alg_spec *a)
{
	kzfree(a->alg_string);
	kzfree(a->key);
	memset(a, 0, sizeof *a);
}

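/*
 * Parse an "option:algorithm[:key]" argument.  The key, when present, is
 * hex-encoded; hypothetical examples following this format would be
 * "internal_hash:crc32c" (no key) or "journal_mac:hmac(sha256):0123..."
 * (hex key follows the second colon).
 */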
static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
{
	char *k;

	free_alg(a);

	a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
	if (!a->alg_string)
		goto nomem;

	k = strchr(a->alg_string, ':');
	if (k) {
		*k = 0;
		a->key_string = k + 1;
		if (strlen(a->key_string) & 1)
			goto inval;

		a->key_size = strlen(a->key_string) / 2;
		a->key = kmalloc(a->key_size, GFP_KERNEL);
		if (!a->key)
			goto nomem;
		if (hex2bin(a->key, a->key_string, a->key_size))
			goto inval;
	}

	return 0;
inval:
	*error = error_inval;
	return -EINVAL;
nomem:
	*error = "Out of memory for an argument";
	return -ENOMEM;
}

static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
		   char *error_alg, char *error_key)
{
	int r;

	if (a->alg_string) {
		*hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(*hash)) {
			*error = error_alg;
			r = PTR_ERR(*hash);
			*hash = NULL;
			return r;
		}

		if (a->key) {
			r = crypto_shash_setkey(*hash, a->key, a->key_size);
			if (r) {
				*error = error_key;
				return r;
			}
		}
	}

	return 0;
}

static int create_journal(struct dm_integrity_c *ic, char **error)
{
	int r = 0;
	unsigned i;
	__u64 journal_pages, journal_desc_size, journal_tree_size;
	unsigned char *crypt_data = NULL;

	ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
	ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
	ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
	ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);

	journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
				PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
	journal_desc_size = journal_pages * sizeof(struct page_list);
	if (journal_pages >= totalram_pages - totalhigh_pages || journal_desc_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_pages = journal_pages;

	ic->journal = dm_integrity_alloc_page_list(ic);
	if (!ic->journal) {
		*error = "Could not allocate memory for journal";
		r = -ENOMEM;
		goto bad;
	}
	if (ic->journal_crypt_alg.alg_string) {
		unsigned ivsize, blocksize;
		struct journal_completion comp;

		comp.ic = ic;
		ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
		if (IS_ERR(ic->journal_crypt)) {
			*error = "Invalid journal cipher";
			r = PTR_ERR(ic->journal_crypt);
			ic->journal_crypt = NULL;
			goto bad;
		}
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		blocksize = crypto_skcipher_blocksize(ic->journal_crypt);

		if (ic->journal_crypt_alg.key) {
			r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
						   ic->journal_crypt_alg.key_size);
			if (r) {
				*error = "Error setting encryption key";
				goto bad;
			}
		}
		DEBUG_print("cipher %s, block size %u iv size %u\n",
			    ic->journal_crypt_alg.alg_string, blocksize, ivsize);

		ic->journal_io = dm_integrity_alloc_page_list(ic);
		if (!ic->journal_io) {
			*error = "Could not allocate memory for journal io";
			r = -ENOMEM;
			goto bad;
		}

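		/*
		 * Two modes follow, selected by the cipher block size (an
		 * informal reading of the code): for stream ciphers
		 * (blocksize == 1) a keystream is precomputed once by
		 * encrypting zeroed journal_xor pages in place, and the
		 * journal is then protected by XORing with that keystream;
		 * for block ciphers, one skcipher request per journal
		 * section is prepared, with a per-section IV derived by
		 * encrypting the section number.
		 */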
		if (blocksize == 1) {
			struct scatterlist *sg;
			SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
			unsigned char iv[ivsize];
			skcipher_request_set_tfm(req, ic->journal_crypt);

			ic->journal_xor = dm_integrity_alloc_page_list(ic);
			if (!ic->journal_xor) {
				*error = "Could not allocate memory for journal xor";
				r = -ENOMEM;
				goto bad;
			}

			sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0);
			if (!sg) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			sg_init_table(sg, ic->journal_pages + 1);
			for (i = 0; i < ic->journal_pages; i++) {
				char *va = lowmem_page_address(ic->journal_xor[i].page);
				clear_page(va);
				sg_set_buf(&sg[i], va, PAGE_SIZE);
			}
			sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
			memset(iv, 0x00, ivsize);

			skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
			comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
			comp.in_flight = (atomic_t)ATOMIC_INIT(1);
			if (do_crypt(true, req, &comp))
				wait_for_completion(&comp.comp);
			kvfree(sg);
			r = dm_integrity_failed(ic);
			if (r) {
				*error = "Unable to encrypt journal";
				goto bad;
			}
			DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");

			crypto_free_skcipher(ic->journal_crypt);
			ic->journal_crypt = NULL;
		} else {
			SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
			unsigned char iv[ivsize];
			unsigned crypt_len = roundup(ivsize, blocksize);

			crypt_data = kmalloc(crypt_len, GFP_KERNEL);
			if (!crypt_data) {
				*error = "Unable to allocate crypt data";
				r = -ENOMEM;
				goto bad;
			}

			skcipher_request_set_tfm(req, ic->journal_crypt);

			ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
			if (!ic->journal_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
			if (!ic->journal_io_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO);
			if (!ic->sk_requests) {
				*error = "Unable to allocate sk requests";
				r = -ENOMEM;
				goto bad;
			}
			for (i = 0; i < ic->journal_sections; i++) {
				struct scatterlist sg;
				struct skcipher_request *section_req;
				__u32 section_le = cpu_to_le32(i);

				memset(iv, 0x00, ivsize);
				memset(crypt_data, 0x00, crypt_len);
				memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));

				sg_init_one(&sg, crypt_data, crypt_len);
				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
				comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
				comp.in_flight = (atomic_t)ATOMIC_INIT(1);
				if (do_crypt(true, req, &comp))
					wait_for_completion(&comp.comp);

				r = dm_integrity_failed(ic);
				if (r) {
					*error = "Unable to generate iv";
					goto bad;
				}

				section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
				if (!section_req) {
					*error = "Unable to allocate crypt request";
					r = -ENOMEM;
					goto bad;
				}
				section_req->iv = kmalloc(ivsize * 2, GFP_KERNEL);
				if (!section_req->iv) {
					skcipher_request_free(section_req);
					*error = "Unable to allocate iv";
					r = -ENOMEM;
					goto bad;
				}
				memcpy(section_req->iv + ivsize, crypt_data, ivsize);
				section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
				ic->sk_requests[i] = section_req;
				DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
			}
		}
	}

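	/*
	 * The four commit ids may have been transformed by the journal
	 * encryption above; if any two of them collided, replay could not
	 * tell the sequences apart, so duplicates are bumped until all
	 * N_COMMIT_IDS values are distinct.
	 */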
	for (i = 0; i < N_COMMIT_IDS; i++) {
		unsigned j;
retest_commit_id:
		for (j = 0; j < i; j++) {
			if (ic->commit_ids[j] == ic->commit_ids[i]) {
				ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
				goto retest_commit_id;
			}
		}
		DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
	}

	journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
	if (journal_tree_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0);
	if (!ic->journal_tree) {
		*error = "Could not allocate memory for journal tree";
		r = -ENOMEM;
	}
bad:
	kfree(crypt_data);
	return r;
}

/*
 * Construct an integrity mapping
 *
 * Arguments:
 *	device
 *	offset from the start of the device
 *	tag size
 *	D - direct writes, J - journal writes, R - recovery mode
 *	number of optional arguments
 *	optional arguments:
 *		journal_sectors
 *		interleave_sectors
 *		buffer_sectors
 *		journal_watermark
 *		commit_time
 *		internal_hash
 *		journal_crypt
 *		journal_mac
 *		block_size
 */
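/*
 * Example of a table line using this target (device name and sizes are
 * hypothetical; the target length must match the provided data sectors):
 *
 *   dmsetup create integ --table \
 *     "0 409600 integrity /dev/sdb 0 32 J 2 journal_sectors:8192 internal_hash:crc32c"
 */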
static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_integrity_c *ic;
	char dummy;
	int r;
	unsigned extra_args;
	struct dm_arg_set as;
	static struct dm_arg _args[] = {
		{0, 9, "Invalid number of feature args"},
	};
	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
	bool should_write_sb;
	__u64 threshold;
	unsigned long long start;

#define DIRECT_ARGUMENTS 4

	if (argc <= DIRECT_ARGUMENTS) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
	if (!ic) {
		ti->error = "Cannot allocate integrity context";
		return -ENOMEM;
	}
	ti->private = ic;
	ti->per_io_data_size = sizeof(struct dm_integrity_io);

	ic->in_progress = RB_ROOT;
	init_waitqueue_head(&ic->endio_wait);
	bio_list_init(&ic->flush_bio_list);
	init_waitqueue_head(&ic->copy_to_journal_wait);
	init_completion(&ic->crypto_backoff);

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
		ti->error = "Invalid starting offset";
		r = -EINVAL;
		goto bad;
	}
	ic->start = start;

	if (strcmp(argv[2], "-")) {
		if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
			ti->error = "Invalid tag size";
			r = -EINVAL;
			goto bad;
		}
	}

	if (!strcmp(argv[3], "J") || !strcmp(argv[3], "D") || !strcmp(argv[3], "R"))
		ic->mode = argv[3][0];
	else {
		ti->error = "Invalid mode (expecting J, D, R)";
		r = -EINVAL;
		goto bad;
	}

	ic->device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
	journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
			      ic->device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
	interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
	buffer_sectors = DEFAULT_BUFFER_SECTORS;
	journal_watermark = DEFAULT_JOURNAL_WATERMARK;
	sync_msec = DEFAULT_SYNC_MSEC;
	ic->sectors_per_block = 1;

	as.argc = argc - DIRECT_ARGUMENTS;
	as.argv = argv + DIRECT_ARGUMENTS;
	r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
	if (r)
		goto bad;

	while (extra_args--) {
		const char *opt_string;
		unsigned val;
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			r = -EINVAL;
			ti->error = "Not enough feature arguments";
			goto bad;
		}
		if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
			journal_sectors = val;
		else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
			interleave_sectors = val;
		else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
			buffer_sectors = val;
		else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
			journal_watermark = val;
		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
			sync_msec = val;
		else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
			if (val < 1 << SECTOR_SHIFT ||
			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
			    (val & (val - 1))) {
				r = -EINVAL;
				ti->error = "Invalid block_size argument";
				goto bad;
			}
			ic->sectors_per_block = val >> SECTOR_SHIFT;
		} else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
					    "Invalid internal_hash argument");
			if (r)
				goto bad;
		} else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
					    "Invalid journal_crypt argument");
			if (r)
				goto bad;
		} else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
					    "Invalid journal_mac argument");
			if (r)
				goto bad;
		} else {
			r = -EINVAL;
			ti->error = "Invalid argument";
			goto bad;
		}
	}

	r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
		    "Invalid internal hash", "Error setting internal hash key");
	if (r)
		goto bad;

	r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
		    "Invalid journal mac", "Error setting journal mac key");
	if (r)
		goto bad;

	if (!ic->tag_size) {
		if (!ic->internal_hash) {
			ti->error = "Unknown tag size";
			r = -EINVAL;
			goto bad;
		}
		ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
	}
	if (ic->tag_size > MAX_TAG_SIZE) {
		ti->error = "Too big tag size";
		r = -EINVAL;
		goto bad;
	}
	if (!(ic->tag_size & (ic->tag_size - 1)))
		ic->log2_tag_size = __ffs(ic->tag_size);
	else
		ic->log2_tag_size = -1;

	ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
	ic->autocommit_msec = sync_msec;
	setup_timer(&ic->autocommit_timer, autocommit_fn, (unsigned long)ic);

	ic->io = dm_io_client_create();
	if (IS_ERR(ic->io)) {
		r = PTR_ERR(ic->io);
		ic->io = NULL;
		ti->error = "Cannot allocate dm io";
		goto bad;
	}

	ic->journal_io_mempool = mempool_create_slab_pool(JOURNAL_IO_MEMPOOL, journal_io_cache);
	if (!ic->journal_io_mempool) {
		r = -ENOMEM;
		ti->error = "Cannot allocate mempool";
		goto bad;
	}

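	/*
	 * A summary of the workqueues created below (descriptive only,
	 * inferred from their users elsewhere in this file): metadata_wq
	 * handles tag reads and updates, wait_wq processes bios that had to
	 * wait on an in-progress range, commit_wq commits journal entries
	 * and writer_wq (journal mode only) writes committed sections to
	 * their final location.
	 */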
	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->metadata_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	/*
	 * If this workqueue were percpu, it would cause bio reordering
	 * and reduced performance.
	 */
	ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!ic->wait_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
	if (!ic->commit_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}
	INIT_WORK(&ic->commit_work, integrity_commit);

	if (ic->mode == 'J') {
		ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
		if (!ic->writer_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->writer_work, integrity_writer);
	}

	ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
	if (!ic->sb) {
		r = -ENOMEM;
		ti->error = "Cannot allocate superblock area";
		goto bad;
	}

	r = sync_rw_sb(ic, REQ_OP_READ, 0);
	if (r) {
		ti->error = "Error reading superblock";
		goto bad;
	}
	should_write_sb = false;
	if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
		if (ic->mode != 'R') {
			if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
				r = -EINVAL;
				ti->error = "The device is not initialized";
				goto bad;
			}
		}

		r = initialize_superblock(ic, journal_sectors, interleave_sectors);
		if (r) {
			ti->error = "Could not initialize superblock";
			goto bad;
		}
		if (ic->mode != 'R')
			should_write_sb = true;
	}

	if (ic->sb->version != SB_VERSION) {
		r = -EINVAL;
		ti->error = "Unknown version";
		goto bad;
	}
	if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
		r = -EINVAL;
		ti->error = "Tag size doesn't match the information in superblock";
		goto bad;
	}
	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
		r = -EINVAL;
		ti->error = "Block size doesn't match the information in superblock";
		goto bad;
	}
	/* make sure that ti->max_io_len doesn't overflow */
	if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
	    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
		r = -EINVAL;
		ti->error = "Invalid interleave_sectors in the superblock";
		goto bad;
	}
	ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
	if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) {
		/* test for overflow */
		r = -EINVAL;
		ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors";
		goto bad;
	}
	if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
		r = -EINVAL;
		ti->error = "Journal mac mismatch";
		goto bad;
	}
	r = calculate_device_limits(ic);
	if (r) {
		ti->error = "The device is too small";
		goto bad;
	}

	if (!buffer_sectors)
		buffer_sectors = 1;
	ic->log2_buffer_sectors = min3((int)__fls(buffer_sectors), (int)__ffs(ic->metadata_run), 31 - SECTOR_SHIFT);

	threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
	threshold += 50;
	do_div(threshold, 100);
	ic->free_sectors_threshold = threshold;

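	/*
	 * Worked example (illustrative numbers): with journal_watermark = 50
	 * and 1000 journal entries, threshold = (1000 * (100 - 50) + 50) /
	 * 100 = 500, i.e. the commit work is kicked once the free journal
	 * space drops below roughly half of the journal.
	 */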
	DEBUG_print("initialized:\n");
	DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
	DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size);
	DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
	DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
	DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
	DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
	DEBUG_print(" journal_entries %u\n", ic->journal_entries);
	DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
	DEBUG_print(" device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors);
	DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
	DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
	DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
	DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
		    (unsigned long long)ic->provided_data_sectors);
	DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);

	ic->bufio = dm_bufio_client_create(ic->dev->bdev, 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors),
					   1, 0, NULL, NULL);
	if (IS_ERR(ic->bufio)) {
		r = PTR_ERR(ic->bufio);
		ti->error = "Cannot initialize dm-bufio";
		ic->bufio = NULL;
		goto bad;
	}
	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);

	if (ic->mode != 'R') {
		r = create_journal(ic, &ti->error);
		if (r)
			goto bad;
	}

	if (should_write_sb) {
		init_journal(ic, 0, ic->journal_sections, 0);
		r = dm_integrity_failed(ic);
		if (unlikely(r)) {
			ti->error = "Error initializing journal";
			goto bad;
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (r) {
			ti->error = "Error initializing superblock";
			goto bad;
		}
		ic->just_formatted = true;
	}

	r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
	if (r)
		goto bad;

	if (!ic->internal_hash)
		dm_integrity_set(ti, ic);

	ti->num_flush_bios = 1;
	ti->flush_supported = true;

	return 0;
bad:
	dm_integrity_dtr(ti);
	return r;
}

static void dm_integrity_dtr(struct dm_target *ti)
{
	struct dm_integrity_c *ic = ti->private;

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));

	if (ic->metadata_wq)
		destroy_workqueue(ic->metadata_wq);
	if (ic->wait_wq)
		destroy_workqueue(ic->wait_wq);
	if (ic->commit_wq)
		destroy_workqueue(ic->commit_wq);
	if (ic->writer_wq)
		destroy_workqueue(ic->writer_wq);
	if (ic->bufio)
		dm_bufio_client_destroy(ic->bufio);
	mempool_destroy(ic->journal_io_mempool);
	if (ic->io)
		dm_io_client_destroy(ic->io);
	if (ic->dev)
		dm_put_device(ti, ic->dev);
	dm_integrity_free_page_list(ic, ic->journal);
	dm_integrity_free_page_list(ic, ic->journal_io);
	dm_integrity_free_page_list(ic, ic->journal_xor);
	if (ic->journal_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
	if (ic->journal_io_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
	if (ic->sk_requests) {
		unsigned i;

		for (i = 0; i < ic->journal_sections; i++) {
			struct skcipher_request *req = ic->sk_requests[i];
			if (req) {
				kzfree(req->iv);
				skcipher_request_free(req);
			}
		}
		kvfree(ic->sk_requests);
	}
	kvfree(ic->journal_tree);
	if (ic->sb)
		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);

	if (ic->internal_hash)
		crypto_free_shash(ic->internal_hash);
	free_alg(&ic->internal_hash_alg);

	if (ic->journal_crypt)
		crypto_free_skcipher(ic->journal_crypt);
	free_alg(&ic->journal_crypt_alg);

	if (ic->journal_mac)
		crypto_free_shash(ic->journal_mac);
	free_alg(&ic->journal_mac_alg);

	kfree(ic);
}

static struct target_type integrity_target = {
	.name			= "integrity",
	.version		= {1, 0, 0},
	.module			= THIS_MODULE,
	.features		= DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
	.ctr			= dm_integrity_ctr,
	.dtr			= dm_integrity_dtr,
	.map			= dm_integrity_map,
	.postsuspend		= dm_integrity_postsuspend,
	.resume			= dm_integrity_resume,
	.status			= dm_integrity_status,
	.iterate_devices	= dm_integrity_iterate_devices,
	.io_hints		= dm_integrity_io_hints,
};

int __init dm_integrity_init(void)
{
	int r;

	journal_io_cache = kmem_cache_create("integrity_journal_io",
					     sizeof(struct journal_io), 0, 0, NULL);
	if (!journal_io_cache) {
		DMERR("can't allocate journal io cache");
		return -ENOMEM;
	}

	r = dm_register_target(&integrity_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

void dm_integrity_exit(void)
{
	dm_unregister_target(&integrity_target);
	kmem_cache_destroy(journal_io_cache);
}

module_init(dm_integrity_init);
module_exit(dm_integrity_exit);

MODULE_AUTHOR("Milan Broz");
MODULE_AUTHOR("Mikulas Patocka");
MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
MODULE_LICENSE("GPL");