 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 * This file is released under the GPL.
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#define DM_MSG_PREFIX "integrity"
#define DEFAULT_INTERLEAVE_SECTORS	32768
#define DEFAULT_JOURNAL_SIZE_FACTOR	7
#define DEFAULT_BUFFER_SECTORS		128
#define DEFAULT_JOURNAL_WATERMARK	50
#define DEFAULT_SYNC_MSEC		10000
#define DEFAULT_MAX_JOURNAL_SECTORS	131072
#define MIN_LOG2_INTERLEAVE_SECTORS	3
#define MAX_LOG2_INTERLEAVE_SECTORS	31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16
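/*
 * Illustrative note (not from the original source): with the defaults above,
 * data is interleaved in runs of 32768 sectors (16 MiB with 512-byte
 * sectors), tag data is buffered in 128-sector (64 KiB) buffers, a commit is
 * forced once the journal is 50% full, and an autocommit fires after
 * 10000 ms. The journal size factor presumably sizes the journal at roughly
 * device_sectors >> 7, capped at 131072 sectors.
 */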
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
//#define INTERNAL_VERIFY
#define SB_MAGIC	"integrt"
struct superblock {
	__u8 log2_interleave_sectors;
	__u16 integrity_tag_size;
	__u32 journal_sections;
	__u64 provided_data_sectors;	/* userspace uses this value */
#define SB_FLAG_HAVE_JOURNAL_MAC	0x1
#define JOURNAL_ENTRY_ROUNDUP		8
typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR		8
struct journal_entry {
	commit_id_t last_bytes;
#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)	do { smp_wmb(); ACCESS_ONCE((je)->u.sector) = cpu_to_le64(x); } while (0)
#define journal_entry_get_sector(je)	le64_to_cpu((je)->u.sector)
#elif defined(CONFIG_LBDAF)
#define journal_entry_set_sector(je, x)	do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32((x) >> 32); } while (0)
#define journal_entry_get_sector(je)	le64_to_cpu((je)->u.sector)
#define journal_entry_set_sector(je, x)	do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32(0); } while (0)
#define journal_entry_get_sector(je)	le32_to_cpu((je)->u.s.sector_lo)
#define journal_entry_is_unused(je)	((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)	do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)	((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)
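/*
 * Illustrative note (not from the original source): sector_hi doubles as a
 * state marker. The two impossible high-word values -1 and -2 mean "entry
 * unused" and "write to this entry in progress"; any other value is the high
 * part of a real sector number. On 32-bit kernels the smp_wmb() in
 * journal_entry_set_sector() orders the low word before the high word, so a
 * reader that sees a valid sector_hi also sees the matching sector_lo; on
 * 64-bit the sector is published with a single 64-bit store.
 */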
#define JOURNAL_BLOCK_SECTORS		8
#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)
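/*
 * Illustrative note (not from the original source): each journal sector is
 * 512 bytes whose last 8 bytes hold a commit_id, leaving
 * JOURNAL_SECTOR_DATA = 504 bytes, of which the final
 * JOURNAL_MAC_PER_SECTOR = 8 bytes hold one slice of the section MAC. A
 * section starts with JOURNAL_BLOCK_SECTORS = 8 sectors of journal entries
 * followed by the data sectors they describe, so the full section MAC is
 * JOURNAL_MAC_SIZE = 8 * 8 = 64 bytes.
 */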
struct journal_sector {
	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
	__u8 mac[JOURNAL_MAC_PER_SECTOR];
	commit_id_t commit_id;
#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, tag))
#define METADATA_PADDING_SECTORS	8
#define N_COMMIT_IDS			4
static unsigned char prev_commit_seq(unsigned char seq)
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
static unsigned char next_commit_seq(unsigned char seq)
	return (seq + 1) % N_COMMIT_IDS;
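/*
 * Illustrative note (not from the original source): commit sequence numbers
 * cycle through N_COMMIT_IDS = 4 values, 0 -> 1 -> 2 -> 3 -> 0, so
 * next_commit_seq(3) == 0 and prev_commit_seq(0) == 3. The cycle lets
 * journal replay order sections written before and after a wraparound.
 */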
 * In-memory structures
struct journal_node {
struct dm_integrity_c {
	mempool_t *journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned journal_pages;
	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;
	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;
	struct crypto_shash *journal_mac;
	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;
	sector_t provided_data_sectors;
	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned char journal_section_sectors;
	unsigned journal_sections;
	unsigned journal_entries;
	sector_t device_sectors;
	unsigned initial_sectors;
	unsigned metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	struct crypto_shash *internal_hash;
	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;
	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];
	unsigned committed_section;
	unsigned n_committed_sections;
	unsigned uncommitted_section;
	unsigned n_uncommitted_sections;
	unsigned free_section;
	unsigned char free_section_entry;
	unsigned free_sectors;
	unsigned free_sectors_threshold;
	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;
	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;
	struct bio_list flush_bio_list;
	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned autocommit_msec;
	wait_queue_head_t copy_to_journal_wait;
	struct completion crypto_backoff;
	bool journal_uptodate;
	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;
struct dm_integrity_range {
	sector_t logical_sector;
struct dm_integrity_io {
	struct work_struct work;
	struct dm_integrity_c *ic;
	struct dm_integrity_range range;
	sector_t metadata_block;
	unsigned metadata_offset;
	struct completion *completion;
	struct block_device *orig_bi_bdev;
	bio_end_io_t *orig_bi_end_io;
	struct bio_integrity_payload *orig_bi_integrity;
	struct bvec_iter orig_bi_iter;
struct journal_completion {
	struct dm_integrity_c *ic;
	struct completion comp;
struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
static struct kmem_cache *journal_io_cache;
#define JOURNAL_IO_MEMPOOL	32
#define DEBUG_print(x, ...)	printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
		pr_cont(" %02x", *bytes);
#define DEBUG_bytes(bytes, len, msg, ...)	__DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#define DEBUG_print(x, ...)			do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
 * DM Integrity profile; protection is performed at the layer above (dm-crypt)
static struct blk_integrity_profile dm_integrity_profile = {
	.name			= "DM-DIF-EXT-TAG",
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);
static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
static int dm_integrity_failed(struct dm_integrity_c *ic)
	return ACCESS_ONCE(ic->failed);
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
					  unsigned j, unsigned char seq)
	 * XOR the commit id with the section and sector numbers, so that if a
	 * piece of the journal is written to the wrong place, it is detected.
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
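/*
 * Illustrative note (not from the original source): for section i == 1,
 * sector j == 2 the stored id is commit_ids[seq] ^ 0x0000000100000002, so
 * the same sector content copied to a different (i, j) position fails the
 * commit id check during replay.
 */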
static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
	__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
	*area = data_sector >> log2_interleave_sectors;
	*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
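/*
 * Illustrative note (not from the original source): with the default
 * interleave of 2^15 = 32768 sectors, data_sector 100000 maps to area 3
 * (3 * 32768 = 98304) and offset 1696 (100000 - 98304).
 */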
static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned *metadata_offset)
	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;
	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	*metadata_offset = mo;
static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
		result += (area + 1) * ic->metadata_run;
	result += (sector_t)ic->initial_sectors + offset;
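/*
 * Illustrative note (not from the original source): the on-disk layout is
 * [superblock + journal (initial_sectors)] followed by repeating
 * [metadata run][interleave_sectors of data] groups, so area N's data begins
 * after N + 1 metadata runs and N full areas of data.
 */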
static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;
	return dm_io(&io_req, 1, &io_loc, NULL);
static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
				 bool e, const char *function)
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		printk(KERN_CRIT "%s: invalid access at (%u,%u), limit (%u,%u)\n",
		       function, section, offset, ic->journal_sections, limit);
static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			       unsigned *pl_index, unsigned *pl_offset)
	access_journal_check(ic, section, offset, false, "page_list_location");
	sector = section * ic->journal_section_sectors + offset;
	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
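/*
 * Illustrative note (not from the original source): the journal is kept in a
 * list of individually allocated pages (struct page_list), so a journal
 * sector is addressed as a page index plus a byte offset. With 4 KiB pages
 * that is 8 sectors per page: journal sector 11 becomes pl_index 1,
 * pl_offset 3 << SECTOR_SHIFT = 1536.
 */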
static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned section, unsigned offset, unsigned *n_sectors)
	unsigned pl_index, pl_offset;
	page_list_location(ic, section, offset, &pl_index, &pl_offset);
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;
	va = lowmem_page_address(pl[pl_index].page);
	return (struct journal_sector *)(va + pl_offset);
static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
	return access_page_list(ic, ic->journal, section, offset, NULL);
static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
	unsigned rel_sector, offset;
	struct journal_sector *js;
	access_journal_check(ic, section, n, true, "access_journal_entry");
	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;
	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
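/*
 * Illustrative note (not from the original source): entries are striped
 * across the 8 entry sectors of a section: entry n lives in sector n % 8 at
 * slot n / 8, so entries 0..7 occupy slot 0 of sectors 0..7 and entry 8 is
 * slot 1 of sector 0.
 */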
static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
	access_journal_check(ic, section, n, true, "access_journal_data");
	return access_journal(ic, section, n + JOURNAL_BLOCK_SECTORS);
static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	desc->tfm = ic->journal_mac;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	r = crypto_shash_init(desc);
		dm_integrity_io_error(ic, "crypto_shash_init", r);
	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);
		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
			dm_integrity_io_error(ic, "crypto_shash_update", r);
	size = crypto_shash_digestsize(ic->journal_mac);
	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
			dm_integrity_io_error(ic, "crypto_shash_final", r);
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
		r = crypto_shash_final(desc, digest);
			dm_integrity_io_error(ic, "crypto_shash_final", r);
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	memset(result, 0, JOURNAL_MAC_SIZE);
static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
	__u8 result[JOURNAL_MAC_SIZE];
	if (!ic->journal_mac)
	section_mac(ic, section, result);
	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
static void complete_journal_op(void *context)
	struct journal_completion *comp = context;
	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			unsigned n_sections, struct journal_completion *comp)
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;
	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	page_list_location(ic, section, 0, &pl_index, &pl_offset);
	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);
	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);
	section_index = pl_index;
		struct page *src_pages[2];
		struct page *dst_page;
		while (unlikely(pl_index == section_index)) {
				rw_section_mac(ic, section, true);
			page_list_location(ic, section, 0, &section_index, &dummy);
		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;
		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);
		n_bytes -= this_step;
	async_tx_issue_pending_all();
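/*
 * Illustrative note (not from the original source): "encryption" here is an
 * XOR of the journal pages with the pages in ic->journal_xor (presumably a
 * keystream prepared when the target is constructed), offloaded page by page
 * to the async_tx XOR engine. Because XOR is an involution, the same routine
 * serves both directions; in_flight is pre-charged with the page count so
 * complete_journal_op() only fires after the last XOR finishes.
 */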
static void complete_journal_encrypt(struct crypto_async_request *req, int err)
	struct journal_completion *comp = req->data;
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	complete_journal_op(comp);
static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
				      complete_journal_encrypt, comp);
		r = crypto_skcipher_encrypt(req);
		r = crypto_skcipher_decrypt(req);
	if (likely(r == -EINPROGRESS))
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
	dm_integrity_io_error(comp->ic, "encrypt", r);
static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			  unsigned n_sections, struct journal_completion *comp)
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;
	atomic_add(2, &comp->in_flight);
	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
		struct skcipher_request *req;
			rw_section_mac(ic, section, true);
		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		memcpy(iv, iv + ivsize, ivsize);
		req->src = source_sg[section];
		req->dst = target_sg[section];
		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);
	} while (n_sections);
	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
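/*
 * Illustrative note (not from the original source): in_flight is pre-charged
 * by 2 so the completion cannot fire while requests are still being issued.
 * Each request that goes asynchronous (do_crypt() returning true) adds one
 * more reference, dropped by its callback; the atomic_dec() plus
 * complete_journal_op() at the end release the two initial references once
 * the issue loop is done.
 */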
static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			    unsigned n_sections, struct journal_completion *comp)
		return xor_journal(ic, encrypt, section, n_sections, comp);
		return crypt_journal(ic, encrypt, section, n_sections, comp);
static void complete_journal_io(unsigned long error, void *context)
	struct journal_completion *comp = context;
	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
		       unsigned n_sections, struct journal_completion *comp)
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned sector, n_sectors, pl_index, pl_offset;
	if (unlikely(dm_integrity_failed(ic))) {
			complete_journal_io(-1UL, comp);
	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;
	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_PAGE_LIST;
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
		io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;
	r = dm_io(&io_req, 1, &io_loc, NULL);
		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		complete_journal_io(-1UL, comp);
static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	io_comp.comp = COMPLETION_INITIALIZER_ONSTACK(io_comp.comp);
	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, commit_sections, &io_comp);
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
				crypt_comp_2.ic = ic;
				crypt_comp_2.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
	wait_for_completion_io(&io_comp.comp);
static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned sector, pl_index, pl_offset;
	if (unlikely(dm_integrity_failed(ic))) {
	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;
	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
	io_req.bi_op = REQ_OP_WRITE;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = ic->start + target;
	io_loc.count = n_sectors;
	r = dm_io(&io_req, 1, &io_loc, NULL);
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
			n = &range->node.rb_left;
		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
			n = &range->node.rb_right;
	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);
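/*
 * Illustrative note (not from the original source): in_progress is an
 * rb-tree of sector ranges with I/O in flight, keyed by logical_sector.
 * Insertion fails when the new range overlaps an existing one; the caller
 * then waits on endio_wait until the conflicting range is removed, which is
 * how overlapping bios are serialized.
 */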
static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
	rb_erase(&range->node, &ic->in_progress);
	wake_up_locked(&ic->endio_wait);
static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
static void init_journal_node(struct journal_node *node)
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
	struct rb_node **link;
	struct rb_node *parent;
	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));
	link = &ic->journal_tree_root.rb_node;
		struct journal_node *j;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
			link = &j->node.rb_right;
	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
#define NOT_FOUND	(-1U)
static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned found = NOT_FOUND;
	*next_sector = (sector_t)-1;
		struct journal_node *j = container_of(n, struct journal_node, node);
		if (sector == j->sector) {
			found = j - ic->journal_tree;
		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_right;
static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
	struct journal_node *node, *next_node;
	struct rb_node *next;
	if (unlikely(pos >= ic->journal_entries))
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
	if (unlikely(node->sector != sector))
	next = rb_next(&node->node);
	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned next_section;
	BUG_ON(RB_EMPTY_NODE(&node->node));
	next = rb_next(&node->node);
	next_node = container_of(next, struct journal_node, node);
	if (next_node->sector != node->sector)
	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
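/*
 * Illustrative note (not from the original source): the journal tree may
 * hold several nodes for the same logical sector, one per journal entry.
 * find_newer_committed_node() asks whether a later entry for the same sector
 * falls inside the committed window
 * [committed_section, committed_section + n_committed_sections); the second
 * comparison repeats the test with the section shifted by journal_sections
 * to handle the window wrapping past the end of the journal.
 */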
static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned *metadata_offset, unsigned total_size, int op)
	unsigned char *data, *dp;
	struct dm_buffer *b;
	r = dm_integrity_failed(ic);
	data = dm_bufio_read(ic->bufio, *metadata_block, &b);
	if (unlikely(IS_ERR(data)))
		return PTR_ERR(data);
	to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
	dp = data + *metadata_offset;
	if (op == TAG_READ) {
		memcpy(tag, dp, to_copy);
	} else if (op == TAG_WRITE) {
		memcpy(dp, tag, to_copy);
		dm_bufio_mark_buffer_dirty(b);
		/* e.g.: op == TAG_CMP */
		if (unlikely(memcmp(dp, tag, to_copy))) {
			for (i = 0; i < to_copy; i++) {
				if (dp[i] != tag[i])
			dm_bufio_release(b);
	dm_bufio_release(b);
	*metadata_offset += to_copy;
	if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
		(*metadata_block)++;
		*metadata_offset = 0;
	total_size -= to_copy;
	} while (unlikely(total_size));
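/*
 * Illustrative note (not from the original source): this is the single
 * accessor for tag metadata. It walks the tag area through dm-bufio one
 * buffer at a time, and op selects the behaviour: TAG_READ copies tags out,
 * TAG_WRITE copies them in and dirties the buffer, and TAG_CMP compares and
 * reports the position of the first mismatch via its return value so the
 * caller can compute the failing sector.
 */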
static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
	r = dm_bufio_write_dirty_buffers(ic->bufio);
		dm_integrity_io_error(ic, "writing tags", r);
static void sleep_on_endio_wait(struct dm_integrity_c *ic)
	DECLARE_WAITQUEUE(wait, current);
	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
static void autocommit_fn(unsigned long data)
	struct dm_integrity_c *ic = (struct dm_integrity_c *)data;
	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
static void schedule_autocommit(struct dm_integrity_c *ic)
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
	spin_lock_irq(&ic->endio_wait.lock);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irq(&ic->endio_wait.lock);
	queue_work(ic->commit_wq, &ic->commit_work);
static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
	int r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_error)
static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	if (unlikely(dio->fua) && likely(!bio->bi_error) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
static void dec_in_flight(struct dm_integrity_io *dio)
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		remove_range(ic, &dio->range);
		if (unlikely(dio->write))
			schedule_autocommit(ic);
		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		if (unlikely(dio->bi_error) && !bio->bi_error)
			bio->bi_error = dio->bi_error;
		if (likely(!bio->bi_error) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
		do_endio_flush(ic, dio);
static void integrity_end_io(struct bio *bio)
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
	bio->bi_iter = dio->orig_bi_iter;
	bio->bi_bdev = dio->orig_bi_bdev;
	if (dio->orig_bi_integrity) {
		bio->bi_integrity = dio->orig_bi_integrity;
		bio->bi_opf |= REQ_INTEGRITY;
	bio->bi_end_io = dio->orig_bi_end_io;
	if (dio->completion)
		complete(dio->completion);
static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
	__u64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	unsigned digest_size;
	req->tfm = ic->internal_hash;
	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
	r = crypto_shash_update(req, data, 1 << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);
	/* this shouldn't happen anyway, the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
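/*
 * Illustrative note (not from the original source): the per-sector tag is
 * hash(sector_number_le || 512 bytes of data), truncated or zero-padded to
 * tag_size. Mixing in the sector number means identical data at different
 * sectors produces different tags, which catches misdirected writes. On a
 * hash error the result is filled with random bytes so a later compare will
 * almost surely fail rather than falsely pass.
 */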
static void integrity_metadata(struct work_struct *w)
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	struct dm_integrity_c *ic = dio->ic;
	if (ic->internal_hash) {
		struct bvec_iter iter;
		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
		char checksums_onstack[ic->tag_size + extra_space];
		unsigned sectors_to_process = dio->range.n_sectors;
		sector_t sector = dio->range.logical_sector;
		if (unlikely(ic->mode == 'R'))
		checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT) * ic->tag_size + extra_space,
				    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
			checksums = checksums_onstack;
		__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
			char *mem, *checksums_ptr;
			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
			checksums_ptr = checksums;
				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
				checksums_ptr += ic->tag_size;
				sectors_to_process--;
				pos += 1 << SECTOR_SHIFT;
			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
						checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
				DMERR("Checksum failed at sector 0x%llx",
				      (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
			if (likely(checksums != checksums_onstack))
			if (!sectors_to_process)
			if (unlikely(pos < bv.bv_len)) {
				bv.bv_offset += pos;
		if (likely(checksums != checksums_onstack))
		struct bio_integrity_payload *bip = dio->orig_bi_integrity;
			struct bvec_iter iter;
			unsigned data_to_process = dio->range.n_sectors * ic->tag_size;
			bip_for_each_vec(biv, bip, iter) {
				BUG_ON(PageHighMem(biv.bv_page));
				tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
				this_len = min(biv.bv_len, data_to_process);
				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
							this_len, !dio->write ? TAG_READ : TAG_WRITE);
				data_to_process -= this_len;
				if (!data_to_process)
static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
	struct dm_integrity_c *ic = ti->private;
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
	sector_t area, offset;
	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		submit_flush_bio(ic, dio);
		return DM_MAPIO_SUBMITTED;
	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	dio->write = bio_op(bio) == REQ_OP_WRITE;
	dio->fua = dio->write && bio->bi_opf & REQ_FUA;
	if (unlikely(dio->fua)) {
		 * Don't pass down the FUA flag because we have to flush
		 * disk cache anyway.
		bio->bi_opf &= ~REQ_FUA;
	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
		      (unsigned long long)ic->provided_data_sectors);
	if (unlikely(ic->mode == 'R') && unlikely(dio->write))
	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
	dm_integrity_map_continue(dio, true);
	return DM_MAPIO_SUBMITTED;
static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
				 unsigned journal_section, unsigned journal_entry)
	struct dm_integrity_c *ic = dio->ic;
	sector_t logical_sector;
	logical_sector = dio->range.logical_sector;
	n_sectors = dio->range.n_sectors;
		struct bio_vec bv = bio_iovec(bio);
		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
			bv.bv_len = n_sectors << SECTOR_SHIFT;
		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
		mem = kmap_atomic(bv.bv_page);
		if (likely(dio->write))
			flush_dcache_page(bv.bv_page);
			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
			if (unlikely(!dio->write)) {
				struct journal_sector *js;
				if (unlikely(journal_entry_is_inprogress(je))) {
					flush_dcache_page(bv.bv_page);
					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
				BUG_ON(journal_entry_get_sector(je) != logical_sector);
				js = access_journal_data(ic, journal_section, journal_entry);
				memcpy(mem + bv.bv_offset, js, JOURNAL_SECTOR_DATA);
				memcpy(mem + bv.bv_offset + JOURNAL_SECTOR_DATA, &je->last_bytes, sizeof je->last_bytes);
#ifdef INTERNAL_VERIFY
				if (ic->internal_hash) {
					char checksums_onstack[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];
					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
					if (unlikely(memcmp(checksums_onstack, je->tag, ic->tag_size))) {
						DMERR("Checksum failed when reading from journal, at sector 0x%llx",
						      (unsigned long long)logical_sector);
			if (!ic->internal_hash) {
				struct bio_integrity_payload *bip = bio_integrity(bio);
				unsigned tag_todo = ic->tag_size;
				char *tag_ptr = je->tag;
				if (bip) do {
					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
					unsigned tag_now = min(biv.bv_len, tag_todo);
					BUG_ON(PageHighMem(biv.bv_page));
					tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
					if (likely(dio->write))
						memcpy(tag_ptr, tag_addr, tag_now);
						memcpy(tag_addr, tag_ptr, tag_now);
					bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
					tag_todo -= tag_now;
				} while (unlikely(tag_todo)); else {
					if (likely(dio->write))
						memset(tag_ptr, 0, tag_todo);
			if (likely(dio->write)) {
				struct journal_sector *js;
				js = access_journal_data(ic, journal_section, journal_entry);
				memcpy(js, mem + bv.bv_offset, 1 << SECTOR_SHIFT);
				je->last_bytes = js->commit_id;
				if (ic->internal_hash) {
					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
					if (unlikely(digest_size > ic->tag_size)) {
						char checksums_onstack[digest_size];
						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
						memcpy(je->tag, checksums_onstack, ic->tag_size);
						integrity_sector_checksum(ic, logical_sector, (char *)js, je->tag);
				journal_entry_set_sector(je, logical_sector);
			if (unlikely(journal_entry == ic->journal_section_entries)) {
				wraparound_section(ic, &journal_section);
			bv.bv_offset += 1 << SECTOR_SHIFT;
		} while (bv.bv_len -= 1 << SECTOR_SHIFT);
		if (unlikely(!dio->write))
			flush_dcache_page(bv.bv_page);
	} while (n_sectors);
	if (likely(dio->write)) {
		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
			wake_up(&ic->copy_to_journal_wait);
		if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
			queue_work(ic->commit_wq, &ic->commit_work);
			schedule_autocommit(ic);
		remove_range(ic, &dio->range);
	if (unlikely(bio->bi_iter.bi_size)) {
		sector_t area, offset;
		dio->range.logical_sector = logical_sector;
		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
	struct dm_integrity_c *ic = dio->ic;
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	unsigned journal_section, journal_entry;
	unsigned journal_read_pos;
	struct completion read_comp;
	bool need_sync_io = ic->internal_hash && !dio->write;
	if (need_sync_io && from_map) {
		INIT_WORK(&dio->work, integrity_bio_wait);
		queue_work(ic->metadata_wq, &dio->work);
	spin_lock_irq(&ic->endio_wait.lock);
	if (unlikely(dm_integrity_failed(ic))) {
		spin_unlock_irq(&ic->endio_wait.lock);
	dio->range.n_sectors = bio_sectors(bio);
	journal_read_pos = NOT_FOUND;
	if (likely(ic->mode == 'J')) {
			unsigned next_entry, i, pos;
			dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors);
			if (unlikely(!dio->range.n_sectors))
			ic->free_sectors -= dio->range.n_sectors;
			journal_section = ic->free_section;
			journal_entry = ic->free_section_entry;
			next_entry = ic->free_section_entry + dio->range.n_sectors;
			ic->free_section_entry = next_entry % ic->journal_section_entries;
			ic->free_section += next_entry / ic->journal_section_entries;
			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
			wraparound_section(ic, &ic->free_section);
			pos = journal_section * ic->journal_section_entries + journal_entry;
			ws = journal_section;
			for (i = 0; i < dio->range.n_sectors; i++) {
				struct journal_entry *je;
				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
				if (unlikely(pos >= ic->journal_entries))
				je = access_journal_entry(ic, ws, we);
				BUG_ON(!journal_entry_is_unused(je));
				journal_entry_set_inprogress(je);
				if (unlikely(we == ic->journal_section_entries)) {
					wraparound_section(ic, &ws);
			spin_unlock_irq(&ic->endio_wait.lock);
			goto journal_read_write;
			sector_t next_sector;
			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
			if (likely(journal_read_pos == NOT_FOUND)) {
				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
					dio->range.n_sectors = next_sector - dio->range.logical_sector;
				for (i = 1; i < dio->range.n_sectors; i++) {
					if (!test_journal_node(ic, journal_read_pos + i, dio->range.logical_sector + i))
				dio->range.n_sectors = i;
	if (unlikely(!add_new_range(ic, &dio->range))) {
		 * We must not sleep in the request routine because it could
		 * stall bios on current->bio_list.
		 * So, we offload the bio to a workqueue if we have to sleep.
			spin_unlock_irq(&ic->endio_wait.lock);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			sleep_on_endio_wait(ic);
	spin_unlock_irq(&ic->endio_wait.lock);
	if (unlikely(journal_read_pos != NOT_FOUND)) {
		journal_section = journal_read_pos / ic->journal_section_entries;
		journal_entry = journal_read_pos % ic->journal_section_entries;
		goto journal_read_write;
	dio->in_flight = (atomic_t)ATOMIC_INIT(2);
		read_comp = COMPLETION_INITIALIZER_ONSTACK(read_comp);
		dio->completion = &read_comp;
		dio->completion = NULL;
	dio->orig_bi_iter = bio->bi_iter;
	dio->orig_bi_bdev = bio->bi_bdev;
	bio->bi_bdev = ic->dev->bdev;
	dio->orig_bi_integrity = bio_integrity(bio);
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;
	dio->orig_bi_end_io = bio->bi_end_io;
	bio->bi_end_io = integrity_end_io;
	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
	bio->bi_iter.bi_sector += ic->start;
	generic_make_request(bio);
		wait_for_completion_io(&read_comp);
		integrity_metadata(&dio->work);
		INIT_WORK(&dio->work, integrity_metadata);
		queue_work(ic->metadata_wq, &dio->work);
journal_read_write:
	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
	do_endio_flush(ic, dio);
static void integrity_bio_wait(struct work_struct *w)
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	dm_integrity_map_continue(dio, false);
static void pad_uncommitted(struct dm_integrity_c *ic)
	if (ic->free_section_entry) {
		ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
		ic->free_section_entry = 0;
		wraparound_section(ic, &ic->free_section);
		ic->n_uncommitted_sections++;
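/*
 * Illustrative note (not from the original source): commits operate on whole
 * sections, so a partially filled section is padded out: the unused entries
 * are subtracted from free_sectors and the free pointer advances to the next
 * section boundary before the commit proceeds.
 */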
static void integrity_commit(struct work_struct *w)
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
	unsigned commit_start, commit_sections;
	struct bio *flushes;
	del_timer(&ic->autocommit_timer);
	spin_lock_irq(&ic->endio_wait.lock);
	flushes = bio_list_get(&ic->flush_bio_list);
	if (unlikely(ic->mode != 'J')) {
		spin_unlock_irq(&ic->endio_wait.lock);
		dm_integrity_flush_buffers(ic);
		goto release_flush_bios;
	pad_uncommitted(ic);
	commit_start = ic->uncommitted_section;
	commit_sections = ic->n_uncommitted_sections;
	spin_unlock_irq(&ic->endio_wait.lock);
	if (!commit_sections)
		goto release_flush_bios;
	for (n = 0; n < commit_sections; n++) {
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je;
			je = access_journal_entry(ic, i, j);
			io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js;
			js = access_journal(ic, i, j);
			js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
		if (unlikely(i >= ic->journal_sections))
			ic->commit_seq = next_commit_seq(ic->commit_seq);
		wraparound_section(ic, &i);
	write_journal(ic, commit_start, commit_sections);
	spin_lock_irq(&ic->endio_wait.lock);
	ic->uncommitted_section += commit_sections;
	wraparound_section(ic, &ic->uncommitted_section);
	ic->n_uncommitted_sections -= commit_sections;
	ic->n_committed_sections += commit_sections;
	spin_unlock_irq(&ic->endio_wait.lock);
	if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
		queue_work(ic->writer_wq, &ic->writer_work);
release_flush_bios:
		struct bio *next = flushes->bi_next;
		flushes->bi_next = NULL;
		do_endio(ic, flushes);
static void complete_copy_from_journal(unsigned long error, void *context)
	struct journal_io *io = context;
	struct journal_completion *comp = io->comp;
	struct dm_integrity_c *ic = comp->ic;
	remove_range(ic, &io->range);
	mempool_free(io, ic->journal_io_mempool);
	if (unlikely(error != 0))
		dm_integrity_io_error(ic, "copying from journal", -EIO);
	complete_journal_op(comp);
static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
			     unsigned write_sections, bool from_replay)
	struct journal_completion comp;
	comp.in_flight = (atomic_t)ATOMIC_INIT(1);
	comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
	for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
#ifndef INTERNAL_VERIFY
		if (unlikely(from_replay))
			rw_section_mac(ic, i, false);
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);
			sector_t sec, area, offset;
			unsigned k, l, next_loop;
			sector_t metadata_block;
			unsigned metadata_offset;
			struct journal_io *io;
			if (journal_entry_is_unused(je))
			BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
			sec = journal_entry_get_sector(je);
			get_area_and_offset(ic, sec, &area, &offset);
			access_journal_data(ic, i, j)->commit_id = je->last_bytes;
			for (k = j + 1; k < ic->journal_section_entries; k++) {
				struct journal_entry *je2 = access_journal_entry(ic, i, k);
				sector_t sec2, area2, offset2;
				if (journal_entry_is_unused(je2))
				BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
				sec2 = journal_entry_get_sector(je2);
				get_area_and_offset(ic, sec2, &area2, &offset2);
				if (area2 != area || offset2 != offset + (k - j))
				access_journal_data(ic, i, k)->commit_id = je2->last_bytes;
			io = mempool_alloc(ic->journal_io_mempool, GFP_NOIO);
			io->range.logical_sector = sec;
			io->range.n_sectors = k - j;
			spin_lock_irq(&ic->endio_wait.lock);
			while (unlikely(!add_new_range(ic, &io->range)))
				sleep_on_endio_wait(ic);
			if (likely(!from_replay)) {
				struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
				/* don't write if there is a newer committed sector */
				while (j < k && find_newer_committed_node(ic, &section_node[j])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, j);
					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[j]);
				while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[k - 1]);
					remove_range_unlocked(ic, &io->range);
					spin_unlock_irq(&ic->endio_wait.lock);
					mempool_free(io, ic->journal_io_mempool);
				for (l = j; l < k; l++) {
					remove_journal_node(ic, &section_node[l]);
			spin_unlock_irq(&ic->endio_wait.lock);
			metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
			for (l = j; l < k; l++) {
				struct journal_entry *je2 = access_journal_entry(ic, i, l);
#ifndef INTERNAL_VERIFY
				    unlikely(from_replay) &&
				    ic->internal_hash) {
					char test_tag[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];
					integrity_sector_checksum(ic, sec + (l - j),
								  (char *)access_journal_data(ic, i, l), test_tag);
					if (unlikely(memcmp(test_tag, je2->tag, ic->tag_size)))
						dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
				journal_entry_set_unused(je2);
				r = dm_integrity_rw_tag(ic, je2->tag, &metadata_block, &metadata_offset,
							ic->tag_size, TAG_WRITE);
					dm_integrity_io_error(ic, "writing tags", r);
			atomic_inc(&comp.in_flight);
			copy_from_journal(ic, i, j, k - j, get_data_sector(ic, area, offset),
					  complete_copy_from_journal, io);
	dm_bufio_write_dirty_buffers_async(ic->bufio);
	complete_journal_op(&comp);
	wait_for_completion_io(&comp.comp);
	dm_integrity_flush_buffers(ic);
static void integrity_writer(struct work_struct *w)
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
	unsigned write_start, write_sections;
	unsigned prev_free_sectors;
	/* the following test is not needed, but it tests the replay code */
	if (ACCESS_ONCE(ic->suspending))
	spin_lock_irq(&ic->endio_wait.lock);
	write_start = ic->committed_section;
	write_sections = ic->n_committed_sections;
	spin_unlock_irq(&ic->endio_wait.lock);
	if (!write_sections)
	do_journal_write(ic, write_start, write_sections, false);
	spin_lock_irq(&ic->endio_wait.lock);
	ic->committed_section += write_sections;
	wraparound_section(ic, &ic->committed_section);
	ic->n_committed_sections -= write_sections;
	prev_free_sectors = ic->free_sectors;
	ic->free_sectors += write_sections * ic->journal_section_entries;
	if (unlikely(!prev_free_sectors))
		wake_up_locked(&ic->endio_wait);
	spin_unlock_irq(&ic->endio_wait.lock);
static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
			 unsigned n_sections, unsigned char commit_seq)
	for (n = 0; n < n_sections; n++) {
		i = start_section + n;
		wraparound_section(ic, &i);
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);
			memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
			js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);
			journal_entry_set_unused(je);
	write_journal(ic, start_section, n_sections);
static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
	for (k = 0; k < N_COMMIT_IDS; k++) {
		if (dm_integrity_commit_id(ic, i, j, k) == id)
	dm_integrity_io_error(ic, "journal commit id", -EIO);
static void replay_journal(struct dm_integrity_c *ic)
	bool used_commit_ids[N_COMMIT_IDS];
	unsigned max_commit_id_sections[N_COMMIT_IDS];
	unsigned write_start, write_sections;
	unsigned continue_section;
	unsigned char unused, last_used, want_commit_seq;
	if (ic->mode == 'R')
	if (ic->journal_uptodate)
	if (!ic->just_formatted) {
		DEBUG_print("reading journal\n");
		rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
			DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
		if (ic->journal_io) {
			struct journal_completion crypt_comp;
			crypt_comp.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp.comp);
			crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
			wait_for_completion(&crypt_comp.comp);
			DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
	if (dm_integrity_failed(ic))
	journal_empty = true;
	memset(used_commit_ids, 0, sizeof used_commit_ids);
	memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
	for (i = 0; i < ic->journal_sections; i++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);
			k = find_commit_seq(ic, i, j, js->commit_id);
			used_commit_ids[k] = true;
			max_commit_id_sections[k] = i;
		if (journal_empty) {
			for (j = 0; j < ic->journal_section_entries; j++) {
				struct journal_entry *je = access_journal_entry(ic, i, j);
				if (!journal_entry_is_unused(je)) {
					journal_empty = false;
	if (!used_commit_ids[N_COMMIT_IDS - 1]) {
		unused = N_COMMIT_IDS - 1;
		while (unused && !used_commit_ids[unused - 1])
		for (unused = 0; unused < N_COMMIT_IDS; unused++)
			if (!used_commit_ids[unused])
		if (unused == N_COMMIT_IDS) {
			dm_integrity_io_error(ic, "journal commit ids", -EIO);
	DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
		    unused, used_commit_ids[0], used_commit_ids[1],
		    used_commit_ids[2], used_commit_ids[3]);
	last_used = prev_commit_seq(unused);
	want_commit_seq = prev_commit_seq(last_used);
	if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
		journal_empty = true;
	write_start = max_commit_id_sections[last_used] + 1;
	if (unlikely(write_start >= ic->journal_sections))
		want_commit_seq = next_commit_seq(want_commit_seq);
	wraparound_section(ic, &write_start);
	for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);
			if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
				 * This could be caused by crash during writing.
				 * We won't replay the inconsistent part of the
				 * journal.
				DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
					    i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
		if (unlikely(i >= ic->journal_sections))
			want_commit_seq = next_commit_seq(want_commit_seq);
		wraparound_section(ic, &i);
	if (!journal_empty) {
		DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
			    write_sections, write_start, want_commit_seq);
		do_journal_write(ic, write_start, write_sections, true);
	if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
		continue_section = write_start;
		ic->commit_seq = want_commit_seq;
		DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
		unsigned char erase_seq;
		DEBUG_print("clearing journal\n");
		erase_seq = prev_commit_seq(prev_commit_seq(last_used));
		init_journal(ic, s, 1, erase_seq);
		wraparound_section(ic, &s);
		if (ic->journal_sections >= 2) {
			init_journal(ic, s, ic->journal_sections - 2, erase_seq);
			s += ic->journal_sections - 2;
			wraparound_section(ic, &s);
			init_journal(ic, s, 1, erase_seq);
		continue_section = 0;
		ic->commit_seq = next_commit_seq(erase_seq);
	ic->committed_section = continue_section;
	ic->n_committed_sections = 0;
	ic->uncommitted_section = continue_section;
	ic->n_uncommitted_sections = 0;
	ic->free_section = continue_section;
	ic->free_section_entry = 0;
	ic->free_sectors = ic->journal_entries;
	ic->journal_tree_root = RB_ROOT;
	for (i = 0; i < ic->journal_entries; i++)
		init_journal_node(&ic->journal_tree[i]);
static void dm_integrity_postsuspend(struct dm_target *ti)
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
	del_timer_sync(&ic->autocommit_timer);
	ic->suspending = true;
	queue_work(ic->commit_wq, &ic->commit_work);
	drain_workqueue(ic->commit_wq);
	if (ic->mode == 'J') {
		drain_workqueue(ic->writer_wq);
		dm_integrity_flush_buffers(ic);
	ic->suspending = false;
	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
	ic->journal_uptodate = true;
static void dm_integrity_resume(struct dm_target *ti)
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2116 static void dm_integrity_status(struct dm_target *ti, status_type_t type,
2117 unsigned status_flags, char *result, unsigned maxlen)
2119 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2124 case STATUSTYPE_INFO:
2128 case STATUSTYPE_TABLE: {
2129 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
2130 watermark_percentage += ic->journal_entries / 2;
2131 do_div(watermark_percentage, ic->journal_entries);
2133 arg_count += !!ic->internal_hash_alg.alg_string;
2134 arg_count += !!ic->journal_crypt_alg.alg_string;
2135 arg_count += !!ic->journal_mac_alg.alg_string;
2136 DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
2137 ic->tag_size, ic->mode, arg_count);
2138 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
2139 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
2140 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
2141 DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
2142 DMEMIT(" commit_time:%u", ic->autocommit_msec);
2144 #define EMIT_ALG(a, n) \
2146 if (ic->a.alg_string) { \
2147 DMEMIT(" %s:%s", n, ic->a.alg_string); \
2148 if (ic->a.key_string) \
2149 DMEMIT(":%s", ic->a.key_string);\
2152 EMIT_ALG(internal_hash_alg, "internal_hash");
2153 EMIT_ALG(journal_crypt_alg, "journal_crypt");
2154 EMIT_ALG(journal_mac_alg, "journal_mac");
2155 break;
2160 static int dm_integrity_iterate_devices(struct dm_target *ti,
2161 iterate_devices_callout_fn fn, void *data)
2163 struct dm_integrity_c *ic = ti->private;
2165 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
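/*
 * A journal section consists of JOURNAL_BLOCK_SECTORS sectors that hold
 * journal entries plus one sector of data for each entry, hence
 * journal_section_sectors = journal_section_entries + JOURNAL_BLOCK_SECTORS.
 * When a journal MAC is used, JOURNAL_MAC_PER_SECTOR bytes of each entry
 * sector are reserved for it, reducing the entries per sector.
 *
 * A worked example, assuming 512-byte sectors, no journal MAC and
 * offsetof(struct journal_entry, tag) == 16, with tag_size == 4:
 * journal_entry_size = roundup(16 + 4, 8) = 24
 * journal_entries_per_sector = 504 / 24 = 21
 * journal_section_entries = 21 * 8 = 168
 * journal_section_sectors = 168 + 8 = 176
 */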
2168 static void calculate_journal_section_size(struct dm_integrity_c *ic)
2170 unsigned sector_space = JOURNAL_SECTOR_DATA;
2172 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
2173 ic->journal_entry_size = roundup(offsetof(struct journal_entry, tag) + ic->tag_size,
2174 JOURNAL_ENTRY_ROUNDUP);
2176 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
2177 sector_space -= JOURNAL_MAC_PER_SECTOR;
2178 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
2179 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
2180 ic->journal_section_sectors = ic->journal_section_entries + JOURNAL_BLOCK_SECTORS;
2181 ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
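/*
 * On-disk layout: the superblock (SB_SECTORS), the journal (together they
 * make up initial_sectors), then alternating runs of tag metadata
 * (metadata_run sectors, aligned to METADATA_PADDING_SECTORS) and data areas
 * of 2^log2_interleave_sectors sectors. This function verifies that the
 * layout implied by provided_data_sectors fits in the underlying device.
 */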
2184 static int calculate_device_limits(struct dm_integrity_c *ic)
2186 __u64 initial_sectors;
2187 sector_t last_sector, last_area, last_offset;
2189 calculate_journal_section_size(ic);
2190 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
2191 if (initial_sectors + METADATA_PADDING_SECTORS >= ic->device_sectors || initial_sectors > UINT_MAX)
2192 return -EINVAL;
2193 ic->initial_sectors = initial_sectors;
2195 ic->metadata_run = roundup((__u64)ic->tag_size << ic->sb->log2_interleave_sectors,
2196 (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
2197 if (!(ic->metadata_run & (ic->metadata_run - 1)))
2198 ic->log2_metadata_run = __ffs(ic->metadata_run);
2199 else
2200 ic->log2_metadata_run = -1;
2202 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
2203 last_sector = get_data_sector(ic, last_area, last_offset);
2205 if (ic->start + last_sector < last_sector || ic->start + last_sector >= ic->device_sectors)
2206 return -EINVAL;
2208 return 0;
2211 static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
2213 unsigned journal_sections;
2214 int test_bit;
2216 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
2217 memcpy(ic->sb->magic, SB_MAGIC, 8);
2218 ic->sb->version = SB_VERSION;
2219 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
2220 if (ic->journal_mac_alg.alg_string)
2221 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
2223 calculate_journal_section_size(ic);
2224 journal_sections = journal_sectors / ic->journal_section_sectors;
2225 if (!journal_sections)
2226 journal_sections = 1;
2227 ic->sb->journal_sections = cpu_to_le32(journal_sections);
2229 if (!interleave_sectors)
2230 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
2231 ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
2232 ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
2233 ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
2235 ic->provided_data_sectors = 0;
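/*
 * Determine the largest provided_data_sectors greedily: try to set each bit
 * from the most significant down and keep it only if
 * calculate_device_limits() reports that the resulting layout still fits.
 */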
2236 for (test_bit = fls64(ic->device_sectors) - 1; test_bit >= 3; test_bit--) {
2237 __u64 prev_data_sectors = ic->provided_data_sectors;
2239 ic->provided_data_sectors |= (sector_t)1 << test_bit;
2240 if (calculate_device_limits(ic))
2241 ic->provided_data_sectors = prev_data_sectors;
2244 if (!ic->provided_data_sectors)
2245 return -EINVAL;
2247 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
2249 return 0;
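/*
 * Register an integrity profile with the block layer so that upper layers
 * can attach ic->tag_size bytes of tag data to each logical block; the tags
 * are stored in the interleaved metadata areas rather than in hardware
 * protection information.
 */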
2252 static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
2254 struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
2255 struct blk_integrity bi;
2257 memset(&bi, 0, sizeof(bi));
2258 bi.profile = &dm_integrity_profile;
2259 bi.tuple_size = ic->tag_size * (queue_logical_block_size(disk->queue) >> SECTOR_SHIFT);
2260 bi.tag_size = ic->tag_size;
2262 blk_integrity_register(disk, &bi);
2263 blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
2266 /* FIXME: use new kvmalloc */
2267 static void *dm_integrity_kvmalloc(size_t size, gfp_t gfp)
2269 void *ptr = NULL;
2271 if (size <= PAGE_SIZE)
2272 ptr = kmalloc(size, GFP_KERNEL | gfp);
2273 if (!ptr && size <= KMALLOC_MAX_SIZE)
2274 ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | gfp);
2275 if (!ptr)
2276 ptr = __vmalloc(size, GFP_KERNEL | gfp, PAGE_KERNEL);
2278 return ptr;
2281 static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
2283 unsigned i;
2285 if (!pl)
2286 return;
2287 for (i = 0; i < ic->journal_pages; i++)
2288 if (pl[i].page)
2289 __free_page(pl[i].page);
2290 kvfree(pl);
2293 static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
2295 size_t page_list_desc_size = ic->journal_pages * sizeof(struct page_list);
2296 struct page_list *pl;
2297 unsigned i;
2299 pl = dm_integrity_kvmalloc(page_list_desc_size, __GFP_ZERO);
2300 if (!pl)
2301 return NULL;
2303 for (i = 0; i < ic->journal_pages; i++) {
2304 pl[i].page = alloc_page(GFP_KERNEL);
2305 if (!pl[i].page) {
2306 dm_integrity_free_page_list(ic, pl);
2307 return NULL;
2309 if (i)
2310 pl[i - 1].next = &pl[i];
2313 return pl;
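/*
 * The journal is additionally described as one scatterlist per section, so
 * that a whole section can be passed to the crypto layer in one request.
 */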
2316 static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
2318 unsigned i;
2319 for (i = 0; i < ic->journal_sections; i++)
2320 kvfree(sl[i]);
2321 kvfree(sl);
2324 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
2326 struct scatterlist **sl;
2329 sl = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), __GFP_ZERO);
2330 if (!sl)
2331 return NULL;
2333 for (i = 0; i < ic->journal_sections; i++) {
2334 struct scatterlist *s;
2335 unsigned start_index, start_offset;
2336 unsigned end_index, end_offset;
2337 unsigned n_pages;
2338 unsigned idx;
2340 page_list_location(ic, i, 0, &start_index, &start_offset);
2341 page_list_location(ic, i, ic->journal_section_sectors - 1, &end_index, &end_offset);
2343 n_pages = (end_index - start_index + 1);
2345 s = dm_integrity_kvmalloc(n_pages * sizeof(struct scatterlist), 0);
2346 if (!s) {
2347 dm_integrity_free_journal_scatterlist(ic, sl);
2348 return NULL;
2351 sg_init_table(s, n_pages);
2352 for (idx = start_index; idx <= end_index; idx++) {
2353 char *va = lowmem_page_address(pl[idx].page);
2354 unsigned start = 0, end = PAGE_SIZE;
2355 if (idx == start_index)
2356 start = start_offset;
2357 if (idx == end_index)
2358 end = end_offset + (1 << SECTOR_SHIFT);
2359 sg_set_buf(&s[idx - start_index], va + start, end - start);
2362 sl[i] = s;
2365 return sl;
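/*
 * Option parsing helpers: arguments of the form "name:algorithm:key" are
 * split into the algorithm name and an optional hex-encoded key.
 */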
2368 static void free_alg(struct alg_spec *a)
2370 kzfree(a->alg_string);
2371 kzfree(a->key);
2372 memset(a, 0, sizeof *a);
2375 static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
2377 char *k;
2379 free_alg(a);
2381 a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
2382 if (!a->alg_string)
2383 goto nomem;
2385 k = strchr(a->alg_string, ':');
2386 if (k) {
2387 unsigned i;
2389 *k = 0;
2390 a->key_string = k + 1;
2391 if (strlen(a->key_string) & 1)
2392 goto inval;
2394 a->key_size = strlen(a->key_string) / 2;
2395 a->key = kmalloc(a->key_size, GFP_KERNEL);
2396 if (!a->key)
2397 goto nomem;
2398 for (i = 0; i < a->key_size; i++) {
2399 char digit[3];
2400 digit[0] = a->key_string[i * 2];
2401 digit[1] = a->key_string[i * 2 + 1];
2402 digit[2] = 0;
2403 if (strspn(digit, "0123456789abcdefABCDEF") != 2)
2404 goto inval;
2405 if (kstrtou8(digit, 16, &a->key[i]))
2406 goto inval;
2410 return 0;
2411 inval:
2412 *error = error_inval;
2413 return -EINVAL;
2414 nomem:
2415 *error = "Out of memory for an argument";
2416 return -ENOMEM;
2419 static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
2420 char *error_alg, char *error_key)
2422 int r;
2424 if (a->alg_string) {
2425 *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ASYNC);
2426 if (IS_ERR(*hash)) {
2427 *error = error_alg;
2428 r = PTR_ERR(*hash);
2429 *hash = NULL;
2430 return r;
2433 if (a->key) {
2434 r = crypto_shash_setkey(*hash, a->key, a->key_size);
2435 if (r) {
2436 *error = error_key;
2437 return r;
2441 return 0;
2445 static int create_journal(struct dm_integrity_c *ic, char **error)
2447 int r = 0;
2448 unsigned i;
2449 __u64 journal_pages, journal_desc_size, journal_tree_size;
2450 unsigned char *crypt_data = NULL;
2452 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
2453 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
2454 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
2455 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
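/*
 * Four commit IDs are stamped into journal sectors in rotation; on replay,
 * the ID found in a sector identifies the commit generation that wrote it.
 * When journal encryption is used the constants above are transformed with
 * the journal, so a uniqueness check is done further below.
 */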
2457 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
2458 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
2459 journal_desc_size = journal_pages * sizeof(struct page_list);
2460 if (journal_pages >= totalram_pages - totalhigh_pages || journal_desc_size > ULONG_MAX) {
2461 *error = "Journal doesn't fit into memory";
2462 r = -ENOMEM;
2463 goto bad;
2465 ic->journal_pages = journal_pages;
2467 ic->journal = dm_integrity_alloc_page_list(ic);
2468 if (!ic->journal) {
2469 *error = "Could not allocate memory for journal";
2470 r = -ENOMEM;
2471 goto bad;
2473 if (ic->journal_crypt_alg.alg_string) {
2474 unsigned ivsize, blocksize;
2475 struct journal_completion comp;
2477 comp.ic = ic;
2478 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
2479 if (IS_ERR(ic->journal_crypt)) {
2480 *error = "Invalid journal cipher";
2481 r = PTR_ERR(ic->journal_crypt);
2482 ic->journal_crypt = NULL;
2483 goto bad;
2485 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
2486 blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
2488 if (ic->journal_crypt_alg.key) {
2489 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
2490 ic->journal_crypt_alg.key_size);
2491 if (r) {
2492 *error = "Error setting encryption key";
2493 goto bad;
2496 DEBUG_print("cipher %s, block size %u iv size %u\n",
2497 ic->journal_crypt_alg.alg_string, blocksize, ivsize);
2499 ic->journal_io = dm_integrity_alloc_page_list(ic);
2500 if (!ic->journal_io) {
2501 *error = "Could not allocate memory for journal io";
2502 r = -ENOMEM;
2503 goto bad;
2506 if (blocksize == 1) {
2507 struct scatterlist *sg;
2508 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
2509 unsigned char iv[ivsize];
2510 skcipher_request_set_tfm(req, ic->journal_crypt);
2512 ic->journal_xor = dm_integrity_alloc_page_list(ic);
2513 if (!ic->journal_xor) {
2514 *error = "Could not allocate memory for journal xor";
2515 r = -ENOMEM;
2516 goto bad;
2519 sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0);
2520 if (!sg) {
2521 *error = "Unable to allocate sg list";
2522 r = -ENOMEM;
2523 goto bad;
2525 sg_init_table(sg, ic->journal_pages + 1);
2526 for (i = 0; i < ic->journal_pages; i++) {
2527 char *va = lowmem_page_address(ic->journal_xor[i].page);
2528 clear_page(va);
2529 sg_set_buf(&sg[i], va, PAGE_SIZE);
2531 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
2532 memset(iv, 0x00, ivsize);
2534 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
2535 comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
2536 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2537 if (do_crypt(true, req, &comp))
2538 wait_for_completion(&comp.comp);
2540 r = dm_integrity_failed(ic);
2541 if (r) {
2542 *error = "Unable to encrypt journal";
2543 goto bad;
2545 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
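/*
 * For a cipher with 1-byte blocks (a stream cipher), the whole journal area
 * was just encrypted in place to produce a keystream (journal_xor) that is
 * XORed over the data on the fly, so the cipher handle is no longer needed.
 */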
2547 crypto_free_skcipher(ic->journal_crypt);
2548 ic->journal_crypt = NULL;
2549 } else {
2550 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
2551 unsigned char iv[ivsize];
2552 unsigned crypt_len = roundup(ivsize, blocksize);
2554 crypt_data = kmalloc(crypt_len, GFP_KERNEL);
2555 if (!crypt_data) {
2556 *error = "Unable to allocate crypt data";
2557 r = -ENOMEM;
2558 goto bad;
2561 skcipher_request_set_tfm(req, ic->journal_crypt);
2563 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
2564 if (!ic->journal_scatterlist) {
2565 *error = "Unable to allocate sg list";
2566 r = -ENOMEM;
2567 goto bad;
2569 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
2570 if (!ic->journal_io_scatterlist) {
2571 *error = "Unable to allocate sg list";
2572 r = -ENOMEM;
2573 goto bad;
2575 ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO);
2576 if (!ic->sk_requests) {
2577 *error = "Unable to allocate sk requests";
2578 r = -ENOMEM;
2579 goto bad;
2581 for (i = 0; i < ic->journal_sections; i++) {
2582 struct scatterlist sg;
2583 struct skcipher_request *section_req;
2584 __u32 section_le = cpu_to_le32(i);
2586 memset(iv, 0x00, ivsize);
2587 memset(crypt_data, 0x00, crypt_len);
2588 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
2590 sg_init_one(&sg, crypt_data, crypt_len);
2591 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
2592 comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp);
2593 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2594 if (do_crypt(true, req, &comp))
2595 wait_for_completion(&comp.comp);
2597 r = dm_integrity_failed(ic);
2598 if (r) {
2599 *error = "Unable to generate iv";
2600 goto bad;
2603 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2604 if (!section_req) {
2605 *error = "Unable to allocate crypt request";
2606 r = -ENOMEM;
2607 goto bad;
2609 section_req->iv = kmalloc(ivsize * 2, GFP_KERNEL);
2610 if (!section_req->iv) {
2611 skcipher_request_free(section_req);
2612 *error = "Unable to allocate iv";
2613 r = -ENOMEM;
2614 goto bad;
2616 memcpy(section_req->iv + ivsize, crypt_data, ivsize);
2617 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
2618 ic->sk_requests[i] = section_req;
2619 DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
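/*
 * If the commit IDs were transformed by journal encryption above, verify
 * that all four values are still pairwise distinct; a collision would make
 * journal replay ambiguous.
 */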
2624 for (i = 0; i < N_COMMIT_IDS; i++) {
2625 unsigned j;
2626 retest_commit_id:
2627 for (j = 0; j < i; j++) {
2628 if (ic->commit_ids[j] == ic->commit_ids[i]) {
2629 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
2630 goto retest_commit_id;
2633 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
2636 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
2637 if (journal_tree_size > ULONG_MAX) {
2638 *error = "Journal doesn't fit into memory";
2639 r = -ENOMEM;
2640 goto bad;
2642 ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0);
2643 if (!ic->journal_tree) {
2644 *error = "Could not allocate memory for journal tree";
2645 r = -ENOMEM;
2647 bad:
2648 kfree(crypt_data);
2649 return r;
2652 /*
2653 * Construct an integrity mapping
2654 *
2655 * Arguments:
2656 * device
2657 * offset from the start of the device
2658 * tag size
2659 * D - direct writes, J - journal writes, R - recovery mode
2660 * number of optional arguments
2661 * optional arguments:
2662 * journal_sectors
2663 * interleave_sectors
2664 * buffer_sectors
2665 * journal_watermark
2666 * commit_time
2667 * internal_hash
2668 * journal_crypt
2669 * journal_mac
2670 */
2671 static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
2673 struct dm_integrity_c *ic;
2674 char dummy;
2675 int r;
2676 unsigned extra_args;
2677 struct dm_arg_set as;
2678 static struct dm_arg _args[] = {
2679 {0, 8, "Invalid number of feature args"},
2680 };
2681 unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
2682 bool should_write_sb;
2683 __u64 threshold;
2684 unsigned long long start;
2686 #define DIRECT_ARGUMENTS 4
2688 if (argc <= DIRECT_ARGUMENTS) {
2689 ti->error = "Invalid argument count";
2690 return -EINVAL;
2693 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
2694 if (!ic) {
2695 ti->error = "Cannot allocate integrity context";
2696 return -ENOMEM;
2698 ti->private = ic;
2699 ti->per_io_data_size = sizeof(struct dm_integrity_io);
2701 ic->in_progress = RB_ROOT;
2702 init_waitqueue_head(&ic->endio_wait);
2703 bio_list_init(&ic->flush_bio_list);
2704 init_waitqueue_head(&ic->copy_to_journal_wait);
2705 init_completion(&ic->crypto_backoff);
2707 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
2708 if (r) {
2709 ti->error = "Device lookup failed";
2710 goto bad;
2713 if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
2714 ti->error = "Invalid starting offset";
2715 r = -EINVAL;
2716 goto bad;
2718 ic->start = start;
2720 if (strcmp(argv[2], "-")) {
2721 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
2722 ti->error = "Invalid tag size";
2723 r = -EINVAL;
2724 goto bad;
2728 if (!strcmp(argv[3], "J") || !strcmp(argv[3], "D") || !strcmp(argv[3], "R"))
2729 ic->mode = argv[3][0];
2730 else {
2731 ti->error = "Invalid mode (expecting J, D, R)";
2732 r = -EINVAL;
2733 goto bad;
2736 ic->device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
2737 journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
2738 ic->device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
2739 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
2740 buffer_sectors = DEFAULT_BUFFER_SECTORS;
2741 journal_watermark = DEFAULT_JOURNAL_WATERMARK;
2742 sync_msec = DEFAULT_SYNC_MSEC;
2744 as.argc = argc - DIRECT_ARGUMENTS;
2745 as.argv = argv + DIRECT_ARGUMENTS;
2746 r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
2747 if (r)
2748 goto bad;
2750 while (extra_args--) {
2751 const char *opt_string;
2752 unsigned val;
2753 opt_string = dm_shift_arg(&as);
2754 if (!opt_string) {
2755 r = -EINVAL;
2756 ti->error = "Not enough feature arguments";
2757 goto bad;
2759 if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
2760 journal_sectors = val;
2761 else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
2762 interleave_sectors = val;
2763 else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
2764 buffer_sectors = val;
2765 else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
2766 journal_watermark = val;
2767 else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
2768 sync_msec = val;
2769 else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
2770 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
2771 "Invalid internal_hash argument");
2772 if (r)
2773 goto bad;
2774 } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
2775 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
2776 "Invalid journal_crypt argument");
2777 if (r)
2778 goto bad;
2779 } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
2780 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
2781 "Invalid journal_mac argument");
2782 if (r)
2783 goto bad;
2784 } else {
2785 r = -EINVAL;
2786 ti->error = "Invalid argument";
2787 goto bad;
2791 r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
2792 "Invalid internal hash", "Error setting internal hash key");
2793 if (r)
2794 goto bad;
2796 r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
2797 "Invalid journal mac", "Error setting journal mac key");
2798 if (r)
2799 goto bad;
2801 if (!ic->tag_size) {
2802 if (!ic->internal_hash) {
2803 ti->error = "Unknown tag size";
2804 r = -EINVAL;
2805 goto bad;
2807 ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
2809 if (ic->tag_size > MAX_TAG_SIZE) {
2810 ti->error = "Too big tag size";
2811 r = -EINVAL;
2812 goto bad;
2814 if (!(ic->tag_size & (ic->tag_size - 1)))
2815 ic->log2_tag_size = __ffs(ic->tag_size);
2817 ic->log2_tag_size = -1;
2819 ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
2820 ic->autocommit_msec = sync_msec;
2821 setup_timer(&ic->autocommit_timer, autocommit_fn, (unsigned long)ic);
2823 ic->io = dm_io_client_create();
2824 if (IS_ERR(ic->io)) {
2825 r = PTR_ERR(ic->io);
2826 ic->io = NULL;
2827 ti->error = "Cannot allocate dm io";
2828 goto bad;
2831 ic->journal_io_mempool = mempool_create_slab_pool(JOURNAL_IO_MEMPOOL, journal_io_cache);
2832 if (!ic->journal_io_mempool) {
2833 r = -ENOMEM;
2834 ti->error = "Cannot allocate mempool";
2835 goto bad;
2838 ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
2839 WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
2840 if (!ic->metadata_wq) {
2841 ti->error = "Cannot allocate workqueue";
2842 r = -ENOMEM;
2843 goto bad;
2847 * If this workqueue were percpu, it would cause bio reordering
2848 * and reduced performance.
2850 ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
2851 if (!ic->wait_wq) {
2852 ti->error = "Cannot allocate workqueue";
2853 r = -ENOMEM;
2854 goto bad;
2857 ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
2858 if (!ic->commit_wq) {
2859 ti->error = "Cannot allocate workqueue";
2860 r = -ENOMEM;
2861 goto bad;
2863 INIT_WORK(&ic->commit_work, integrity_commit);
2865 if (ic->mode == 'J') {
2866 ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
2867 if (!ic->writer_wq) {
2868 ti->error = "Cannot allocate workqueue";
2869 r = -ENOMEM;
2870 goto bad;
2872 INIT_WORK(&ic->writer_work, integrity_writer);
2875 ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
2876 if (!ic->sb) {
2877 r = -ENOMEM;
2878 ti->error = "Cannot allocate superblock area";
2879 goto bad;
2882 r = sync_rw_sb(ic, REQ_OP_READ, 0);
2883 if (r) {
2884 ti->error = "Error reading superblock";
2885 goto bad;
2887 should_write_sb = false;
2888 if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
2889 if (ic->mode != 'R') {
2890 if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
2891 r = -EINVAL;
2892 ti->error = "The device is not initialized";
2893 goto bad;
2897 r = initialize_superblock(ic, journal_sectors, interleave_sectors);
2898 if (r) {
2899 ti->error = "Could not initialize superblock";
2900 goto bad;
2902 if (ic->mode != 'R')
2903 should_write_sb = true;
2906 if (ic->sb->version != SB_VERSION) {
2907 r = -EINVAL;
2908 ti->error = "Unknown version";
2909 goto bad;
2911 if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
2912 r = -EINVAL;
2913 ti->error = "Invalid tag size";
2914 goto bad;
2916 /* make sure that ti->max_io_len doesn't overflow */
2917 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
2918 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
2919 r = -EINVAL;
2920 ti->error = "Invalid interleave_sectors in the superblock";
2921 goto bad;
2923 ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
2924 if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) {
2925 /* test for overflow */
2926 r = -EINVAL;
2927 ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors";
2928 goto bad;
2930 if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
2931 r = -EINVAL;
2932 ti->error = "Journal mac mismatch";
2933 goto bad;
2935 r = calculate_device_limits(ic);
2936 if (r) {
2937 ti->error = "The device is too small";
2938 goto bad;
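/*
 * The metadata buffer size must evenly divide metadata_run so that a buffer
 * never spans a metadata/data boundary; clamp it to the largest power of
 * two that satisfies this.
 */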
2941 if (!buffer_sectors)
2942 buffer_sectors = 1;
2943 ic->log2_buffer_sectors = min3((int)__fls(buffer_sectors), (int)__ffs(ic->metadata_run), 31 - SECTOR_SHIFT);
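/*
 * Convert the journal_watermark percentage to an absolute number of free
 * journal entries (rounded to nearest); when free_sectors falls below this
 * threshold, journal write-back is started early.
 */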
2945 threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
2946 threshold += 50;
2947 do_div(threshold, 100);
2948 ic->free_sectors_threshold = threshold;
2950 DEBUG_print("initialized:\n");
2951 DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
2952 DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size);
2953 DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
2954 DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
2955 DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
2956 DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
2957 DEBUG_print(" journal_entries %u\n", ic->journal_entries);
2958 DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
2959 DEBUG_print(" device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors);
2960 DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
2961 DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
2962 DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
2963 DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
2964 (unsigned long long)ic->provided_data_sectors);
2965 DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
2967 ic->bufio = dm_bufio_client_create(ic->dev->bdev, 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors),
2968 1, 0, NULL, NULL);
2969 if (IS_ERR(ic->bufio)) {
2970 r = PTR_ERR(ic->bufio);
2971 ti->error = "Cannot initialize dm-bufio";
2972 ic->bufio = NULL;
2973 goto bad;
2975 dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
2977 if (ic->mode != 'R') {
2978 r = create_journal(ic, &ti->error);
2979 if (r)
2980 goto bad;
2983 if (should_write_sb) {
2986 init_journal(ic, 0, ic->journal_sections, 0);
2987 r = dm_integrity_failed(ic);
2988 if (r) {
2989 ti->error = "Error initializing journal";
2990 goto bad;
2992 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2993 if (r) {
2994 ti->error = "Error initializing superblock";
2995 goto bad;
2997 ic->just_formatted = true;
3000 r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
3001 if (r)
3002 goto bad;
3004 if (!ic->internal_hash)
3005 dm_integrity_set(ti, ic);
3007 ti->num_flush_bios = 1;
3008 ti->flush_supported = true;
3010 return 0;
3011 bad:
3012 dm_integrity_dtr(ti);
3013 return r;
3016 static void dm_integrity_dtr(struct dm_target *ti)
3018 struct dm_integrity_c *ic = ti->private;
3020 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
3022 if (ic->metadata_wq)
3023 destroy_workqueue(ic->metadata_wq);
3024 if (ic->wait_wq)
3025 destroy_workqueue(ic->wait_wq);
3026 if (ic->commit_wq)
3027 destroy_workqueue(ic->commit_wq);
3028 if (ic->writer_wq)
3029 destroy_workqueue(ic->writer_wq);
3030 if (ic->bufio)
3031 dm_bufio_client_destroy(ic->bufio);
3032 mempool_destroy(ic->journal_io_mempool);
3033 if (ic->io)
3034 dm_io_client_destroy(ic->io);
3035 if (ic->dev)
3036 dm_put_device(ti, ic->dev);
3037 dm_integrity_free_page_list(ic, ic->journal);
3038 dm_integrity_free_page_list(ic, ic->journal_io);
3039 dm_integrity_free_page_list(ic, ic->journal_xor);
3040 if (ic->journal_scatterlist)
3041 dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
3042 if (ic->journal_io_scatterlist)
3043 dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
3044 if (ic->sk_requests) {
3047 for (i = 0; i < ic->journal_sections; i++) {
3048 struct skcipher_request *req = ic->sk_requests[i];
3049 if (req) {
3050 kzfree(req->iv);
3051 skcipher_request_free(req);
3054 kvfree(ic->sk_requests);
3056 kvfree(ic->journal_tree);
3058 free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
3060 if (ic->internal_hash)
3061 crypto_free_shash(ic->internal_hash);
3062 free_alg(&ic->internal_hash_alg);
3064 if (ic->journal_crypt)
3065 crypto_free_skcipher(ic->journal_crypt);
3066 free_alg(&ic->journal_crypt_alg);
3068 if (ic->journal_mac)
3069 crypto_free_shash(ic->journal_mac);
3070 free_alg(&ic->journal_mac_alg);
3075 static struct target_type integrity_target = {
3076 .name = "integrity",
3077 .version = {1, 0, 0},
3078 .module = THIS_MODULE,
3079 .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
3080 .ctr = dm_integrity_ctr,
3081 .dtr = dm_integrity_dtr,
3082 .map = dm_integrity_map,
3083 .postsuspend = dm_integrity_postsuspend,
3084 .resume = dm_integrity_resume,
3085 .status = dm_integrity_status,
3086 .iterate_devices = dm_integrity_iterate_devices,
3089 int __init dm_integrity_init(void)
3091 int r;
3093 journal_io_cache = kmem_cache_create("integrity_journal_io",
3094 sizeof(struct journal_io), 0, 0, NULL);
3095 if (!journal_io_cache) {
3096 DMERR("can't allocate journal io cache");
3097 return -ENOMEM;
3100 r = dm_register_target(&integrity_target);
3102 if (r < 0)
3103 DMERR("register failed %d", r);
3105 return r;
3108 void dm_integrity_exit(void)
3110 dm_unregister_target(&integrity_target);
3111 kmem_cache_destroy(journal_io_cache);
3114 module_init(dm_integrity_init);
3115 module_exit(dm_integrity_exit);
3117 MODULE_AUTHOR("Milan Broz");
3118 MODULE_AUTHOR("Mikulas Patocka");
3119 MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
3120 MODULE_LICENSE("GPL");