/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include "btt.h"
#include "nd.h"
enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};
static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets are 4K from the base of the device */
	offset += SZ_4K;
	return nvdimm_read_bytes(ndns, offset, buf, n);
}
static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets are 4K from the base of the device */
	offset += SZ_4K;
	return nvdimm_write_bytes(ndns, offset, buf, n);
}
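
/*
 * Note: all media access in this file funnels through the two helpers
 * above, so an arena-relative offset of 0 corresponds to byte SZ_4K of the
 * backing namespace. For example, reading the primary info block of the
 * first arena (infooff == 0) touches namespace bytes starting at 4096.
 */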
static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb));
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb));
}
static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	WARN_ON(!super);
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb));
}
/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	WARN_ON(lba >= arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE);
}
static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping &= MAP_LBA_MASK;

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		WARN_ONCE(1, "Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le);
}
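
/*
 * For reference, the on-media encoding written above and decoded by
 * btt_map_read below, assuming the MAP_TRIM_SHIFT/MAP_ERR_SHIFT values of
 * 31 and 30 from btt.h (Z is the trim bit, E the error bit of each 32-bit
 * map entry):
 *
 *   Z E  meaning
 *   0 0  initial/identity state: postmap == premap (map was zero-filled)
 *   0 1  error: reads of this block fail with -EIO
 *   1 0  zeroed/trimmed: reads of this block return zeroes
 *   1 1  'normal' entry: postmap is a valid translated block
 */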
static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	WARN_ON(lba >= arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = (raw_mapping & MAP_TRIM_MASK) >> MAP_TRIM_SHIFT;
	e_flag = (raw_mapping & MAP_ERR_MASK) >> MAP_ERR_SHIFT;
	ze = (z_flag << 1) + e_flag;
	postmap = raw_mapping & MAP_LBA_MASK;

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}
static int btt_log_read_pair(struct arena_info *arena, u32 lane,
			struct log_entry *ent)
{
	WARN_ON(!ent);
	return arena_read_bytes(arena,
			arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
			2 * LOG_ENT_SIZE);
}
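
/*
 * A worked example of the flog addressing above: each lane owns a pair of
 * adjacent log slots. Assuming the 32-byte LOG_ENT_SIZE from btt.h, lane
 * 3's pair starts at logoff + 2 * 3 * 32 = logoff + 192, i.e. slot 0 at
 * +192 and slot 1 at +224, and the pair is fetched in one 64-byte read.
 */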
static struct dentry *debugfs_root;
static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If for some reason, parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
			&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
			&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
			&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
}
static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
			debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}
/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry. As a special case, a
 * sequence number of zero in entry [0] means the pair has never been
 * written, so entry [0] is initialized with sequence number 1 and selected.
 * It returns which of the entries was the older one.
 *
 * TODO The logic feels a bit kludge-y. make it better..
 */
static int btt_log_get_old(struct log_entry *ent)
{
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (ent[0].seq == 0) {
		ent[0].seq = cpu_to_le32(1);
		return 0;
	}

	if (ent[0].seq == ent[1].seq)
		return -EINVAL;
	if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
		return -EINVAL;

	if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
		if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}
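
/*
 * Worked examples for the sequence arithmetic above, where valid sequence
 * numbers cycle 1 -> 2 -> 3 -> 1 (so equal values, or values summing to
 * more than 5, indicate corruption):
 *
 *   seq [2, 3]: difference 1, so [0] is older and [1] holds the newest entry
 *   seq [1, 3]: difference 2 means 3 wrapped around to 1, so [1] is older
 *   seq [0, x]: never-written pair; [0] is claimed and marked with seq 1
 */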
static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}
/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_entry log[2];

	ret = btt_log_read_pair(arena, lane, log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(log);
	if (old_ent < 0 || old_ent > 1) {
		dev_info(to_dev(arena),
				"log corruption (%d): lane %d seq [%d, %d]\n",
				old_ent, lane, le32_to_cpu(log[0].seq),
				le32_to_cpu(log[1].seq));
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);

	return ret_ent;
}
/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent)
{
	int ret;
	/*
	 * Ignore the padding in log_entry for calculating log_half.
	 * The entry is 'committed' when we write the sequence number,
	 * and we want to ensure that that is the last thing written.
	 * We don't bother writing the padding as that would be extra
	 * media wear and write amplification
	 */
	unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
	u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
	void *src = ent;

	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half);
}
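
/*
 * Arithmetic check for the split above, assuming the btt.h layout of four
 * __le32 fields (lba, old_map, new_map, seq) plus two u64 of padding:
 * LOG_ENT_SIZE is 32 and log_half is (32 - 16) / 2 = 8 bytes. The first
 * 8-byte write covers lba and old_map; the second covers new_map and seq,
 * which is why the entry only becomes 'committed' once the second half
 * (carrying the sequence number) lands on media.
 */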
static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	arena->freelist[lane].block = le32_to_cpu(ent->old_map);

	return ret;
}
/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
	}

 free:
	kfree(zerobuf);
	return ret;
}
/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
	int ret;
	u32 i;
	struct log_entry log, zerolog;

	memset(&zerolog, 0, sizeof(zerolog));

	for (i = 0; i < arena->nfree; i++) {
		log.lba = cpu_to_le32(i);
		log.old_map = cpu_to_le32(arena->external_nlba + i);
		log.new_map = cpu_to_le32(arena->external_nlba + i);
		log.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &log);
		if (ret)
			return ret;
		ret = __btt_log_write(arena, i, 1, &zerolog);
		if (ret)
			return ret;
	}

	return 0;
}
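
/*
 * Note how this ties into btt_log_get_old: slot 1 of each lane is written
 * as an all-zero entry, so on first use slot 0 (seq LOG_SEQ_INIT) wins and
 * the zeroed slot is the one that gets overwritten next. The 'fake'
 * entries park internal blocks external_nlba .. external_nlba + nfree - 1
 * on the free list, with old_map == new_map marking them as untouched.
 */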
static int btt_freelist_init(struct arena_info *arena)
{
	int old, new, ret;
	u32 i, map_entry;
	struct log_entry log_new, log_old;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
		if (old < 0)
			return old;

		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = le32_to_cpu(log_new.old_map);

		/* This implies a newly created or untouched flog entry */
		if (log_new.old_map == log_new.new_map)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL);
		if (ret)
			return ret;
		if ((le32_to_cpu(log_new.new_map) != map_entry) &&
				(le32_to_cpu(log_new.old_map) == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}
static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}
static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
			INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = 1;
	arena->version_minor = 1;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
				BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;
	return arena;
}
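
/*
 * The offset assignments above produce the following arena layout, with
 * all offsets 'absolute' (relative to the start of the storage space):
 *
 *   infooff    primary info block       BTT_PG_SIZE
 *   dataoff    data blocks              datasize
 *   mapoff     premap->postmap table    mapsize
 *   logoff     flog (2 slots per lane)  logsize
 *   info2off   backup info block        BTT_PG_SIZE
 */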
static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}
/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}
static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_info(to_dev(arena),
						"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}

	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
 out_super:
	kfree(super);
	return ret;
}
static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}
/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	memcpy(super->uuid, nd_btt->uuid, 16);
	memcpy(super->parent_uuid, parent_uuid, 16);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}
/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}
static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}
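
/*
 * An illustration (not an exhaustive list of supported formats): a
 * namespace advertising 520-byte LBAs with a 512-byte btt->sector_size
 * leaves 8 bytes of per-sector metadata, which btt_blk_init below hands
 * to nd_integrity_init and btt_rw_integrity shuttles to and from the bio
 * integrity payload.
 */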
/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}
/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}
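
/*
 * A worked example of the lock hashing above: MAP_ENT_SIZE is 4 and
 * L1_CACHE_BYTES is typically 64, so premap entries 0..15 fall in map
 * cacheline 0, entries 16..31 in cacheline 1, and so on; the cacheline
 * number modulo nfree (256 by default) picks the lock. Two premap LBAs
 * whose map entries share a cacheline therefore always contend on the
 * same lock.
 */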
static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}
static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = kmap_atomic(bv.bv_page);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len);
		else
			ret = arena_read_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len);

		kunmap_atomic(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len);
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif
static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &t_flag,
						&e_flag);
			if (ret)
				goto out_rtt;

			if (postmap == new_map)
				break;

			postmap = new_map;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret)
			goto out_rtt;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}
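
/*
 * A note on the RTT (read tracking table) handshake implemented above and
 * relied on by btt_write_pg below: a reader publishes its postmap block in
 * rtt[lane] and then re-reads the map entry to confirm the translation did
 * not change; a writer, before reusing a freed block, scans all rtt slots
 * and spins while any reader still advertises that block. Together these
 * keep a free block from being overwritten while an in-flight read is
 * still using it.
 */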
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			int rw, sector_t sector)
{
	int ret;

	if (rw == READ) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}
static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = q->queuedata;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0, rw;
	bool do_acct;

	/*
	 * bio_integrity_enabled also checks if the bio already has an
	 * integrity payload attached. If it does, we *don't* do a
	 * bio_integrity_prep here - the payload has been generated by
	 * another kernel subsystem, and we just pass it through.
	 */
	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio->bi_error = -EIO;
		goto out;
	}

	do_acct = nd_iostat_start(bio, &start);
	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		BUG_ON(len > PAGE_SIZE);
		/* Make sure len is in multiples of sector size. */
		/* XXX is this right? */
		BUG_ON(len < btt->sector_size);
		BUG_ON(len % btt->sector_size);

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				rw, iter.bi_sector);
		if (err) {
			dev_info(&btt->nd_btt->dev,
					"io error in %s sector %lld, len %d,\n",
					(rw == READ) ? "READ" : "WRITE",
					(unsigned long long) iter.bi_sector, len);
			bio->bi_error = err;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

 out:
	bio_endio(bio);
	return BLK_QC_T_NONE;
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, int rw)
{
	struct btt *btt = bdev->bd_disk->private_data;

	btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, rw, sector);
	page_endio(page, rw & WRITE, 0);
	return 0;
}
static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}
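
/*
 * Arithmetic behind the fake geometry above: 64 heads * 32 sectors per
 * track = 2048 (1 << 11) sectors per cylinder, so the cylinder count is
 * simply the 512-byte-sector capacity shifted right by 11.
 */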
static const struct block_device_operations btt_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		btt_rw_page,
	.getgeo =		btt_getgeo,
	.revalidate_disk =	nvdimm_revalidate_disk,
};
static int btt_blk_init(struct btt *btt)
{
	int ret = 0;
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* create a new disk and request queue for btt */
	btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
	if (!btt->btt_queue)
		return -ENOMEM;

	btt->btt_disk = alloc_disk(0);
	if (!btt->btt_disk) {
		blk_cleanup_queue(btt->btt_queue);
		return -ENOMEM;
	}

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->driverfs_dev = &btt->nd_btt->dev;
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;
	btt->btt_disk->queue = btt->btt_queue;
	btt->btt_disk->flags = GENHD_FL_EXT_DEVT;

	blk_queue_make_request(btt->btt_queue, btt_make_request);
	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
	blk_queue_bounce_limit(btt->btt_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
	btt->btt_queue->queuedata = btt;

	set_capacity(btt->btt_disk, 0);
	add_disk(btt->btt_disk);
	if (btt_meta_size(btt)) {
		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));

		if (rc) {
			del_gendisk(btt->btt_disk);
			put_disk(btt->btt_disk);
			blk_cleanup_queue(btt->btt_queue);
			return rc;
		}
	}
	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	revalidate_disk(btt->btt_disk);

	return ret;
}
static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
	blk_cleanup_queue(btt->btt_queue);
}
/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @nd_region:	parent region of the backing device
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
		u32 lbasize, u8 *uuid, struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct device *dev = &nd_btt->dev;

	btt = kzalloc(sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		goto out_free;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_info(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		goto out_free;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			goto out_free;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			goto out_free;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		goto out_free;
	}

	btt_debugfs_init(btt);

	return btt;

 out_free:
	kfree(btt);
	return NULL;
}
/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
		kfree(btt);
	}
}
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt *btt;
	size_t rawsize;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize)
		return -ENODEV;

	rawsize = nvdimm_namespace_capacity(ndns) - SZ_4K;
	if (rawsize < ARENA_MIN_SIZE) {
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);
int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);
static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);