/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */
static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
static DECLARE_RWSEM(rrpc_lock);

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags);

#define rrpc_for_each_lun(rrpc, rlun, i) \
		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
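/*
 * Usage sketch (illustrative only, not part of the original source): the
 * macro expands to a plain for-loop over the target's LUN array, e.g.
 *
 *	struct rrpc_lun *rlun;
 *	int i;
 *
 *	rrpc_for_each_lun(rrpc, rlun, i)
 *		pr_debug("lun %d: %u free blocks\n", i,
 *					rlun->parent->nr_free_blocks);
 *
 * rrpc_get_lun_rr() below walks the LUNs this way when it picks the LUN
 * with the most free blocks for GC writes.
 */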
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_block *rblk = a->rblk;
	unsigned int pg_offset;

	lockdep_assert_held(&rrpc->rev_lock);

	if (a->addr == ADDR_EMPTY || !rblk)

	spin_lock(&rblk->lock);

	div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
	rblk->nr_invalid_pages++;

	spin_unlock(&rblk->lock);

	rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;

static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
	spin_lock(&rrpc->rev_lock);
	for (i = slba; i < slba + len; i++) {
		struct rrpc_addr *gp = &rrpc->trans_map[i];

		rrpc_page_invalidate(rrpc, gp);
	spin_unlock(&rrpc->rev_lock);

static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
					sector_t laddr, unsigned int pages)
	struct rrpc_inflight_rq *inf;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
		return ERR_PTR(-ENOMEM);

	inf = rrpc_get_inflight_rq(rqd);
	if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
		mempool_free(rqd, rrpc->rq_pool);

static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

	rrpc_unlock_laddr(rrpc, inf);

	mempool_free(rqd, rrpc->rq_pool);

static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;

	rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
		pr_err("rrpc: unable to acquire inflight IO\n");

	rrpc_invalidate_range(rrpc, slba, len);
	rrpc_inflight_laddr_release(rrpc, rqd);

static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
	struct nvm_tgt_dev *dev = rrpc->dev;

	return (rblk->next_page == dev->geo.sec_per_blk);

/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct nvm_block *blk = rblk->parent;
	int lun_blk = blk->id % (dev->geo.blks_per_lun * rrpc->nr_luns);

	return lun_blk * dev->geo.sec_per_blk;

/* Calculate global addr for the given block */
static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct nvm_block *blk = rblk->parent;

	return blk->id * dev->geo.sec_per_blk;
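/*
 * Worked example (geometry numbers are hypothetical): with sec_per_blk = 256,
 * block id 5 starts at linear address 5 * 256 = 1280 (block_to_addr) and its
 * sectors occupy addresses 1280..1535. block_to_rel_addr() gives the same
 * base relative to the LUNs instantiated by this target, i.e.
 * (blk->id % (blks_per_lun * nr_luns)) * sec_per_blk.
 */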
static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev, u64 addr)
	struct ppa_addr paddr;

	return linear_to_generic_addr(&dev->geo, paddr);

/* requires lun->lock taken */
static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
						struct rrpc_block **cur_rblk)
	struct rrpc *rrpc = rlun->rrpc;

		spin_lock(&(*cur_rblk)->lock);
		WARN_ON(!block_is_full(rrpc, *cur_rblk));
		spin_unlock(&(*cur_rblk)->lock);

	*cur_rblk = new_rblk;

static struct nvm_block *__rrpc_get_blk(struct rrpc *rrpc,
							struct rrpc_lun *rlun)
	struct nvm_lun *lun = rlun->parent;
	struct nvm_block *blk = NULL;

	if (list_empty(&lun->free_list))

	blk = list_first_entry(&lun->free_list, struct nvm_block, list);

	list_move_tail(&blk->list, &lun->used_list);
	blk->state = NVM_BLK_ST_TGT;
	lun->nr_free_blocks--;

static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct nvm_lun *lun = rlun->parent;
	struct nvm_block *blk;
	struct rrpc_block *rblk;
	int is_gc = flags & NVM_IOTYPE_GC;

	spin_lock(&rlun->lock);
	if (!is_gc && lun->nr_free_blocks < rlun->reserved_blocks) {
		pr_err("nvm: rrpc: cannot give block to non GC request\n");
		spin_unlock(&rlun->lock);

	blk = __rrpc_get_blk(rrpc, rlun);
		pr_err("nvm: rrpc: cannot get new block\n");
		spin_unlock(&rlun->lock);

	spin_unlock(&rlun->lock);

	rblk = rrpc_get_rblk(rlun, blk->id);
	bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
	rblk->nr_invalid_pages = 0;
	atomic_set(&rblk->data_cmnt_size, 0);

static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
	struct nvm_block *blk = rblk->parent;
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_lun *lun = rlun->parent;

	spin_lock(&rlun->lock);
	if (blk->state & NVM_BLK_ST_TGT) {
		list_move_tail(&blk->list, &lun->free_list);
		lun->nr_free_blocks++;
		blk->state = NVM_BLK_ST_FREE;
	} else if (blk->state & NVM_BLK_ST_BAD) {
		list_move_tail(&blk->list, &lun->bb_list);
		blk->state = NVM_BLK_ST_BAD;

		pr_err("rrpc: erroneous block type (%lu -> %u)\n",
							blk->id, blk->state);
		list_move_tail(&blk->list, &lun->bb_list);
	spin_unlock(&rlun->lock);

static void rrpc_put_blks(struct rrpc *rrpc)
	struct rrpc_lun *rlun;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
			rrpc_put_blk(rrpc, rlun->cur);
			rrpc_put_blk(rrpc, rlun->gc_cur);

static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
	int next = atomic_inc_return(&rrpc->next_lun);

	return &rrpc->luns[next % rrpc->nr_luns];
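/*
 * Round-robin example (illustrative): next_lun starts at -1 (see rrpc_init),
 * so successive calls see atomic_inc_return() values 0, 1, 2, ... and with
 * nr_luns = 4 the returned LUNs cycle 0, 1, 2, 3, 0, 1, ...
 */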
static void rrpc_gc_kick(struct rrpc *rrpc)
	struct rrpc_lun *rlun;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		queue_work(rrpc->krqd_wq, &rlun->ws_gc);

/*
 * timed GC every interval.
 */
static void rrpc_gc_timer(unsigned long data)
	struct rrpc *rrpc = (struct rrpc *)data;

	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));

static void rrpc_end_sync_bio(struct bio *bio)
	struct completion *waiting = bio->bi_private;

		pr_err("nvm: gc request failed (%u).\n", bio->bi_error);

/**
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @block: the block from which to migrate live pages
 *
 * GC algorithms may call this function to migrate remaining live
 * pages off the block prior to erasing it. This function blocks
 * further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct request_queue *q = dev->q;
	struct rrpc_rev_addr *rev;
	int nr_sec_per_blk = dev->geo.sec_per_blk;

	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))

	bio = bio_alloc(GFP_NOIO, 1);
		pr_err("nvm: could not alloc bio to gc\n");

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
				nr_sec_per_blk)) < nr_sec_per_blk) {

		phys_addr = rblk->parent->id * nr_sec_per_blk + slot;

		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);

		wait_for_completion_io(&wait);
			rrpc_inflight_laddr_release(rrpc, rqd);

		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
		/* turn the command around and write the data back to a new
		 * page
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);

		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);

	mempool_free(page, rrpc->page_pool);

	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");

static void rrpc_block_gc(struct work_struct *work)
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_tgt_dev *dev = rrpc->dev;

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);

	if (rrpc_move_valid_pages(rrpc, rblk))

	if (nvm_erase_blk(dev->parent, rblk->parent, 0))

	rrpc_put_blk(rrpc, rblk);

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
/* the block with the highest number of invalid pages will be at the
 * beginning of the list
 */
static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
						struct rrpc_block *rb)
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
/* linearly find the block with the highest number of invalid pages */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblock, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblock, prio_list, prio)
		max = rblock_max_invalid(max, rblock);
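/*
 * Example (illustrative): with blocks holding 10, 250 and 30 invalid pages on
 * the prio list, the linear scan above settles on the block with 250 invalid
 * pages, i.e. the cheapest block to reclaim, since only the remaining valid
 * pages have to be moved before the erase.
 */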
static void rrpc_lun_gc(struct work_struct *work)
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct nvm_lun *lun = rlun->parent;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > lun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblock = block_prio_find_max(rlun);
		struct nvm_block *block = rblock->parent;

		if (!rblock->nr_invalid_pages)

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);

		list_del_init(&rblock->prio);

		BUG_ON(!block_is_full(rrpc, rblock));

		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
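/*
 * Sizing example (GC_LIMIT_INVERSE value assumed for illustration): with
 * blks_per_lun = 1024 and GC_LIMIT_INVERSE = 10, nr_blocks_need is 102, so
 * the loop above keeps queueing prio-list blocks for reclaim until the LUN
 * again has at least 102 free blocks (bounded below by nr_luns) or the prio
 * list runs empty.
 */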
static void rrpc_gc_queue(struct work_struct *work)
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
	struct rrpc_lun *rlun, *max_free;

		return get_next_lun(rrpc);

	/* during GC we don't care about RR; instead, we want to make
	 * sure that we maintain evenness between the block luns.
	 */
	max_free = &rrpc->luns[0];
	/* prevent a GC-ing lun from devouring pages of a lun with
	 * few free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->parent->nr_free_blocks >
					max_free->parent->nr_free_blocks)
static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
					struct rrpc_block *rblk, u64 paddr)
	struct rrpc_addr *gp;
	struct rrpc_rev_addr *rev;

	BUG_ON(laddr >= rrpc->nr_sects);

	gp = &rrpc->trans_map[laddr];
	spin_lock(&rrpc->rev_lock);
		rrpc_page_invalidate(rrpc, gp);

	rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];

	spin_unlock(&rrpc->rev_lock);

static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
	u64 addr = ADDR_EMPTY;

	spin_lock(&rblk->lock);
	if (block_is_full(rrpc, rblk))

	addr = block_to_addr(rrpc, rblk) + rblk->next_page;

	spin_unlock(&rblk->lock);

/* Map logical address to a physical page. The mapping implements a round robin
 * approach and allocates a page from the next lun available.
 *
 * Returns rrpc_addr with the physical address and block. Returns NULL if no
 * blocks in the next rlun are available.
 */
static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk, **cur_rblk;

	rlun = rrpc_get_lun_rr(rrpc, is_gc);

	if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)

	/*
	 * page allocation steps:
	 * 1. Try to allocate new page from current rblk
	 * 2a. If it succeeds, proceed to map it in and return
	 * 2b. If it fails, first try to allocate a new block from the media
	 *     manager, and then retry step 1. Retry until the normal block
	 *     pool is exhausted.
	 * 3. If exhausted, and the garbage collector is requesting the block,
	 *    go to the reserved block and retry step 1.
	 *    In the case that this fails as well, or it is not GC
	 *    requesting, report not able to retrieve a block and let the
	 *    caller handle further processing.
	 */

	spin_lock(&rlun->lock);
	cur_rblk = &rlun->cur;

	paddr = rrpc_alloc_addr(rrpc, rblk);

	if (paddr != ADDR_EMPTY)

	if (!list_empty(&rlun->wblk_list)) {

		rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
		rrpc_set_lun_cur(rlun, rblk, cur_rblk);
		list_del(&rblk->prio);

	spin_unlock(&rlun->lock);

	rblk = rrpc_get_blk(rrpc, rlun, gc_force);
		spin_lock(&rlun->lock);
		list_add_tail(&rblk->prio, &rlun->wblk_list);
		/*
		 * another thread might already have added a new block,
		 * therefore, make sure that one is used, instead of the
		 * one just added.
		 */

	if (unlikely(is_gc) && !gc_force) {
		/* retry from emergency gc block */
		cur_rblk = &rlun->gc_cur;
		spin_lock(&rlun->lock);

	pr_err("rrpc: failed to allocate new block\n");

	spin_unlock(&rlun->lock);
	return rrpc_update_map(rrpc, laddr, rblk, paddr);
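/*
 * Caller-side sketch (illustrative, mirrors what rrpc_write_rq() below does):
 * a write path asks for a mapping and requeues the bio when no block can be
 * allocated, e.g.
 *
 *	p = rrpc_map_page(rrpc, laddr, is_gc);
 *	if (!p) {
 *		rrpc_unlock_rq(rrpc, rqd);
 *		return NVM_IO_REQUEUE;
 *	}
 *	rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
 */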
static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
	struct rrpc_block_gc *gcb;

	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		pr_err("rrpc: unable to queue block for gc.");

	INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
	queue_work(rrpc->kgc_wq, &gcb->ws_gc);

static void __rrpc_mark_bad_block(struct nvm_tgt_dev *dev, struct ppa_addr *ppa)
	nvm_mark_blk(dev->parent, *ppa, NVM_BLK_ST_BAD);
	nvm_set_bb_tbl(dev->parent, ppa, 1, NVM_BLK_T_GRWN_BAD);

static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
	struct nvm_tgt_dev *dev = rrpc->dev;
	void *comp_bits = &rqd->ppa_status;
	struct ppa_addr ppa, prev_ppa;
	int nr_ppas = rqd->nr_ppas;

	if (rqd->nr_ppas == 1)
		__rrpc_mark_bad_block(dev, &rqd->ppa_addr);

	ppa_set_empty(&prev_ppa);

	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
		ppa = rqd->ppa_list[bit];
		if (ppa_cmp_blk(ppa, prev_ppa))

		__rrpc_mark_bad_block(dev, &ppa);

static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
						sector_t laddr, uint8_t npages)
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_block *rblk;

	for (i = 0; i < npages; i++) {
		p = &rrpc->trans_map[laddr + i];
		lun = rblk->parent->lun;

		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == dev->geo.sec_per_blk))
			rrpc_run_gc(rrpc, rblk);
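/*
 * Example (illustrative): with sec_per_blk = 256, the 256th committed write
 * to a block bumps data_cmnt_size to the block size, and the block is handed
 * to rrpc_run_gc() so it can be placed on the GC priority list.
 */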
static void rrpc_end_io(struct nvm_rq *rqd)
	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	uint8_t npages = rqd->nr_ppas;
	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

	if (bio_data_dir(rqd->bio) == WRITE) {
		if (rqd->error == NVM_RSP_ERR_FAILWRITE)
			rrpc_mark_bad_block(rrpc, rqd);

		rrpc_end_io_write(rrpc, rrqd, laddr, npages);

	if (rrqd->flags & NVM_IOTYPE_GC)

	rrpc_unlock_rq(rrpc, rqd);

		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);

	mempool_free(rqd, rrpc->rq_pool);

static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *gp;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
		gp = &rrpc->trans_map[laddr + i];

			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp->addr);

			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(dev->parent, rqd->ppa_list,

	rqd->opcode = NVM_OP_HBREAD;

static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);
	struct rrpc_addr *gp;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
	gp = &rrpc->trans_map[laddr];

		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);

		rrpc_unlock_rq(rrpc, rqd);

	rqd->opcode = NVM_OP_HBREAD;

static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		p = rrpc_map_page(rrpc, laddr + i, is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(dev->parent, rqd->ppa_list,
			return NVM_IO_REQUEUE;

		rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, p->addr);

	rqd->opcode = NVM_OP_HBWRITE;

static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	p = rrpc_map_page(rrpc, laddr, is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		return NVM_IO_REQUEUE;

	rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
	rqd->opcode = NVM_OP_HBWRITE;

static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
	struct nvm_tgt_dev *dev = rrpc->dev;

		rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
		if (!rqd->ppa_list) {
			pr_err("rrpc: not able to allocate ppa list\n");

		if (bio_op(bio) == REQ_OP_WRITE)
			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,

		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);

	if (bio_op(bio) == REQ_OP_WRITE)
		return rrpc_write_rq(rrpc, bio, rqd, flags);

	return rrpc_read_rq(rrpc, bio, rqd, flags);

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
	uint8_t nr_pages = rrpc_get_pages(bio);
	int bio_size = bio_sectors(bio) << 9;

	if (bio_size < dev->geo.sec_size)
	else if (bio_size > dev->geo.max_rq_size)

	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);

	rqd->ins = &rrpc->instance;
	rqd->nr_ppas = nr_pages;

	err = nvm_submit_io(dev->parent, rqd);
		pr_err("rrpc: I/O submission failed: %d\n", err);

		if (!(flags & NVM_IOTYPE_GC)) {
			rrpc_unlock_rq(rrpc, rqd);
			if (rqd->nr_ppas > 1)
				nvm_dev_dma_free(dev->parent,
					rqd->ppa_list, rqd->dma_ppa_list);

static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
	struct rrpc *rrpc = q->queuedata;

	blk_queue_split(q, &bio, q->bio_split);

	if (bio_op(bio) == REQ_OP_DISCARD) {
		rrpc_discard(rrpc, bio);
		return BLK_QC_T_NONE;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
		pr_err_ratelimited("rrpc: not able to queue bio.");
		return BLK_QC_T_NONE;

	memset(rqd, 0, sizeof(struct nvm_rq));

	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
		return BLK_QC_T_NONE;

		spin_lock(&rrpc->bio_lock);
		bio_list_add(&rrpc->requeue_bios, bio);
		spin_unlock(&rrpc->bio_lock);
		queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);

	mempool_free(rqd, rrpc->rq_pool);
	return BLK_QC_T_NONE;

static void rrpc_requeue(struct work_struct *work)
	struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock(&rrpc->bio_lock);
	bio_list_merge(&bios, &rrpc->requeue_bios);
	bio_list_init(&rrpc->requeue_bios);
	spin_unlock(&rrpc->bio_lock);

	while ((bio = bio_list_pop(&bios)))
		rrpc_make_rq(rrpc->disk->queue, bio);

static void rrpc_gc_free(struct rrpc *rrpc)
		destroy_workqueue(rrpc->krqd_wq);

		destroy_workqueue(rrpc->kgc_wq);

static int rrpc_gc_init(struct rrpc *rrpc)
	rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,

	rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);

	setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);

static void rrpc_map_free(struct rrpc *rrpc)
	vfree(rrpc->rev_trans_map);
	vfree(rrpc->trans_map);
static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
	struct rrpc *rrpc = (struct rrpc *)private;
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_addr *addr = rrpc->trans_map + slba;
	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);

		/* LNVM treats address spaces as silos: LBA and PBA are
		 * equally large and zero-indexed.
		 */
		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("nvm: L2P data entry is out of bounds!\n");

		/* Address zero is a special one. The first page on a disk is
		 * protected. As it often holds internal device boot

		div_u64_rem(pba, rrpc->nr_sects, &mod);

		raddr[mod].addr = slba + i;
static int rrpc_map_init(struct rrpc *rrpc)
	struct nvm_tgt_dev *dev = rrpc->dev;

	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
	if (!rrpc->trans_map)

	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
	if (!rrpc->rev_trans_map)

	for (i = 0; i < rrpc->nr_sects; i++) {
		struct rrpc_addr *p = &rrpc->trans_map[i];
		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];

		p->addr = ADDR_EMPTY;
		r->addr = ADDR_EMPTY;

	if (!dev->ops->get_l2p_tbl)

	/* Bring up the mapping table from device */
	ret = dev->ops->get_l2p_tbl(dev->parent, rrpc->soffset, rrpc->nr_sects,
					rrpc_l2p_update, rrpc);
		pr_err("nvm: rrpc: could not read L2P table.\n");

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

static int rrpc_core_init(struct rrpc *rrpc)
	down_write(&rrpc_lock);
	if (!rrpc_gcb_cache) {
		rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
				sizeof(struct rrpc_block_gc), 0, 0, NULL);
		if (!rrpc_gcb_cache) {
			up_write(&rrpc_lock);

		rrpc_rq_cache = kmem_cache_create("rrpc_rq",
				sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
		if (!rrpc_rq_cache) {
			kmem_cache_destroy(rrpc_gcb_cache);
			up_write(&rrpc_lock);

	up_write(&rrpc_lock);

	rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!rrpc->page_pool)

	rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns,
	if (!rrpc->gcb_pool)

	rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);

	spin_lock_init(&rrpc->inflights.lock);
	INIT_LIST_HEAD(&rrpc->inflights.reqs);

static void rrpc_core_free(struct rrpc *rrpc)
	mempool_destroy(rrpc->page_pool);
	mempool_destroy(rrpc->gcb_pool);
	mempool_destroy(rrpc->rq_pool);

static void rrpc_luns_free(struct rrpc *rrpc)
	struct nvm_lun *lun;
	struct rrpc_lun *rlun;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		vfree(rlun->blocks);

static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct nvm_geo *geo = &dev->geo;
	struct rrpc_lun *rlun;
	int i, j, ret = -EINVAL;

	if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
		pr_err("rrpc: number of pages per block too high.");

	spin_lock_init(&rrpc->rev_lock);

	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),

	for (i = 0; i < rrpc->nr_luns; i++) {
		int lunid = lun_begin + i;
		struct nvm_lun *lun;

		lun = dev->mt->get_lun(dev->parent, lunid);

		rlun = &rrpc->luns[i];

		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
		if (!rlun->blocks) {

		for (j = 0; j < geo->blks_per_lun; j++) {
			struct rrpc_block *rblk = &rlun->blocks[j];
			struct nvm_block *blk = &lun->blocks[j];

			INIT_LIST_HEAD(&rblk->prio);
			spin_lock_init(&rblk->lock);

		rlun->reserved_blocks = 2; /* for GC only */

		INIT_LIST_HEAD(&rlun->prio_list);
		INIT_LIST_HEAD(&rlun->wblk_list);

		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
		spin_lock_init(&rlun->lock);

/* returns 0 on success and stores the beginning address in *begin */
static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct nvmm_type *mt = dev->mt;
	sector_t size = rrpc->nr_sects * dev->geo.sec_size;

	ret = mt->get_area(dev->parent, begin, size);
		*begin >>= (ilog2(dev->geo.sec_size) - 9);

static void rrpc_area_free(struct rrpc *rrpc)
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct nvmm_type *mt = dev->mt;
	sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);

	mt->put_area(dev->parent, begin);

static void rrpc_free(struct rrpc *rrpc)
	rrpc_map_free(rrpc);
	rrpc_core_free(rrpc);
	rrpc_luns_free(rrpc);
	rrpc_area_free(rrpc);

static void rrpc_exit(void *private)
	struct rrpc *rrpc = private;

	del_timer(&rrpc->gc_timer);

	flush_workqueue(rrpc->krqd_wq);
	flush_workqueue(rrpc->kgc_wq);

static sector_t rrpc_capacity(void *private)
	struct rrpc *rrpc = private;
	struct nvm_tgt_dev *dev = rrpc->dev;
	sector_t reserved, provisioned;

	/* cur, gc, and two emergency blocks for each lun */
	reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4;
	provisioned = rrpc->nr_sects - reserved;

	if (reserved > rrpc->nr_sects) {
		pr_err("rrpc: not enough space available to expose storage.\n");

	sector_div(provisioned, 10);
	return provisioned * 9 * NR_PHY_IN_LOG;
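/*
 * Worked example (hypothetical geometry): with 8 luns, sec_per_blk = 256 and
 * nr_sects = 2097152, the reservation is 8 * 256 * 4 = 8192 sectors, leaving
 * 2088960 provisioned. Exposing 90% of that gives 1880064 flash sectors,
 * scaled by NR_PHY_IN_LOG to report the capacity in 512-byte sectors.
 */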
/*
 * Looks up the logical address in the reverse translation map and checks
 * whether it is still valid by comparing it against the logical-to-physical
 * map; the block's invalid-page bitmap and count are updated accordingly.
 */
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_addr *laddr;
	u64 bpaddr, paddr, pladdr;

	bpaddr = block_to_rel_addr(rrpc, rblk);
	for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
		paddr = bpaddr + offset;

		pladdr = rrpc->rev_trans_map[paddr].addr;
		if (pladdr == ADDR_EMPTY)

		laddr = &rrpc->trans_map[pladdr];

		if (paddr == laddr->addr) {

			set_bit(offset, rblk->invalid_pages);
			rblk->nr_invalid_pages++;
static int rrpc_blocks_init(struct rrpc *rrpc)
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int lun_iter, blk_iter;

	for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
		rlun = &rrpc->luns[lun_iter];

		for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun;
			rblk = &rlun->blocks[blk_iter];
			rrpc_block_map_update(rrpc, rblk);

static int rrpc_luns_configure(struct rrpc *rrpc)
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		rblk = rrpc_get_blk(rrpc, rlun, 0);
		rrpc_set_lun_cur(rlun, rblk, &rlun->cur);

		/* Emergency gc block */
		rblk = rrpc_get_blk(rrpc, rlun, 1);
		rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);

	rrpc_put_blks(rrpc);

static struct nvm_tgt_type tt_rrpc;

static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
						int lun_begin, int lun_end)
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct nvm_geo *geo = &dev->geo;

	if (!(dev->identity.dom & NVM_RSP_L2P)) {
		pr_err("nvm: rrpc: device does not support l2p (%x)\n",
		return ERR_PTR(-EINVAL);

	rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	rrpc->instance.tt = &tt_rrpc;

	bio_list_init(&rrpc->requeue_bios);
	spin_lock_init(&rrpc->bio_lock);
	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);

	rrpc->nr_luns = geo->nr_luns;
	rrpc->nr_sects = (unsigned long long)geo->sec_per_lun * rrpc->nr_luns;

	/* simple round-robin strategy */
	atomic_set(&rrpc->next_lun, -1);

	ret = rrpc_area_init(rrpc, &soffset);
		pr_err("nvm: rrpc: could not initialize area\n");
		return ERR_PTR(ret);
	rrpc->soffset = soffset;

	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
		pr_err("nvm: rrpc: could not initialize luns\n");

	rrpc->poffset = geo->sec_per_lun * lun_begin;

	ret = rrpc_core_init(rrpc);
		pr_err("nvm: rrpc: could not initialize core\n");

	ret = rrpc_map_init(rrpc);
		pr_err("nvm: rrpc: could not initialize maps\n");

	ret = rrpc_blocks_init(rrpc);
		pr_err("nvm: rrpc: could not initialize state for blocks\n");

	ret = rrpc_luns_configure(rrpc);
		pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");

	ret = rrpc_gc_init(rrpc);
		pr_err("nvm: rrpc: could not initialize gc\n");

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
			rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);

	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));

	return ERR_PTR(ret);

/* round robin, page-based FTL, and cost-based GC */
static struct nvm_tgt_type tt_rrpc = {
	.version	= {1, 0, 0},

	.make_rq	= rrpc_make_rq,
	.capacity	= rrpc_capacity,
	.end_io		= rrpc_end_io,

static int __init rrpc_module_init(void)
	return nvm_register_tgt_type(&tt_rrpc);

static void rrpc_module_exit(void)
	nvm_unregister_tgt_type(&tt_rrpc);

module_init(rrpc_module_init);
module_exit(rrpc_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");