/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#include "rrpc.h"
static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
static DECLARE_RWSEM(rrpc_lock);

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags);

#define rrpc_for_each_lun(rrpc, rlun, i) \
		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
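/*
 * Illustrative sketch only: rrpc_for_each_lun() walks every instantiated
 * LUN. It is typically used for lock-free scans where an estimate is good
 * enough, e.g. picking the LUN with the most free blocks, as done in
 * rrpc_get_lun_rr() further below:
 *
 *	unsigned int i;
 *	struct rrpc_lun *rlun, *best = &rrpc->luns[0];
 *
 *	rrpc_for_each_lun(rrpc, rlun, i) {
 *		if (rlun->parent->nr_free_blocks > best->parent->nr_free_blocks)
 *			best = rlun;
 *	}
 */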
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
	struct rrpc_block *rblk = a->rblk;
	unsigned int pg_offset;

	lockdep_assert_held(&rrpc->rev_lock);

	if (a->addr == ADDR_EMPTY || !rblk)
		return;

	spin_lock(&rblk->lock);

	div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
	rblk->nr_invalid_pages++;

	spin_unlock(&rblk->lock);

	rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
}
static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
							unsigned int len)
{
	sector_t i;

	spin_lock(&rrpc->rev_lock);
	for (i = slba; i < slba + len; i++) {
		struct rrpc_addr *gp = &rrpc->trans_map[i];

		rrpc_page_invalidate(rrpc, gp);
		gp->rblk = NULL;
	}
	spin_unlock(&rrpc->rev_lock);
}
static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
					sector_t laddr, unsigned int pages)
{
	struct nvm_rq *rqd;
	struct rrpc_inflight_rq *inf;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
	if (!rqd)
		return ERR_PTR(-ENOMEM);

	inf = rrpc_get_inflight_rq(rqd);
	if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
		mempool_free(rqd, rrpc->rq_pool);
		return NULL;
	}

	return rqd;
}
static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

	rrpc_unlock_laddr(rrpc, inf);

	mempool_free(rqd, rrpc->rq_pool);
}
static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
{
	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
	struct nvm_rq *rqd;

	do {
		/* retry until the logical range is ours */
		rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
		schedule();
	} while (!rqd);

	if (IS_ERR(rqd)) {
		pr_err("rrpc: unable to acquire inflight IO\n");
		bio_io_error(bio);
		return;
	}

	rrpc_invalidate_range(rrpc, slba, len);
	rrpc_inflight_laddr_release(rrpc, rqd);
}
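/*
 * Note on the unit conversion in rrpc_discard() above: bi_sector and
 * bi_size are in 512-byte units and bytes respectively, while the FTL maps
 * at RRPC_EXPOSED_PAGE_SIZE granularity. Assuming the usual 4KB exposed
 * page (so NR_PHY_IN_LOG == 8), a discard starting at sector 2048 with a
 * size of 64KB becomes slba = 256 and len = 16 logical pages.
 */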
static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	return (rblk->next_page == rrpc->dev->sec_per_blk);
}
/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_block *blk = rblk->parent;
	int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);

	return lun_blk * rrpc->dev->sec_per_blk;
}
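/*
 * Worked example with hypothetical geometry (blks_per_lun = 1024,
 * nr_luns = 4, sec_per_blk = 256): a block with global id 5000 gives
 * lun_blk = 5000 % 4096 = 904, i.e. a relative address of 904 * 256 =
 * 231424 sectors into the target's own linear address space.
 */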
/* Calculate global addr for the given block */
static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_block *blk = rblk->parent;

	return blk->id * rrpc->dev->sec_per_blk;
}
static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
{
	struct ppa_addr paddr;

	paddr.ppa = addr;
	return linear_to_generic_addr(dev, paddr);
}
/* requires lun->lock taken */
static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
						struct rrpc_block **cur_rblk)
{
	struct rrpc *rrpc = rlun->rrpc;

	if (*cur_rblk) {
		spin_lock(&(*cur_rblk)->lock);
		WARN_ON(!block_is_full(rrpc, *cur_rblk));
		spin_unlock(&(*cur_rblk)->lock);
	}
	*cur_rblk = new_rblk;
}
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
							unsigned long flags)
{
	struct nvm_block *blk;
	struct rrpc_block *rblk;

	blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
	if (!blk) {
		pr_err("nvm: rrpc: cannot get new block from media manager\n");
		return NULL;
	}

	rblk = rrpc_get_rblk(rlun, blk->id);
	blk->priv = rblk;
	bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
	rblk->next_page = 0;
	rblk->nr_invalid_pages = 0;
	atomic_set(&rblk->data_cmnt_size, 0);

	return rblk;
}
static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	nvm_put_blk(rrpc->dev, rblk->parent);
}
static void rrpc_put_blks(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		if (rlun->cur)
			rrpc_put_blk(rrpc, rlun->cur);
		if (rlun->gc_cur)
			rrpc_put_blk(rrpc, rlun->gc_cur);
	}
}
static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
{
	int next = atomic_inc_return(&rrpc->next_lun);

	return &rrpc->luns[next % rrpc->nr_luns];
}
static void rrpc_gc_kick(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	unsigned int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		queue_work(rrpc->krqd_wq, &rlun->ws_gc);
	}
}
/*
 * timed GC every interval.
 */
static void rrpc_gc_timer(unsigned long data)
{
	struct rrpc *rrpc = (struct rrpc *)data;

	rrpc_gc_kick(rrpc);
	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
}
static void rrpc_end_sync_bio(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	if (bio->bi_error)
		pr_err("nvm: gc request failed (%u).\n", bio->bi_error);

	complete(waiting);
}
/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @block: the block from which to migrate live pages
 *
 * Description:
 *   GC algorithms may call this function to migrate remaining live
 *   pages off the block prior to erasing it. This function blocks
 *   further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct request_queue *q = rrpc->dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_sec_per_blk = rrpc->dev->sec_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
	if (!page) {
		bio_put(bio);
		return -ENOMEM;
	}

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					    nr_sec_per_blk)) < nr_sec_per_blk) {
		phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
try:
		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);
			schedule();
			goto try;
		}
		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);
		if (bio->bi_error) {
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}

		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);
		if (bio->bi_error)
			goto finished;

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}
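/*
 * Sketch of the flow above (illustrative, not normative): for every page
 * of the block that is still valid, the scratch page is (1) read from the
 * old physical address through the GC I/O path and then (2) written back
 * through the normal write path, where rrpc_map_page() allocates a new
 * physical page and updates the L2P mapping as a side effect.
 */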
static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_dev *dev = rrpc->dev;

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto put_back;

	if (nvm_erase_blk(dev, rblk->parent, 0))
		goto put_back;

	rrpc_put_blk(rrpc, rblk);

	return;

put_back:
	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
}
/* the block with the highest number of invalid pages will be at the beginning
 * of the list
 */
static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
							struct rrpc_block *rb)
{
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
		return ra;

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}
/* linearly find the block with the highest number of invalid pages
 * requires lun->lock
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblock, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblock, prio_list, prio)
		max = rblock_max_invalid(max, rblock);

	return max;
}
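/*
 * Example: with three queued blocks holding 10, 250 and 250 invalid pages,
 * the scan settles on the first block holding 250, since
 * rblock_max_invalid() keeps the current maximum on ties.
 */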
static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_lun *lun = rlun->parent;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > lun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblock = block_prio_find_max(rlun);
		struct nvm_block *block = rblock->parent;

		if (!rblock->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblock->prio);

		BUG_ON(!block_is_full(rrpc, rblock));

		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

		gcb->rrpc = rrpc;
		gcb->rblk = rblock;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}
static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
							rblk->parent->id);
}
static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};
static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* during GC, we don't care about RR, instead we want to make
	 * sure that we maintain evenness between the block luns.
	 */
	max_free = &rrpc->luns[0];
	/* prevent a GC-ing lun from devouring pages of a lun with
	 * few free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->parent->nr_free_blocks >
					max_free->parent->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}
static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
					struct rrpc_block *rblk, u64 paddr)
{
	struct rrpc_addr *gp;
	struct rrpc_rev_addr *rev;

	BUG_ON(laddr >= rrpc->nr_sects);

	gp = &rrpc->trans_map[laddr];
	spin_lock(&rrpc->rev_lock);
	if (gp->rblk)
		rrpc_page_invalidate(rrpc, gp);

	gp->addr = paddr;
	gp->rblk = rblk;

	rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
	rev->addr = laddr;
	spin_unlock(&rrpc->rev_lock);

	return gp;
}
static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	u64 addr = ADDR_EMPTY;

	spin_lock(&rblk->lock);
	if (block_is_full(rrpc, rblk))
		goto out;

	addr = block_to_addr(rrpc, rblk) + rblk->next_page;

	rblk->next_page++;
out:
	spin_unlock(&rblk->lock);
	return addr;
}
/* Map logical address to a physical page. The mapping implements a round robin
 * approach and allocates a page from the next lun available.
 *
 * Returns rrpc_addr with the physical address and block. Returns NULL if no
 * blocks in the next rlun are available.
 */
static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
								int is_gc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk, **cur_rblk;
	struct nvm_lun *lun;
	u64 paddr;
	int gc_force = 0;

	rlun = rrpc_get_lun_rr(rrpc, is_gc);
	lun = rlun->parent;

	if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
		return NULL;

	/*
	 * page allocation steps:
	 * 1. Try to allocate a new page from the current rblk
	 * 2a. If that succeeds, proceed to map it in and return
	 * 2b. If it fails, first try to allocate a new block from the media
	 *     manager, and then retry step 1. Retry until the normal block
	 *     pool is exhausted.
	 * 3. If exhausted, and the garbage collector is requesting the block,
	 *    go to the reserved block and retry step 1.
	 *    In the case that this fails as well, or it is not GC
	 *    requesting, report not being able to retrieve a block and let
	 *    the caller handle further processing.
	 */
	spin_lock(&rlun->lock);
	cur_rblk = &rlun->cur;
	rblk = rlun->cur;
retry:
	paddr = rrpc_alloc_addr(rrpc, rblk);

	if (paddr != ADDR_EMPTY)
		goto done;

	if (!list_empty(&rlun->wblk_list)) {
new_blk:
		rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
									prio);
		rrpc_set_lun_cur(rlun, rblk, cur_rblk);
		list_del(&rblk->prio);
		goto retry;
	}
	spin_unlock(&rlun->lock);

	rblk = rrpc_get_blk(rrpc, rlun, gc_force);
	if (rblk) {
		spin_lock(&rlun->lock);
		list_add_tail(&rblk->prio, &rlun->wblk_list);
		/*
		 * another thread might already have added a new block.
		 * Therefore, make sure that one is used, instead of the
		 * one just added.
		 */
		goto new_blk;
	}

	if (unlikely(is_gc) && !gc_force) {
		/* retry from emergency gc block */
		cur_rblk = &rlun->gc_cur;
		rblk = rlun->gc_cur;
		gc_force = 1;
		spin_lock(&rlun->lock);
		goto retry;
	}

	pr_err("rrpc: failed to allocate new block\n");
	return NULL;
done:
	spin_unlock(&rlun->lock);
	return rrpc_update_map(rrpc, laddr, rblk, paddr);
}
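/*
 * Illustrative call sequence (assuming a regular, non-GC 4KB write); this
 * mirrors what rrpc_write_rq() further below does with the returned mapping:
 *
 *	struct rrpc_addr *p = rrpc_map_page(rrpc, laddr, 0);
 *
 *	if (!p)
 *		return NVM_IO_REQUEUE;	(out of blocks; requeue the bio)
 *
 *	rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
 *	rqd->opcode = NVM_OP_HBWRITE;
 */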
static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_block_gc *gcb;

	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
	if (!gcb) {
		pr_err("rrpc: unable to queue block for gc.");
		return;
	}

	gcb->rrpc = rrpc;
	gcb->rblk = rblk;

	INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}
static void __rrpc_mark_bad_block(struct nvm_dev *dev, struct ppa_addr *ppa)
{
	nvm_mark_blk(dev, *ppa, NVM_BLK_ST_BAD);
	nvm_set_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
}
static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = rrpc->dev;
	void *comp_bits = &rqd->ppa_status;
	struct ppa_addr ppa, prev_ppa;
	int nr_ppas = rqd->nr_ppas;
	int bit;

	if (rqd->nr_ppas == 1)
		__rrpc_mark_bad_block(dev, &rqd->ppa_addr);

	ppa_set_empty(&prev_ppa);
	bit = -1;
	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
		ppa = rqd->ppa_list[bit];
		if (ppa_cmp_blk(ppa, prev_ppa))
			continue;

		__rrpc_mark_bad_block(dev, &ppa);
	}
}
static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
						sector_t laddr, uint8_t npages)
{
	struct rrpc_addr *p;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	int cmnt_size, i;

	for (i = 0; i < npages; i++) {
		p = &rrpc->trans_map[laddr + i];
		rblk = p->rblk;
		lun = rblk->parent->lun;

		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
			rrpc_run_gc(rrpc, rblk);
	}
}
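/*
 * Summary of the write-completion accounting above: data_cmnt_size counts
 * sectors whose writes have completed for a given block. Once it reaches
 * sec_per_blk the block is fully written, and rrpc_run_gc() hands it to
 * rrpc_gc_queue(), which places it on the per-LUN prio_list as a GC
 * candidate.
 */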
static void rrpc_end_io(struct nvm_rq *rqd)
{
	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	uint8_t npages = rqd->nr_ppas;
	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

	if (bio_data_dir(rqd->bio) == WRITE) {
		if (rqd->error == NVM_RSP_ERR_FAILWRITE)
			rrpc_mark_bad_block(rrpc, rqd);

		rrpc_end_io_write(rrpc, rrqd, laddr, npages);
	}

	bio_put(rqd->bio);

	if (rrqd->flags & NVM_IOTYPE_GC)
		return;

	rrpc_unlock_rq(rrpc, rqd);

	if (npages > 1)
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);

	mempool_free(rqd, rrpc->rq_pool);
}
static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *gp;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
		gp = &rrpc->trans_map[laddr + i];

		if (gp->rblk) {
			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								gp->addr);
		} else {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			return NVM_IO_DONE;
		}
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}
static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
							unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);
	struct rrpc_addr *gp;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
	gp = &rrpc->trans_map[laddr];

	if (gp->rblk) {
		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
	} else {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		return NVM_IO_DONE;
	}

	rqd->opcode = NVM_OP_HBREAD;
	rrqd->addr = gp;

	return NVM_IO_OK;
}
static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *p;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		p = rrpc_map_page(rrpc, laddr + i, is_gc);
		if (!p) {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			rrpc_gc_kick(rrpc);
			return NVM_IO_REQUEUE;
		}

		rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								p->addr);
	}

	rqd->opcode = NVM_OP_HBWRITE;

	return NVM_IO_OK;
}
static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	struct rrpc_addr *p;
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	p = rrpc_map_page(rrpc, laddr, is_gc);
	if (!p) {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		rrpc_gc_kick(rrpc);
		return NVM_IO_REQUEUE;
	}

	rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
	rqd->opcode = NVM_OP_HBWRITE;
	rrqd->addr = p;

	return NVM_IO_OK;
}
static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
	if (npages > 1) {
		rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
							&rqd->dma_ppa_list);
		if (!rqd->ppa_list) {
			pr_err("rrpc: not able to allocate ppa list\n");
			return NVM_IO_ERR;
		}

		if (bio_op(bio) == REQ_OP_WRITE)
			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
									npages);

		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
	}

	if (bio_op(bio) == REQ_OP_WRITE)
		return rrpc_write_rq(rrpc, bio, rqd, flags);

	return rrpc_read_rq(rrpc, bio, rqd, flags);
}
static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	int err;
	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
	uint8_t nr_pages = rrpc_get_pages(bio);
	int bio_size = bio_sectors(bio) << 9;

	if (bio_size < rrpc->dev->sec_size)
		return NVM_IO_ERR;
	else if (bio_size > rrpc->dev->max_rq_size)
		return NVM_IO_ERR;

	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
	if (err)
		return err;

	bio_get(bio);
	rqd->bio = bio;
	rqd->ins = &rrpc->instance;
	rqd->nr_ppas = nr_pages;
	rrq->flags = flags;

	err = nvm_submit_io(rrpc->dev, rqd);
	if (err) {
		pr_err("rrpc: I/O submission failed: %d\n", err);
		bio_put(bio);
		if (!(flags & NVM_IOTYPE_GC)) {
			rrpc_unlock_rq(rrpc, rqd);
			if (rqd->nr_ppas > 1)
				nvm_dev_dma_free(rrpc->dev,
					rqd->ppa_list, rqd->dma_ppa_list);
		}
		return NVM_IO_ERR;
	}

	return NVM_IO_OK;
}
static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
{
	struct rrpc *rrpc = q->queuedata;
	struct nvm_rq *rqd;
	int err;

	blk_queue_split(q, &bio, q->bio_split);

	if (bio_op(bio) == REQ_OP_DISCARD) {
		rrpc_discard(rrpc, bio);
		return BLK_QC_T_NONE;
	}

	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
	if (!rqd) {
		pr_err_ratelimited("rrpc: not able to queue bio.");
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	memset(rqd, 0, sizeof(struct nvm_rq));

	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
	switch (err) {
	case NVM_IO_OK:
		return BLK_QC_T_NONE;
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	case NVM_IO_REQUEUE:
		spin_lock(&rrpc->bio_lock);
		bio_list_add(&rrpc->requeue_bios, bio);
		spin_unlock(&rrpc->bio_lock);
		queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
		break;
	}

	mempool_free(rqd, rrpc->rq_pool);
	return BLK_QC_T_NONE;
}
static void rrpc_requeue(struct work_struct *work)
{
	struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock(&rrpc->bio_lock);
	bio_list_merge(&bios, &rrpc->requeue_bios);
	bio_list_init(&rrpc->requeue_bios);
	spin_unlock(&rrpc->bio_lock);

	while ((bio = bio_list_pop(&bios)))
		rrpc_make_rq(rrpc->disk->queue, bio);
}
static void rrpc_gc_free(struct rrpc *rrpc)
{
	if (rrpc->krqd_wq)
		destroy_workqueue(rrpc->krqd_wq);

	if (rrpc->kgc_wq)
		destroy_workqueue(rrpc->kgc_wq);
}
static int rrpc_gc_init(struct rrpc *rrpc)
{
	rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
								rrpc->nr_luns);
	if (!rrpc->krqd_wq)
		return -ENOMEM;

	rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
	if (!rrpc->kgc_wq)
		return -ENOMEM;

	setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);

	return 0;
}
static void rrpc_map_free(struct rrpc *rrpc)
{
	vfree(rrpc->rev_trans_map);
	vfree(rrpc->trans_map);
}
static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct rrpc *rrpc = (struct rrpc *)private;
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_addr *addr = rrpc->trans_map + slba;
	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
	u64 elba = slba + nlb;
	u64 i;

	if (unlikely(elba > dev->total_secs)) {
		pr_err("nvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);
		unsigned int mod;

		/* LNVM treats address-spaces as silos, LBA and PBA are
		 * equally large and zero-indexed.
		 */
		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("nvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. As it often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		div_u64_rem(pba, rrpc->nr_sects, &mod);

		addr[i].addr = pba;
		raddr[mod].addr = slba + i;
	}

	return 0;
}
static int rrpc_map_init(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	sector_t i;
	int ret;

	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
	if (!rrpc->trans_map)
		return -ENOMEM;

	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
							* rrpc->nr_sects);
	if (!rrpc->rev_trans_map)
		return -ENOMEM;

	for (i = 0; i < rrpc->nr_sects; i++) {
		struct rrpc_addr *p = &rrpc->trans_map[i];
		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];

		p->addr = ADDR_EMPTY;
		r->addr = ADDR_EMPTY;
	}

	if (!dev->ops->get_l2p_tbl)
		return 0;

	/* Bring up the mapping table from device */
	ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
					rrpc_l2p_update, rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not read L2P table.\n");
		return -EINVAL;
	}

	return 0;
}
/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64
static int rrpc_core_init(struct rrpc *rrpc)
{
	down_write(&rrpc_lock);
	if (!rrpc_gcb_cache) {
		rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
				sizeof(struct rrpc_block_gc), 0, 0, NULL);
		if (!rrpc_gcb_cache) {
			up_write(&rrpc_lock);
			return -ENOMEM;
		}

		rrpc_rq_cache = kmem_cache_create("rrpc_rq",
				sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
				0, 0, NULL);
		if (!rrpc_rq_cache) {
			kmem_cache_destroy(rrpc_gcb_cache);
			up_write(&rrpc_lock);
			return -ENOMEM;
		}
	}
	up_write(&rrpc_lock);

	rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!rrpc->page_pool)
		return -ENOMEM;

	rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
								rrpc_gcb_cache);
	if (!rrpc->gcb_pool)
		return -ENOMEM;

	rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
	if (!rrpc->rq_pool)
		return -ENOMEM;

	spin_lock_init(&rrpc->inflights.lock);
	INIT_LIST_HEAD(&rrpc->inflights.reqs);

	return 0;
}
static void rrpc_core_free(struct rrpc *rrpc)
{
	mempool_destroy(rrpc->page_pool);
	mempool_destroy(rrpc->gcb_pool);
	mempool_destroy(rrpc->rq_pool);
}
static void rrpc_luns_free(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvm_lun *lun;
	struct rrpc_lun *rlun;
	int i;

	if (!rrpc->luns)
		return;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		lun = rlun->parent;
		if (!lun)
			break;
		dev->mt->release_lun(dev, lun->id);
		vfree(rlun->blocks);
	}

	kfree(rrpc->luns);
}
static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
{
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun;
	int i, j, ret = -EINVAL;

	if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
		pr_err("rrpc: number of pages per block too high.");
		return -EINVAL;
	}

	spin_lock_init(&rrpc->rev_lock);

	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
								GFP_KERNEL);
	if (!rrpc->luns)
		return -ENOMEM;

	/* 1:1 mapping */
	for (i = 0; i < rrpc->nr_luns; i++) {
		int lunid = lun_begin + i;
		struct nvm_lun *lun;

		if (dev->mt->reserve_lun(dev, lunid)) {
			pr_err("rrpc: lun %u is already allocated\n", lunid);
			goto err;
		}

		lun = dev->mt->get_lun(dev, lunid);
		if (!lun)
			goto err;

		rlun = &rrpc->luns[i];
		rlun->parent = lun;
		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
						rrpc->dev->blks_per_lun);
		if (!rlun->blocks) {
			ret = -ENOMEM;
			goto err;
		}

		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
			struct rrpc_block *rblk = &rlun->blocks[j];
			struct nvm_block *blk = &lun->blocks[j];

			rblk->parent = blk;
			rblk->rlun = rlun;
			INIT_LIST_HEAD(&rblk->prio);
			spin_lock_init(&rblk->lock);
		}

		rlun->rrpc = rrpc;
		INIT_LIST_HEAD(&rlun->prio_list);
		INIT_LIST_HEAD(&rlun->wblk_list);

		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
		spin_lock_init(&rlun->lock);
	}

	return 0;
err:
	return ret;
}
/* returns 0 on success and stores the beginning address in *begin */
static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvmm_type *mt = dev->mt;
	sector_t size = rrpc->nr_sects * dev->sec_size;
	int ret;

	size >>= 9;

	ret = mt->get_area(dev, begin, size);
	if (!ret)
		*begin >>= (ilog2(dev->sec_size) - 9);

	return ret;
}
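/*
 * Example of the shift above, assuming a 4096-byte device sector: the area
 * begin returned by the media manager is in 512-byte units, and
 * ilog2(4096) - 9 = 3, so a begin of 16384 (512-byte sectors) is stored as
 * soffset = 2048 device sectors. rrpc_area_free() below applies the
 * inverse shift before handing the area back.
 */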
static void rrpc_area_free(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvmm_type *mt = dev->mt;
	sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);

	mt->put_area(dev, begin);
}
static void rrpc_free(struct rrpc *rrpc)
{
	rrpc_gc_free(rrpc);
	rrpc_map_free(rrpc);
	rrpc_core_free(rrpc);
	rrpc_luns_free(rrpc);
	rrpc_area_free(rrpc);

	kfree(rrpc);
}
static void rrpc_exit(void *private)
{
	struct rrpc *rrpc = private;

	del_timer(&rrpc->gc_timer);

	flush_workqueue(rrpc->krqd_wq);
	flush_workqueue(rrpc->kgc_wq);

	rrpc_free(rrpc);
}
static sector_t rrpc_capacity(void *private)
{
	struct rrpc *rrpc = private;
	struct nvm_dev *dev = rrpc->dev;
	sector_t reserved, provisioned;

	/* cur, gc, and two emergency blocks for each lun */
	reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
	provisioned = rrpc->nr_sects - reserved;

	if (reserved > rrpc->nr_sects) {
		pr_err("rrpc: not enough space available to expose storage.\n");
		return 0;
	}

	sector_div(provisioned, 10);
	return provisioned * 9 * NR_PHY_IN_LOG;
}
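/*
 * Worked example with hypothetical geometry (nr_luns = 4, sec_per_blk = 256,
 * nr_sects = 1048576, 4KB exposed pages so NR_PHY_IN_LOG = 8):
 * reserved = 4 * 256 * 4 = 4096 and provisioned = 1044480; keeping 90%
 * gives 104448 * 9 = 940032 pages, i.e. 7520256 512-byte sectors exposed.
 */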
/*
 * Looks up the logical address in the reverse translation map and checks
 * whether the page is still valid by comparing its logical-to-physical
 * mapping with the physical address. Pages whose mapping no longer points
 * back at them are marked as invalid.
 */
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_dev *dev = rrpc->dev;
	int offset;
	struct rrpc_addr *laddr;
	u64 bpaddr, paddr, pladdr;

	bpaddr = block_to_rel_addr(rrpc, rblk);
	for (offset = 0; offset < dev->sec_per_blk; offset++) {
		paddr = bpaddr + offset;

		pladdr = rrpc->rev_trans_map[paddr].addr;
		if (pladdr == ADDR_EMPTY)
			continue;

		laddr = &rrpc->trans_map[pladdr];

		if (paddr == laddr->addr) {
			laddr->rblk = rblk;
		} else {
			set_bit(offset, rblk->invalid_pages);
			rblk->nr_invalid_pages++;
		}
	}
}
static int rrpc_blocks_init(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int lun_iter, blk_iter;

	for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
		rlun = &rrpc->luns[lun_iter];

		for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
								blk_iter++) {
			rblk = &rlun->blocks[blk_iter];
			rrpc_block_map_update(rrpc, rblk);
		}
	}

	return 0;
}
static int rrpc_luns_configure(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (!rblk)
			goto err;
		rrpc_set_lun_cur(rlun, rblk, &rlun->cur);

		/* Emergency gc block */
		rblk = rrpc_get_blk(rrpc, rlun, 1);
		if (!rblk)
			goto err;
		rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
	}

	return 0;
err:
	rrpc_put_blks(rrpc);
	return -EINVAL;
}
static struct nvm_tgt_type tt_rrpc;
static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
						int lun_begin, int lun_end)
{
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct rrpc *rrpc;
	sector_t soffset;
	int ret;

	if (!(dev->identity.dom & NVM_RSP_L2P)) {
		pr_err("nvm: rrpc: device does not support l2p (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
	if (!rrpc)
		return ERR_PTR(-ENOMEM);

	rrpc->instance.tt = &tt_rrpc;
	rrpc->dev = dev;
	rrpc->disk = tdisk;

	bio_list_init(&rrpc->requeue_bios);
	spin_lock_init(&rrpc->bio_lock);
	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);

	rrpc->nr_luns = lun_end - lun_begin + 1;
	rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
	rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;

	/* simple round-robin strategy: the first inc_return yields lun 0 */
	atomic_set(&rrpc->next_lun, -1);

	ret = rrpc_area_init(rrpc, &soffset);
	if (ret < 0) {
		pr_err("nvm: rrpc: could not initialize area\n");
		return ERR_PTR(ret);
	}
	rrpc->soffset = soffset;

	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize luns\n");
		goto err;
	}

	rrpc->poffset = dev->sec_per_lun * lun_begin;
	rrpc->lun_offset = lun_begin;

	ret = rrpc_core_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize core\n");
		goto err;
	}

	ret = rrpc_map_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize maps\n");
		goto err;
	}

	ret = rrpc_blocks_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize state for blocks\n");
		goto err;
	}

	ret = rrpc_luns_configure(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
		goto err;
	}

	ret = rrpc_gc_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize gc\n");
		goto err;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
			rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);

	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));

	return rrpc;
err:
	rrpc_free(rrpc);
	return ERR_PTR(ret);
}
/* round robin, page-based FTL, and cost-based GC */
static struct nvm_tgt_type tt_rrpc = {
	.name		= "rrpc",
	.version	= {1, 0, 0},

	.make_rq	= rrpc_make_rq,
	.capacity	= rrpc_capacity,
	.end_io		= rrpc_end_io,

	.init		= rrpc_init,
	.exit		= rrpc_exit,
};
static int __init rrpc_module_init(void)
{
	return nvm_register_tgt_type(&tt_rrpc);
}

static void rrpc_module_exit(void)
{
	nvm_unregister_tgt_type(&tt_rrpc);
}

module_init(rrpc_module_init);
module_exit(rrpc_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");