]> git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - drivers/lightnvm/rrpc.c
Merge tag 'pci-v4.10-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaa...
[karo-tx-linux.git] / drivers / lightnvm / rrpc.c
index 5377c7a987aac8f08bfdd0805e3b60e36e422b0e..9fb7de395915ca8e3893f16273a5e6c20d4a763d 100644 (file)
@@ -45,7 +45,7 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
 
        spin_unlock(&rblk->lock);
 
-       rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
+       rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
 }
 
 static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
@@ -126,27 +126,26 @@ static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
 static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
        struct nvm_tgt_dev *dev = rrpc->dev;
-       struct nvm_block *blk = rblk->parent;
-       int lun_blk = blk->id % (dev->geo.blks_per_lun * rrpc->nr_luns);
-
-       return lun_blk * dev->geo.sec_per_blk;
-}
-
-/* Calculate global addr for the given block */
-static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
-       struct nvm_tgt_dev *dev = rrpc->dev;
-       struct nvm_block *blk = rblk->parent;
+       struct rrpc_lun *rlun = rblk->rlun;
 
-       return blk->id * dev->geo.sec_per_blk;
+       return rlun->id * dev->geo.sec_per_blk;
 }
 
-static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev, u64 addr)
+static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
+                                        struct rrpc_addr *gp)
 {
+       struct rrpc_block *rblk = gp->rblk;
+       struct rrpc_lun *rlun = rblk->rlun;
+       u64 addr = gp->addr;
        struct ppa_addr paddr;
 
        paddr.ppa = addr;
-       return linear_to_generic_addr(&dev->geo, paddr);
+       paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
+       paddr.g.ch = rlun->bppa.g.ch;
+       paddr.g.lun = rlun->bppa.g.lun;
+       paddr.g.blk = rblk->id;
+
+       return paddr;
 }
 
 /* requires lun->lock taken */
@@ -163,51 +162,46 @@ static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
        *cur_rblk = new_rblk;
 }
 
-static struct nvm_block *__rrpc_get_blk(struct rrpc *rrpc,
+static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
                                                        struct rrpc_lun *rlun)
 {
-       struct nvm_lun *lun = rlun->parent;
-       struct nvm_block *blk = NULL;
+       struct rrpc_block *rblk = NULL;
 
-       if (list_empty(&lun->free_list))
+       if (list_empty(&rlun->free_list))
                goto out;
 
-       blk = list_first_entry(&lun->free_list, struct nvm_block, list);
+       rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);
 
-       list_move_tail(&blk->list, &lun->used_list);
-       blk->state = NVM_BLK_ST_TGT;
-       lun->nr_free_blocks--;
+       list_move_tail(&rblk->list, &rlun->used_list);
+       rblk->state = NVM_BLK_ST_TGT;
+       rlun->nr_free_blocks--;
 
 out:
-       return blk;
+       return rblk;
 }
 
 static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
                                                        unsigned long flags)
 {
        struct nvm_tgt_dev *dev = rrpc->dev;
-       struct nvm_lun *lun = rlun->parent;
-       struct nvm_block *blk;
        struct rrpc_block *rblk;
        int is_gc = flags & NVM_IOTYPE_GC;
 
        spin_lock(&rlun->lock);
-       if (!is_gc && lun->nr_free_blocks < rlun->reserved_blocks) {
+       if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
                pr_err("nvm: rrpc: cannot give block to non GC request\n");
                spin_unlock(&rlun->lock);
                return NULL;
        }
 
-       blk = __rrpc_get_blk(rrpc, rlun);
-       if (!blk) {
+       rblk = __rrpc_get_blk(rrpc, rlun);
+       if (!rblk) {
                pr_err("nvm: rrpc: cannot get new block\n");
                spin_unlock(&rlun->lock);
                return NULL;
        }
        spin_unlock(&rlun->lock);
 
-       rblk = rrpc_get_rblk(rlun, blk->id);
-       blk->priv = rblk;
        bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
        rblk->next_page = 0;
        rblk->nr_invalid_pages = 0;
@@ -218,23 +212,22 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
 
 static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
-       struct nvm_block *blk = rblk->parent;
        struct rrpc_lun *rlun = rblk->rlun;
-       struct nvm_lun *lun = rlun->parent;
 
        spin_lock(&rlun->lock);
-       if (blk->state & NVM_BLK_ST_TGT) {
-               list_move_tail(&blk->list, &lun->free_list);
-               lun->nr_free_blocks++;
-               blk->state = NVM_BLK_ST_FREE;
-       } else if (blk->state & NVM_BLK_ST_BAD) {
-               list_move_tail(&blk->list, &lun->bb_list);
-               blk->state = NVM_BLK_ST_BAD;
+       if (rblk->state & NVM_BLK_ST_TGT) {
+               list_move_tail(&rblk->list, &rlun->free_list);
+               rlun->nr_free_blocks++;
+               rblk->state = NVM_BLK_ST_FREE;
+       } else if (rblk->state & NVM_BLK_ST_BAD) {
+               list_move_tail(&rblk->list, &rlun->bb_list);
+               rblk->state = NVM_BLK_ST_BAD;
        } else {
                WARN_ON_ONCE(1);
-               pr_err("rrpc: erroneous block type (%lu -> %u)\n",
-                                                       blk->id, blk->state);
-               list_move_tail(&blk->list, &lun->bb_list);
+               pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk%d-> %u)\n",
+                                       rlun->bppa.g.ch, rlun->bppa.g.lun,
+                                       rblk->id, rblk->state);
+               list_move_tail(&rblk->list, &rlun->bb_list);
        }
        spin_unlock(&rlun->lock);
 }
@@ -334,12 +327,12 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
                                            nr_sec_per_blk)) < nr_sec_per_blk) {
 
                /* Lock laddr */
-               phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
+               phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;
 
 try:
                spin_lock(&rrpc->rev_lock);
                /* Get logical address from physical to logical table */
-               rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
+               rev = &rrpc->rev_trans_map[phys_addr];
                /* already updated by previous regular write */
                if (rev->addr == ADDR_EMPTY) {
                        spin_unlock(&rrpc->rev_lock);
@@ -422,14 +415,22 @@ static void rrpc_block_gc(struct work_struct *work)
        struct rrpc_block *rblk = gcb->rblk;
        struct rrpc_lun *rlun = rblk->rlun;
        struct nvm_tgt_dev *dev = rrpc->dev;
+       struct ppa_addr ppa;
 
        mempool_free(gcb, rrpc->gcb_pool);
-       pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
+       pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
+                       rlun->bppa.g.ch, rlun->bppa.g.lun,
+                       rblk->id);
 
        if (rrpc_move_valid_pages(rrpc, rblk))
                goto put_back;
 
-       if (nvm_erase_blk(dev->parent, rblk->parent, 0))
+       ppa.ppa = 0;
+       ppa.g.ch = rlun->bppa.g.ch;
+       ppa.g.lun = rlun->bppa.g.lun;
+       ppa.g.blk = rblk->id;
+
+       if (nvm_erase_blk(dev, &ppa, 0))
                goto put_back;
 
        rrpc_put_blk(rrpc, rblk);
@@ -445,7 +446,7 @@ put_back:
 /* the block with highest number of invalid pages, will be in the beginning
  * of the list
  */
-static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
+static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
                                                        struct rrpc_block *rb)
 {
        if (ra->nr_invalid_pages == rb->nr_invalid_pages)
@@ -460,13 +461,13 @@ static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
 static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
 {
        struct list_head *prio_list = &rlun->prio_list;
-       struct rrpc_block *rblock, *max;
+       struct rrpc_block *rblk, *max;
 
        BUG_ON(list_empty(prio_list));
 
        max = list_first_entry(prio_list, struct rrpc_block, prio);
-       list_for_each_entry(rblock, prio_list, prio)
-               max = rblock_max_invalid(max, rblock);
+       list_for_each_entry(rblk, prio_list, prio)
+               max = rblk_max_invalid(max, rblk);
 
        return max;
 }
@@ -476,7 +477,6 @@ static void rrpc_lun_gc(struct work_struct *work)
        struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
        struct rrpc *rrpc = rlun->rrpc;
        struct nvm_tgt_dev *dev = rrpc->dev;
-       struct nvm_lun *lun = rlun->parent;
        struct rrpc_block_gc *gcb;
        unsigned int nr_blocks_need;
 
@@ -486,26 +486,27 @@ static void rrpc_lun_gc(struct work_struct *work)
                nr_blocks_need = rrpc->nr_luns;
 
        spin_lock(&rlun->lock);
-       while (nr_blocks_need > lun->nr_free_blocks &&
+       while (nr_blocks_need > rlun->nr_free_blocks &&
                                        !list_empty(&rlun->prio_list)) {
-               struct rrpc_block *rblock = block_prio_find_max(rlun);
-               struct nvm_block *block = rblock->parent;
+               struct rrpc_block *rblk = block_prio_find_max(rlun);
 
-               if (!rblock->nr_invalid_pages)
+               if (!rblk->nr_invalid_pages)
                        break;
 
                gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
                if (!gcb)
                        break;
 
-               list_del_init(&rblock->prio);
+               list_del_init(&rblk->prio);
 
-               BUG_ON(!block_is_full(rrpc, rblock));
+               WARN_ON(!block_is_full(rrpc, rblk));
 
-               pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
+               pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
+                                       rlun->bppa.g.ch, rlun->bppa.g.lun,
+                                       rblk->id);
 
                gcb->rrpc = rrpc;
-               gcb->rblk = rblock;
+               gcb->rblk = rblk;
                INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
 
                queue_work(rrpc->kgc_wq, &gcb->ws_gc);
@@ -530,8 +531,9 @@ static void rrpc_gc_queue(struct work_struct *work)
        spin_unlock(&rlun->lock);
 
        mempool_free(gcb, rrpc->gcb_pool);
-       pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
-                                                       rblk->parent->id);
+       pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
+                                       rlun->bppa.g.ch, rlun->bppa.g.lun,
+                                       rblk->id);
 }
 
 static const struct block_device_operations rrpc_fops = {
@@ -555,8 +557,7 @@ static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
         * estimate.
         */
        rrpc_for_each_lun(rrpc, rlun, i) {
-               if (rlun->parent->nr_free_blocks >
-                                       max_free->parent->nr_free_blocks)
+               if (rlun->nr_free_blocks > max_free->nr_free_blocks)
                        max_free = rlun;
        }
 
@@ -579,7 +580,7 @@ static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
        gp->addr = paddr;
        gp->rblk = rblk;
 
-       rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
+       rev = &rrpc->rev_trans_map[gp->addr];
        rev->addr = laddr;
        spin_unlock(&rrpc->rev_lock);
 
@@ -594,7 +595,7 @@ static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
        if (block_is_full(rrpc, rblk))
                goto out;
 
-       addr = block_to_addr(rrpc, rblk) + rblk->next_page;
+       addr = rblk->next_page;
 
        rblk->next_page++;
 out:
@@ -608,20 +609,22 @@ out:
  * Returns rrpc_addr with the physical address and block. Returns NULL if no
  * blocks in the next rlun are available.
  */
-static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
+static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
                                                                int is_gc)
 {
+       struct nvm_tgt_dev *tgt_dev = rrpc->dev;
        struct rrpc_lun *rlun;
        struct rrpc_block *rblk, **cur_rblk;
-       struct nvm_lun *lun;
+       struct rrpc_addr *p;
+       struct ppa_addr ppa;
        u64 paddr;
        int gc_force = 0;
 
+       ppa.ppa = ADDR_EMPTY;
        rlun = rrpc_get_lun_rr(rrpc, is_gc);
-       lun = rlun->parent;
 
-       if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
-               return NULL;
+       if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
+               return ppa;
 
        /*
         * page allocation steps:
@@ -678,10 +681,15 @@ new_blk:
        }
 
        pr_err("rrpc: failed to allocate new block\n");
-       return NULL;
+       return ppa;
 done:
        spin_unlock(&rlun->lock);
-       return rrpc_update_map(rrpc, laddr, rblk, paddr);
+       p = rrpc_update_map(rrpc, laddr, rblk, paddr);
+       if (!p)
+               return ppa;
+
+       /* return global address */
+       return rrpc_ppa_to_gaddr(tgt_dev, p);
 }
 
 static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
@@ -701,22 +709,44 @@ static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
        queue_work(rrpc->kgc_wq, &gcb->ws_gc);
 }
 
-static void __rrpc_mark_bad_block(struct nvm_tgt_dev *dev, struct ppa_addr *ppa)
+static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
 {
-               nvm_mark_blk(dev->parent, *ppa, NVM_BLK_ST_BAD);
-               nvm_set_bb_tbl(dev->parent, ppa, 1, NVM_BLK_T_GRWN_BAD);
+       struct rrpc_lun *rlun = NULL;
+       int i;
+
+       for (i = 0; i < rrpc->nr_luns; i++) {
+               if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
+                               rrpc->luns[i].bppa.g.lun == p.g.lun) {
+                       rlun = &rrpc->luns[i];
+                       break;
+               }
+       }
+
+       return rlun;
 }
 
-static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
+static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa)
 {
        struct nvm_tgt_dev *dev = rrpc->dev;
+       struct rrpc_lun *rlun;
+       struct rrpc_block *rblk;
+
+       rlun = rrpc_ppa_to_lun(rrpc, ppa);
+       rblk = &rlun->blocks[ppa.g.blk];
+       rblk->state = NVM_BLK_ST_BAD;
+
+       nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
+}
+
+static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
+{
        void *comp_bits = &rqd->ppa_status;
        struct ppa_addr ppa, prev_ppa;
        int nr_ppas = rqd->nr_ppas;
        int bit;
 
        if (rqd->nr_ppas == 1)
-               __rrpc_mark_bad_block(dev, &rqd->ppa_addr);
+               __rrpc_mark_bad_block(rrpc, rqd->ppa_addr);
 
        ppa_set_empty(&prev_ppa);
        bit = -1;
@@ -725,7 +755,7 @@ static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
                if (ppa_cmp_blk(ppa, prev_ppa))
                        continue;
 
-               __rrpc_mark_bad_block(dev, &ppa);
+               __rrpc_mark_bad_block(rrpc, ppa);
        }
 }
 
@@ -735,13 +765,11 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct rrpc_addr *p;
        struct rrpc_block *rblk;
-       struct nvm_lun *lun;
        int cmnt_size, i;
 
        for (i = 0; i < npages; i++) {
                p = &rrpc->trans_map[laddr + i];
                rblk = p->rblk;
-               lun = rblk->parent->lun;
 
                cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
                if (unlikely(cmnt_size == dev->geo.sec_per_blk))
@@ -798,7 +826,7 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
                gp = &rrpc->trans_map[laddr + i];
 
                if (gp->rblk) {
-                       rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp->addr);
+                       rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
                } else {
                        BUG_ON(is_gc);
                        rrpc_unlock_laddr(rrpc, r);
@@ -827,7 +855,7 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
        gp = &rrpc->trans_map[laddr];
 
        if (gp->rblk) {
-               rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
+               rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
        } else {
                BUG_ON(is_gc);
                rrpc_unlock_rq(rrpc, rqd);
@@ -844,7 +872,7 @@ static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
 {
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
-       struct rrpc_addr *p;
+       struct ppa_addr p;
        sector_t laddr = rrpc_get_laddr(bio);
        int is_gc = flags & NVM_IOTYPE_GC;
        int i;
@@ -857,7 +885,7 @@ static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
        for (i = 0; i < npages; i++) {
                /* We assume that mapping occurs at 4KB granularity */
                p = rrpc_map_page(rrpc, laddr + i, is_gc);
-               if (!p) {
+               if (p.ppa == ADDR_EMPTY) {
                        BUG_ON(is_gc);
                        rrpc_unlock_laddr(rrpc, r);
                        nvm_dev_dma_free(dev->parent, rqd->ppa_list,
@@ -866,7 +894,7 @@ static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
                        return NVM_IO_REQUEUE;
                }
 
-               rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, p->addr);
+               rqd->ppa_list[i] = p;
        }
 
        rqd->opcode = NVM_OP_HBWRITE;
@@ -877,7 +905,7 @@ static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
 static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
                                struct nvm_rq *rqd, unsigned long flags)
 {
-       struct rrpc_addr *p;
+       struct ppa_addr p;
        int is_gc = flags & NVM_IOTYPE_GC;
        sector_t laddr = rrpc_get_laddr(bio);
 
@@ -885,14 +913,14 @@ static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
                return NVM_IO_REQUEUE;
 
        p = rrpc_map_page(rrpc, laddr, is_gc);
-       if (!p) {
+       if (p.ppa == ADDR_EMPTY) {
                BUG_ON(is_gc);
                rrpc_unlock_rq(rrpc, rqd);
                rrpc_gc_kick(rrpc);
                return NVM_IO_REQUEUE;
        }
 
-       rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
+       rqd->ppa_addr = p;
        rqd->opcode = NVM_OP_HBWRITE;
 
        return NVM_IO_OK;
@@ -948,15 +976,15 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
        rqd->nr_ppas = nr_pages;
        rrq->flags = flags;
 
-       err = nvm_submit_io(dev->parent, rqd);
+       err = nvm_submit_io(dev, rqd);
        if (err) {
                pr_err("rrpc: I/O submission failed: %d\n", err);
                bio_put(bio);
                if (!(flags & NVM_IOTYPE_GC)) {
                        rrpc_unlock_rq(rrpc, rqd);
                        if (rqd->nr_ppas > 1)
-                               nvm_dev_dma_free(dev->parent,
-                                       rqd->ppa_list, rqd->dma_ppa_list);
+                               nvm_dev_dma_free(dev->parent, rqd->ppa_list,
+                                                       rqd->dma_ppa_list);
                }
                return NVM_IO_ERR;
        }
@@ -1061,16 +1089,21 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct rrpc_addr *addr = rrpc->trans_map + slba;
        struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
+       struct rrpc_lun *rlun;
+       struct rrpc_block *rblk;
        u64 i;
 
        for (i = 0; i < nlb; i++) {
+               struct ppa_addr gaddr;
                u64 pba = le64_to_cpu(entries[i]);
                unsigned int mod;
+
                /* LNVM treats address-spaces as silos, LBA and PBA are
                 * equally large and zero-indexed.
                 */
                if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
                        pr_err("nvm: L2P data entry is out of bounds!\n");
+                       pr_err("nvm: Maybe loaded an old target L2P\n");
                        return -EINVAL;
                }
 
@@ -1083,7 +1116,27 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
 
                div_u64_rem(pba, rrpc->nr_sects, &mod);
 
+               gaddr = rrpc_recov_addr(dev, pba);
+               rlun = rrpc_ppa_to_lun(rrpc, gaddr);
+               if (!rlun) {
+                       pr_err("rrpc: l2p corruption on lba %llu\n",
+                                                       slba + i);
+                       return -EINVAL;
+               }
+
+               rblk = &rlun->blocks[gaddr.g.blk];
+               if (!rblk->state) {
+                       /* at this point, we don't know anything about the
+                        * block. It's up to the FTL on top to re-establish the
+                        * block state. The block is assumed to be open.
+                        */
+                       list_move_tail(&rblk->list, &rlun->used_list);
+                       rblk->state = NVM_BLK_ST_TGT;
+                       rlun->nr_free_blocks--;
+               }
+
                addr[i].addr = pba;
+               addr[i].rblk = rblk;
                raddr[mod].addr = slba + i;
        }
 
@@ -1113,12 +1166,9 @@ static int rrpc_map_init(struct rrpc *rrpc)
                r->addr = ADDR_EMPTY;
        }
 
-       if (!dev->ops->get_l2p_tbl)
-               return 0;
-
        /* Bring up the mapping table from device */
-       ret = dev->ops->get_l2p_tbl(dev->parent, rrpc->soffset, rrpc->nr_sects,
-                                       rrpc_l2p_update, rrpc);
+       ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
+                                                       rrpc_l2p_update, rrpc);
        if (ret) {
                pr_err("nvm: rrpc: could not read L2P table.\n");
                return -EINVAL;
@@ -1181,7 +1231,6 @@ static void rrpc_core_free(struct rrpc *rrpc)
 
 static void rrpc_luns_free(struct rrpc *rrpc)
 {
-       struct nvm_lun *lun;
        struct rrpc_lun *rlun;
        int i;
 
@@ -1190,16 +1239,67 @@ static void rrpc_luns_free(struct rrpc *rrpc)
 
        for (i = 0; i < rrpc->nr_luns; i++) {
                rlun = &rrpc->luns[i];
-               lun = rlun->parent;
-               if (!lun)
-                       break;
                vfree(rlun->blocks);
        }
 
        kfree(rrpc->luns);
 }
 
-static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
+static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
+{
+       struct nvm_geo *geo = &dev->geo;
+       struct rrpc_block *rblk;
+       struct ppa_addr ppa;
+       u8 *blks;
+       int nr_blks;
+       int i;
+       int ret;
+
+       if (!dev->parent->ops->get_bb_tbl)
+               return 0;
+
+       nr_blks = geo->blks_per_lun * geo->plane_mode;
+       blks = kmalloc(nr_blks, GFP_KERNEL);
+       if (!blks)
+               return -ENOMEM;
+
+       ppa.ppa = 0;
+       ppa.g.ch = rlun->bppa.g.ch;
+       ppa.g.lun = rlun->bppa.g.lun;
+
+       ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
+       if (ret) {
+               pr_err("rrpc: could not get BB table\n");
+               goto out;
+       }
+
+       nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
+       if (nr_blks < 0)
+               return nr_blks;
+
+       for (i = 0; i < nr_blks; i++) {
+               if (blks[i] == NVM_BLK_T_FREE)
+                       continue;
+
+               rblk = &rlun->blocks[i];
+               list_move_tail(&rblk->list, &rlun->bb_list);
+               rblk->state = NVM_BLK_ST_BAD;
+               rlun->nr_free_blocks--;
+       }
+
+out:
+       kfree(blks);
+       return ret;
+}
+
+static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
+{
+       rlun->bppa.ppa = 0;
+       rlun->bppa.g.ch = ppa.g.ch;
+       rlun->bppa.g.lun = ppa.g.lun;
+}
+
+static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
 {
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct nvm_geo *geo = &dev->geo;
@@ -1220,15 +1320,9 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 
        /* 1:1 mapping */
        for (i = 0; i < rrpc->nr_luns; i++) {
-               int lunid = lun_begin + i;
-               struct nvm_lun *lun;
-
-               lun = dev->mt->get_lun(dev->parent, lunid);
-               if (!lun)
-                       goto err;
-
                rlun = &rrpc->luns[i];
-               rlun->parent = lun;
+               rlun->id = i;
+               rrpc_set_lun_ppa(rlun, luns[i]);
                rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
                                                        geo->blks_per_lun);
                if (!rlun->blocks) {
@@ -1236,24 +1330,36 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
                        goto err;
                }
 
+               INIT_LIST_HEAD(&rlun->free_list);
+               INIT_LIST_HEAD(&rlun->used_list);
+               INIT_LIST_HEAD(&rlun->bb_list);
+
                for (j = 0; j < geo->blks_per_lun; j++) {
                        struct rrpc_block *rblk = &rlun->blocks[j];
-                       struct nvm_block *blk = &lun->blocks[j];
 
-                       rblk->parent = blk;
+                       rblk->id = j;
                        rblk->rlun = rlun;
+                       rblk->state = NVM_BLK_T_FREE;
                        INIT_LIST_HEAD(&rblk->prio);
+                       INIT_LIST_HEAD(&rblk->list);
                        spin_lock_init(&rblk->lock);
+
+                       list_add_tail(&rblk->list, &rlun->free_list);
                }
 
+               rlun->rrpc = rrpc;
+               rlun->nr_free_blocks = geo->blks_per_lun;
                rlun->reserved_blocks = 2; /* for GC only */
 
-               rlun->rrpc = rrpc;
                INIT_LIST_HEAD(&rlun->prio_list);
                INIT_LIST_HEAD(&rlun->wblk_list);
 
                INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
                spin_lock_init(&rlun->lock);
+
+               if (rrpc_bb_discovery(dev, rlun))
+                       goto err;
+
        }
 
        return 0;
@@ -1265,13 +1371,12 @@ err:
 static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
 {
        struct nvm_tgt_dev *dev = rrpc->dev;
-       struct nvmm_type *mt = dev->mt;
        sector_t size = rrpc->nr_sects * dev->geo.sec_size;
        int ret;
 
        size >>= 9;
 
-       ret = mt->get_area(dev->parent, begin, size);
+       ret = nvm_get_area(dev, begin, size);
        if (!ret)
                *begin >>= (ilog2(dev->geo.sec_size) - 9);
 
@@ -1281,10 +1386,9 @@ static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
 static void rrpc_area_free(struct rrpc *rrpc)
 {
        struct nvm_tgt_dev *dev = rrpc->dev;
-       struct nvmm_type *mt = dev->mt;
        sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);
 
-       mt->put_area(dev->parent, begin);
+       nvm_put_area(dev, begin);
 }
 
 static void rrpc_free(struct rrpc *rrpc)
@@ -1409,8 +1513,7 @@ err:
 
 static struct nvm_tgt_type tt_rrpc;
 
-static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
-                                               int lun_begin, int lun_end)
+static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk)
 {
        struct request_queue *bqueue = dev->q;
        struct request_queue *tqueue = tdisk->queue;
@@ -1450,14 +1553,12 @@ static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
        }
        rrpc->soffset = soffset;
 
-       ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
+       ret = rrpc_luns_init(rrpc, dev->luns);
        if (ret) {
                pr_err("nvm: rrpc: could not initialize luns\n");
                goto err;
        }
 
-       rrpc->poffset = geo->sec_per_lun * lun_begin;
-
        ret = rrpc_core_init(rrpc);
        if (ret) {
                pr_err("nvm: rrpc: could not initialize core\n");